| /* SPDX-License-Identifier: GPL-2.0+ AND bzip2-1.0.6 */ |
| /* |
| This file is part of Valgrind, a dynamic binary instrumentation |
| framework. |
| |
| Copyright (C) 2000-2017 Julian Seward. All rights reserved. |
| Copyright (C) 2021 Sean Anderson <seanga2@gmail.com> |
| */ |
| |
| /* This file is for inclusion into client (your!) code. |
| |
| You can use these macros to manipulate and query Valgrind's |
| execution inside your own programs. |
| |
   The resulting executables will still run without Valgrind, just a
   little bit more slowly than they otherwise would, but otherwise
   unchanged.  When not running on Valgrind, each client request
   consumes very few (e.g. 7) instructions, so the resulting performance
   loss is negligible unless you plan to execute client requests
   millions of times per second.  Nevertheless, if that is still a
   problem, you can disable CONFIG_VALGRIND so that client requests
   are not even compiled in. */
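/* For example, a minimal sketch of a runtime check, using the
   RUNNING_ON_VALGRIND macro defined later in this header (it evaluates
   to 1 when running under Valgrind and 0 on the real CPU):

       if (RUNNING_ON_VALGRIND)
           puts("running under Valgrind");
       else
           puts("running on the real CPU");
*/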
| |
| #ifndef __VALGRIND_H |
| #define __VALGRIND_H |
| |
| |
| /* ------------------------------------------------------------------ */ |
| /* VERSION NUMBER OF VALGRIND */ |
| /* ------------------------------------------------------------------ */ |
| |
| /* Specify Valgrind's version number, so that user code can |
| conditionally compile based on our version number. Note that these |
| were introduced at version 3.6 and so do not exist in version 3.5 |
| or earlier. The recommended way to use them to check for "version |
| X.Y or later" is (eg) |
| |
| #if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__) \ |
| && (__VALGRIND_MAJOR__ > 3 \ |
| || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6)) |
| */ |
| #define __VALGRIND_MAJOR__ 3 |
| #define __VALGRIND_MINOR__ 16 |
| |
| |
| #include <stdarg.h> |
| |
| /* Nb: this file might be included in a file compiled with -ansi. So |
| we can't use C++ style "//" comments nor the "asm" keyword (instead |
| use "__asm__"). */ |
| |
| /* Derive some tags indicating what the target platform is. Note |
| that in this file we're using the compiler's CPP symbols for |
| identifying architectures, which are different to the ones we use |
| within the rest of Valgrind. Note, __powerpc__ is active for both |
| 32 and 64-bit PPC, whereas __powerpc64__ is only active for the |
| latter (on Linux, that is). |
| |
| Misc note: how to find out what's predefined in gcc by default: |
| gcc -Wp,-dM somefile.c |
| */ |
| #undef PLAT_x86_darwin |
| #undef PLAT_amd64_darwin |
| #undef PLAT_x86_win32 |
| #undef PLAT_amd64_win64 |
| #undef PLAT_x86_linux |
| #undef PLAT_amd64_linux |
| #undef PLAT_ppc32_linux |
| #undef PLAT_ppc64be_linux |
| #undef PLAT_ppc64le_linux |
| #undef PLAT_arm_linux |
| #undef PLAT_arm64_linux |
| #undef PLAT_s390x_linux |
| #undef PLAT_mips32_linux |
| #undef PLAT_mips64_linux |
| #undef PLAT_nanomips_linux |
| #undef PLAT_x86_solaris |
| #undef PLAT_amd64_solaris |
| |
| |
| #if defined(__APPLE__) && defined(__i386__) |
| # define PLAT_x86_darwin 1 |
| #elif defined(__APPLE__) && defined(__x86_64__) |
| # define PLAT_amd64_darwin 1 |
| #elif (defined(__MINGW32__) && defined(__i386__)) \ |
| || defined(__CYGWIN32__) \ |
| || (defined(_WIN32) && defined(_M_IX86)) |
| # define PLAT_x86_win32 1 |
| #elif (defined(__MINGW32__) && defined(__x86_64__)) \ |
| || (defined(_WIN32) && defined(_M_X64)) |
| /* __MINGW32__ and _WIN32 are defined in 64 bit mode as well. */ |
| # define PLAT_amd64_win64 1 |
| #elif defined(__linux__) && defined(__i386__) |
| # define PLAT_x86_linux 1 |
| #elif defined(__linux__) && defined(__x86_64__) && !defined(__ILP32__) |
| # define PLAT_amd64_linux 1 |
| #elif defined(__linux__) && defined(__powerpc__) && !defined(__powerpc64__) |
| # define PLAT_ppc32_linux 1 |
| #elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__) && _CALL_ELF != 2 |
| /* Big Endian uses ELF version 1 */ |
| # define PLAT_ppc64be_linux 1 |
| #elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__) && _CALL_ELF == 2 |
| /* Little Endian uses ELF version 2 */ |
| # define PLAT_ppc64le_linux 1 |
| #elif defined(__linux__) && defined(__arm__) && !defined(__aarch64__) |
| # define PLAT_arm_linux 1 |
| #elif defined(__linux__) && defined(__aarch64__) && !defined(__arm__) |
| # define PLAT_arm64_linux 1 |
| #elif defined(__linux__) && defined(__s390__) && defined(__s390x__) |
| # define PLAT_s390x_linux 1 |
| #elif defined(__linux__) && defined(__mips__) && (__mips==64) |
| # define PLAT_mips64_linux 1 |
| #elif defined(__linux__) && defined(__mips__) && (__mips==32) |
| # define PLAT_mips32_linux 1 |
| #elif defined(__linux__) && defined(__nanomips__) |
| # define PLAT_nanomips_linux 1 |
| #elif defined(__sun) && defined(__i386__) |
| # define PLAT_x86_solaris 1 |
| #elif defined(__sun) && defined(__x86_64__) |
| # define PLAT_amd64_solaris 1 |
| #else |
/* If we're not compiling for a supported platform, we can't generate
   any inline asms; error out if valgrind support was requested. */
| # if IS_ENABLED(CONFIG_VALGRIND) |
| # error "Unsupported platform for valgrind" |
| # endif |
| #endif |
| |
| |
| /* ------------------------------------------------------------------ */ |
| /* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS. There is nothing */ |
| /* in here of use to end-users -- skip to the next section. */ |
| /* ------------------------------------------------------------------ */ |
| |
| /* |
 * VALGRIND_DO_CLIENT_REQUEST(): a statement that invokes a Valgrind client
 * request and stores the result in _zzq_rlval. Accepts both pointers and
 * integers as arguments.
| * |
| * VALGRIND_DO_CLIENT_REQUEST_STMT(): a statement that invokes a Valgrind |
 * client request that does not return a value.
 *
 * VALGRIND_DO_CLIENT_REQUEST_EXPR(): a C expression that invokes a Valgrind
| * client request and whose value equals the client request result. Accepts |
| * both pointers and integers as arguments. Note that such calls are not |
| * necessarily pure functions -- they may have side effects. |
| */ |
| |
| #define VALGRIND_DO_CLIENT_REQUEST(_zzq_rlval, _zzq_default, \ |
| _zzq_request, _zzq_arg1, _zzq_arg2, \ |
| _zzq_arg3, _zzq_arg4, _zzq_arg5) \ |
| do { (_zzq_rlval) = VALGRIND_DO_CLIENT_REQUEST_EXPR((_zzq_default), \ |
| (_zzq_request), (_zzq_arg1), (_zzq_arg2), \ |
| (_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); } while (0) |
| |
| #define VALGRIND_DO_CLIENT_REQUEST_STMT(_zzq_request, _zzq_arg1, \ |
| _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ |
| do { (void) VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ |
| (_zzq_request), (_zzq_arg1), (_zzq_arg2), \ |
| (_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); } while (0) |
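/* As an illustration of how tool headers build on these, Memcheck's
   memcheck.h defines its user macros on top of
   VALGRIND_DO_CLIENT_REQUEST_EXPR roughly like so (a sketch;
   VG_USERREQ__MAKE_MEM_DEFINED is a request code defined in memcheck.h,
   not in this file):

       #define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len)   \
           VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                   \
                   VG_USERREQ__MAKE_MEM_DEFINED,                \
                   (_qzz_addr), (_qzz_len), 0, 0, 0)

   The first argument (0) is the value returned when not running under
   Valgrind. */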
| |
| #if !IS_ENABLED(CONFIG_VALGRIND) |
| |
/* When CONFIG_VALGRIND is disabled, completely remove the Valgrind
   magic sequence from the compiled code (analogous to NDEBUG's
   effects on assert()). */
| #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ |
| _zzq_default, _zzq_request, \ |
| _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ |
| (_zzq_default) |
| |
| #else /* ! CONFIG_VALGRIND */ |
| |
| /* The following defines the magic code sequences which the JITter |
| spots and handles magically. Don't look too closely at them as |
| they will rot your brain. |
| |
   The assembly code sequences for all architectures are in this one
| file. This is because this file must be stand-alone, and we don't |
| want to have multiple files. |
| |
| For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default |
| value gets put in the return slot, so that everything works when |
| this is executed not under Valgrind. Args are passed in a memory |
| block, and so there's no intrinsic limit to the number that could |
| be passed, but it's currently five. |
| |
| The macro args are: |
| _zzq_rlval result lvalue |
| _zzq_default default value (result returned when running on real CPU) |
| _zzq_request request code |
| _zzq_arg1..5 request params |
| |
| The other two macros are used to support function wrapping, and are |
| a lot simpler. VALGRIND_GET_NR_CONTEXT returns the value of the |
| guest's NRADDR pseudo-register and whatever other information is |
   needed to safely call the original from the wrapper: on
| ppc64-linux, the R2 value at the divert point is also needed. This |
| information is abstracted into a user-visible type, OrigFn. |
| |
| VALGRIND_CALL_NOREDIR_* behaves the same as the following on the |
| guest, but guarantees that the branch instruction will not be |
| redirected: x86: call *%eax, amd64: call *%rax, ppc32/ppc64: |
| branch-and-link-to-r11. VALGRIND_CALL_NOREDIR is just text, not a |
| complete inline asm, since it needs to be combined with more magic |
| inline asm stuff to be useful. |
| */ |
| |
| /* ----------------- x86-{linux,darwin,solaris} ---------------- */ |
| |
| #if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \ |
| || (defined(PLAT_x86_win32) && defined(__GNUC__)) \ |
| || defined(PLAT_x86_solaris) |
| |
| typedef |
| struct { |
| unsigned int nraddr; /* where's the code? */ |
| } |
| OrigFn; |
| |
| #define __SPECIAL_INSTRUCTION_PREAMBLE \ |
| "roll $3, %%edi ; roll $13, %%edi\n\t" \ |
| "roll $29, %%edi ; roll $19, %%edi\n\t" |
| |
| #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ |
| _zzq_default, _zzq_request, \ |
| _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ |
| __extension__ \ |
| ({volatile unsigned int _zzq_args[6]; \ |
| volatile unsigned int _zzq_result; \ |
| _zzq_args[0] = (unsigned int)(_zzq_request); \ |
| _zzq_args[1] = (unsigned int)(_zzq_arg1); \ |
| _zzq_args[2] = (unsigned int)(_zzq_arg2); \ |
| _zzq_args[3] = (unsigned int)(_zzq_arg3); \ |
| _zzq_args[4] = (unsigned int)(_zzq_arg4); \ |
| _zzq_args[5] = (unsigned int)(_zzq_arg5); \ |
| __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* %EDX = client_request ( %EAX ) */ \ |
| "xchgl %%ebx,%%ebx" \ |
| : "=d" (_zzq_result) \ |
| : "a" (&_zzq_args[0]), "0" (_zzq_default) \ |
| : "cc", "memory" \ |
| ); \ |
| _zzq_result; \ |
| }) |
| |
| #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ |
| { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ |
| volatile unsigned int __addr; \ |
| __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* %EAX = guest_NRADDR */ \ |
| "xchgl %%ecx,%%ecx" \ |
| : "=a" (__addr) \ |
| : \ |
| : "cc", "memory" \ |
| ); \ |
| _zzq_orig->nraddr = __addr; \ |
| } |
| |
| #define VALGRIND_CALL_NOREDIR_EAX \ |
| __SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* call-noredir *%EAX */ \ |
| "xchgl %%edx,%%edx\n\t" |
| |
| #define VALGRIND_VEX_INJECT_IR() \ |
| do { \ |
| __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ |
| "xchgl %%edi,%%edi\n\t" \ |
| : : : "cc", "memory" \ |
| ); \ |
| } while (0) |
| |
| #endif /* PLAT_x86_linux || PLAT_x86_darwin || (PLAT_x86_win32 && __GNUC__) |
| || PLAT_x86_solaris */ |
| |
| /* ------------------------- x86-Win32 ------------------------- */ |
| |
| #if defined(PLAT_x86_win32) && !defined(__GNUC__) |
| |
| typedef |
| struct { |
| unsigned int nraddr; /* where's the code? */ |
| } |
| OrigFn; |
| |
| #if defined(_MSC_VER) |
| |
| #define __SPECIAL_INSTRUCTION_PREAMBLE \ |
| __asm rol edi, 3 __asm rol edi, 13 \ |
| __asm rol edi, 29 __asm rol edi, 19 |
| |
| #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ |
| _zzq_default, _zzq_request, \ |
| _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ |
| valgrind_do_client_request_expr((uintptr_t)(_zzq_default), \ |
| (uintptr_t)(_zzq_request), (uintptr_t)(_zzq_arg1), \ |
| (uintptr_t)(_zzq_arg2), (uintptr_t)(_zzq_arg3), \ |
| (uintptr_t)(_zzq_arg4), (uintptr_t)(_zzq_arg5)) |
| |
| static __inline uintptr_t |
| valgrind_do_client_request_expr(uintptr_t _zzq_default, uintptr_t _zzq_request, |
| uintptr_t _zzq_arg1, uintptr_t _zzq_arg2, |
| uintptr_t _zzq_arg3, uintptr_t _zzq_arg4, |
| uintptr_t _zzq_arg5) |
| { |
| volatile uintptr_t _zzq_args[6]; |
| volatile unsigned int _zzq_result; |
| _zzq_args[0] = (uintptr_t)(_zzq_request); |
| _zzq_args[1] = (uintptr_t)(_zzq_arg1); |
| _zzq_args[2] = (uintptr_t)(_zzq_arg2); |
| _zzq_args[3] = (uintptr_t)(_zzq_arg3); |
| _zzq_args[4] = (uintptr_t)(_zzq_arg4); |
| _zzq_args[5] = (uintptr_t)(_zzq_arg5); |
| __asm { __asm lea eax, _zzq_args __asm mov edx, _zzq_default |
| __SPECIAL_INSTRUCTION_PREAMBLE |
| /* %EDX = client_request ( %EAX ) */ |
| __asm xchg ebx,ebx |
| __asm mov _zzq_result, edx |
| } |
| return _zzq_result; |
| } |
| |
| #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ |
| { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ |
| volatile unsigned int __addr; \ |
| __asm { __SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* %EAX = guest_NRADDR */ \ |
| __asm xchg ecx,ecx \ |
| __asm mov __addr, eax \ |
| } \ |
| _zzq_orig->nraddr = __addr; \ |
| } |
| |
| #define VALGRIND_CALL_NOREDIR_EAX ERROR |
| |
| #define VALGRIND_VEX_INJECT_IR() \ |
| do { \ |
| __asm { __SPECIAL_INSTRUCTION_PREAMBLE \ |
| __asm xchg edi,edi \ |
| } \ |
| } while (0) |
| |
| #else |
| #error Unsupported compiler. |
| #endif |
| |
| #endif /* PLAT_x86_win32 */ |
| |
| /* ----------------- amd64-{linux,darwin,solaris} --------------- */ |
| |
| #if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \ |
| || defined(PLAT_amd64_solaris) \ |
| || (defined(PLAT_amd64_win64) && defined(__GNUC__)) |
| |
| typedef |
| struct { |
| unsigned long int nraddr; /* where's the code? */ |
| } |
| OrigFn; |
| |
| #define __SPECIAL_INSTRUCTION_PREAMBLE \ |
| "rolq $3, %%rdi ; rolq $13, %%rdi\n\t" \ |
| "rolq $61, %%rdi ; rolq $51, %%rdi\n\t" |
| |
| #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ |
| _zzq_default, _zzq_request, \ |
| _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ |
| __extension__ \ |
| ({ volatile unsigned long int _zzq_args[6]; \ |
| volatile unsigned long int _zzq_result; \ |
| _zzq_args[0] = (unsigned long int)(_zzq_request); \ |
| _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ |
| _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ |
| _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ |
| _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ |
| _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ |
| __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* %RDX = client_request ( %RAX ) */ \ |
| "xchgq %%rbx,%%rbx" \ |
| : "=d" (_zzq_result) \ |
| : "a" (&_zzq_args[0]), "0" (_zzq_default) \ |
| : "cc", "memory" \ |
| ); \ |
| _zzq_result; \ |
| }) |
| |
| #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ |
| { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ |
| volatile unsigned long int __addr; \ |
| __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* %RAX = guest_NRADDR */ \ |
| "xchgq %%rcx,%%rcx" \ |
| : "=a" (__addr) \ |
| : \ |
| : "cc", "memory" \ |
| ); \ |
| _zzq_orig->nraddr = __addr; \ |
| } |
| |
| #define VALGRIND_CALL_NOREDIR_RAX \ |
| __SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* call-noredir *%RAX */ \ |
| "xchgq %%rdx,%%rdx\n\t" |
| |
| #define VALGRIND_VEX_INJECT_IR() \ |
| do { \ |
| __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ |
| "xchgq %%rdi,%%rdi\n\t" \ |
| : : : "cc", "memory" \ |
| ); \ |
| } while (0) |
| |
#endif /* PLAT_amd64_linux || PLAT_amd64_darwin || PLAT_amd64_solaris
          || (PLAT_amd64_win64 && __GNUC__) */
| |
| /* ------------------------- amd64-Win64 ------------------------- */ |
| |
| #if defined(PLAT_amd64_win64) && !defined(__GNUC__) |
| |
| #error Unsupported compiler. |
| |
| #endif /* PLAT_amd64_win64 */ |
| |
| /* ------------------------ ppc32-linux ------------------------ */ |
| |
| #if defined(PLAT_ppc32_linux) |
| |
| typedef |
| struct { |
| unsigned int nraddr; /* where's the code? */ |
| } |
| OrigFn; |
| |
| #define __SPECIAL_INSTRUCTION_PREAMBLE \ |
| "rlwinm 0,0,3,0,31 ; rlwinm 0,0,13,0,31\n\t" \ |
| "rlwinm 0,0,29,0,31 ; rlwinm 0,0,19,0,31\n\t" |
| |
| #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ |
| _zzq_default, _zzq_request, \ |
| _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ |
| \ |
| __extension__ \ |
| ({ unsigned int _zzq_args[6]; \ |
| unsigned int _zzq_result; \ |
| unsigned int* _zzq_ptr; \ |
| _zzq_args[0] = (unsigned int)(_zzq_request); \ |
| _zzq_args[1] = (unsigned int)(_zzq_arg1); \ |
| _zzq_args[2] = (unsigned int)(_zzq_arg2); \ |
| _zzq_args[3] = (unsigned int)(_zzq_arg3); \ |
| _zzq_args[4] = (unsigned int)(_zzq_arg4); \ |
| _zzq_args[5] = (unsigned int)(_zzq_arg5); \ |
| _zzq_ptr = _zzq_args; \ |
| __asm__ volatile("mr 3,%1\n\t" /*default*/ \ |
| "mr 4,%2\n\t" /*ptr*/ \ |
| __SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* %R3 = client_request ( %R4 ) */ \ |
| "or 1,1,1\n\t" \ |
| "mr %0,3" /*result*/ \ |
| : "=b" (_zzq_result) \ |
| : "b" (_zzq_default), "b" (_zzq_ptr) \ |
| : "cc", "memory", "r3", "r4"); \ |
| _zzq_result; \ |
| }) |
| |
| #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ |
| { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ |
| unsigned int __addr; \ |
| __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* %R3 = guest_NRADDR */ \ |
| "or 2,2,2\n\t" \ |
| "mr %0,3" \ |
| : "=b" (__addr) \ |
| : \ |
| : "cc", "memory", "r3" \ |
| ); \ |
| _zzq_orig->nraddr = __addr; \ |
| } |
| |
| #define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ |
| __SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* branch-and-link-to-noredir *%R11 */ \ |
| "or 3,3,3\n\t" |
| |
| #define VALGRIND_VEX_INJECT_IR() \ |
| do { \ |
| __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ |
| "or 5,5,5\n\t" \ |
| ); \ |
| } while (0) |
| |
| #endif /* PLAT_ppc32_linux */ |
| |
| /* ------------------------ ppc64-linux ------------------------ */ |
| |
| #if defined(PLAT_ppc64be_linux) |
| |
| typedef |
| struct { |
| unsigned long int nraddr; /* where's the code? */ |
| unsigned long int r2; /* what tocptr do we need? */ |
| } |
| OrigFn; |
| |
| #define __SPECIAL_INSTRUCTION_PREAMBLE \ |
| "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \ |
| "rotldi 0,0,61 ; rotldi 0,0,51\n\t" |
| |
| #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ |
| _zzq_default, _zzq_request, \ |
| _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ |
| \ |
| __extension__ \ |
| ({ unsigned long int _zzq_args[6]; \ |
| unsigned long int _zzq_result; \ |
| unsigned long int* _zzq_ptr; \ |
| _zzq_args[0] = (unsigned long int)(_zzq_request); \ |
| _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ |
| _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ |
| _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ |
| _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ |
| _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ |
| _zzq_ptr = _zzq_args; \ |
| __asm__ volatile("mr 3,%1\n\t" /*default*/ \ |
| "mr 4,%2\n\t" /*ptr*/ \ |
| __SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* %R3 = client_request ( %R4 ) */ \ |
| "or 1,1,1\n\t" \ |
| "mr %0,3" /*result*/ \ |
| : "=b" (_zzq_result) \ |
| : "b" (_zzq_default), "b" (_zzq_ptr) \ |
| : "cc", "memory", "r3", "r4"); \ |
| _zzq_result; \ |
| }) |
| |
| #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ |
| { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ |
| unsigned long int __addr; \ |
| __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* %R3 = guest_NRADDR */ \ |
| "or 2,2,2\n\t" \ |
| "mr %0,3" \ |
| : "=b" (__addr) \ |
| : \ |
| : "cc", "memory", "r3" \ |
| ); \ |
| _zzq_orig->nraddr = __addr; \ |
| __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* %R3 = guest_NRADDR_GPR2 */ \ |
| "or 4,4,4\n\t" \ |
| "mr %0,3" \ |
| : "=b" (__addr) \ |
| : \ |
| : "cc", "memory", "r3" \ |
| ); \ |
| _zzq_orig->r2 = __addr; \ |
| } |
| |
| #define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ |
| __SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* branch-and-link-to-noredir *%R11 */ \ |
| "or 3,3,3\n\t" |
| |
| #define VALGRIND_VEX_INJECT_IR() \ |
| do { \ |
| __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ |
| "or 5,5,5\n\t" \ |
| ); \ |
| } while (0) |
| |
| #endif /* PLAT_ppc64be_linux */ |
| |
| #if defined(PLAT_ppc64le_linux) |
| |
| typedef |
| struct { |
| unsigned long int nraddr; /* where's the code? */ |
| unsigned long int r2; /* what tocptr do we need? */ |
| } |
| OrigFn; |
| |
| #define __SPECIAL_INSTRUCTION_PREAMBLE \ |
| "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \ |
| "rotldi 0,0,61 ; rotldi 0,0,51\n\t" |
| |
| #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ |
| _zzq_default, _zzq_request, \ |
| _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ |
| \ |
| __extension__ \ |
| ({ unsigned long int _zzq_args[6]; \ |
| unsigned long int _zzq_result; \ |
| unsigned long int* _zzq_ptr; \ |
| _zzq_args[0] = (unsigned long int)(_zzq_request); \ |
| _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ |
| _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ |
| _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ |
| _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ |
| _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ |
| _zzq_ptr = _zzq_args; \ |
| __asm__ volatile("mr 3,%1\n\t" /*default*/ \ |
| "mr 4,%2\n\t" /*ptr*/ \ |
| __SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* %R3 = client_request ( %R4 ) */ \ |
| "or 1,1,1\n\t" \ |
| "mr %0,3" /*result*/ \ |
| : "=b" (_zzq_result) \ |
| : "b" (_zzq_default), "b" (_zzq_ptr) \ |
| : "cc", "memory", "r3", "r4"); \ |
| _zzq_result; \ |
| }) |
| |
| #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ |
| { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ |
| unsigned long int __addr; \ |
| __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* %R3 = guest_NRADDR */ \ |
| "or 2,2,2\n\t" \ |
| "mr %0,3" \ |
| : "=b" (__addr) \ |
| : \ |
| : "cc", "memory", "r3" \ |
| ); \ |
| _zzq_orig->nraddr = __addr; \ |
| __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* %R3 = guest_NRADDR_GPR2 */ \ |
| "or 4,4,4\n\t" \ |
| "mr %0,3" \ |
| : "=b" (__addr) \ |
| : \ |
| : "cc", "memory", "r3" \ |
| ); \ |
| _zzq_orig->r2 = __addr; \ |
| } |
| |
| #define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ |
| __SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* branch-and-link-to-noredir *%R12 */ \ |
| "or 3,3,3\n\t" |
| |
| #define VALGRIND_VEX_INJECT_IR() \ |
| do { \ |
| __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ |
| "or 5,5,5\n\t" \ |
| ); \ |
| } while (0) |
| |
| #endif /* PLAT_ppc64le_linux */ |
| |
| /* ------------------------- arm-linux ------------------------- */ |
| |
| #if defined(PLAT_arm_linux) |
| |
| typedef |
| struct { |
| unsigned int nraddr; /* where's the code? */ |
| } |
| OrigFn; |
| |
| #define __SPECIAL_INSTRUCTION_PREAMBLE \ |
| "mov r12, r12, ror #3 ; mov r12, r12, ror #13 \n\t" \ |
| "mov r12, r12, ror #29 ; mov r12, r12, ror #19 \n\t" |
| |
| #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ |
| _zzq_default, _zzq_request, \ |
| _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ |
| \ |
| __extension__ \ |
| ({volatile unsigned int _zzq_args[6]; \ |
| volatile unsigned int _zzq_result; \ |
| _zzq_args[0] = (unsigned int)(_zzq_request); \ |
| _zzq_args[1] = (unsigned int)(_zzq_arg1); \ |
| _zzq_args[2] = (unsigned int)(_zzq_arg2); \ |
| _zzq_args[3] = (unsigned int)(_zzq_arg3); \ |
| _zzq_args[4] = (unsigned int)(_zzq_arg4); \ |
| _zzq_args[5] = (unsigned int)(_zzq_arg5); \ |
| __asm__ volatile("mov r3, %1\n\t" /*default*/ \ |
| "mov r4, %2\n\t" /*ptr*/ \ |
| __SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* R3 = client_request ( R4 ) */ \ |
| "orr r10, r10, r10\n\t" \ |
| "mov %0, r3" /*result*/ \ |
| : "=r" (_zzq_result) \ |
| : "r" (_zzq_default), "r" (&_zzq_args[0]) \ |
| : "cc","memory", "r3", "r4"); \ |
| _zzq_result; \ |
| }) |
| |
| #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ |
| { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ |
| unsigned int __addr; \ |
| __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* R3 = guest_NRADDR */ \ |
| "orr r11, r11, r11\n\t" \ |
| "mov %0, r3" \ |
| : "=r" (__addr) \ |
| : \ |
| : "cc", "memory", "r3" \ |
| ); \ |
| _zzq_orig->nraddr = __addr; \ |
| } |
| |
| #define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ |
| __SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* branch-and-link-to-noredir *%R4 */ \ |
| "orr r12, r12, r12\n\t" |
| |
| #define VALGRIND_VEX_INJECT_IR() \ |
| do { \ |
| __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ |
| "orr r9, r9, r9\n\t" \ |
| : : : "cc", "memory" \ |
| ); \ |
| } while (0) |
| |
| #endif /* PLAT_arm_linux */ |
| |
| /* ------------------------ arm64-linux ------------------------- */ |
| |
| #if defined(PLAT_arm64_linux) |
| |
| typedef |
| struct { |
| unsigned long int nraddr; /* where's the code? */ |
| } |
| OrigFn; |
| |
| #define __SPECIAL_INSTRUCTION_PREAMBLE \ |
| "ror x12, x12, #3 ; ror x12, x12, #13 \n\t" \ |
| "ror x12, x12, #51 ; ror x12, x12, #61 \n\t" |
| |
| #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ |
| _zzq_default, _zzq_request, \ |
| _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ |
| \ |
| __extension__ \ |
| ({volatile unsigned long int _zzq_args[6]; \ |
| volatile unsigned long int _zzq_result; \ |
| _zzq_args[0] = (unsigned long int)(_zzq_request); \ |
| _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ |
| _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ |
| _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ |
| _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ |
| _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ |
| __asm__ volatile("mov x3, %1\n\t" /*default*/ \ |
| "mov x4, %2\n\t" /*ptr*/ \ |
| __SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* X3 = client_request ( X4 ) */ \ |
| "orr x10, x10, x10\n\t" \ |
| "mov %0, x3" /*result*/ \ |
| : "=r" (_zzq_result) \ |
| : "r" ((unsigned long int)(_zzq_default)), \ |
| "r" (&_zzq_args[0]) \ |
| : "cc","memory", "x3", "x4"); \ |
| _zzq_result; \ |
| }) |
| |
| #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ |
| { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ |
| unsigned long int __addr; \ |
| __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* X3 = guest_NRADDR */ \ |
| "orr x11, x11, x11\n\t" \ |
| "mov %0, x3" \ |
| : "=r" (__addr) \ |
| : \ |
| : "cc", "memory", "x3" \ |
| ); \ |
| _zzq_orig->nraddr = __addr; \ |
| } |
| |
| #define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ |
| __SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* branch-and-link-to-noredir X8 */ \ |
| "orr x12, x12, x12\n\t" |
| |
| #define VALGRIND_VEX_INJECT_IR() \ |
| do { \ |
| __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ |
| "orr x9, x9, x9\n\t" \ |
| : : : "cc", "memory" \ |
| ); \ |
| } while (0) |
| |
| #endif /* PLAT_arm64_linux */ |
| |
| /* ------------------------ s390x-linux ------------------------ */ |
| |
| #if defined(PLAT_s390x_linux) |
| |
| typedef |
| struct { |
| unsigned long int nraddr; /* where's the code? */ |
| } |
| OrigFn; |
| |
/* __SPECIAL_INSTRUCTION_PREAMBLE will be used to identify Valgrind-specific
 * code. This detection is implemented in the platform-specific toIR.c
 * (e.g. VEX/priv/guest_s390_toIR.c).
 */
| #define __SPECIAL_INSTRUCTION_PREAMBLE \ |
| "lr 15,15\n\t" \ |
| "lr 1,1\n\t" \ |
| "lr 2,2\n\t" \ |
| "lr 3,3\n\t" |
| |
| #define __CLIENT_REQUEST_CODE "lr 2,2\n\t" |
| #define __GET_NR_CONTEXT_CODE "lr 3,3\n\t" |
| #define __CALL_NO_REDIR_CODE "lr 4,4\n\t" |
| #define __VEX_INJECT_IR_CODE "lr 5,5\n\t" |
| |
| #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ |
| _zzq_default, _zzq_request, \ |
| _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ |
| __extension__ \ |
| ({volatile unsigned long int _zzq_args[6]; \ |
| volatile unsigned long int _zzq_result; \ |
| _zzq_args[0] = (unsigned long int)(_zzq_request); \ |
| _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ |
| _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ |
| _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ |
| _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ |
| _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ |
| __asm__ volatile(/* r2 = args */ \ |
| "lgr 2,%1\n\t" \ |
| /* r3 = default */ \ |
| "lgr 3,%2\n\t" \ |
| __SPECIAL_INSTRUCTION_PREAMBLE \ |
| __CLIENT_REQUEST_CODE \ |
| /* results = r3 */ \ |
| "lgr %0, 3\n\t" \ |
| : "=d" (_zzq_result) \ |
| : "a" (&_zzq_args[0]), "0" (_zzq_default) \ |
| : "cc", "2", "3", "memory" \ |
| ); \ |
| _zzq_result; \ |
| }) |
| |
| #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ |
| { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ |
| volatile unsigned long int __addr; \ |
| __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ |
| __GET_NR_CONTEXT_CODE \ |
| "lgr %0, 3\n\t" \ |
| : "=a" (__addr) \ |
| : \ |
| : "cc", "3", "memory" \ |
| ); \ |
| _zzq_orig->nraddr = __addr; \ |
| } |
| |
| #define VALGRIND_CALL_NOREDIR_R1 \ |
| __SPECIAL_INSTRUCTION_PREAMBLE \ |
| __CALL_NO_REDIR_CODE |
| |
| #define VALGRIND_VEX_INJECT_IR() \ |
| do { \ |
| __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ |
| __VEX_INJECT_IR_CODE); \ |
| } while (0) |
| |
| #endif /* PLAT_s390x_linux */ |
| |
| /* ------------------------- mips32-linux ---------------- */ |
| |
| #if defined(PLAT_mips32_linux) |
| |
| typedef |
| struct { |
| unsigned int nraddr; /* where's the code? */ |
| } |
| OrigFn; |
| |
| /* .word 0x342 |
| * .word 0x742 |
| * .word 0xC2 |
| * .word 0x4C2*/ |
| #define __SPECIAL_INSTRUCTION_PREAMBLE \ |
| "srl $0, $0, 13\n\t" \ |
| "srl $0, $0, 29\n\t" \ |
| "srl $0, $0, 3\n\t" \ |
| "srl $0, $0, 19\n\t" |
| |
| #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ |
| _zzq_default, _zzq_request, \ |
| _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ |
| __extension__ \ |
| ({ volatile unsigned int _zzq_args[6]; \ |
| volatile unsigned int _zzq_result; \ |
| _zzq_args[0] = (unsigned int)(_zzq_request); \ |
| _zzq_args[1] = (unsigned int)(_zzq_arg1); \ |
| _zzq_args[2] = (unsigned int)(_zzq_arg2); \ |
| _zzq_args[3] = (unsigned int)(_zzq_arg3); \ |
| _zzq_args[4] = (unsigned int)(_zzq_arg4); \ |
| _zzq_args[5] = (unsigned int)(_zzq_arg5); \ |
| __asm__ volatile("move $11, %1\n\t" /*default*/ \ |
| "move $12, %2\n\t" /*ptr*/ \ |
| __SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* T3 = client_request ( T4 ) */ \ |
| "or $13, $13, $13\n\t" \ |
| "move %0, $11\n\t" /*result*/ \ |
| : "=r" (_zzq_result) \ |
| : "r" (_zzq_default), "r" (&_zzq_args[0]) \ |
| : "$11", "$12", "memory"); \ |
| _zzq_result; \ |
| }) |
| |
| #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ |
| { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ |
| volatile unsigned int __addr; \ |
| __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ |
                     /* $11 = guest_NRADDR */                     \
| "or $14, $14, $14\n\t" \ |
| "move %0, $11" /*result*/ \ |
| : "=r" (__addr) \ |
| : \ |
| : "$11" \ |
| ); \ |
| _zzq_orig->nraddr = __addr; \ |
| } |
| |
| #define VALGRIND_CALL_NOREDIR_T9 \ |
| __SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* call-noredir *%t9 */ \ |
| "or $15, $15, $15\n\t" |
| |
| #define VALGRIND_VEX_INJECT_IR() \ |
| do { \ |
| __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ |
| "or $11, $11, $11\n\t" \ |
| ); \ |
| } while (0) |
| |
| |
| #endif /* PLAT_mips32_linux */ |
| |
| /* ------------------------- mips64-linux ---------------- */ |
| |
| #if defined(PLAT_mips64_linux) |
| |
| typedef |
| struct { |
| unsigned long nraddr; /* where's the code? */ |
| } |
| OrigFn; |
| |
| /* dsll $0,$0, 3 |
| * dsll $0,$0, 13 |
| * dsll $0,$0, 29 |
| * dsll $0,$0, 19*/ |
| #define __SPECIAL_INSTRUCTION_PREAMBLE \ |
| "dsll $0,$0, 3 ; dsll $0,$0,13\n\t" \ |
| "dsll $0,$0,29 ; dsll $0,$0,19\n\t" |
| |
| #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ |
| _zzq_default, _zzq_request, \ |
| _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ |
| __extension__ \ |
| ({ volatile unsigned long int _zzq_args[6]; \ |
| volatile unsigned long int _zzq_result; \ |
| _zzq_args[0] = (unsigned long int)(_zzq_request); \ |
| _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ |
| _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ |
| _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ |
| _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ |
| _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ |
| __asm__ volatile("move $11, %1\n\t" /*default*/ \ |
| "move $12, %2\n\t" /*ptr*/ \ |
| __SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* $11 = client_request ( $12 ) */ \ |
| "or $13, $13, $13\n\t" \ |
| "move %0, $11\n\t" /*result*/ \ |
| : "=r" (_zzq_result) \ |
| : "r" (_zzq_default), "r" (&_zzq_args[0]) \ |
| : "$11", "$12", "memory"); \ |
| _zzq_result; \ |
| }) |
| |
| #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ |
| { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ |
| volatile unsigned long int __addr; \ |
| __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* $11 = guest_NRADDR */ \ |
| "or $14, $14, $14\n\t" \ |
| "move %0, $11" /*result*/ \ |
| : "=r" (__addr) \ |
| : \ |
| : "$11"); \ |
| _zzq_orig->nraddr = __addr; \ |
| } |
| |
| #define VALGRIND_CALL_NOREDIR_T9 \ |
| __SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* call-noredir $25 */ \ |
| "or $15, $15, $15\n\t" |
| |
| #define VALGRIND_VEX_INJECT_IR() \ |
| do { \ |
| __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ |
| "or $11, $11, $11\n\t" \ |
| ); \ |
| } while (0) |
| |
| #endif /* PLAT_mips64_linux */ |
| |
/* ------------------------- nanomips-linux -------------------- */

#if defined(PLAT_nanomips_linux)
| |
| typedef |
| struct { |
| unsigned int nraddr; /* where's the code? */ |
| } |
| OrigFn; |
| /* |
| 8000 c04d srl zero, zero, 13 |
| 8000 c05d srl zero, zero, 29 |
| 8000 c043 srl zero, zero, 3 |
| 8000 c053 srl zero, zero, 19 |
| */ |
| |
| #define __SPECIAL_INSTRUCTION_PREAMBLE "srl[32] $zero, $zero, 13 \n\t" \ |
| "srl[32] $zero, $zero, 29 \n\t" \ |
| "srl[32] $zero, $zero, 3 \n\t" \ |
| "srl[32] $zero, $zero, 19 \n\t" |
| |
| #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ |
| _zzq_default, _zzq_request, \ |
| _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ |
| __extension__ \ |
| ({ volatile unsigned int _zzq_args[6]; \ |
| volatile unsigned int _zzq_result; \ |
| _zzq_args[0] = (unsigned int)(_zzq_request); \ |
| _zzq_args[1] = (unsigned int)(_zzq_arg1); \ |
| _zzq_args[2] = (unsigned int)(_zzq_arg2); \ |
| _zzq_args[3] = (unsigned int)(_zzq_arg3); \ |
| _zzq_args[4] = (unsigned int)(_zzq_arg4); \ |
| _zzq_args[5] = (unsigned int)(_zzq_arg5); \ |
| __asm__ volatile("move $a7, %1\n\t" /* default */ \ |
| "move $t0, %2\n\t" /* ptr */ \ |
| __SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* $a7 = client_request( $t0 ) */ \ |
| "or[32] $t0, $t0, $t0\n\t" \ |
| "move %0, $a7\n\t" /* result */ \ |
| : "=r" (_zzq_result) \ |
| : "r" (_zzq_default), "r" (&_zzq_args[0]) \ |
| : "$a7", "$t0", "memory"); \ |
| _zzq_result; \ |
| }) |
| |
| #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ |
| { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ |
| volatile unsigned long int __addr; \ |
| __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* $a7 = guest_NRADDR */ \ |
| "or[32] $t1, $t1, $t1\n\t" \ |
| "move %0, $a7" /*result*/ \ |
| : "=r" (__addr) \ |
| : \ |
| : "$a7"); \ |
| _zzq_orig->nraddr = __addr; \ |
| } |
| |
| #define VALGRIND_CALL_NOREDIR_T9 \ |
| __SPECIAL_INSTRUCTION_PREAMBLE \ |
| /* call-noredir $25 */ \ |
| "or[32] $t2, $t2, $t2\n\t" |
| |
| #define VALGRIND_VEX_INJECT_IR() \ |
| do { \ |
| __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ |
| "or[32] $t3, $t3, $t3\n\t" \ |
| ); \ |
| } while (0) |
| |
#endif /* PLAT_nanomips_linux */
| /* Insert assembly code for other platforms here... */ |
| |
| #endif /* CONFIG_VALGRIND */ |
| |
| |
| /* ------------------------------------------------------------------ */ |
| /* PLATFORM SPECIFICS for FUNCTION WRAPPING. This is all very */ |
| /* ugly. It's the least-worst tradeoff I can think of. */ |
| /* ------------------------------------------------------------------ */ |
| |
/* This section defines magic (a.k.a. appalling-hack) macros for doing
   guaranteed-no-redirection function calls, so as to get from function
   wrappers to the functions they are wrapping.  The whole point is to
| construct standard call sequences, but to do the call itself with a |
| special no-redirect call pseudo-instruction that the JIT |
| understands and handles specially. This section is long and |
| repetitious, and I can't see a way to make it shorter. |
| |
| The naming scheme is as follows: |
| |
| CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc} |
| |
| 'W' stands for "word" and 'v' for "void". Hence there are |
| different macros for calling arity 0, 1, 2, 3, 4, etc, functions, |
| and for each, the possibility of returning a word-typed result, or |
| no result. |
| */ |
| |
| /* Use these to write the name of your wrapper. NOTE: duplicates |
| VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. NOTE also: inserts |
   the default behaviour equivalence class tag "0000" into the name.
| See pub_tool_redir.h for details -- normally you don't need to |
| think about this, though. */ |
| |
| /* Use an extra level of macroisation so as to ensure the soname/fnname |
| args are fully macro-expanded before pasting them together. */ |
| #define VG_CONCAT4(_aa,_bb,_cc,_dd) _aa##_bb##_cc##_dd |
| |
| #define I_WRAP_SONAME_FNNAME_ZU(soname,fnname) \ |
| VG_CONCAT4(_vgw00000ZU_,soname,_,fnname) |
| |
| #define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname) \ |
| VG_CONCAT4(_vgw00000ZZ_,soname,_,fnname) |
| |
| /* Use this macro from within a wrapper function to collect the |
| context (address and possibly other info) of the original function. |
| Once you have that you can then use it in one of the CALL_FN_ |
| macros. The type of the argument _lval is OrigFn. */ |
| #define VALGRIND_GET_ORIG_FN(_lval) VALGRIND_GET_NR_CONTEXT(_lval) |
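/* For example, a sketch of a wrapper for a function "int foo(int, int)"
   in the main executable (soname tag NONE), following the
   function-wrapping example in the Valgrind manual:

       int I_WRAP_SONAME_FNNAME_ZU(NONE, foo)(int x, int y)
       {
          int    result;
          OrigFn fn;
          VALGRIND_GET_ORIG_FN(fn);
          CALL_FN_W_WW(result, fn, x, y);
          return result;
       }
*/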
| |
| /* Also provide end-user facilities for function replacement, rather |
| than wrapping. A replacement function differs from a wrapper in |
| that it has no way to get hold of the original function being |
| called, and hence no way to call onwards to it. In a replacement |
| function, VALGRIND_GET_ORIG_FN always returns zero. */ |
| |
| #define I_REPLACE_SONAME_FNNAME_ZU(soname,fnname) \ |
| VG_CONCAT4(_vgr00000ZU_,soname,_,fnname) |
| |
| #define I_REPLACE_SONAME_FNNAME_ZZ(soname,fnname) \ |
| VG_CONCAT4(_vgr00000ZZ_,soname,_,fnname) |
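/* A replacement function is written the same way, but with
   I_REPLACE_SONAME_FNNAME_{ZU,ZZ} and without calling onwards.  A
   sketch, reusing the hypothetical foo from above:

       int I_REPLACE_SONAME_FNNAME_ZU(NONE, foo)(int x, int y)
       {
          return x + y;
       }

   Here the original foo is never reached. */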
| |
| /* Derivatives of the main macros below, for calling functions |
| returning void. */ |
| |
| #define CALL_FN_v_v(fnptr) \ |
| do { volatile unsigned long _junk; \ |
| CALL_FN_W_v(_junk,fnptr); } while (0) |
| |
| #define CALL_FN_v_W(fnptr, arg1) \ |
| do { volatile unsigned long _junk; \ |
| CALL_FN_W_W(_junk,fnptr,arg1); } while (0) |
| |
| #define CALL_FN_v_WW(fnptr, arg1,arg2) \ |
| do { volatile unsigned long _junk; \ |
| CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0) |
| |
| #define CALL_FN_v_WWW(fnptr, arg1,arg2,arg3) \ |
| do { volatile unsigned long _junk; \ |
| CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0) |
| |
| #define CALL_FN_v_WWWW(fnptr, arg1,arg2,arg3,arg4) \ |
| do { volatile unsigned long _junk; \ |
| CALL_FN_W_WWWW(_junk,fnptr,arg1,arg2,arg3,arg4); } while (0) |
| |
| #define CALL_FN_v_5W(fnptr, arg1,arg2,arg3,arg4,arg5) \ |
| do { volatile unsigned long _junk; \ |
| CALL_FN_W_5W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5); } while (0) |
| |
| #define CALL_FN_v_6W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6) \ |
| do { volatile unsigned long _junk; \ |
| CALL_FN_W_6W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6); } while (0) |
| |
| #define CALL_FN_v_7W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6,arg7) \ |
| do { volatile unsigned long _junk; \ |
| CALL_FN_W_7W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6,arg7); } while (0) |
| |
| /* ----------------- x86-{linux,darwin,solaris} ---------------- */ |
| |
| #if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \ |
| || defined(PLAT_x86_solaris) |
| |
/* These regs are trashed by the hidden call.  No need to mention eax
   as gcc can already see that; mentioning it also causes gcc to bomb. */
| #define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx" |
| |
| /* Macros to save and align the stack before making a function |
| call and restore it afterwards as gcc may not keep the stack |
| pointer aligned if it doesn't realise calls are being made |
| to other functions. */ |
| |
| #define VALGRIND_ALIGN_STACK \ |
| "movl %%esp,%%edi\n\t" \ |
| "andl $0xfffffff0,%%esp\n\t" |
| #define VALGRIND_RESTORE_STACK \ |
| "movl %%edi,%%esp\n\t" |
| |
| /* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned |
| long) == 4. */ |
| |
| #define CALL_FN_W_v(lval, orig) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[1]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "movl (%%eax), %%eax\n\t" /* target->%eax */ \ |
| VALGRIND_CALL_NOREDIR_EAX \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=a" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_W(lval, orig, arg1) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[2]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "subl $12, %%esp\n\t" \ |
| "pushl 4(%%eax)\n\t" \ |
| "movl (%%eax), %%eax\n\t" /* target->%eax */ \ |
| VALGRIND_CALL_NOREDIR_EAX \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=a" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WW(lval, orig, arg1,arg2) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "subl $8, %%esp\n\t" \ |
| "pushl 8(%%eax)\n\t" \ |
| "pushl 4(%%eax)\n\t" \ |
| "movl (%%eax), %%eax\n\t" /* target->%eax */ \ |
| VALGRIND_CALL_NOREDIR_EAX \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=a" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[4]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "subl $4, %%esp\n\t" \ |
| "pushl 12(%%eax)\n\t" \ |
| "pushl 8(%%eax)\n\t" \ |
| "pushl 4(%%eax)\n\t" \ |
| "movl (%%eax), %%eax\n\t" /* target->%eax */ \ |
| VALGRIND_CALL_NOREDIR_EAX \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=a" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[5]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "pushl 16(%%eax)\n\t" \ |
| "pushl 12(%%eax)\n\t" \ |
| "pushl 8(%%eax)\n\t" \ |
| "pushl 4(%%eax)\n\t" \ |
| "movl (%%eax), %%eax\n\t" /* target->%eax */ \ |
| VALGRIND_CALL_NOREDIR_EAX \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=a" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[6]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "subl $12, %%esp\n\t" \ |
| "pushl 20(%%eax)\n\t" \ |
| "pushl 16(%%eax)\n\t" \ |
| "pushl 12(%%eax)\n\t" \ |
| "pushl 8(%%eax)\n\t" \ |
| "pushl 4(%%eax)\n\t" \ |
| "movl (%%eax), %%eax\n\t" /* target->%eax */ \ |
| VALGRIND_CALL_NOREDIR_EAX \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=a" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[7]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "subl $8, %%esp\n\t" \ |
| "pushl 24(%%eax)\n\t" \ |
| "pushl 20(%%eax)\n\t" \ |
| "pushl 16(%%eax)\n\t" \ |
| "pushl 12(%%eax)\n\t" \ |
| "pushl 8(%%eax)\n\t" \ |
| "pushl 4(%%eax)\n\t" \ |
| "movl (%%eax), %%eax\n\t" /* target->%eax */ \ |
| VALGRIND_CALL_NOREDIR_EAX \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=a" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[8]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "subl $4, %%esp\n\t" \ |
| "pushl 28(%%eax)\n\t" \ |
| "pushl 24(%%eax)\n\t" \ |
| "pushl 20(%%eax)\n\t" \ |
| "pushl 16(%%eax)\n\t" \ |
| "pushl 12(%%eax)\n\t" \ |
| "pushl 8(%%eax)\n\t" \ |
| "pushl 4(%%eax)\n\t" \ |
| "movl (%%eax), %%eax\n\t" /* target->%eax */ \ |
| VALGRIND_CALL_NOREDIR_EAX \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=a" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[9]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "pushl 32(%%eax)\n\t" \ |
| "pushl 28(%%eax)\n\t" \ |
| "pushl 24(%%eax)\n\t" \ |
| "pushl 20(%%eax)\n\t" \ |
| "pushl 16(%%eax)\n\t" \ |
| "pushl 12(%%eax)\n\t" \ |
| "pushl 8(%%eax)\n\t" \ |
| "pushl 4(%%eax)\n\t" \ |
| "movl (%%eax), %%eax\n\t" /* target->%eax */ \ |
| VALGRIND_CALL_NOREDIR_EAX \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=a" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[10]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| _argvec[9] = (unsigned long)(arg9); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "subl $12, %%esp\n\t" \ |
| "pushl 36(%%eax)\n\t" \ |
| "pushl 32(%%eax)\n\t" \ |
| "pushl 28(%%eax)\n\t" \ |
| "pushl 24(%%eax)\n\t" \ |
| "pushl 20(%%eax)\n\t" \ |
| "pushl 16(%%eax)\n\t" \ |
| "pushl 12(%%eax)\n\t" \ |
| "pushl 8(%%eax)\n\t" \ |
| "pushl 4(%%eax)\n\t" \ |
| "movl (%%eax), %%eax\n\t" /* target->%eax */ \ |
| VALGRIND_CALL_NOREDIR_EAX \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=a" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9,arg10) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[11]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| _argvec[9] = (unsigned long)(arg9); \ |
| _argvec[10] = (unsigned long)(arg10); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "subl $8, %%esp\n\t" \ |
| "pushl 40(%%eax)\n\t" \ |
| "pushl 36(%%eax)\n\t" \ |
| "pushl 32(%%eax)\n\t" \ |
| "pushl 28(%%eax)\n\t" \ |
| "pushl 24(%%eax)\n\t" \ |
| "pushl 20(%%eax)\n\t" \ |
| "pushl 16(%%eax)\n\t" \ |
| "pushl 12(%%eax)\n\t" \ |
| "pushl 8(%%eax)\n\t" \ |
| "pushl 4(%%eax)\n\t" \ |
| "movl (%%eax), %%eax\n\t" /* target->%eax */ \ |
| VALGRIND_CALL_NOREDIR_EAX \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=a" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ |
| arg6,arg7,arg8,arg9,arg10, \ |
| arg11) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[12]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| _argvec[9] = (unsigned long)(arg9); \ |
| _argvec[10] = (unsigned long)(arg10); \ |
| _argvec[11] = (unsigned long)(arg11); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "subl $4, %%esp\n\t" \ |
| "pushl 44(%%eax)\n\t" \ |
| "pushl 40(%%eax)\n\t" \ |
| "pushl 36(%%eax)\n\t" \ |
| "pushl 32(%%eax)\n\t" \ |
| "pushl 28(%%eax)\n\t" \ |
| "pushl 24(%%eax)\n\t" \ |
| "pushl 20(%%eax)\n\t" \ |
| "pushl 16(%%eax)\n\t" \ |
| "pushl 12(%%eax)\n\t" \ |
| "pushl 8(%%eax)\n\t" \ |
| "pushl 4(%%eax)\n\t" \ |
| "movl (%%eax), %%eax\n\t" /* target->%eax */ \ |
| VALGRIND_CALL_NOREDIR_EAX \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=a" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ |
| arg6,arg7,arg8,arg9,arg10, \ |
| arg11,arg12) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[13]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| _argvec[9] = (unsigned long)(arg9); \ |
| _argvec[10] = (unsigned long)(arg10); \ |
| _argvec[11] = (unsigned long)(arg11); \ |
| _argvec[12] = (unsigned long)(arg12); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "pushl 48(%%eax)\n\t" \ |
| "pushl 44(%%eax)\n\t" \ |
| "pushl 40(%%eax)\n\t" \ |
| "pushl 36(%%eax)\n\t" \ |
| "pushl 32(%%eax)\n\t" \ |
| "pushl 28(%%eax)\n\t" \ |
| "pushl 24(%%eax)\n\t" \ |
| "pushl 20(%%eax)\n\t" \ |
| "pushl 16(%%eax)\n\t" \ |
| "pushl 12(%%eax)\n\t" \ |
| "pushl 8(%%eax)\n\t" \ |
| "pushl 4(%%eax)\n\t" \ |
| "movl (%%eax), %%eax\n\t" /* target->%eax */ \ |
| VALGRIND_CALL_NOREDIR_EAX \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=a" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #endif /* PLAT_x86_linux || PLAT_x86_darwin || PLAT_x86_solaris */ |
| |
| /* ---------------- amd64-{linux,darwin,solaris} --------------- */ |
| |
| #if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \ |
| || defined(PLAT_amd64_solaris) |
| |
| /* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */ |
| |
| /* These regs are trashed by the hidden call. */ |
| #define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi", \ |
| "rdi", "r8", "r9", "r10", "r11" |
| |
| /* This is all pretty complex. It's so as to make stack unwinding |
| work reliably. See bug 243270. The basic problem is the sub and |
   add of 128 to %rsp in all of the following macros.  If gcc believes
| the CFA is in %rsp, then unwinding may fail, because what's at the |
| CFA is not what gcc "expected" when it constructs the CFIs for the |
| places where the macros are instantiated. |
| |
| But we can't just add a CFI annotation to increase the CFA offset |
| by 128, to match the sub of 128 from %rsp, because we don't know |
| whether gcc has chosen %rsp as the CFA at that point, or whether it |
| has chosen some other register (eg, %rbp). In the latter case, |
| adding a CFI annotation to change the CFA offset is simply wrong. |
| |
| So the solution is to get hold of the CFA using |
| __builtin_dwarf_cfa(), put it in a known register, and add a |
| CFI annotation to say what the register is. We choose %rbp for |
| this (perhaps perversely), because: |
| |
| (1) %rbp is already subject to unwinding. If a new register was |
| chosen then the unwinder would have to unwind it in all stack |
| traces, which is expensive, and |
| |
| (2) %rbp is already subject to precise exception updates in the |
| JIT. If a new register was chosen, we'd have to have precise |
| exceptions for it too, which reduces performance of the |
| generated code. |
| |
| However .. one extra complication. We can't just whack the result |
| of __builtin_dwarf_cfa() into %rbp and then add %rbp to the |
| list of trashed registers at the end of the inline assembly |
| fragments; gcc won't allow %rbp to appear in that list. Hence |
| instead we need to stash %rbp in %r15 for the duration of the asm, |
| and say that %r15 is trashed instead. gcc seems happy to go with |
| that. |
| |
| Oh .. and this all needs to be conditionalised so that it is |
| unchanged from before this commit, when compiled with older gccs |
| that don't support __builtin_dwarf_cfa. Furthermore, since |
| this header file is freestanding, it has to be independent of |
| config.h, and so the following conditionalisation cannot depend on |
| configure time checks. |
| |
| Although it's not clear from |
| 'defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)', |
| this expression excludes Darwin. |
| .cfi directives in Darwin assembly appear to be completely |
| different and I haven't investigated how they work. |
| |
| For even more entertainment value, note we have to use the |
| completely undocumented __builtin_dwarf_cfa(), which appears to |
| really compute the CFA, whereas __builtin_frame_address(0) claims |
| to but actually doesn't. See |
| https://bugs.kde.org/show_bug.cgi?id=243270#c47 |
| */ |
| #if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM) |
| # define __FRAME_POINTER \ |
| ,"r"(__builtin_dwarf_cfa()) |
| # define VALGRIND_CFI_PROLOGUE \ |
| "movq %%rbp, %%r15\n\t" \ |
| "movq %2, %%rbp\n\t" \ |
| ".cfi_remember_state\n\t" \ |
| ".cfi_def_cfa rbp, 0\n\t" |
| # define VALGRIND_CFI_EPILOGUE \ |
| "movq %%r15, %%rbp\n\t" \ |
| ".cfi_restore_state\n\t" |
| #else |
| # define __FRAME_POINTER |
| # define VALGRIND_CFI_PROLOGUE |
| # define VALGRIND_CFI_EPILOGUE |
| #endif |
| |
/* Macros to save and align the stack before making a function
   call, and to restore it afterwards, since gcc may not keep the
   stack pointer aligned if it doesn't realise that calls are being
   made to other functions. */
| |
#define VALGRIND_ALIGN_STACK \
      "movq %%rsp,%%r14\n\t" /* save the original %rsp */ \
      "andq $0xfffffffffffffff0,%%rsp\n\t" /* align %rsp to 16 */
#define VALGRIND_RESTORE_STACK \
      "movq %%r14,%%rsp\n\t" /* restore the original %rsp */
| |
| /* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned |
| long) == 8. */ |
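
/* If in doubt, that assumption can be checked at compile time with
   the usual negative-array-size trick (a hedged sketch, not part
   of this header; the typedef name is arbitrary):

   typedef char vg_check_ulong_is_8_bytes
                   [sizeof(unsigned long) == 8 ? 1 : -1];
*/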
| |
/* NB 9 Sept 07.  There is a nasty kludge here in all these CALL_FN_
   macros.  In order not to trash the stack redzone, we need to drop
   %rsp by 128 before the hidden call, and restore it afterwards.
   The nastiness is that it is only by luck that the stack still
   appears to be unwindable during the hidden call, since, for its
   duration, the behaviour of any routine using this macro does not
   match what the CFI data says.  Sigh.

   Why is this important?  Imagine that a wrapper has a
   stack-allocated local, and passes a pointer to it to the hidden
   call.  Because gcc does not know about the hidden call, it may
   allocate that local in the redzone.  Unfortunately the hidden
   call may then trash it before the callee comes to use it.  So we
   must step clear of the redzone, for the duration of the hidden
   call, to make it safe.

   Probably the same problem afflicts the other redzone-style ABIs
   too (ppc64-linux); but for those, the stack is self-describing
   (none of this CFI nonsense) so at least messing with the stack
   pointer doesn't risk making the stack non-unwindable. */
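
/* To make the danger concrete, here is a hedged sketch (the
   function 'foo' and its wrapper are hypothetical) of a wrapper
   whose stack-allocated local is passed, by address, to the hidden
   call.  Without the 128-byte step-over below, gcc would be free
   to place 'local' in the redzone, where the hidden call could
   trash it before foo reads it:

   int I_WRAP_SONAME_FNNAME_ZU(NONE,foo) ( int *p )
   {
      int    local = *p;
      int    result;
      OrigFn fn;
      VALGRIND_GET_ORIG_FN(fn);
      CALL_FN_W_W(result, fn, &local);
      return result;
   }
*/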
| |
| #define CALL_FN_W_v(lval, orig) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[1]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| __asm__ volatile( \ |
| VALGRIND_CFI_PROLOGUE \ |
| VALGRIND_ALIGN_STACK \ |
| "subq $128,%%rsp\n\t" \ |
| "movq (%%rax), %%rax\n\t" /* target->%rax */ \ |
| VALGRIND_CALL_NOREDIR_RAX \ |
| VALGRIND_RESTORE_STACK \ |
| VALGRIND_CFI_EPILOGUE \ |
| : /*out*/ "=a" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_W(lval, orig, arg1) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[2]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| __asm__ volatile( \ |
| VALGRIND_CFI_PROLOGUE \ |
| VALGRIND_ALIGN_STACK \ |
| "subq $128,%%rsp\n\t" \ |
| "movq 8(%%rax), %%rdi\n\t" \ |
| "movq (%%rax), %%rax\n\t" /* target->%rax */ \ |
| VALGRIND_CALL_NOREDIR_RAX \ |
| VALGRIND_RESTORE_STACK \ |
| VALGRIND_CFI_EPILOGUE \ |
| : /*out*/ "=a" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WW(lval, orig, arg1,arg2) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| __asm__ volatile( \ |
| VALGRIND_CFI_PROLOGUE \ |
| VALGRIND_ALIGN_STACK \ |
| "subq $128,%%rsp\n\t" \ |
| "movq 16(%%rax), %%rsi\n\t" \ |
| "movq 8(%%rax), %%rdi\n\t" \ |
| "movq (%%rax), %%rax\n\t" /* target->%rax */ \ |
| VALGRIND_CALL_NOREDIR_RAX \ |
| VALGRIND_RESTORE_STACK \ |
| VALGRIND_CFI_EPILOGUE \ |
| : /*out*/ "=a" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[4]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| __asm__ volatile( \ |
| VALGRIND_CFI_PROLOGUE \ |
| VALGRIND_ALIGN_STACK \ |
| "subq $128,%%rsp\n\t" \ |
| "movq 24(%%rax), %%rdx\n\t" \ |
| "movq 16(%%rax), %%rsi\n\t" \ |
| "movq 8(%%rax), %%rdi\n\t" \ |
| "movq (%%rax), %%rax\n\t" /* target->%rax */ \ |
| VALGRIND_CALL_NOREDIR_RAX \ |
| VALGRIND_RESTORE_STACK \ |
| VALGRIND_CFI_EPILOGUE \ |
| : /*out*/ "=a" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[5]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| __asm__ volatile( \ |
| VALGRIND_CFI_PROLOGUE \ |
| VALGRIND_ALIGN_STACK \ |
| "subq $128,%%rsp\n\t" \ |
| "movq 32(%%rax), %%rcx\n\t" \ |
| "movq 24(%%rax), %%rdx\n\t" \ |
| "movq 16(%%rax), %%rsi\n\t" \ |
| "movq 8(%%rax), %%rdi\n\t" \ |
| "movq (%%rax), %%rax\n\t" /* target->%rax */ \ |
| VALGRIND_CALL_NOREDIR_RAX \ |
| VALGRIND_RESTORE_STACK \ |
| VALGRIND_CFI_EPILOGUE \ |
| : /*out*/ "=a" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[6]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| __asm__ volatile( \ |
| VALGRIND_CFI_PROLOGUE \ |
| VALGRIND_ALIGN_STACK \ |
| "subq $128,%%rsp\n\t" \ |
| "movq 40(%%rax), %%r8\n\t" \ |
| "movq 32(%%rax), %%rcx\n\t" \ |
| "movq 24(%%rax), %%rdx\n\t" \ |
| "movq 16(%%rax), %%rsi\n\t" \ |
| "movq 8(%%rax), %%rdi\n\t" \ |
| "movq (%%rax), %%rax\n\t" /* target->%rax */ \ |
| VALGRIND_CALL_NOREDIR_RAX \ |
| VALGRIND_RESTORE_STACK \ |
| VALGRIND_CFI_EPILOGUE \ |
| : /*out*/ "=a" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[7]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| __asm__ volatile( \ |
| VALGRIND_CFI_PROLOGUE \ |
| VALGRIND_ALIGN_STACK \ |
| "subq $128,%%rsp\n\t" \ |
| "movq 48(%%rax), %%r9\n\t" \ |
| "movq 40(%%rax), %%r8\n\t" \ |
| "movq 32(%%rax), %%rcx\n\t" \ |
| "movq 24(%%rax), %%rdx\n\t" \ |
| "movq 16(%%rax), %%rsi\n\t" \ |
| "movq 8(%%rax), %%rdi\n\t" \ |
| "movq (%%rax), %%rax\n\t" /* target->%rax */ \ |
| VALGRIND_CALL_NOREDIR_RAX \ |
| VALGRIND_RESTORE_STACK \ |
| VALGRIND_CFI_EPILOGUE \ |
| : /*out*/ "=a" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[8]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| __asm__ volatile( \ |
| VALGRIND_CFI_PROLOGUE \ |
| VALGRIND_ALIGN_STACK \ |
| "subq $136,%%rsp\n\t" \ |
| "pushq 56(%%rax)\n\t" \ |
| "movq 48(%%rax), %%r9\n\t" \ |
| "movq 40(%%rax), %%r8\n\t" \ |
| "movq 32(%%rax), %%rcx\n\t" \ |
| "movq 24(%%rax), %%rdx\n\t" \ |
| "movq 16(%%rax), %%rsi\n\t" \ |
| "movq 8(%%rax), %%rdi\n\t" \ |
| "movq (%%rax), %%rax\n\t" /* target->%rax */ \ |
| VALGRIND_CALL_NOREDIR_RAX \ |
| VALGRIND_RESTORE_STACK \ |
| VALGRIND_CFI_EPILOGUE \ |
| : /*out*/ "=a" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[9]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| __asm__ volatile( \ |
| VALGRIND_CFI_PROLOGUE \ |
| VALGRIND_ALIGN_STACK \ |
| "subq $128,%%rsp\n\t" \ |
| "pushq 64(%%rax)\n\t" \ |
| "pushq 56(%%rax)\n\t" \ |
| "movq 48(%%rax), %%r9\n\t" \ |
| "movq 40(%%rax), %%r8\n\t" \ |
| "movq 32(%%rax), %%rcx\n\t" \ |
| "movq 24(%%rax), %%rdx\n\t" \ |
| "movq 16(%%rax), %%rsi\n\t" \ |
| "movq 8(%%rax), %%rdi\n\t" \ |
| "movq (%%rax), %%rax\n\t" /* target->%rax */ \ |
| VALGRIND_CALL_NOREDIR_RAX \ |
| VALGRIND_RESTORE_STACK \ |
| VALGRIND_CFI_EPILOGUE \ |
| : /*out*/ "=a" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[10]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| _argvec[9] = (unsigned long)(arg9); \ |
| __asm__ volatile( \ |
| VALGRIND_CFI_PROLOGUE \ |
| VALGRIND_ALIGN_STACK \ |
| "subq $136,%%rsp\n\t" \ |
| "pushq 72(%%rax)\n\t" \ |
| "pushq 64(%%rax)\n\t" \ |
| "pushq 56(%%rax)\n\t" \ |
| "movq 48(%%rax), %%r9\n\t" \ |
| "movq 40(%%rax), %%r8\n\t" \ |
| "movq 32(%%rax), %%rcx\n\t" \ |
| "movq 24(%%rax), %%rdx\n\t" \ |
| "movq 16(%%rax), %%rsi\n\t" \ |
| "movq 8(%%rax), %%rdi\n\t" \ |
| "movq (%%rax), %%rax\n\t" /* target->%rax */ \ |
| VALGRIND_CALL_NOREDIR_RAX \ |
| VALGRIND_RESTORE_STACK \ |
| VALGRIND_CFI_EPILOGUE \ |
| : /*out*/ "=a" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9,arg10) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[11]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| _argvec[9] = (unsigned long)(arg9); \ |
| _argvec[10] = (unsigned long)(arg10); \ |
| __asm__ volatile( \ |
| VALGRIND_CFI_PROLOGUE \ |
| VALGRIND_ALIGN_STACK \ |
| "subq $128,%%rsp\n\t" \ |
| "pushq 80(%%rax)\n\t" \ |
| "pushq 72(%%rax)\n\t" \ |
| "pushq 64(%%rax)\n\t" \ |
| "pushq 56(%%rax)\n\t" \ |
| "movq 48(%%rax), %%r9\n\t" \ |
| "movq 40(%%rax), %%r8\n\t" \ |
| "movq 32(%%rax), %%rcx\n\t" \ |
| "movq 24(%%rax), %%rdx\n\t" \ |
| "movq 16(%%rax), %%rsi\n\t" \ |
| "movq 8(%%rax), %%rdi\n\t" \ |
| "movq (%%rax), %%rax\n\t" /* target->%rax */ \ |
| VALGRIND_CALL_NOREDIR_RAX \ |
| VALGRIND_RESTORE_STACK \ |
| VALGRIND_CFI_EPILOGUE \ |
| : /*out*/ "=a" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9,arg10,arg11) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[12]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| _argvec[9] = (unsigned long)(arg9); \ |
| _argvec[10] = (unsigned long)(arg10); \ |
| _argvec[11] = (unsigned long)(arg11); \ |
| __asm__ volatile( \ |
| VALGRIND_CFI_PROLOGUE \ |
| VALGRIND_ALIGN_STACK \ |
| "subq $136,%%rsp\n\t" \ |
| "pushq 88(%%rax)\n\t" \ |
| "pushq 80(%%rax)\n\t" \ |
| "pushq 72(%%rax)\n\t" \ |
| "pushq 64(%%rax)\n\t" \ |
| "pushq 56(%%rax)\n\t" \ |
| "movq 48(%%rax), %%r9\n\t" \ |
| "movq 40(%%rax), %%r8\n\t" \ |
| "movq 32(%%rax), %%rcx\n\t" \ |
| "movq 24(%%rax), %%rdx\n\t" \ |
| "movq 16(%%rax), %%rsi\n\t" \ |
| "movq 8(%%rax), %%rdi\n\t" \ |
| "movq (%%rax), %%rax\n\t" /* target->%rax */ \ |
| VALGRIND_CALL_NOREDIR_RAX \ |
| VALGRIND_RESTORE_STACK \ |
| VALGRIND_CFI_EPILOGUE \ |
| : /*out*/ "=a" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9,arg10,arg11,arg12) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[13]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| _argvec[9] = (unsigned long)(arg9); \ |
| _argvec[10] = (unsigned long)(arg10); \ |
| _argvec[11] = (unsigned long)(arg11); \ |
| _argvec[12] = (unsigned long)(arg12); \ |
| __asm__ volatile( \ |
| VALGRIND_CFI_PROLOGUE \ |
| VALGRIND_ALIGN_STACK \ |
| "subq $128,%%rsp\n\t" \ |
| "pushq 96(%%rax)\n\t" \ |
| "pushq 88(%%rax)\n\t" \ |
| "pushq 80(%%rax)\n\t" \ |
| "pushq 72(%%rax)\n\t" \ |
| "pushq 64(%%rax)\n\t" \ |
| "pushq 56(%%rax)\n\t" \ |
| "movq 48(%%rax), %%r9\n\t" \ |
| "movq 40(%%rax), %%r8\n\t" \ |
| "movq 32(%%rax), %%rcx\n\t" \ |
| "movq 24(%%rax), %%rdx\n\t" \ |
| "movq 16(%%rax), %%rsi\n\t" \ |
| "movq 8(%%rax), %%rdi\n\t" \ |
| "movq (%%rax), %%rax\n\t" /* target->%rax */ \ |
| VALGRIND_CALL_NOREDIR_RAX \ |
| VALGRIND_RESTORE_STACK \ |
| VALGRIND_CFI_EPILOGUE \ |
| : /*out*/ "=a" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #endif /* PLAT_amd64_linux || PLAT_amd64_darwin || PLAT_amd64_solaris */ |
| |
| /* ------------------------ ppc32-linux ------------------------ */ |
| |
| #if defined(PLAT_ppc32_linux) |
| |
/* This test code is useful for finding out how arguments beyond
   the eighth end up on the stack (compile it to assembly and
   inspect where the extra arguments of f9..f12 are stored relative
   to r1):
| |
| extern int f9 ( int,int,int,int,int,int,int,int,int ); |
| extern int f10 ( int,int,int,int,int,int,int,int,int,int ); |
| extern int f11 ( int,int,int,int,int,int,int,int,int,int,int ); |
| extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int ); |
| |
| int g9 ( void ) { |
| return f9(11,22,33,44,55,66,77,88,99); |
| } |
| int g10 ( void ) { |
| return f10(11,22,33,44,55,66,77,88,99,110); |
| } |
| int g11 ( void ) { |
| return f11(11,22,33,44,55,66,77,88,99,110,121); |
| } |
| int g12 ( void ) { |
| return f12(11,22,33,44,55,66,77,88,99,110,121,132); |
| } |
| */ |
| |
/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest are passed on the stack) */
| |
| /* These regs are trashed by the hidden call. */ |
| #define __CALLER_SAVED_REGS \ |
| "lr", "ctr", "xer", \ |
| "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \ |
| "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \ |
| "r11", "r12", "r13" |
| |
/* Macros to save and align the stack before making a function
   call, and to restore it afterwards, since gcc may not keep the
   stack pointer aligned if it doesn't realise that calls are being
   made to other functions. */
| |
#define VALGRIND_ALIGN_STACK \
      "mr 28,1\n\t" /* save the original r1 (stack pointer) */ \
      "rlwinm 1,1,0,0,27\n\t" /* clear low 4 bits: align r1 to 16 */
#define VALGRIND_RESTORE_STACK \
      "mr 1,28\n\t" /* restore the original r1 */
| |
| /* These CALL_FN_ macros assume that on ppc32-linux, |
| sizeof(unsigned long) == 4. */ |
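
/* (The same compile-time check sketched in the amd64 section, with
   "== 4" instead, can be used to verify this.) */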
| |
| #define CALL_FN_W_v(lval, orig) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[1]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 11,%1\n\t" \ |
| "lwz 11,0(11)\n\t" /* target->r11 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ |
| VALGRIND_RESTORE_STACK \ |
| "mr %0,3" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_W(lval, orig, arg1) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[2]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)arg1; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 11,%1\n\t" \ |
| "lwz 3,4(11)\n\t" /* arg1->r3 */ \ |
| "lwz 11,0(11)\n\t" /* target->r11 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ |
| VALGRIND_RESTORE_STACK \ |
| "mr %0,3" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WW(lval, orig, arg1,arg2) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)arg1; \ |
| _argvec[2] = (unsigned long)arg2; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 11,%1\n\t" \ |
| "lwz 3,4(11)\n\t" /* arg1->r3 */ \ |
| "lwz 4,8(11)\n\t" \ |
| "lwz 11,0(11)\n\t" /* target->r11 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ |
| VALGRIND_RESTORE_STACK \ |
| "mr %0,3" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[4]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)arg1; \ |
| _argvec[2] = (unsigned long)arg2; \ |
| _argvec[3] = (unsigned long)arg3; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 11,%1\n\t" \ |
| "lwz 3,4(11)\n\t" /* arg1->r3 */ \ |
| "lwz 4,8(11)\n\t" \ |
| "lwz 5,12(11)\n\t" \ |
| "lwz 11,0(11)\n\t" /* target->r11 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ |
| VALGRIND_RESTORE_STACK \ |
| "mr %0,3" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[5]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)arg1; \ |
| _argvec[2] = (unsigned long)arg2; \ |
| _argvec[3] = (unsigned long)arg3; \ |
| _argvec[4] = (unsigned long)arg4; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 11,%1\n\t" \ |
| "lwz 3,4(11)\n\t" /* arg1->r3 */ \ |
| "lwz 4,8(11)\n\t" \ |
| "lwz 5,12(11)\n\t" \ |
| "lwz 6,16(11)\n\t" /* arg4->r6 */ \ |
| "lwz 11,0(11)\n\t" /* target->r11 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ |
| VALGRIND_RESTORE_STACK \ |
| "mr %0,3" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[6]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)arg1; \ |
| _argvec[2] = (unsigned long)arg2; \ |
| _argvec[3] = (unsigned long)arg3; \ |
| _argvec[4] = (unsigned long)arg4; \ |
| _argvec[5] = (unsigned long)arg5; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 11,%1\n\t" \ |
| "lwz 3,4(11)\n\t" /* arg1->r3 */ \ |
| "lwz 4,8(11)\n\t" \ |
| "lwz 5,12(11)\n\t" \ |
| "lwz 6,16(11)\n\t" /* arg4->r6 */ \ |
| "lwz 7,20(11)\n\t" \ |
| "lwz 11,0(11)\n\t" /* target->r11 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ |
| VALGRIND_RESTORE_STACK \ |
| "mr %0,3" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[7]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)arg1; \ |
| _argvec[2] = (unsigned long)arg2; \ |
| _argvec[3] = (unsigned long)arg3; \ |
| _argvec[4] = (unsigned long)arg4; \ |
| _argvec[5] = (unsigned long)arg5; \ |
| _argvec[6] = (unsigned long)arg6; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 11,%1\n\t" \ |
| "lwz 3,4(11)\n\t" /* arg1->r3 */ \ |
| "lwz 4,8(11)\n\t" \ |
| "lwz 5,12(11)\n\t" \ |
| "lwz 6,16(11)\n\t" /* arg4->r6 */ \ |
| "lwz 7,20(11)\n\t" \ |
| "lwz 8,24(11)\n\t" \ |
| "lwz 11,0(11)\n\t" /* target->r11 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ |
| VALGRIND_RESTORE_STACK \ |
| "mr %0,3" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[8]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)arg1; \ |
| _argvec[2] = (unsigned long)arg2; \ |
| _argvec[3] = (unsigned long)arg3; \ |
| _argvec[4] = (unsigned long)arg4; \ |
| _argvec[5] = (unsigned long)arg5; \ |
| _argvec[6] = (unsigned long)arg6; \ |
| _argvec[7] = (unsigned long)arg7; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 11,%1\n\t" \ |
| "lwz 3,4(11)\n\t" /* arg1->r3 */ \ |
| "lwz 4,8(11)\n\t" \ |
| "lwz 5,12(11)\n\t" \ |
| "lwz 6,16(11)\n\t" /* arg4->r6 */ \ |
| "lwz 7,20(11)\n\t" \ |
| "lwz 8,24(11)\n\t" \ |
| "lwz 9,28(11)\n\t" \ |
| "lwz 11,0(11)\n\t" /* target->r11 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ |
| VALGRIND_RESTORE_STACK \ |
| "mr %0,3" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[9]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)arg1; \ |
| _argvec[2] = (unsigned long)arg2; \ |
| _argvec[3] = (unsigned long)arg3; \ |
| _argvec[4] = (unsigned long)arg4; \ |
| _argvec[5] = (unsigned long)arg5; \ |
| _argvec[6] = (unsigned long)arg6; \ |
| _argvec[7] = (unsigned long)arg7; \ |
| _argvec[8] = (unsigned long)arg8; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 11,%1\n\t" \ |
| "lwz 3,4(11)\n\t" /* arg1->r3 */ \ |
| "lwz 4,8(11)\n\t" \ |
| "lwz 5,12(11)\n\t" \ |
| "lwz 6,16(11)\n\t" /* arg4->r6 */ \ |
| "lwz 7,20(11)\n\t" \ |
| "lwz 8,24(11)\n\t" \ |
| "lwz 9,28(11)\n\t" \ |
| "lwz 10,32(11)\n\t" /* arg8->r10 */ \ |
| "lwz 11,0(11)\n\t" /* target->r11 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ |
| VALGRIND_RESTORE_STACK \ |
| "mr %0,3" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[10]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)arg1; \ |
| _argvec[2] = (unsigned long)arg2; \ |
| _argvec[3] = (unsigned long)arg3; \ |
| _argvec[4] = (unsigned long)arg4; \ |
| _argvec[5] = (unsigned long)arg5; \ |
| _argvec[6] = (unsigned long)arg6; \ |
| _argvec[7] = (unsigned long)arg7; \ |
| _argvec[8] = (unsigned long)arg8; \ |
| _argvec[9] = (unsigned long)arg9; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 11,%1\n\t" \ |
| "addi 1,1,-16\n\t" \ |
| /* arg9 */ \ |
| "lwz 3,36(11)\n\t" \ |
| "stw 3,8(1)\n\t" \ |
| /* args1-8 */ \ |
| "lwz 3,4(11)\n\t" /* arg1->r3 */ \ |
| "lwz 4,8(11)\n\t" \ |
| "lwz 5,12(11)\n\t" \ |
| "lwz 6,16(11)\n\t" /* arg4->r6 */ \ |
| "lwz 7,20(11)\n\t" \ |
| "lwz 8,24(11)\n\t" \ |
| "lwz 9,28(11)\n\t" \ |
| "lwz 10,32(11)\n\t" /* arg8->r10 */ \ |
| "lwz 11,0(11)\n\t" /* target->r11 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ |
| VALGRIND_RESTORE_STACK \ |
| "mr %0,3" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9,arg10) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[11]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)arg1; \ |
| _argvec[2] = (unsigned long)arg2; \ |
| _argvec[3] = (unsigned long)arg3; \ |
| _argvec[4] = (unsigned long)arg4; \ |
| _argvec[5] = (unsigned long)arg5; \ |
| _argvec[6] = (unsigned long)arg6; \ |
| _argvec[7] = (unsigned long)arg7; \ |
| _argvec[8] = (unsigned long)arg8; \ |
| _argvec[9] = (unsigned long)arg9; \ |
| _argvec[10] = (unsigned long)arg10; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 11,%1\n\t" \ |
| "addi 1,1,-16\n\t" \ |
| /* arg10 */ \ |
| "lwz 3,40(11)\n\t" \ |
| "stw 3,12(1)\n\t" \ |
| /* arg9 */ \ |
| "lwz 3,36(11)\n\t" \ |
| "stw 3,8(1)\n\t" \ |
| /* args1-8 */ \ |
| "lwz 3,4(11)\n\t" /* arg1->r3 */ \ |
| "lwz 4,8(11)\n\t" \ |
| "lwz 5,12(11)\n\t" \ |
| "lwz 6,16(11)\n\t" /* arg4->r6 */ \ |
| "lwz 7,20(11)\n\t" \ |
| "lwz 8,24(11)\n\t" \ |
| "lwz 9,28(11)\n\t" \ |
| "lwz 10,32(11)\n\t" /* arg8->r10 */ \ |
| "lwz 11,0(11)\n\t" /* target->r11 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ |
| VALGRIND_RESTORE_STACK \ |
| "mr %0,3" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9,arg10,arg11) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[12]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)arg1; \ |
| _argvec[2] = (unsigned long)arg2; \ |
| _argvec[3] = (unsigned long)arg3; \ |
| _argvec[4] = (unsigned long)arg4; \ |
| _argvec[5] = (unsigned long)arg5; \ |
| _argvec[6] = (unsigned long)arg6; \ |
| _argvec[7] = (unsigned long)arg7; \ |
| _argvec[8] = (unsigned long)arg8; \ |
| _argvec[9] = (unsigned long)arg9; \ |
| _argvec[10] = (unsigned long)arg10; \ |
| _argvec[11] = (unsigned long)arg11; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 11,%1\n\t" \ |
| "addi 1,1,-32\n\t" \ |
| /* arg11 */ \ |
| "lwz 3,44(11)\n\t" \ |
| "stw 3,16(1)\n\t" \ |
| /* arg10 */ \ |
| "lwz 3,40(11)\n\t" \ |
| "stw 3,12(1)\n\t" \ |
| /* arg9 */ \ |
| "lwz 3,36(11)\n\t" \ |
| "stw 3,8(1)\n\t" \ |
| /* args1-8 */ \ |
| "lwz 3,4(11)\n\t" /* arg1->r3 */ \ |
| "lwz 4,8(11)\n\t" \ |
| "lwz 5,12(11)\n\t" \ |
| "lwz 6,16(11)\n\t" /* arg4->r6 */ \ |
| "lwz 7,20(11)\n\t" \ |
| "lwz 8,24(11)\n\t" \ |
| "lwz 9,28(11)\n\t" \ |
| "lwz 10,32(11)\n\t" /* arg8->r10 */ \ |
| "lwz 11,0(11)\n\t" /* target->r11 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ |
| VALGRIND_RESTORE_STACK \ |
| "mr %0,3" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9,arg10,arg11,arg12) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[13]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)arg1; \ |
| _argvec[2] = (unsigned long)arg2; \ |
| _argvec[3] = (unsigned long)arg3; \ |
| _argvec[4] = (unsigned long)arg4; \ |
| _argvec[5] = (unsigned long)arg5; \ |
| _argvec[6] = (unsigned long)arg6; \ |
| _argvec[7] = (unsigned long)arg7; \ |
| _argvec[8] = (unsigned long)arg8; \ |
| _argvec[9] = (unsigned long)arg9; \ |
| _argvec[10] = (unsigned long)arg10; \ |
| _argvec[11] = (unsigned long)arg11; \ |
| _argvec[12] = (unsigned long)arg12; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 11,%1\n\t" \ |
| "addi 1,1,-32\n\t" \ |
| /* arg12 */ \ |
| "lwz 3,48(11)\n\t" \ |
| "stw 3,20(1)\n\t" \ |
| /* arg11 */ \ |
| "lwz 3,44(11)\n\t" \ |
| "stw 3,16(1)\n\t" \ |
| /* arg10 */ \ |
| "lwz 3,40(11)\n\t" \ |
| "stw 3,12(1)\n\t" \ |
| /* arg9 */ \ |
| "lwz 3,36(11)\n\t" \ |
| "stw 3,8(1)\n\t" \ |
| /* args1-8 */ \ |
| "lwz 3,4(11)\n\t" /* arg1->r3 */ \ |
| "lwz 4,8(11)\n\t" \ |
| "lwz 5,12(11)\n\t" \ |
| "lwz 6,16(11)\n\t" /* arg4->r6 */ \ |
| "lwz 7,20(11)\n\t" \ |
| "lwz 8,24(11)\n\t" \ |
| "lwz 9,28(11)\n\t" \ |
| "lwz 10,32(11)\n\t" /* arg8->r10 */ \ |
| "lwz 11,0(11)\n\t" /* target->r11 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ |
| VALGRIND_RESTORE_STACK \ |
| "mr %0,3" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #endif /* PLAT_ppc32_linux */ |
| |
/* ------------------------ ppc64be-linux ---------------------- */
| |
| #if defined(PLAT_ppc64be_linux) |
| |
/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest are passed on the stack) */
| |
| /* These regs are trashed by the hidden call. */ |
| #define __CALLER_SAVED_REGS \ |
| "lr", "ctr", "xer", \ |
| "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \ |
| "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \ |
| "r11", "r12", "r13" |
| |
/* Macros to save and align the stack before making a function
   call, and to restore it afterwards, since gcc may not keep the
   stack pointer aligned if it doesn't realise that calls are being
   made to other functions. */
| |
#define VALGRIND_ALIGN_STACK \
      "mr 28,1\n\t" /* save the original r1 (stack pointer) */ \
      "rldicr 1,1,0,59\n\t" /* clear low 4 bits: align r1 to 16 */
#define VALGRIND_RESTORE_STACK \
      "mr 1,28\n\t" /* restore the original r1 */
| |
/* These CALL_FN_ macros assume that on ppc64be-linux,
   sizeof(unsigned long) == 8. */
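
/* A summary of the _argvec layout used by the macros below.  The
   asm is handed &_argvec[2], so:

      _argvec[0]  (at -16)  scratch: the caller's r2 (TOC pointer)
                            is saved here across the hidden call
      _argvec[1]  (at  -8)  the callee's TOC pointer (_orig.r2)
      _argvec[2]  (at   0)  the callee's entry address (_orig.nraddr)
      _argvec[3..]          the arguments, at offsets 8, 16, ...   */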
| |
| #define CALL_FN_W_v(lval, orig) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3+0]; \ |
| volatile unsigned long _res; \ |
| /* _argvec[0] holds current r2 across the call */ \ |
| _argvec[1] = (unsigned long)_orig.r2; \ |
| _argvec[2] = (unsigned long)_orig.nraddr; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 11,%1\n\t" \ |
| "std 2,-16(11)\n\t" /* save tocptr */ \ |
| "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ |
| "ld 11, 0(11)\n\t" /* target->r11 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ |
| "mr 11,%1\n\t" \ |
| "mr %0,3\n\t" \ |
| "ld 2,-16(11)\n\t" /* restore tocptr */ \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[2]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_W(lval, orig, arg1) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3+1]; \ |
| volatile unsigned long _res; \ |
| /* _argvec[0] holds current r2 across the call */ \ |
| _argvec[1] = (unsigned long)_orig.r2; \ |
| _argvec[2] = (unsigned long)_orig.nraddr; \ |
| _argvec[2+1] = (unsigned long)arg1; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 11,%1\n\t" \ |
| "std 2,-16(11)\n\t" /* save tocptr */ \ |
| "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ |
| "ld 3, 8(11)\n\t" /* arg1->r3 */ \ |
| "ld 11, 0(11)\n\t" /* target->r11 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ |
| "mr 11,%1\n\t" \ |
| "mr %0,3\n\t" \ |
| "ld 2,-16(11)\n\t" /* restore tocptr */ \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[2]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WW(lval, orig, arg1,arg2) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3+2]; \ |
| volatile unsigned long _res; \ |
| /* _argvec[0] holds current r2 across the call */ \ |
| _argvec[1] = (unsigned long)_orig.r2; \ |
| _argvec[2] = (unsigned long)_orig.nraddr; \ |
| _argvec[2+1] = (unsigned long)arg1; \ |
| _argvec[2+2] = (unsigned long)arg2; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 11,%1\n\t" \ |
| "std 2,-16(11)\n\t" /* save tocptr */ \ |
| "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ |
| "ld 3, 8(11)\n\t" /* arg1->r3 */ \ |
| "ld 4, 16(11)\n\t" /* arg2->r4 */ \ |
| "ld 11, 0(11)\n\t" /* target->r11 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ |
| "mr 11,%1\n\t" \ |
| "mr %0,3\n\t" \ |
| "ld 2,-16(11)\n\t" /* restore tocptr */ \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[2]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3+3]; \ |
| volatile unsigned long _res; \ |
| /* _argvec[0] holds current r2 across the call */ \ |
| _argvec[1] = (unsigned long)_orig.r2; \ |
| _argvec[2] = (unsigned long)_orig.nraddr; \ |
| _argvec[2+1] = (unsigned long)arg1; \ |
| _argvec[2+2] = (unsigned long)arg2; \ |
| _argvec[2+3] = (unsigned long)arg3; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 11,%1\n\t" \ |
| "std 2,-16(11)\n\t" /* save tocptr */ \ |
| "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ |
| "ld 3, 8(11)\n\t" /* arg1->r3 */ \ |
| "ld 4, 16(11)\n\t" /* arg2->r4 */ \ |
| "ld 5, 24(11)\n\t" /* arg3->r5 */ \ |
| "ld 11, 0(11)\n\t" /* target->r11 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ |
| "mr 11,%1\n\t" \ |
| "mr %0,3\n\t" \ |
| "ld 2,-16(11)\n\t" /* restore tocptr */ \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[2]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3+4]; \ |
| volatile unsigned long _res; \ |
| /* _argvec[0] holds current r2 across the call */ \ |
| _argvec[1] = (unsigned long)_orig.r2; \ |
| _argvec[2] = (unsigned long)_orig.nraddr; \ |
| _argvec[2+1] = (unsigned long)arg1; \ |
| _argvec[2+2] = (unsigned long)arg2; \ |
| _argvec[2+3] = (unsigned long)arg3; \ |
| _argvec[2+4] = (unsigned long)arg4; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 11,%1\n\t" \ |
| "std 2,-16(11)\n\t" /* save tocptr */ \ |
| "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ |
| "ld 3, 8(11)\n\t" /* arg1->r3 */ \ |
| "ld 4, 16(11)\n\t" /* arg2->r4 */ \ |
| "ld 5, 24(11)\n\t" /* arg3->r5 */ \ |
| "ld 6, 32(11)\n\t" /* arg4->r6 */ \ |
| "ld 11, 0(11)\n\t" /* target->r11 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ |
| "mr 11,%1\n\t" \ |
| "mr %0,3\n\t" \ |
| "ld 2,-16(11)\n\t" /* restore tocptr */ \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[2]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3+5]; \ |
| volatile unsigned long _res; \ |
| /* _argvec[0] holds current r2 across the call */ \ |
| _argvec[1] = (unsigned long)_orig.r2; \ |
| _argvec[2] = (unsigned long)_orig.nraddr; \ |
| _argvec[2+1] = (unsigned long)arg1; \ |
| _argvec[2+2] = (unsigned long)arg2; \ |
| _argvec[2+3] = (unsigned long)arg3; \ |
| _argvec[2+4] = (unsigned long)arg4; \ |
| _argvec[2+5] = (unsigned long)arg5; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 11,%1\n\t" \ |
| "std 2,-16(11)\n\t" /* save tocptr */ \ |
| "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ |
| "ld 3, 8(11)\n\t" /* arg1->r3 */ \ |
| "ld 4, 16(11)\n\t" /* arg2->r4 */ \ |
| "ld 5, 24(11)\n\t" /* arg3->r5 */ \ |
| "ld 6, 32(11)\n\t" /* arg4->r6 */ \ |
| "ld 7, 40(11)\n\t" /* arg5->r7 */ \ |
| "ld 11, 0(11)\n\t" /* target->r11 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ |
| "mr 11,%1\n\t" \ |
| "mr %0,3\n\t" \ |
| "ld 2,-16(11)\n\t" /* restore tocptr */ \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[2]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3+6]; \ |
| volatile unsigned long _res; \ |
| /* _argvec[0] holds current r2 across the call */ \ |
| _argvec[1] = (unsigned long)_orig.r2; \ |
| _argvec[2] = (unsigned long)_orig.nraddr; \ |
| _argvec[2+1] = (unsigned long)arg1; \ |
| _argvec[2+2] = (unsigned long)arg2; \ |
| _argvec[2+3] = (unsigned long)arg3; \ |
| _argvec[2+4] = (unsigned long)arg4; \ |
| _argvec[2+5] = (unsigned long)arg5; \ |
| _argvec[2+6] = (unsigned long)arg6; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 11,%1\n\t" \ |
| "std 2,-16(11)\n\t" /* save tocptr */ \ |
| "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ |
| "ld 3, 8(11)\n\t" /* arg1->r3 */ \ |
| "ld 4, 16(11)\n\t" /* arg2->r4 */ \ |
| "ld 5, 24(11)\n\t" /* arg3->r5 */ \ |
| "ld 6, 32(11)\n\t" /* arg4->r6 */ \ |
| "ld 7, 40(11)\n\t" /* arg5->r7 */ \ |
| "ld 8, 48(11)\n\t" /* arg6->r8 */ \ |
| "ld 11, 0(11)\n\t" /* target->r11 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ |
| "mr 11,%1\n\t" \ |
| "mr %0,3\n\t" \ |
| "ld 2,-16(11)\n\t" /* restore tocptr */ \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[2]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3+7]; \ |
| volatile unsigned long _res; \ |
| /* _argvec[0] holds current r2 across the call */ \ |
| _argvec[1] = (unsigned long)_orig.r2; \ |
| _argvec[2] = (unsigned long)_orig.nraddr; \ |
| _argvec[2+1] = (unsigned long)arg1; \ |
| _argvec[2+2] = (unsigned long)arg2; \ |
| _argvec[2+3] = (unsigned long)arg3; \ |
| _argvec[2+4] = (unsigned long)arg4; \ |
| _argvec[2+5] = (unsigned long)arg5; \ |
| _argvec[2+6] = (unsigned long)arg6; \ |
| _argvec[2+7] = (unsigned long)arg7; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 11,%1\n\t" \ |
| "std 2,-16(11)\n\t" /* save tocptr */ \ |
| "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ |
| "ld 3, 8(11)\n\t" /* arg1->r3 */ \ |
| "ld 4, 16(11)\n\t" /* arg2->r4 */ \ |
| "ld 5, 24(11)\n\t" /* arg3->r5 */ \ |
| "ld 6, 32(11)\n\t" /* arg4->r6 */ \ |
| "ld 7, 40(11)\n\t" /* arg5->r7 */ \ |
| "ld 8, 48(11)\n\t" /* arg6->r8 */ \ |
| "ld 9, 56(11)\n\t" /* arg7->r9 */ \ |
| "ld 11, 0(11)\n\t" /* target->r11 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ |
| "mr 11,%1\n\t" \ |
| "mr %0,3\n\t" \ |
| "ld 2,-16(11)\n\t" /* restore tocptr */ \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[2]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3+8]; \ |
| volatile unsigned long _res; \ |
| /* _argvec[0] holds current r2 across the call */ \ |
| _argvec[1] = (unsigned long)_orig.r2; \ |
| _argvec[2] = (unsigned long)_orig.nraddr; \ |
| _argvec[2+1] = (unsigned long)arg1; \ |
| _argvec[2+2] = (unsigned long)arg2; \ |
| _argvec[2+3] = (unsigned long)arg3; \ |
| _argvec[2+4] = (unsigned long)arg4; \ |
| _argvec[2+5] = (unsigned long)arg5; \ |
| _argvec[2+6] = (unsigned long)arg6; \ |
| _argvec[2+7] = (unsigned long)arg7; \ |
| _argvec[2+8] = (unsigned long)arg8; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 11,%1\n\t" \ |
| "std 2,-16(11)\n\t" /* save tocptr */ \ |
| "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ |
| "ld 3, 8(11)\n\t" /* arg1->r3 */ \ |
| "ld 4, 16(11)\n\t" /* arg2->r4 */ \ |
| "ld 5, 24(11)\n\t" /* arg3->r5 */ \ |
| "ld 6, 32(11)\n\t" /* arg4->r6 */ \ |
| "ld 7, 40(11)\n\t" /* arg5->r7 */ \ |
| "ld 8, 48(11)\n\t" /* arg6->r8 */ \ |
| "ld 9, 56(11)\n\t" /* arg7->r9 */ \ |
| "ld 10, 64(11)\n\t" /* arg8->r10 */ \ |
| "ld 11, 0(11)\n\t" /* target->r11 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ |
| "mr 11,%1\n\t" \ |
| "mr %0,3\n\t" \ |
| "ld 2,-16(11)\n\t" /* restore tocptr */ \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[2]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3+9]; \ |
| volatile unsigned long _res; \ |
| /* _argvec[0] holds current r2 across the call */ \ |
| _argvec[1] = (unsigned long)_orig.r2; \ |
| _argvec[2] = (unsigned long)_orig.nraddr; \ |
| _argvec[2+1] = (unsigned long)arg1; \ |
| _argvec[2+2] = (unsigned long)arg2; \ |
| _argvec[2+3] = (unsigned long)arg3; \ |
| _argvec[2+4] = (unsigned long)arg4; \ |
| _argvec[2+5] = (unsigned long)arg5; \ |
| _argvec[2+6] = (unsigned long)arg6; \ |
| _argvec[2+7] = (unsigned long)arg7; \ |
| _argvec[2+8] = (unsigned long)arg8; \ |
| _argvec[2+9] = (unsigned long)arg9; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 11,%1\n\t" \ |
| "std 2,-16(11)\n\t" /* save tocptr */ \ |
| "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ |
| "addi 1,1,-128\n\t" /* expand stack frame */ \ |
| /* arg9 */ \ |
| "ld 3,72(11)\n\t" \ |
| "std 3,112(1)\n\t" \ |
| /* args1-8 */ \ |
| "ld 3, 8(11)\n\t" /* arg1->r3 */ \ |
| "ld 4, 16(11)\n\t" /* arg2->r4 */ \ |
| "ld 5, 24(11)\n\t" /* arg3->r5 */ \ |
| "ld 6, 32(11)\n\t" /* arg4->r6 */ \ |
| "ld 7, 40(11)\n\t" /* arg5->r7 */ \ |
| "ld 8, 48(11)\n\t" /* arg6->r8 */ \ |
| "ld 9, 56(11)\n\t" /* arg7->r9 */ \ |
| "ld 10, 64(11)\n\t" /* arg8->r10 */ \ |
| "ld 11, 0(11)\n\t" /* target->r11 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ |
| "mr 11,%1\n\t" \ |
| "mr %0,3\n\t" \ |
| "ld 2,-16(11)\n\t" /* restore tocptr */ \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[2]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9,arg10) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3+10]; \ |
| volatile unsigned long _res; \ |
| /* _argvec[0] holds current r2 across the call */ \ |
| _argvec[1] = (unsigned long)_orig.r2; \ |
| _argvec[2] = (unsigned long)_orig.nraddr; \ |
| _argvec[2+1] = (unsigned long)arg1; \ |
| _argvec[2+2] = (unsigned long)arg2; \ |
| _argvec[2+3] = (unsigned long)arg3; \ |
| _argvec[2+4] = (unsigned long)arg4; \ |
| _argvec[2+5] = (unsigned long)arg5; \ |
| _argvec[2+6] = (unsigned long)arg6; \ |
| _argvec[2+7] = (unsigned long)arg7; \ |
| _argvec[2+8] = (unsigned long)arg8; \ |
| _argvec[2+9] = (unsigned long)arg9; \ |
| _argvec[2+10] = (unsigned long)arg10; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 11,%1\n\t" \ |
| "std 2,-16(11)\n\t" /* save tocptr */ \ |
| "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ |
| "addi 1,1,-128\n\t" /* expand stack frame */ \ |
| /* arg10 */ \ |
| "ld 3,80(11)\n\t" \ |
| "std 3,120(1)\n\t" \ |
| /* arg9 */ \ |
| "ld 3,72(11)\n\t" \ |
| "std 3,112(1)\n\t" \ |
| /* args1-8 */ \ |
| "ld 3, 8(11)\n\t" /* arg1->r3 */ \ |
| "ld 4, 16(11)\n\t" /* arg2->r4 */ \ |
| "ld 5, 24(11)\n\t" /* arg3->r5 */ \ |
| "ld 6, 32(11)\n\t" /* arg4->r6 */ \ |
| "ld 7, 40(11)\n\t" /* arg5->r7 */ \ |
| "ld 8, 48(11)\n\t" /* arg6->r8 */ \ |
| "ld 9, 56(11)\n\t" /* arg7->r9 */ \ |
| "ld 10, 64(11)\n\t" /* arg8->r10 */ \ |
| "ld 11, 0(11)\n\t" /* target->r11 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ |
| "mr 11,%1\n\t" \ |
| "mr %0,3\n\t" \ |
| "ld 2,-16(11)\n\t" /* restore tocptr */ \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[2]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9,arg10,arg11) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3+11]; \ |
| volatile unsigned long _res; \ |
| /* _argvec[0] holds current r2 across the call */ \ |
| _argvec[1] = (unsigned long)_orig.r2; \ |
| _argvec[2] = (unsigned long)_orig.nraddr; \ |
| _argvec[2+1] = (unsigned long)arg1; \ |
| _argvec[2+2] = (unsigned long)arg2; \ |
| _argvec[2+3] = (unsigned long)arg3; \ |
| _argvec[2+4] = (unsigned long)arg4; \ |
| _argvec[2+5] = (unsigned long)arg5; \ |
| _argvec[2+6] = (unsigned long)arg6; \ |
| _argvec[2+7] = (unsigned long)arg7; \ |
| _argvec[2+8] = (unsigned long)arg8; \ |
| _argvec[2+9] = (unsigned long)arg9; \ |
| _argvec[2+10] = (unsigned long)arg10; \ |
| _argvec[2+11] = (unsigned long)arg11; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 11,%1\n\t" \ |
| "std 2,-16(11)\n\t" /* save tocptr */ \ |
| "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ |
| "addi 1,1,-144\n\t" /* expand stack frame */ \ |
| /* arg11 */ \ |
| "ld 3,88(11)\n\t" \ |
| "std 3,128(1)\n\t" \ |
| /* arg10 */ \ |
| "ld 3,80(11)\n\t" \ |
| "std 3,120(1)\n\t" \ |
| /* arg9 */ \ |
| "ld 3,72(11)\n\t" \ |
| "std 3,112(1)\n\t" \ |
| /* args1-8 */ \ |
| "ld 3, 8(11)\n\t" /* arg1->r3 */ \ |
| "ld 4, 16(11)\n\t" /* arg2->r4 */ \ |
| "ld 5, 24(11)\n\t" /* arg3->r5 */ \ |
| "ld 6, 32(11)\n\t" /* arg4->r6 */ \ |
| "ld 7, 40(11)\n\t" /* arg5->r7 */ \ |
| "ld 8, 48(11)\n\t" /* arg6->r8 */ \ |
| "ld 9, 56(11)\n\t" /* arg7->r9 */ \ |
| "ld 10, 64(11)\n\t" /* arg8->r10 */ \ |
| "ld 11, 0(11)\n\t" /* target->r11 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ |
| "mr 11,%1\n\t" \ |
| "mr %0,3\n\t" \ |
| "ld 2,-16(11)\n\t" /* restore tocptr */ \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[2]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9,arg10,arg11,arg12) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3+12]; \ |
| volatile unsigned long _res; \ |
| /* _argvec[0] holds current r2 across the call */ \ |
| _argvec[1] = (unsigned long)_orig.r2; \ |
| _argvec[2] = (unsigned long)_orig.nraddr; \ |
| _argvec[2+1] = (unsigned long)arg1; \ |
| _argvec[2+2] = (unsigned long)arg2; \ |
| _argvec[2+3] = (unsigned long)arg3; \ |
| _argvec[2+4] = (unsigned long)arg4; \ |
| _argvec[2+5] = (unsigned long)arg5; \ |
| _argvec[2+6] = (unsigned long)arg6; \ |
| _argvec[2+7] = (unsigned long)arg7; \ |
| _argvec[2+8] = (unsigned long)arg8; \ |
| _argvec[2+9] = (unsigned long)arg9; \ |
| _argvec[2+10] = (unsigned long)arg10; \ |
| _argvec[2+11] = (unsigned long)arg11; \ |
| _argvec[2+12] = (unsigned long)arg12; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 11,%1\n\t" \ |
| "std 2,-16(11)\n\t" /* save tocptr */ \ |
| "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ |
| "addi 1,1,-144\n\t" /* expand stack frame */ \ |
| /* arg12 */ \ |
| "ld 3,96(11)\n\t" \ |
| "std 3,136(1)\n\t" \ |
| /* arg11 */ \ |
| "ld 3,88(11)\n\t" \ |
| "std 3,128(1)\n\t" \ |
| /* arg10 */ \ |
| "ld 3,80(11)\n\t" \ |
| "std 3,120(1)\n\t" \ |
| /* arg9 */ \ |
| "ld 3,72(11)\n\t" \ |
| "std 3,112(1)\n\t" \ |
| /* args1-8 */ \ |
| "ld 3, 8(11)\n\t" /* arg1->r3 */ \ |
| "ld 4, 16(11)\n\t" /* arg2->r4 */ \ |
| "ld 5, 24(11)\n\t" /* arg3->r5 */ \ |
| "ld 6, 32(11)\n\t" /* arg4->r6 */ \ |
| "ld 7, 40(11)\n\t" /* arg5->r7 */ \ |
| "ld 8, 48(11)\n\t" /* arg6->r8 */ \ |
| "ld 9, 56(11)\n\t" /* arg7->r9 */ \ |
| "ld 10, 64(11)\n\t" /* arg8->r10 */ \ |
| "ld 11, 0(11)\n\t" /* target->r11 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ |
| "mr 11,%1\n\t" \ |
| "mr %0,3\n\t" \ |
| "ld 2,-16(11)\n\t" /* restore tocptr */ \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[2]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #endif /* PLAT_ppc64be_linux */ |
| |
| /* ------------------------- ppc64le-linux ----------------------- */ |
| #if defined(PLAT_ppc64le_linux) |
| |
/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest are passed on the stack) */
| |
| /* These regs are trashed by the hidden call. */ |
| #define __CALLER_SAVED_REGS \ |
| "lr", "ctr", "xer", \ |
| "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \ |
| "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \ |
| "r11", "r12", "r13" |
| |
/* Macros to save and align the stack before making a function
   call, and to restore it afterwards, since gcc may not keep the
   stack pointer aligned if it doesn't realise that calls are being
   made to other functions. */
| |
#define VALGRIND_ALIGN_STACK \
      "mr 28,1\n\t" /* save the original r1 (stack pointer) */ \
      "rldicr 1,1,0,59\n\t" /* clear low 4 bits: align r1 to 16 */
#define VALGRIND_RESTORE_STACK \
      "mr 1,28\n\t" /* restore the original r1 */
| |
/* These CALL_FN_ macros assume that on ppc64le-linux,
   sizeof(unsigned long) == 8. */
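
/* The _argvec layout here is the same as for ppc64be above.  The
   visible difference is that the target address travels in r12
   rather than r11: the ELFv2 ABI expects r12 to hold the global
   entry point address of the called function. */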
| |
| #define CALL_FN_W_v(lval, orig) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3+0]; \ |
| volatile unsigned long _res; \ |
| /* _argvec[0] holds current r2 across the call */ \ |
| _argvec[1] = (unsigned long)_orig.r2; \ |
| _argvec[2] = (unsigned long)_orig.nraddr; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 12,%1\n\t" \ |
| "std 2,-16(12)\n\t" /* save tocptr */ \ |
| "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ |
| "ld 12, 0(12)\n\t" /* target->r12 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ |
| "mr 12,%1\n\t" \ |
| "mr %0,3\n\t" \ |
| "ld 2,-16(12)\n\t" /* restore tocptr */ \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[2]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_W(lval, orig, arg1) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3+1]; \ |
| volatile unsigned long _res; \ |
| /* _argvec[0] holds current r2 across the call */ \ |
| _argvec[1] = (unsigned long)_orig.r2; \ |
| _argvec[2] = (unsigned long)_orig.nraddr; \ |
| _argvec[2+1] = (unsigned long)arg1; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 12,%1\n\t" \ |
| "std 2,-16(12)\n\t" /* save tocptr */ \ |
| "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ |
| "ld 3, 8(12)\n\t" /* arg1->r3 */ \ |
| "ld 12, 0(12)\n\t" /* target->r12 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ |
| "mr 12,%1\n\t" \ |
| "mr %0,3\n\t" \ |
| "ld 2,-16(12)\n\t" /* restore tocptr */ \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[2]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WW(lval, orig, arg1,arg2) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3+2]; \ |
| volatile unsigned long _res; \ |
| /* _argvec[0] holds current r2 across the call */ \ |
| _argvec[1] = (unsigned long)_orig.r2; \ |
| _argvec[2] = (unsigned long)_orig.nraddr; \ |
| _argvec[2+1] = (unsigned long)arg1; \ |
| _argvec[2+2] = (unsigned long)arg2; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 12,%1\n\t" \ |
| "std 2,-16(12)\n\t" /* save tocptr */ \ |
| "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ |
| "ld 3, 8(12)\n\t" /* arg1->r3 */ \ |
| "ld 4, 16(12)\n\t" /* arg2->r4 */ \ |
| "ld 12, 0(12)\n\t" /* target->r12 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ |
| "mr 12,%1\n\t" \ |
| "mr %0,3\n\t" \ |
| "ld 2,-16(12)\n\t" /* restore tocptr */ \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[2]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3+3]; \ |
| volatile unsigned long _res; \ |
| /* _argvec[0] holds current r2 across the call */ \ |
| _argvec[1] = (unsigned long)_orig.r2; \ |
| _argvec[2] = (unsigned long)_orig.nraddr; \ |
| _argvec[2+1] = (unsigned long)arg1; \ |
| _argvec[2+2] = (unsigned long)arg2; \ |
| _argvec[2+3] = (unsigned long)arg3; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 12,%1\n\t" \ |
| "std 2,-16(12)\n\t" /* save tocptr */ \ |
| "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ |
| "ld 3, 8(12)\n\t" /* arg1->r3 */ \ |
| "ld 4, 16(12)\n\t" /* arg2->r4 */ \ |
| "ld 5, 24(12)\n\t" /* arg3->r5 */ \ |
| "ld 12, 0(12)\n\t" /* target->r12 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ |
| "mr 12,%1\n\t" \ |
| "mr %0,3\n\t" \ |
| "ld 2,-16(12)\n\t" /* restore tocptr */ \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[2]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3+4]; \ |
| volatile unsigned long _res; \ |
| /* _argvec[0] holds current r2 across the call */ \ |
| _argvec[1] = (unsigned long)_orig.r2; \ |
| _argvec[2] = (unsigned long)_orig.nraddr; \ |
| _argvec[2+1] = (unsigned long)arg1; \ |
| _argvec[2+2] = (unsigned long)arg2; \ |
| _argvec[2+3] = (unsigned long)arg3; \ |
| _argvec[2+4] = (unsigned long)arg4; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 12,%1\n\t" \ |
| "std 2,-16(12)\n\t" /* save tocptr */ \ |
| "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ |
| "ld 3, 8(12)\n\t" /* arg1->r3 */ \ |
| "ld 4, 16(12)\n\t" /* arg2->r4 */ \ |
| "ld 5, 24(12)\n\t" /* arg3->r5 */ \ |
| "ld 6, 32(12)\n\t" /* arg4->r6 */ \ |
| "ld 12, 0(12)\n\t" /* target->r12 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ |
| "mr 12,%1\n\t" \ |
| "mr %0,3\n\t" \ |
| "ld 2,-16(12)\n\t" /* restore tocptr */ \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[2]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3+5]; \ |
| volatile unsigned long _res; \ |
| /* _argvec[0] holds current r2 across the call */ \ |
| _argvec[1] = (unsigned long)_orig.r2; \ |
| _argvec[2] = (unsigned long)_orig.nraddr; \ |
| _argvec[2+1] = (unsigned long)arg1; \ |
| _argvec[2+2] = (unsigned long)arg2; \ |
| _argvec[2+3] = (unsigned long)arg3; \ |
| _argvec[2+4] = (unsigned long)arg4; \ |
| _argvec[2+5] = (unsigned long)arg5; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 12,%1\n\t" \ |
| "std 2,-16(12)\n\t" /* save tocptr */ \ |
| "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ |
| "ld 3, 8(12)\n\t" /* arg1->r3 */ \ |
| "ld 4, 16(12)\n\t" /* arg2->r4 */ \ |
| "ld 5, 24(12)\n\t" /* arg3->r5 */ \ |
| "ld 6, 32(12)\n\t" /* arg4->r6 */ \ |
| "ld 7, 40(12)\n\t" /* arg5->r7 */ \ |
| "ld 12, 0(12)\n\t" /* target->r12 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ |
| "mr 12,%1\n\t" \ |
| "mr %0,3\n\t" \ |
| "ld 2,-16(12)\n\t" /* restore tocptr */ \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[2]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3+6]; \ |
| volatile unsigned long _res; \ |
| /* _argvec[0] holds current r2 across the call */ \ |
| _argvec[1] = (unsigned long)_orig.r2; \ |
| _argvec[2] = (unsigned long)_orig.nraddr; \ |
| _argvec[2+1] = (unsigned long)arg1; \ |
| _argvec[2+2] = (unsigned long)arg2; \ |
| _argvec[2+3] = (unsigned long)arg3; \ |
| _argvec[2+4] = (unsigned long)arg4; \ |
| _argvec[2+5] = (unsigned long)arg5; \ |
| _argvec[2+6] = (unsigned long)arg6; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 12,%1\n\t" \ |
| "std 2,-16(12)\n\t" /* save tocptr */ \ |
| "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ |
| "ld 3, 8(12)\n\t" /* arg1->r3 */ \ |
| "ld 4, 16(12)\n\t" /* arg2->r4 */ \ |
| "ld 5, 24(12)\n\t" /* arg3->r5 */ \ |
| "ld 6, 32(12)\n\t" /* arg4->r6 */ \ |
| "ld 7, 40(12)\n\t" /* arg5->r7 */ \ |
| "ld 8, 48(12)\n\t" /* arg6->r8 */ \ |
| "ld 12, 0(12)\n\t" /* target->r12 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ |
| "mr 12,%1\n\t" \ |
| "mr %0,3\n\t" \ |
| "ld 2,-16(12)\n\t" /* restore tocptr */ \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[2]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3+7]; \ |
| volatile unsigned long _res; \ |
| /* _argvec[0] holds current r2 across the call */ \ |
| _argvec[1] = (unsigned long)_orig.r2; \ |
| _argvec[2] = (unsigned long)_orig.nraddr; \ |
| _argvec[2+1] = (unsigned long)arg1; \ |
| _argvec[2+2] = (unsigned long)arg2; \ |
| _argvec[2+3] = (unsigned long)arg3; \ |
| _argvec[2+4] = (unsigned long)arg4; \ |
| _argvec[2+5] = (unsigned long)arg5; \ |
| _argvec[2+6] = (unsigned long)arg6; \ |
| _argvec[2+7] = (unsigned long)arg7; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 12,%1\n\t" \ |
| "std 2,-16(12)\n\t" /* save tocptr */ \ |
| "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ |
| "ld 3, 8(12)\n\t" /* arg1->r3 */ \ |
| "ld 4, 16(12)\n\t" /* arg2->r4 */ \ |
| "ld 5, 24(12)\n\t" /* arg3->r5 */ \ |
| "ld 6, 32(12)\n\t" /* arg4->r6 */ \ |
| "ld 7, 40(12)\n\t" /* arg5->r7 */ \ |
| "ld 8, 48(12)\n\t" /* arg6->r8 */ \ |
| "ld 9, 56(12)\n\t" /* arg7->r9 */ \ |
| "ld 12, 0(12)\n\t" /* target->r12 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ |
| "mr 12,%1\n\t" \ |
| "mr %0,3\n\t" \ |
| "ld 2,-16(12)\n\t" /* restore tocptr */ \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[2]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3+8]; \ |
| volatile unsigned long _res; \ |
| /* _argvec[0] holds current r2 across the call */ \ |
| _argvec[1] = (unsigned long)_orig.r2; \ |
| _argvec[2] = (unsigned long)_orig.nraddr; \ |
| _argvec[2+1] = (unsigned long)arg1; \ |
| _argvec[2+2] = (unsigned long)arg2; \ |
| _argvec[2+3] = (unsigned long)arg3; \ |
| _argvec[2+4] = (unsigned long)arg4; \ |
| _argvec[2+5] = (unsigned long)arg5; \ |
| _argvec[2+6] = (unsigned long)arg6; \ |
| _argvec[2+7] = (unsigned long)arg7; \ |
| _argvec[2+8] = (unsigned long)arg8; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 12,%1\n\t" \ |
| "std 2,-16(12)\n\t" /* save tocptr */ \ |
| "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ |
| "ld 3, 8(12)\n\t" /* arg1->r3 */ \ |
| "ld 4, 16(12)\n\t" /* arg2->r4 */ \ |
| "ld 5, 24(12)\n\t" /* arg3->r5 */ \ |
| "ld 6, 32(12)\n\t" /* arg4->r6 */ \ |
| "ld 7, 40(12)\n\t" /* arg5->r7 */ \ |
| "ld 8, 48(12)\n\t" /* arg6->r8 */ \ |
| "ld 9, 56(12)\n\t" /* arg7->r9 */ \ |
| "ld 10, 64(12)\n\t" /* arg8->r10 */ \ |
| "ld 12, 0(12)\n\t" /* target->r12 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ |
| "mr 12,%1\n\t" \ |
| "mr %0,3\n\t" \ |
| "ld 2,-16(12)\n\t" /* restore tocptr */ \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[2]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3+9]; \ |
| volatile unsigned long _res; \ |
| /* _argvec[0] holds current r2 across the call */ \ |
| _argvec[1] = (unsigned long)_orig.r2; \ |
| _argvec[2] = (unsigned long)_orig.nraddr; \ |
| _argvec[2+1] = (unsigned long)arg1; \ |
| _argvec[2+2] = (unsigned long)arg2; \ |
| _argvec[2+3] = (unsigned long)arg3; \ |
| _argvec[2+4] = (unsigned long)arg4; \ |
| _argvec[2+5] = (unsigned long)arg5; \ |
| _argvec[2+6] = (unsigned long)arg6; \ |
| _argvec[2+7] = (unsigned long)arg7; \ |
| _argvec[2+8] = (unsigned long)arg8; \ |
| _argvec[2+9] = (unsigned long)arg9; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 12,%1\n\t" \ |
| "std 2,-16(12)\n\t" /* save tocptr */ \ |
| "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ |
| "addi 1,1,-128\n\t" /* expand stack frame */ \ |
| /* arg9 */ \ |
| "ld 3,72(12)\n\t" \ |
| "std 3,96(1)\n\t" \ |
| /* args1-8 */ \ |
| "ld 3, 8(12)\n\t" /* arg1->r3 */ \ |
| "ld 4, 16(12)\n\t" /* arg2->r4 */ \ |
| "ld 5, 24(12)\n\t" /* arg3->r5 */ \ |
| "ld 6, 32(12)\n\t" /* arg4->r6 */ \ |
| "ld 7, 40(12)\n\t" /* arg5->r7 */ \ |
| "ld 8, 48(12)\n\t" /* arg6->r8 */ \ |
| "ld 9, 56(12)\n\t" /* arg7->r9 */ \ |
| "ld 10, 64(12)\n\t" /* arg8->r10 */ \ |
| "ld 12, 0(12)\n\t" /* target->r12 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ |
| "mr 12,%1\n\t" \ |
| "mr %0,3\n\t" \ |
| "ld 2,-16(12)\n\t" /* restore tocptr */ \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[2]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
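/* In CALL_FN_W_9W above and the larger variants below, the frame is
   lowered further ("addi 1,1,-128" or "-144") and each stack-passed
   argument is stored in its ELFv2 parameter save area slot, at
   32 + 8*(N-1) bytes from the new stack pointer: arg9 at 96(1),
   arg10 at 104(1), arg11 at 112(1), arg12 at 120(1). */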
| #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9,arg10) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3+10]; \ |
| volatile unsigned long _res; \ |
| /* _argvec[0] holds current r2 across the call */ \ |
| _argvec[1] = (unsigned long)_orig.r2; \ |
| _argvec[2] = (unsigned long)_orig.nraddr; \ |
| _argvec[2+1] = (unsigned long)arg1; \ |
| _argvec[2+2] = (unsigned long)arg2; \ |
| _argvec[2+3] = (unsigned long)arg3; \ |
| _argvec[2+4] = (unsigned long)arg4; \ |
| _argvec[2+5] = (unsigned long)arg5; \ |
| _argvec[2+6] = (unsigned long)arg6; \ |
| _argvec[2+7] = (unsigned long)arg7; \ |
| _argvec[2+8] = (unsigned long)arg8; \ |
| _argvec[2+9] = (unsigned long)arg9; \ |
| _argvec[2+10] = (unsigned long)arg10; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 12,%1\n\t" \ |
| "std 2,-16(12)\n\t" /* save tocptr */ \ |
| "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ |
| "addi 1,1,-128\n\t" /* expand stack frame */ \ |
| /* arg10 */ \ |
| "ld 3,80(12)\n\t" \ |
| "std 3,104(1)\n\t" \ |
| /* arg9 */ \ |
| "ld 3,72(12)\n\t" \ |
| "std 3,96(1)\n\t" \ |
| /* args1-8 */ \ |
| "ld 3, 8(12)\n\t" /* arg1->r3 */ \ |
| "ld 4, 16(12)\n\t" /* arg2->r4 */ \ |
| "ld 5, 24(12)\n\t" /* arg3->r5 */ \ |
| "ld 6, 32(12)\n\t" /* arg4->r6 */ \ |
| "ld 7, 40(12)\n\t" /* arg5->r7 */ \ |
| "ld 8, 48(12)\n\t" /* arg6->r8 */ \ |
| "ld 9, 56(12)\n\t" /* arg7->r9 */ \ |
| "ld 10, 64(12)\n\t" /* arg8->r10 */ \ |
| "ld 12, 0(12)\n\t" /* target->r12 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ |
| "mr 12,%1\n\t" \ |
| "mr %0,3\n\t" \ |
| "ld 2,-16(12)\n\t" /* restore tocptr */ \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[2]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9,arg10,arg11) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3+11]; \ |
| volatile unsigned long _res; \ |
| /* _argvec[0] holds current r2 across the call */ \ |
| _argvec[1] = (unsigned long)_orig.r2; \ |
| _argvec[2] = (unsigned long)_orig.nraddr; \ |
| _argvec[2+1] = (unsigned long)arg1; \ |
| _argvec[2+2] = (unsigned long)arg2; \ |
| _argvec[2+3] = (unsigned long)arg3; \ |
| _argvec[2+4] = (unsigned long)arg4; \ |
| _argvec[2+5] = (unsigned long)arg5; \ |
| _argvec[2+6] = (unsigned long)arg6; \ |
| _argvec[2+7] = (unsigned long)arg7; \ |
| _argvec[2+8] = (unsigned long)arg8; \ |
| _argvec[2+9] = (unsigned long)arg9; \ |
| _argvec[2+10] = (unsigned long)arg10; \ |
| _argvec[2+11] = (unsigned long)arg11; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 12,%1\n\t" \ |
| "std 2,-16(12)\n\t" /* save tocptr */ \ |
| "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ |
| "addi 1,1,-144\n\t" /* expand stack frame */ \ |
| /* arg11 */ \ |
| "ld 3,88(12)\n\t" \ |
| "std 3,112(1)\n\t" \ |
| /* arg10 */ \ |
| "ld 3,80(12)\n\t" \ |
| "std 3,104(1)\n\t" \ |
| /* arg9 */ \ |
| "ld 3,72(12)\n\t" \ |
| "std 3,96(1)\n\t" \ |
| /* args1-8 */ \ |
| "ld 3, 8(12)\n\t" /* arg1->r3 */ \ |
| "ld 4, 16(12)\n\t" /* arg2->r4 */ \ |
| "ld 5, 24(12)\n\t" /* arg3->r5 */ \ |
| "ld 6, 32(12)\n\t" /* arg4->r6 */ \ |
| "ld 7, 40(12)\n\t" /* arg5->r7 */ \ |
| "ld 8, 48(12)\n\t" /* arg6->r8 */ \ |
| "ld 9, 56(12)\n\t" /* arg7->r9 */ \ |
| "ld 10, 64(12)\n\t" /* arg8->r10 */ \ |
| "ld 12, 0(12)\n\t" /* target->r12 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ |
| "mr 12,%1\n\t" \ |
| "mr %0,3\n\t" \ |
| "ld 2,-16(12)\n\t" /* restore tocptr */ \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[2]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9,arg10,arg11,arg12) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3+12]; \ |
| volatile unsigned long _res; \ |
| /* _argvec[0] holds current r2 across the call */ \ |
| _argvec[1] = (unsigned long)_orig.r2; \ |
| _argvec[2] = (unsigned long)_orig.nraddr; \ |
| _argvec[2+1] = (unsigned long)arg1; \ |
| _argvec[2+2] = (unsigned long)arg2; \ |
| _argvec[2+3] = (unsigned long)arg3; \ |
| _argvec[2+4] = (unsigned long)arg4; \ |
| _argvec[2+5] = (unsigned long)arg5; \ |
| _argvec[2+6] = (unsigned long)arg6; \ |
| _argvec[2+7] = (unsigned long)arg7; \ |
| _argvec[2+8] = (unsigned long)arg8; \ |
| _argvec[2+9] = (unsigned long)arg9; \ |
| _argvec[2+10] = (unsigned long)arg10; \ |
| _argvec[2+11] = (unsigned long)arg11; \ |
| _argvec[2+12] = (unsigned long)arg12; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "mr 12,%1\n\t" \ |
| "std 2,-16(12)\n\t" /* save tocptr */ \ |
| "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ |
| "addi 1,1,-144\n\t" /* expand stack frame */ \ |
| /* arg12 */ \ |
| "ld 3,96(12)\n\t" \ |
| "std 3,120(1)\n\t" \ |
| /* arg11 */ \ |
| "ld 3,88(12)\n\t" \ |
| "std 3,112(1)\n\t" \ |
| /* arg10 */ \ |
| "ld 3,80(12)\n\t" \ |
| "std 3,104(1)\n\t" \ |
| /* arg9 */ \ |
| "ld 3,72(12)\n\t" \ |
| "std 3,96(1)\n\t" \ |
| /* args1-8 */ \ |
| "ld 3, 8(12)\n\t" /* arg1->r3 */ \ |
| "ld 4, 16(12)\n\t" /* arg2->r4 */ \ |
| "ld 5, 24(12)\n\t" /* arg3->r5 */ \ |
| "ld 6, 32(12)\n\t" /* arg4->r6 */ \ |
| "ld 7, 40(12)\n\t" /* arg5->r7 */ \ |
| "ld 8, 48(12)\n\t" /* arg6->r8 */ \ |
| "ld 9, 56(12)\n\t" /* arg7->r9 */ \ |
| "ld 10, 64(12)\n\t" /* arg8->r10 */ \ |
| "ld 12, 0(12)\n\t" /* target->r12 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ |
| "mr 12,%1\n\t" \ |
| "mr %0,3\n\t" \ |
| "ld 2,-16(12)\n\t" /* restore tocptr */ \ |
| VALGRIND_RESTORE_STACK \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[2]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #endif /* PLAT_ppc64le_linux */ |
| |
| /* ------------------------- arm-linux ------------------------- */ |
| |
| #if defined(PLAT_arm_linux) |
| |
| /* These regs are trashed by the hidden call. */ |
| #define __CALLER_SAVED_REGS "r0", "r1", "r2", "r3","r4", "r12", "r14" |
| |
/* Macros to save and align the stack before making a function
   call, and to restore it afterwards, as gcc may not keep the stack
   pointer aligned if it doesn't realise that calls are being made
   to other functions. */
| |
/* This is a bit tricky.  We store the original stack pointer in r10
   as it is callee-saved.  gcc doesn't allow the use of r11 for some
   reason.  Also, we can't directly "bic" the stack pointer in thumb
   mode since r13 isn't an allowed register number in that context.
   So use r4 as a temporary, since that is about to get trashed
   anyway, just after each use of this macro.  The side effect is
   that we need to be very careful about any future changes, since
   VALGRIND_ALIGN_STACK simply assumes r4 is usable. */
| #define VALGRIND_ALIGN_STACK \ |
| "mov r10, sp\n\t" \ |
| "mov r4, sp\n\t" \ |
| "bic r4, r4, #7\n\t" \ |
| "mov sp, r4\n\t" |
| #define VALGRIND_RESTORE_STACK \ |
| "mov sp, r10\n\t" |
| |
| /* These CALL_FN_ macros assume that on arm-linux, sizeof(unsigned |
| long) == 4. */ |
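/* For example, a minimal wrapper using these macros might look like
   this (a sketch: "foo" and the soname encoding are placeholders --
   see the function-wrapping documentation for the encoding rules):

      int I_WRAP_SONAME_FNNAME_ZU(libcZdsoZa, foo) ( int x )
      {
         int    r;
         OrigFn fn;
         VALGRIND_GET_ORIG_FN(fn);
         CALL_FN_W_W(r, fn, x);
         return r;
      }
*/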
| |
| #define CALL_FN_W_v(lval, orig) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[1]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "ldr r4, [%1] \n\t" /* target->r4 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ |
| VALGRIND_RESTORE_STACK \ |
| "mov %0, r0\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_W(lval, orig, arg1) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[2]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "ldr r0, [%1, #4] \n\t" \ |
| "ldr r4, [%1] \n\t" /* target->r4 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ |
| VALGRIND_RESTORE_STACK \ |
| "mov %0, r0\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WW(lval, orig, arg1,arg2) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "ldr r0, [%1, #4] \n\t" \ |
| "ldr r1, [%1, #8] \n\t" \ |
| "ldr r4, [%1] \n\t" /* target->r4 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ |
| VALGRIND_RESTORE_STACK \ |
| "mov %0, r0\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[4]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "ldr r0, [%1, #4] \n\t" \ |
| "ldr r1, [%1, #8] \n\t" \ |
| "ldr r2, [%1, #12] \n\t" \ |
| "ldr r4, [%1] \n\t" /* target->r4 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ |
| VALGRIND_RESTORE_STACK \ |
| "mov %0, r0\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[5]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "ldr r0, [%1, #4] \n\t" \ |
| "ldr r1, [%1, #8] \n\t" \ |
| "ldr r2, [%1, #12] \n\t" \ |
| "ldr r3, [%1, #16] \n\t" \ |
| "ldr r4, [%1] \n\t" /* target->r4 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ |
| VALGRIND_RESTORE_STACK \ |
| "mov %0, r0" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[6]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "sub sp, sp, #4 \n\t" \ |
| "ldr r0, [%1, #20] \n\t" \ |
| "push {r0} \n\t" \ |
| "ldr r0, [%1, #4] \n\t" \ |
| "ldr r1, [%1, #8] \n\t" \ |
| "ldr r2, [%1, #12] \n\t" \ |
| "ldr r3, [%1, #16] \n\t" \ |
| "ldr r4, [%1] \n\t" /* target->r4 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ |
| VALGRIND_RESTORE_STACK \ |
| "mov %0, r0" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[7]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "ldr r0, [%1, #20] \n\t" \ |
| "ldr r1, [%1, #24] \n\t" \ |
| "push {r0, r1} \n\t" \ |
| "ldr r0, [%1, #4] \n\t" \ |
| "ldr r1, [%1, #8] \n\t" \ |
| "ldr r2, [%1, #12] \n\t" \ |
| "ldr r3, [%1, #16] \n\t" \ |
| "ldr r4, [%1] \n\t" /* target->r4 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ |
| VALGRIND_RESTORE_STACK \ |
| "mov %0, r0" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[8]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "sub sp, sp, #4 \n\t" \ |
| "ldr r0, [%1, #20] \n\t" \ |
| "ldr r1, [%1, #24] \n\t" \ |
| "ldr r2, [%1, #28] \n\t" \ |
| "push {r0, r1, r2} \n\t" \ |
| "ldr r0, [%1, #4] \n\t" \ |
| "ldr r1, [%1, #8] \n\t" \ |
| "ldr r2, [%1, #12] \n\t" \ |
| "ldr r3, [%1, #16] \n\t" \ |
| "ldr r4, [%1] \n\t" /* target->r4 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ |
| VALGRIND_RESTORE_STACK \ |
| "mov %0, r0" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[9]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "ldr r0, [%1, #20] \n\t" \ |
| "ldr r1, [%1, #24] \n\t" \ |
| "ldr r2, [%1, #28] \n\t" \ |
| "ldr r3, [%1, #32] \n\t" \ |
| "push {r0, r1, r2, r3} \n\t" \ |
| "ldr r0, [%1, #4] \n\t" \ |
| "ldr r1, [%1, #8] \n\t" \ |
| "ldr r2, [%1, #12] \n\t" \ |
| "ldr r3, [%1, #16] \n\t" \ |
| "ldr r4, [%1] \n\t" /* target->r4 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ |
| VALGRIND_RESTORE_STACK \ |
| "mov %0, r0" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[10]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| _argvec[9] = (unsigned long)(arg9); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "sub sp, sp, #4 \n\t" \ |
| "ldr r0, [%1, #20] \n\t" \ |
| "ldr r1, [%1, #24] \n\t" \ |
| "ldr r2, [%1, #28] \n\t" \ |
| "ldr r3, [%1, #32] \n\t" \ |
| "ldr r4, [%1, #36] \n\t" \ |
| "push {r0, r1, r2, r3, r4} \n\t" \ |
| "ldr r0, [%1, #4] \n\t" \ |
| "ldr r1, [%1, #8] \n\t" \ |
| "ldr r2, [%1, #12] \n\t" \ |
| "ldr r3, [%1, #16] \n\t" \ |
| "ldr r4, [%1] \n\t" /* target->r4 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ |
| VALGRIND_RESTORE_STACK \ |
| "mov %0, r0" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9,arg10) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[11]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| _argvec[9] = (unsigned long)(arg9); \ |
| _argvec[10] = (unsigned long)(arg10); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "ldr r0, [%1, #40] \n\t" \ |
| "push {r0} \n\t" \ |
| "ldr r0, [%1, #20] \n\t" \ |
| "ldr r1, [%1, #24] \n\t" \ |
| "ldr r2, [%1, #28] \n\t" \ |
| "ldr r3, [%1, #32] \n\t" \ |
| "ldr r4, [%1, #36] \n\t" \ |
| "push {r0, r1, r2, r3, r4} \n\t" \ |
| "ldr r0, [%1, #4] \n\t" \ |
| "ldr r1, [%1, #8] \n\t" \ |
| "ldr r2, [%1, #12] \n\t" \ |
| "ldr r3, [%1, #16] \n\t" \ |
| "ldr r4, [%1] \n\t" /* target->r4 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ |
| VALGRIND_RESTORE_STACK \ |
| "mov %0, r0" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ |
| arg6,arg7,arg8,arg9,arg10, \ |
| arg11) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[12]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| _argvec[9] = (unsigned long)(arg9); \ |
| _argvec[10] = (unsigned long)(arg10); \ |
| _argvec[11] = (unsigned long)(arg11); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "sub sp, sp, #4 \n\t" \ |
| "ldr r0, [%1, #40] \n\t" \ |
| "ldr r1, [%1, #44] \n\t" \ |
| "push {r0, r1} \n\t" \ |
| "ldr r0, [%1, #20] \n\t" \ |
| "ldr r1, [%1, #24] \n\t" \ |
| "ldr r2, [%1, #28] \n\t" \ |
| "ldr r3, [%1, #32] \n\t" \ |
| "ldr r4, [%1, #36] \n\t" \ |
| "push {r0, r1, r2, r3, r4} \n\t" \ |
| "ldr r0, [%1, #4] \n\t" \ |
| "ldr r1, [%1, #8] \n\t" \ |
| "ldr r2, [%1, #12] \n\t" \ |
| "ldr r3, [%1, #16] \n\t" \ |
| "ldr r4, [%1] \n\t" /* target->r4 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ |
| VALGRIND_RESTORE_STACK \ |
| "mov %0, r0" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ |
| arg6,arg7,arg8,arg9,arg10, \ |
| arg11,arg12) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[13]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| _argvec[9] = (unsigned long)(arg9); \ |
| _argvec[10] = (unsigned long)(arg10); \ |
| _argvec[11] = (unsigned long)(arg11); \ |
| _argvec[12] = (unsigned long)(arg12); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "ldr r0, [%1, #40] \n\t" \ |
| "ldr r1, [%1, #44] \n\t" \ |
| "ldr r2, [%1, #48] \n\t" \ |
| "push {r0, r1, r2} \n\t" \ |
| "ldr r0, [%1, #20] \n\t" \ |
| "ldr r1, [%1, #24] \n\t" \ |
| "ldr r2, [%1, #28] \n\t" \ |
| "ldr r3, [%1, #32] \n\t" \ |
| "ldr r4, [%1, #36] \n\t" \ |
| "push {r0, r1, r2, r3, r4} \n\t" \ |
| "ldr r0, [%1, #4] \n\t" \ |
| "ldr r1, [%1, #8] \n\t" \ |
| "ldr r2, [%1, #12] \n\t" \ |
| "ldr r3, [%1, #16] \n\t" \ |
| "ldr r4, [%1] \n\t" /* target->r4 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ |
| VALGRIND_RESTORE_STACK \ |
| "mov %0, r0" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #endif /* PLAT_arm_linux */ |
| |
| /* ------------------------ arm64-linux ------------------------ */ |
| |
| #if defined(PLAT_arm64_linux) |
| |
| /* These regs are trashed by the hidden call. */ |
| #define __CALLER_SAVED_REGS \ |
| "x0", "x1", "x2", "x3","x4", "x5", "x6", "x7", "x8", "x9", \ |
| "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", \ |
| "x18", "x19", "x20", "x30", \ |
| "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", \ |
| "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", \ |
| "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", \ |
| "v26", "v27", "v28", "v29", "v30", "v31" |
| |
| /* x21 is callee-saved, so we can use it to save and restore SP around |
| the hidden call. */ |
| #define VALGRIND_ALIGN_STACK \ |
| "mov x21, sp\n\t" \ |
| "bic sp, x21, #15\n\t" |
| #define VALGRIND_RESTORE_STACK \ |
| "mov sp, x21\n\t" |
| |
| /* These CALL_FN_ macros assume that on arm64-linux, |
| sizeof(unsigned long) == 8. */ |
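/* In the variants with more than eight arguments below, arg9..arg12
   are passed on the stack: the frame is extended by a multiple of 16
   bytes (0x20 for 9 or 10 arguments, 0x30 for 11 or 12) so that sp
   stays 16-byte aligned, and argN is stored at sp + 8*(N-9). */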
| |
| #define CALL_FN_W_v(lval, orig) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[1]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "ldr x8, [%1] \n\t" /* target->x8 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ |
| VALGRIND_RESTORE_STACK \ |
| "mov %0, x0\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_W(lval, orig, arg1) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[2]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "ldr x0, [%1, #8] \n\t" \ |
| "ldr x8, [%1] \n\t" /* target->x8 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ |
| VALGRIND_RESTORE_STACK \ |
| "mov %0, x0\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WW(lval, orig, arg1,arg2) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "ldr x0, [%1, #8] \n\t" \ |
| "ldr x1, [%1, #16] \n\t" \ |
| "ldr x8, [%1] \n\t" /* target->x8 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ |
| VALGRIND_RESTORE_STACK \ |
| "mov %0, x0\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[4]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "ldr x0, [%1, #8] \n\t" \ |
| "ldr x1, [%1, #16] \n\t" \ |
| "ldr x2, [%1, #24] \n\t" \ |
| "ldr x8, [%1] \n\t" /* target->x8 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ |
| VALGRIND_RESTORE_STACK \ |
| "mov %0, x0\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[5]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "ldr x0, [%1, #8] \n\t" \ |
| "ldr x1, [%1, #16] \n\t" \ |
| "ldr x2, [%1, #24] \n\t" \ |
| "ldr x3, [%1, #32] \n\t" \ |
| "ldr x8, [%1] \n\t" /* target->x8 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ |
| VALGRIND_RESTORE_STACK \ |
| "mov %0, x0" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[6]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "ldr x0, [%1, #8] \n\t" \ |
| "ldr x1, [%1, #16] \n\t" \ |
| "ldr x2, [%1, #24] \n\t" \ |
| "ldr x3, [%1, #32] \n\t" \ |
| "ldr x4, [%1, #40] \n\t" \ |
| "ldr x8, [%1] \n\t" /* target->x8 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ |
| VALGRIND_RESTORE_STACK \ |
| "mov %0, x0" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[7]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "ldr x0, [%1, #8] \n\t" \ |
| "ldr x1, [%1, #16] \n\t" \ |
| "ldr x2, [%1, #24] \n\t" \ |
| "ldr x3, [%1, #32] \n\t" \ |
| "ldr x4, [%1, #40] \n\t" \ |
| "ldr x5, [%1, #48] \n\t" \ |
| "ldr x8, [%1] \n\t" /* target->x8 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ |
| VALGRIND_RESTORE_STACK \ |
| "mov %0, x0" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[8]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "ldr x0, [%1, #8] \n\t" \ |
| "ldr x1, [%1, #16] \n\t" \ |
| "ldr x2, [%1, #24] \n\t" \ |
| "ldr x3, [%1, #32] \n\t" \ |
| "ldr x4, [%1, #40] \n\t" \ |
| "ldr x5, [%1, #48] \n\t" \ |
| "ldr x6, [%1, #56] \n\t" \ |
| "ldr x8, [%1] \n\t" /* target->x8 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ |
| VALGRIND_RESTORE_STACK \ |
| "mov %0, x0" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[9]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "ldr x0, [%1, #8] \n\t" \ |
| "ldr x1, [%1, #16] \n\t" \ |
| "ldr x2, [%1, #24] \n\t" \ |
| "ldr x3, [%1, #32] \n\t" \ |
| "ldr x4, [%1, #40] \n\t" \ |
| "ldr x5, [%1, #48] \n\t" \ |
| "ldr x6, [%1, #56] \n\t" \ |
| "ldr x7, [%1, #64] \n\t" \ |
| "ldr x8, [%1] \n\t" /* target->x8 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ |
| VALGRIND_RESTORE_STACK \ |
| "mov %0, x0" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[10]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| _argvec[9] = (unsigned long)(arg9); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "sub sp, sp, #0x20 \n\t" \ |
| "ldr x0, [%1, #8] \n\t" \ |
| "ldr x1, [%1, #16] \n\t" \ |
| "ldr x2, [%1, #24] \n\t" \ |
| "ldr x3, [%1, #32] \n\t" \ |
| "ldr x4, [%1, #40] \n\t" \ |
| "ldr x5, [%1, #48] \n\t" \ |
| "ldr x6, [%1, #56] \n\t" \ |
| "ldr x7, [%1, #64] \n\t" \ |
| "ldr x8, [%1, #72] \n\t" \ |
| "str x8, [sp, #0] \n\t" \ |
| "ldr x8, [%1] \n\t" /* target->x8 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ |
| VALGRIND_RESTORE_STACK \ |
| "mov %0, x0" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9,arg10) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[11]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| _argvec[9] = (unsigned long)(arg9); \ |
| _argvec[10] = (unsigned long)(arg10); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "sub sp, sp, #0x20 \n\t" \ |
| "ldr x0, [%1, #8] \n\t" \ |
| "ldr x1, [%1, #16] \n\t" \ |
| "ldr x2, [%1, #24] \n\t" \ |
| "ldr x3, [%1, #32] \n\t" \ |
| "ldr x4, [%1, #40] \n\t" \ |
| "ldr x5, [%1, #48] \n\t" \ |
| "ldr x6, [%1, #56] \n\t" \ |
| "ldr x7, [%1, #64] \n\t" \ |
| "ldr x8, [%1, #72] \n\t" \ |
| "str x8, [sp, #0] \n\t" \ |
| "ldr x8, [%1, #80] \n\t" \ |
| "str x8, [sp, #8] \n\t" \ |
| "ldr x8, [%1] \n\t" /* target->x8 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ |
| VALGRIND_RESTORE_STACK \ |
| "mov %0, x0" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9,arg10,arg11) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[12]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| _argvec[9] = (unsigned long)(arg9); \ |
| _argvec[10] = (unsigned long)(arg10); \ |
| _argvec[11] = (unsigned long)(arg11); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "sub sp, sp, #0x30 \n\t" \ |
| "ldr x0, [%1, #8] \n\t" \ |
| "ldr x1, [%1, #16] \n\t" \ |
| "ldr x2, [%1, #24] \n\t" \ |
| "ldr x3, [%1, #32] \n\t" \ |
| "ldr x4, [%1, #40] \n\t" \ |
| "ldr x5, [%1, #48] \n\t" \ |
| "ldr x6, [%1, #56] \n\t" \ |
| "ldr x7, [%1, #64] \n\t" \ |
| "ldr x8, [%1, #72] \n\t" \ |
| "str x8, [sp, #0] \n\t" \ |
| "ldr x8, [%1, #80] \n\t" \ |
| "str x8, [sp, #8] \n\t" \ |
| "ldr x8, [%1, #88] \n\t" \ |
| "str x8, [sp, #16] \n\t" \ |
| "ldr x8, [%1] \n\t" /* target->x8 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ |
| VALGRIND_RESTORE_STACK \ |
| "mov %0, x0" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9,arg10,arg11, \ |
| arg12) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[13]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| _argvec[9] = (unsigned long)(arg9); \ |
| _argvec[10] = (unsigned long)(arg10); \ |
| _argvec[11] = (unsigned long)(arg11); \ |
| _argvec[12] = (unsigned long)(arg12); \ |
| __asm__ volatile( \ |
| VALGRIND_ALIGN_STACK \ |
| "sub sp, sp, #0x30 \n\t" \ |
| "ldr x0, [%1, #8] \n\t" \ |
| "ldr x1, [%1, #16] \n\t" \ |
| "ldr x2, [%1, #24] \n\t" \ |
| "ldr x3, [%1, #32] \n\t" \ |
| "ldr x4, [%1, #40] \n\t" \ |
| "ldr x5, [%1, #48] \n\t" \ |
| "ldr x6, [%1, #56] \n\t" \ |
| "ldr x7, [%1, #64] \n\t" \ |
| "ldr x8, [%1, #72] \n\t" \ |
| "str x8, [sp, #0] \n\t" \ |
| "ldr x8, [%1, #80] \n\t" \ |
| "str x8, [sp, #8] \n\t" \ |
| "ldr x8, [%1, #88] \n\t" \ |
| "str x8, [sp, #16] \n\t" \ |
| "ldr x8, [%1, #96] \n\t" \ |
| "str x8, [sp, #24] \n\t" \ |
| "ldr x8, [%1] \n\t" /* target->x8 */ \ |
| VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ |
| VALGRIND_RESTORE_STACK \ |
| "mov %0, x0" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #endif /* PLAT_arm64_linux */ |
| |
| /* ------------------------- s390x-linux ------------------------- */ |
| |
| #if defined(PLAT_s390x_linux) |
| |
/* Similar workaround to the one for amd64 (see above), but we use r11
   as the frame pointer and save the old r11 in r7.  r11 might be used
   for argvec, so we copy argvec into r1, since r1 is clobbered after
   the call anyway. */
| #if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM) |
| # define __FRAME_POINTER \ |
| ,"d"(__builtin_dwarf_cfa()) |
| # define VALGRIND_CFI_PROLOGUE \ |
| ".cfi_remember_state\n\t" \ |
| "lgr 1,%1\n\t" /* copy the argvec pointer in r1 */ \ |
| "lgr 7,11\n\t" \ |
| "lgr 11,%2\n\t" \ |
| ".cfi_def_cfa r11, 0\n\t" |
| # define VALGRIND_CFI_EPILOGUE \ |
| "lgr 11, 7\n\t" \ |
| ".cfi_restore_state\n\t" |
| #else |
| # define __FRAME_POINTER |
| # define VALGRIND_CFI_PROLOGUE \ |
| "lgr 1,%1\n\t" |
| # define VALGRIND_CFI_EPILOGUE |
| #endif |
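/* Roughly, the .cfi directives above keep the DWARF unwind info
   consistent across the hidden call: the CFA is temporarily
   redefined relative to r11, which is loaded with the caller's CFA
   (obtained via __builtin_dwarf_cfa), so a backtrace taken inside
   the callee can still unwind into the caller. */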
| |
| /* Nb: On s390 the stack pointer is properly aligned *at all times* |
| according to the s390 GCC maintainer. (The ABI specification is not |
| precise in this regard.) Therefore, VALGRIND_ALIGN_STACK and |
| VALGRIND_RESTORE_STACK are not defined here. */ |
| |
/* These regs are trashed by the hidden call.  Note that we overwrite
   r14 in s390_irgen_noredir (VEX/priv/guest_s390_irgen.c) to give the
   function a proper return address.  All the others are ABI-defined
   call-clobbered registers. */
| #if defined(__VX__) || defined(__S390_VX__) |
| #define __CALLER_SAVED_REGS "0", "1", "2", "3", "4", "5", "14", \ |
| "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", \ |
| "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", \ |
| "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", \ |
| "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" |
| #else |
| #define __CALLER_SAVED_REGS "0", "1", "2", "3", "4", "5", "14", \ |
| "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7" |
| #endif |
| |
/* Nb: Although r11 is modified in the asm snippets below (inside
   VALGRIND_CFI_PROLOGUE) it is not listed in the clobber section, for
   two reasons:
   (1) r11 is restored in VALGRIND_CFI_EPILOGUE, so effectively it is
       not modified;
   (2) when compiled with -O -fno-omit-frame-pointer, GCC complains
       that r11 cannot appear inside a clobber section.
*/
| |
| #define CALL_FN_W_v(lval, orig) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[1]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| __asm__ volatile( \ |
| VALGRIND_CFI_PROLOGUE \ |
| "aghi 15,-160\n\t" \ |
| "lg 1, 0(1)\n\t" /* target->r1 */ \ |
| VALGRIND_CALL_NOREDIR_R1 \ |
| "aghi 15,160\n\t" \ |
| VALGRIND_CFI_EPILOGUE \ |
| "lgr %0, 2\n\t" \ |
| : /*out*/ "=d" (_res) \ |
| : /*in*/ "d" (&_argvec[0]) __FRAME_POINTER \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
/* The call ABI passes the first five arguments in r2-r6; any
   further arguments go on the stack. */
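/* Sketch of the mapping used by the macros below: arg1..arg5 go in
   r2..r6, and any further arguments are copied ("mvc") into the
   outgoing parameter area, which starts 160 bytes above the lowered
   stack pointer, one 8-byte slot per argument.  Hence the frame is
   lowered by 160 + 8*n bytes when there are n stack arguments. */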
| #define CALL_FN_W_W(lval, orig, arg1) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[2]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)arg1; \ |
| __asm__ volatile( \ |
| VALGRIND_CFI_PROLOGUE \ |
| "aghi 15,-160\n\t" \ |
| "lg 2, 8(1)\n\t" \ |
| "lg 1, 0(1)\n\t" \ |
| VALGRIND_CALL_NOREDIR_R1 \ |
| "aghi 15,160\n\t" \ |
| VALGRIND_CFI_EPILOGUE \ |
| "lgr %0, 2\n\t" \ |
| : /*out*/ "=d" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WW(lval, orig, arg1, arg2) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)arg1; \ |
| _argvec[2] = (unsigned long)arg2; \ |
| __asm__ volatile( \ |
| VALGRIND_CFI_PROLOGUE \ |
| "aghi 15,-160\n\t" \ |
| "lg 2, 8(1)\n\t" \ |
| "lg 3,16(1)\n\t" \ |
| "lg 1, 0(1)\n\t" \ |
| VALGRIND_CALL_NOREDIR_R1 \ |
| "aghi 15,160\n\t" \ |
| VALGRIND_CFI_EPILOGUE \ |
| "lgr %0, 2\n\t" \ |
| : /*out*/ "=d" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WWW(lval, orig, arg1, arg2, arg3) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[4]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)arg1; \ |
| _argvec[2] = (unsigned long)arg2; \ |
| _argvec[3] = (unsigned long)arg3; \ |
| __asm__ volatile( \ |
| VALGRIND_CFI_PROLOGUE \ |
| "aghi 15,-160\n\t" \ |
| "lg 2, 8(1)\n\t" \ |
| "lg 3,16(1)\n\t" \ |
| "lg 4,24(1)\n\t" \ |
| "lg 1, 0(1)\n\t" \ |
| VALGRIND_CALL_NOREDIR_R1 \ |
| "aghi 15,160\n\t" \ |
| VALGRIND_CFI_EPILOGUE \ |
| "lgr %0, 2\n\t" \ |
| : /*out*/ "=d" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WWWW(lval, orig, arg1, arg2, arg3, arg4) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[5]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)arg1; \ |
| _argvec[2] = (unsigned long)arg2; \ |
| _argvec[3] = (unsigned long)arg3; \ |
| _argvec[4] = (unsigned long)arg4; \ |
| __asm__ volatile( \ |
| VALGRIND_CFI_PROLOGUE \ |
| "aghi 15,-160\n\t" \ |
| "lg 2, 8(1)\n\t" \ |
| "lg 3,16(1)\n\t" \ |
| "lg 4,24(1)\n\t" \ |
| "lg 5,32(1)\n\t" \ |
| "lg 1, 0(1)\n\t" \ |
| VALGRIND_CALL_NOREDIR_R1 \ |
| "aghi 15,160\n\t" \ |
| VALGRIND_CFI_EPILOGUE \ |
| "lgr %0, 2\n\t" \ |
| : /*out*/ "=d" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_5W(lval, orig, arg1, arg2, arg3, arg4, arg5) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[6]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)arg1; \ |
| _argvec[2] = (unsigned long)arg2; \ |
| _argvec[3] = (unsigned long)arg3; \ |
| _argvec[4] = (unsigned long)arg4; \ |
| _argvec[5] = (unsigned long)arg5; \ |
| __asm__ volatile( \ |
| VALGRIND_CFI_PROLOGUE \ |
| "aghi 15,-160\n\t" \ |
| "lg 2, 8(1)\n\t" \ |
| "lg 3,16(1)\n\t" \ |
| "lg 4,24(1)\n\t" \ |
| "lg 5,32(1)\n\t" \ |
| "lg 6,40(1)\n\t" \ |
| "lg 1, 0(1)\n\t" \ |
| VALGRIND_CALL_NOREDIR_R1 \ |
| "aghi 15,160\n\t" \ |
| VALGRIND_CFI_EPILOGUE \ |
| "lgr %0, 2\n\t" \ |
| : /*out*/ "=d" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_6W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ |
| arg6) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[7]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)arg1; \ |
| _argvec[2] = (unsigned long)arg2; \ |
| _argvec[3] = (unsigned long)arg3; \ |
| _argvec[4] = (unsigned long)arg4; \ |
| _argvec[5] = (unsigned long)arg5; \ |
| _argvec[6] = (unsigned long)arg6; \ |
| __asm__ volatile( \ |
| VALGRIND_CFI_PROLOGUE \ |
| "aghi 15,-168\n\t" \ |
| "lg 2, 8(1)\n\t" \ |
| "lg 3,16(1)\n\t" \ |
| "lg 4,24(1)\n\t" \ |
| "lg 5,32(1)\n\t" \ |
| "lg 6,40(1)\n\t" \ |
| "mvc 160(8,15), 48(1)\n\t" \ |
| "lg 1, 0(1)\n\t" \ |
| VALGRIND_CALL_NOREDIR_R1 \ |
| "aghi 15,168\n\t" \ |
| VALGRIND_CFI_EPILOGUE \ |
| "lgr %0, 2\n\t" \ |
| : /*out*/ "=d" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_7W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ |
| arg6, arg7) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[8]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)arg1; \ |
| _argvec[2] = (unsigned long)arg2; \ |
| _argvec[3] = (unsigned long)arg3; \ |
| _argvec[4] = (unsigned long)arg4; \ |
| _argvec[5] = (unsigned long)arg5; \ |
| _argvec[6] = (unsigned long)arg6; \ |
| _argvec[7] = (unsigned long)arg7; \ |
| __asm__ volatile( \ |
| VALGRIND_CFI_PROLOGUE \ |
| "aghi 15,-176\n\t" \ |
| "lg 2, 8(1)\n\t" \ |
| "lg 3,16(1)\n\t" \ |
| "lg 4,24(1)\n\t" \ |
| "lg 5,32(1)\n\t" \ |
| "lg 6,40(1)\n\t" \ |
| "mvc 160(8,15), 48(1)\n\t" \ |
| "mvc 168(8,15), 56(1)\n\t" \ |
| "lg 1, 0(1)\n\t" \ |
| VALGRIND_CALL_NOREDIR_R1 \ |
| "aghi 15,176\n\t" \ |
| VALGRIND_CFI_EPILOGUE \ |
| "lgr %0, 2\n\t" \ |
| : /*out*/ "=d" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_8W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ |
| arg6, arg7 ,arg8) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[9]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)arg1; \ |
| _argvec[2] = (unsigned long)arg2; \ |
| _argvec[3] = (unsigned long)arg3; \ |
| _argvec[4] = (unsigned long)arg4; \ |
| _argvec[5] = (unsigned long)arg5; \ |
| _argvec[6] = (unsigned long)arg6; \ |
| _argvec[7] = (unsigned long)arg7; \ |
| _argvec[8] = (unsigned long)arg8; \ |
| __asm__ volatile( \ |
| VALGRIND_CFI_PROLOGUE \ |
| "aghi 15,-184\n\t" \ |
| "lg 2, 8(1)\n\t" \ |
| "lg 3,16(1)\n\t" \ |
| "lg 4,24(1)\n\t" \ |
| "lg 5,32(1)\n\t" \ |
| "lg 6,40(1)\n\t" \ |
| "mvc 160(8,15), 48(1)\n\t" \ |
| "mvc 168(8,15), 56(1)\n\t" \ |
| "mvc 176(8,15), 64(1)\n\t" \ |
| "lg 1, 0(1)\n\t" \ |
| VALGRIND_CALL_NOREDIR_R1 \ |
| "aghi 15,184\n\t" \ |
| VALGRIND_CFI_EPILOGUE \ |
| "lgr %0, 2\n\t" \ |
| : /*out*/ "=d" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_9W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ |
| arg6, arg7 ,arg8, arg9) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[10]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)arg1; \ |
| _argvec[2] = (unsigned long)arg2; \ |
| _argvec[3] = (unsigned long)arg3; \ |
| _argvec[4] = (unsigned long)arg4; \ |
| _argvec[5] = (unsigned long)arg5; \ |
| _argvec[6] = (unsigned long)arg6; \ |
| _argvec[7] = (unsigned long)arg7; \ |
| _argvec[8] = (unsigned long)arg8; \ |
| _argvec[9] = (unsigned long)arg9; \ |
| __asm__ volatile( \ |
| VALGRIND_CFI_PROLOGUE \ |
| "aghi 15,-192\n\t" \ |
| "lg 2, 8(1)\n\t" \ |
| "lg 3,16(1)\n\t" \ |
| "lg 4,24(1)\n\t" \ |
| "lg 5,32(1)\n\t" \ |
| "lg 6,40(1)\n\t" \ |
| "mvc 160(8,15), 48(1)\n\t" \ |
| "mvc 168(8,15), 56(1)\n\t" \ |
| "mvc 176(8,15), 64(1)\n\t" \ |
| "mvc 184(8,15), 72(1)\n\t" \ |
| "lg 1, 0(1)\n\t" \ |
| VALGRIND_CALL_NOREDIR_R1 \ |
| "aghi 15,192\n\t" \ |
| VALGRIND_CFI_EPILOGUE \ |
| "lgr %0, 2\n\t" \ |
| : /*out*/ "=d" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_10W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ |
| arg6, arg7 ,arg8, arg9, arg10) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[11]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)arg1; \ |
| _argvec[2] = (unsigned long)arg2; \ |
| _argvec[3] = (unsigned long)arg3; \ |
| _argvec[4] = (unsigned long)arg4; \ |
| _argvec[5] = (unsigned long)arg5; \ |
| _argvec[6] = (unsigned long)arg6; \ |
| _argvec[7] = (unsigned long)arg7; \ |
| _argvec[8] = (unsigned long)arg8; \ |
| _argvec[9] = (unsigned long)arg9; \ |
| _argvec[10] = (unsigned long)arg10; \ |
| __asm__ volatile( \ |
| VALGRIND_CFI_PROLOGUE \ |
| "aghi 15,-200\n\t" \ |
| "lg 2, 8(1)\n\t" \ |
| "lg 3,16(1)\n\t" \ |
| "lg 4,24(1)\n\t" \ |
| "lg 5,32(1)\n\t" \ |
| "lg 6,40(1)\n\t" \ |
| "mvc 160(8,15), 48(1)\n\t" \ |
| "mvc 168(8,15), 56(1)\n\t" \ |
| "mvc 176(8,15), 64(1)\n\t" \ |
| "mvc 184(8,15), 72(1)\n\t" \ |
| "mvc 192(8,15), 80(1)\n\t" \ |
| "lg 1, 0(1)\n\t" \ |
| VALGRIND_CALL_NOREDIR_R1 \ |
| "aghi 15,200\n\t" \ |
| VALGRIND_CFI_EPILOGUE \ |
| "lgr %0, 2\n\t" \ |
| : /*out*/ "=d" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_11W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ |
| arg6, arg7 ,arg8, arg9, arg10, arg11) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[12]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)arg1; \ |
| _argvec[2] = (unsigned long)arg2; \ |
| _argvec[3] = (unsigned long)arg3; \ |
| _argvec[4] = (unsigned long)arg4; \ |
| _argvec[5] = (unsigned long)arg5; \ |
| _argvec[6] = (unsigned long)arg6; \ |
| _argvec[7] = (unsigned long)arg7; \ |
| _argvec[8] = (unsigned long)arg8; \ |
| _argvec[9] = (unsigned long)arg9; \ |
| _argvec[10] = (unsigned long)arg10; \ |
| _argvec[11] = (unsigned long)arg11; \ |
| __asm__ volatile( \ |
| VALGRIND_CFI_PROLOGUE \ |
| "aghi 15,-208\n\t" \ |
| "lg 2, 8(1)\n\t" \ |
| "lg 3,16(1)\n\t" \ |
| "lg 4,24(1)\n\t" \ |
| "lg 5,32(1)\n\t" \ |
| "lg 6,40(1)\n\t" \ |
| "mvc 160(8,15), 48(1)\n\t" \ |
| "mvc 168(8,15), 56(1)\n\t" \ |
| "mvc 176(8,15), 64(1)\n\t" \ |
| "mvc 184(8,15), 72(1)\n\t" \ |
| "mvc 192(8,15), 80(1)\n\t" \ |
| "mvc 200(8,15), 88(1)\n\t" \ |
| "lg 1, 0(1)\n\t" \ |
| VALGRIND_CALL_NOREDIR_R1 \ |
| "aghi 15,208\n\t" \ |
| VALGRIND_CFI_EPILOGUE \ |
| "lgr %0, 2\n\t" \ |
| : /*out*/ "=d" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_12W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ |
| arg6, arg7 ,arg8, arg9, arg10, arg11, arg12)\ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[13]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)arg1; \ |
| _argvec[2] = (unsigned long)arg2; \ |
| _argvec[3] = (unsigned long)arg3; \ |
| _argvec[4] = (unsigned long)arg4; \ |
| _argvec[5] = (unsigned long)arg5; \ |
| _argvec[6] = (unsigned long)arg6; \ |
| _argvec[7] = (unsigned long)arg7; \ |
| _argvec[8] = (unsigned long)arg8; \ |
| _argvec[9] = (unsigned long)arg9; \ |
| _argvec[10] = (unsigned long)arg10; \ |
| _argvec[11] = (unsigned long)arg11; \ |
| _argvec[12] = (unsigned long)arg12; \ |
| __asm__ volatile( \ |
| VALGRIND_CFI_PROLOGUE \ |
| "aghi 15,-216\n\t" \ |
| "lg 2, 8(1)\n\t" \ |
| "lg 3,16(1)\n\t" \ |
| "lg 4,24(1)\n\t" \ |
| "lg 5,32(1)\n\t" \ |
| "lg 6,40(1)\n\t" \ |
| "mvc 160(8,15), 48(1)\n\t" \ |
| "mvc 168(8,15), 56(1)\n\t" \ |
| "mvc 176(8,15), 64(1)\n\t" \ |
| "mvc 184(8,15), 72(1)\n\t" \ |
| "mvc 192(8,15), 80(1)\n\t" \ |
| "mvc 200(8,15), 88(1)\n\t" \ |
| "mvc 208(8,15), 96(1)\n\t" \ |
| "lg 1, 0(1)\n\t" \ |
| VALGRIND_CALL_NOREDIR_R1 \ |
| "aghi 15,216\n\t" \ |
| VALGRIND_CFI_EPILOGUE \ |
| "lgr %0, 2\n\t" \ |
| : /*out*/ "=d" (_res) \ |
| : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ |
| : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| |
| #endif /* PLAT_s390x_linux */ |
| |
| /* ------------------------- mips32-linux ----------------------- */ |
| |
| #if defined(PLAT_mips32_linux) |
| |
| /* These regs are trashed by the hidden call. */ |
| #define __CALLER_SAVED_REGS "$2", "$3", "$4", "$5", "$6", \ |
| "$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ |
| "$25", "$31" |
| |
| /* These CALL_FN_ macros assume that on mips-linux, sizeof(unsigned |
| long) == 4. */ |
| |
| #define CALL_FN_W_v(lval, orig) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[1]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| __asm__ volatile( \ |
| "subu $29, $29, 8 \n\t" \ |
| "sw $28, 0($29) \n\t" \ |
| "sw $31, 4($29) \n\t" \ |
| "subu $29, $29, 16 \n\t" \ |
| "lw $25, 0(%1) \n\t" /* target->t9 */ \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "addu $29, $29, 16\n\t" \ |
| "lw $28, 0($29) \n\t" \ |
| "lw $31, 4($29) \n\t" \ |
| "addu $29, $29, 8 \n\t" \ |
| "move %0, $2\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_W(lval, orig, arg1) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[2]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| __asm__ volatile( \ |
| "subu $29, $29, 8 \n\t" \ |
| "sw $28, 0($29) \n\t" \ |
| "sw $31, 4($29) \n\t" \ |
| "subu $29, $29, 16 \n\t" \ |
| "lw $4, 4(%1) \n\t" /* arg1*/ \ |
| "lw $25, 0(%1) \n\t" /* target->t9 */ \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "addu $29, $29, 16 \n\t" \ |
| "lw $28, 0($29) \n\t" \ |
| "lw $31, 4($29) \n\t" \ |
| "addu $29, $29, 8 \n\t" \ |
| "move %0, $2\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WW(lval, orig, arg1,arg2) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| __asm__ volatile( \ |
| "subu $29, $29, 8 \n\t" \ |
| "sw $28, 0($29) \n\t" \ |
| "sw $31, 4($29) \n\t" \ |
| "subu $29, $29, 16 \n\t" \ |
| "lw $4, 4(%1) \n\t" \ |
| "lw $5, 8(%1) \n\t" \ |
| "lw $25, 0(%1) \n\t" /* target->t9 */ \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "addu $29, $29, 16 \n\t" \ |
| "lw $28, 0($29) \n\t" \ |
| "lw $31, 4($29) \n\t" \ |
| "addu $29, $29, 8 \n\t" \ |
| "move %0, $2\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[4]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| __asm__ volatile( \ |
| "subu $29, $29, 8 \n\t" \ |
| "sw $28, 0($29) \n\t" \ |
| "sw $31, 4($29) \n\t" \ |
| "subu $29, $29, 16 \n\t" \ |
| "lw $4, 4(%1) \n\t" \ |
| "lw $5, 8(%1) \n\t" \ |
| "lw $6, 12(%1) \n\t" \ |
| "lw $25, 0(%1) \n\t" /* target->t9 */ \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "addu $29, $29, 16 \n\t" \ |
| "lw $28, 0($29) \n\t" \ |
| "lw $31, 4($29) \n\t" \ |
| "addu $29, $29, 8 \n\t" \ |
| "move %0, $2\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[5]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| __asm__ volatile( \ |
| "subu $29, $29, 8 \n\t" \ |
| "sw $28, 0($29) \n\t" \ |
| "sw $31, 4($29) \n\t" \ |
| "subu $29, $29, 16 \n\t" \ |
| "lw $4, 4(%1) \n\t" \ |
| "lw $5, 8(%1) \n\t" \ |
| "lw $6, 12(%1) \n\t" \ |
| "lw $7, 16(%1) \n\t" \ |
| "lw $25, 0(%1) \n\t" /* target->t9 */ \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "addu $29, $29, 16 \n\t" \ |
| "lw $28, 0($29) \n\t" \ |
| "lw $31, 4($29) \n\t" \ |
| "addu $29, $29, 8 \n\t" \ |
| "move %0, $2\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[6]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| __asm__ volatile( \ |
| "subu $29, $29, 8 \n\t" \ |
| "sw $28, 0($29) \n\t" \ |
| "sw $31, 4($29) \n\t" \ |
| "lw $4, 20(%1) \n\t" \ |
| "subu $29, $29, 24\n\t" \ |
| "sw $4, 16($29) \n\t" \ |
| "lw $4, 4(%1) \n\t" \ |
| "lw $5, 8(%1) \n\t" \ |
| "lw $6, 12(%1) \n\t" \ |
| "lw $7, 16(%1) \n\t" \ |
| "lw $25, 0(%1) \n\t" /* target->t9 */ \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "addu $29, $29, 24 \n\t" \ |
| "lw $28, 0($29) \n\t" \ |
| "lw $31, 4($29) \n\t" \ |
| "addu $29, $29, 8 \n\t" \ |
| "move %0, $2\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[7]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| __asm__ volatile( \ |
| "subu $29, $29, 8 \n\t" \ |
| "sw $28, 0($29) \n\t" \ |
| "sw $31, 4($29) \n\t" \ |
| "lw $4, 20(%1) \n\t" \ |
| "subu $29, $29, 32\n\t" \ |
| "sw $4, 16($29) \n\t" \ |
| "lw $4, 24(%1) \n\t" \ |
| "nop\n\t" \ |
| "sw $4, 20($29) \n\t" \ |
| "lw $4, 4(%1) \n\t" \ |
| "lw $5, 8(%1) \n\t" \ |
| "lw $6, 12(%1) \n\t" \ |
| "lw $7, 16(%1) \n\t" \ |
| "lw $25, 0(%1) \n\t" /* target->t9 */ \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "addu $29, $29, 32 \n\t" \ |
| "lw $28, 0($29) \n\t" \ |
| "lw $31, 4($29) \n\t" \ |
| "addu $29, $29, 8 \n\t" \ |
| "move %0, $2\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[8]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| __asm__ volatile( \ |
| "subu $29, $29, 8 \n\t" \ |
| "sw $28, 0($29) \n\t" \ |
| "sw $31, 4($29) \n\t" \ |
| "lw $4, 20(%1) \n\t" \ |
| "subu $29, $29, 32\n\t" \ |
| "sw $4, 16($29) \n\t" \ |
| "lw $4, 24(%1) \n\t" \ |
| "sw $4, 20($29) \n\t" \ |
| "lw $4, 28(%1) \n\t" \ |
| "sw $4, 24($29) \n\t" \ |
| "lw $4, 4(%1) \n\t" \ |
| "lw $5, 8(%1) \n\t" \ |
| "lw $6, 12(%1) \n\t" \ |
| "lw $7, 16(%1) \n\t" \ |
| "lw $25, 0(%1) \n\t" /* target->t9 */ \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "addu $29, $29, 32 \n\t" \ |
| "lw $28, 0($29) \n\t" \ |
| "lw $31, 4($29) \n\t" \ |
| "addu $29, $29, 8 \n\t" \ |
| "move %0, $2\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[9]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| __asm__ volatile( \ |
| "subu $29, $29, 8 \n\t" \ |
| "sw $28, 0($29) \n\t" \ |
| "sw $31, 4($29) \n\t" \ |
| "lw $4, 20(%1) \n\t" \ |
| "subu $29, $29, 40\n\t" \ |
| "sw $4, 16($29) \n\t" \ |
| "lw $4, 24(%1) \n\t" \ |
| "sw $4, 20($29) \n\t" \ |
| "lw $4, 28(%1) \n\t" \ |
| "sw $4, 24($29) \n\t" \ |
| "lw $4, 32(%1) \n\t" \ |
| "sw $4, 28($29) \n\t" \ |
| "lw $4, 4(%1) \n\t" \ |
| "lw $5, 8(%1) \n\t" \ |
| "lw $6, 12(%1) \n\t" \ |
| "lw $7, 16(%1) \n\t" \ |
| "lw $25, 0(%1) \n\t" /* target->t9 */ \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "addu $29, $29, 40 \n\t" \ |
| "lw $28, 0($29) \n\t" \ |
| "lw $31, 4($29) \n\t" \ |
| "addu $29, $29, 8 \n\t" \ |
| "move %0, $2\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[10]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| _argvec[9] = (unsigned long)(arg9); \ |
| __asm__ volatile( \ |
| "subu $29, $29, 8 \n\t" \ |
| "sw $28, 0($29) \n\t" \ |
| "sw $31, 4($29) \n\t" \ |
| "lw $4, 20(%1) \n\t" \ |
| "subu $29, $29, 40\n\t" \ |
| "sw $4, 16($29) \n\t" \ |
| "lw $4, 24(%1) \n\t" \ |
| "sw $4, 20($29) \n\t" \ |
| "lw $4, 28(%1) \n\t" \ |
| "sw $4, 24($29) \n\t" \ |
| "lw $4, 32(%1) \n\t" \ |
| "sw $4, 28($29) \n\t" \ |
| "lw $4, 36(%1) \n\t" \ |
| "sw $4, 32($29) \n\t" \ |
| "lw $4, 4(%1) \n\t" \ |
| "lw $5, 8(%1) \n\t" \ |
| "lw $6, 12(%1) \n\t" \ |
| "lw $7, 16(%1) \n\t" \ |
| "lw $25, 0(%1) \n\t" /* target->t9 */ \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "addu $29, $29, 40 \n\t" \ |
| "lw $28, 0($29) \n\t" \ |
| "lw $31, 4($29) \n\t" \ |
| "addu $29, $29, 8 \n\t" \ |
| "move %0, $2\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9,arg10) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[11]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| _argvec[9] = (unsigned long)(arg9); \ |
| _argvec[10] = (unsigned long)(arg10); \ |
| __asm__ volatile( \ |
| "subu $29, $29, 8 \n\t" \ |
| "sw $28, 0($29) \n\t" \ |
| "sw $31, 4($29) \n\t" \ |
| "lw $4, 20(%1) \n\t" \ |
| "subu $29, $29, 48\n\t" \ |
| "sw $4, 16($29) \n\t" \ |
| "lw $4, 24(%1) \n\t" \ |
| "sw $4, 20($29) \n\t" \ |
| "lw $4, 28(%1) \n\t" \ |
| "sw $4, 24($29) \n\t" \ |
| "lw $4, 32(%1) \n\t" \ |
| "sw $4, 28($29) \n\t" \ |
| "lw $4, 36(%1) \n\t" \ |
| "sw $4, 32($29) \n\t" \ |
| "lw $4, 40(%1) \n\t" \ |
| "sw $4, 36($29) \n\t" \ |
| "lw $4, 4(%1) \n\t" \ |
| "lw $5, 8(%1) \n\t" \ |
| "lw $6, 12(%1) \n\t" \ |
| "lw $7, 16(%1) \n\t" \ |
| "lw $25, 0(%1) \n\t" /* target->t9 */ \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "addu $29, $29, 48 \n\t" \ |
| "lw $28, 0($29) \n\t" \ |
| "lw $31, 4($29) \n\t" \ |
| "addu $29, $29, 8 \n\t" \ |
| "move %0, $2\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ |
| arg6,arg7,arg8,arg9,arg10, \ |
| arg11) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[12]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| _argvec[9] = (unsigned long)(arg9); \ |
| _argvec[10] = (unsigned long)(arg10); \ |
| _argvec[11] = (unsigned long)(arg11); \ |
| __asm__ volatile( \ |
| "subu $29, $29, 8 \n\t" \ |
| "sw $28, 0($29) \n\t" \ |
| "sw $31, 4($29) \n\t" \ |
| "lw $4, 20(%1) \n\t" \ |
| "subu $29, $29, 48\n\t" \ |
| "sw $4, 16($29) \n\t" \ |
| "lw $4, 24(%1) \n\t" \ |
| "sw $4, 20($29) \n\t" \ |
| "lw $4, 28(%1) \n\t" \ |
| "sw $4, 24($29) \n\t" \ |
| "lw $4, 32(%1) \n\t" \ |
| "sw $4, 28($29) \n\t" \ |
| "lw $4, 36(%1) \n\t" \ |
| "sw $4, 32($29) \n\t" \ |
| "lw $4, 40(%1) \n\t" \ |
| "sw $4, 36($29) \n\t" \ |
| "lw $4, 44(%1) \n\t" \ |
| "sw $4, 40($29) \n\t" \ |
| "lw $4, 4(%1) \n\t" \ |
| "lw $5, 8(%1) \n\t" \ |
| "lw $6, 12(%1) \n\t" \ |
| "lw $7, 16(%1) \n\t" \ |
| "lw $25, 0(%1) \n\t" /* target->t9 */ \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "addu $29, $29, 48 \n\t" \ |
| "lw $28, 0($29) \n\t" \ |
| "lw $31, 4($29) \n\t" \ |
| "addu $29, $29, 8 \n\t" \ |
| "move %0, $2\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ |
| arg6,arg7,arg8,arg9,arg10, \ |
| arg11,arg12) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[13]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| _argvec[9] = (unsigned long)(arg9); \ |
| _argvec[10] = (unsigned long)(arg10); \ |
| _argvec[11] = (unsigned long)(arg11); \ |
| _argvec[12] = (unsigned long)(arg12); \ |
| __asm__ volatile( \ |
| "subu $29, $29, 8 \n\t" \ |
| "sw $28, 0($29) \n\t" \ |
| "sw $31, 4($29) \n\t" \ |
| "lw $4, 20(%1) \n\t" \ |
| "subu $29, $29, 56\n\t" \ |
| "sw $4, 16($29) \n\t" \ |
| "lw $4, 24(%1) \n\t" \ |
| "sw $4, 20($29) \n\t" \ |
| "lw $4, 28(%1) \n\t" \ |
| "sw $4, 24($29) \n\t" \ |
| "lw $4, 32(%1) \n\t" \ |
| "sw $4, 28($29) \n\t" \ |
| "lw $4, 36(%1) \n\t" \ |
| "sw $4, 32($29) \n\t" \ |
| "lw $4, 40(%1) \n\t" \ |
| "sw $4, 36($29) \n\t" \ |
| "lw $4, 44(%1) \n\t" \ |
| "sw $4, 40($29) \n\t" \ |
| "lw $4, 48(%1) \n\t" \ |
| "sw $4, 44($29) \n\t" \ |
| "lw $4, 4(%1) \n\t" \ |
| "lw $5, 8(%1) \n\t" \ |
| "lw $6, 12(%1) \n\t" \ |
| "lw $7, 16(%1) \n\t" \ |
| "lw $25, 0(%1) \n\t" /* target->t9 */ \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "addu $29, $29, 56 \n\t" \ |
| "lw $28, 0($29) \n\t" \ |
| "lw $31, 4($29) \n\t" \ |
| "addu $29, $29, 8 \n\t" \ |
| "move %0, $2\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #endif /* PLAT_mips32_linux */ |
| |
| /* ------------------------- nanomips-linux -------------------- */ |
| |
| #if defined(PLAT_nanomips_linux) |
| |
| /* These regs are trashed by the hidden call. */ |
| #define __CALLER_SAVED_REGS "$t4", "$t5", "$a0", "$a1", "$a2", \ |
| "$a3", "$a4", "$a5", "$a6", "$a7", "$t0", "$t1", "$t2", "$t3", \ |
| "$t8","$t9", "$at" |
| |
| /* These CALL_FN_ macros assume that on nanomips-linux, sizeof(unsigned |
|    long) == 4. */ |
| |
| #define CALL_FN_W_v(lval, orig) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[1]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| __asm__ volatile( \ |
| "lw $t9, 0(%1)\n\t" \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "move %0, $a0\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_W(lval, orig, arg1) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[2]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| __asm__ volatile( \ |
| "lw $t9, 0(%1)\n\t" \ |
| "lw $a0, 4(%1)\n\t" \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "move %0, $a0\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WW(lval, orig, arg1,arg2) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[3]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| __asm__ volatile( \ |
| "lw $t9, 0(%1)\n\t" \ |
| "lw $a0, 4(%1)\n\t" \ |
| "lw $a1, 8(%1)\n\t" \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "move %0, $a0\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[4]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| __asm__ volatile( \ |
| "lw $t9, 0(%1)\n\t" \ |
| "lw $a0, 4(%1)\n\t" \ |
| "lw $a1, 8(%1)\n\t" \ |
| "lw $a2,12(%1)\n\t" \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "move %0, $a0\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[5]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| __asm__ volatile( \ |
| "lw $t9, 0(%1)\n\t" \ |
| "lw $a0, 4(%1)\n\t" \ |
| "lw $a1, 8(%1)\n\t" \ |
| "lw $a2,12(%1)\n\t" \ |
| "lw $a3,16(%1)\n\t" \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "move %0, $a0\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[6]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| __asm__ volatile( \ |
| "lw $t9, 0(%1)\n\t" \ |
| "lw $a0, 4(%1)\n\t" \ |
| "lw $a1, 8(%1)\n\t" \ |
| "lw $a2,12(%1)\n\t" \ |
| "lw $a3,16(%1)\n\t" \ |
| "lw $a4,20(%1)\n\t" \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "move %0, $a0\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[7]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| __asm__ volatile( \ |
| "lw $t9, 0(%1)\n\t" \ |
| "lw $a0, 4(%1)\n\t" \ |
| "lw $a1, 8(%1)\n\t" \ |
| "lw $a2,12(%1)\n\t" \ |
| "lw $a3,16(%1)\n\t" \ |
| "lw $a4,20(%1)\n\t" \ |
| "lw $a5,24(%1)\n\t" \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "move %0, $a0\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[8]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| __asm__ volatile( \ |
| "lw $t9, 0(%1)\n\t" \ |
| "lw $a0, 4(%1)\n\t" \ |
| "lw $a1, 8(%1)\n\t" \ |
| "lw $a2,12(%1)\n\t" \ |
| "lw $a3,16(%1)\n\t" \ |
| "lw $a4,20(%1)\n\t" \ |
| "lw $a5,24(%1)\n\t" \ |
| "lw $a6,28(%1)\n\t" \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "move %0, $a0\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[9]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| __asm__ volatile( \ |
| "lw $t9, 0(%1)\n\t" \ |
| "lw $a0, 4(%1)\n\t" \ |
| "lw $a1, 8(%1)\n\t" \ |
| "lw $a2,12(%1)\n\t" \ |
| "lw $a3,16(%1)\n\t" \ |
| "lw $a4,20(%1)\n\t" \ |
| "lw $a5,24(%1)\n\t" \ |
| "lw $a6,28(%1)\n\t" \ |
| "lw $a7,32(%1)\n\t" \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "move %0, $a0\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[10]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| _argvec[9] = (unsigned long)(arg9); \ |
| __asm__ volatile( \ |
| "addiu $sp, $sp, -16 \n\t" \ |
| "lw $t9,36(%1) \n\t" \ |
| "sw $t9, 0($sp) \n\t" \ |
| "lw $t9, 0(%1) \n\t" \ |
| "lw $a0, 4(%1) \n\t" \ |
| "lw $a1, 8(%1) \n\t" \ |
| "lw $a2,12(%1) \n\t" \ |
| "lw $a3,16(%1) \n\t" \ |
| "lw $a4,20(%1) \n\t" \ |
| "lw $a5,24(%1) \n\t" \ |
| "lw $a6,28(%1) \n\t" \ |
| "lw $a7,32(%1) \n\t" \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "move %0, $a0 \n\t" \ |
| "addiu $sp, $sp, 16 \n\t" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9,arg10) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[11]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| _argvec[9] = (unsigned long)(arg9); \ |
| _argvec[10] = (unsigned long)(arg10); \ |
| __asm__ volatile( \ |
| "addiu $sp, $sp, -16 \n\t" \ |
| "lw $t9,36(%1) \n\t" \ |
| "sw $t9, 0($sp) \n\t" \ |
| "lw $t9,40(%1) \n\t" \ |
| "sw $t9, 4($sp) \n\t" \ |
| "lw $t9, 0(%1) \n\t" \ |
| "lw $a0, 4(%1) \n\t" \ |
| "lw $a1, 8(%1) \n\t" \ |
| "lw $a2,12(%1) \n\t" \ |
| "lw $a3,16(%1) \n\t" \ |
| "lw $a4,20(%1) \n\t" \ |
| "lw $a5,24(%1) \n\t" \ |
| "lw $a6,28(%1) \n\t" \ |
| "lw $a7,32(%1) \n\t" \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "move %0, $a0 \n\t" \ |
| "addiu $sp, $sp, 16 \n\t" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ |
| arg6,arg7,arg8,arg9,arg10, \ |
| arg11) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[12]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| _argvec[9] = (unsigned long)(arg9); \ |
| _argvec[10] = (unsigned long)(arg10); \ |
| _argvec[11] = (unsigned long)(arg11); \ |
| __asm__ volatile( \ |
| "addiu $sp, $sp, -16 \n\t" \ |
| "lw $t9,36(%1) \n\t" \ |
| "sw $t9, 0($sp) \n\t" \ |
| "lw $t9,40(%1) \n\t" \ |
| "sw $t9, 4($sp) \n\t" \ |
| "lw $t9,44(%1) \n\t" \ |
| "sw $t9, 8($sp) \n\t" \ |
| "lw $t9, 0(%1) \n\t" \ |
| "lw $a0, 4(%1) \n\t" \ |
| "lw $a1, 8(%1) \n\t" \ |
| "lw $a2,12(%1) \n\t" \ |
| "lw $a3,16(%1) \n\t" \ |
| "lw $a4,20(%1) \n\t" \ |
| "lw $a5,24(%1) \n\t" \ |
| "lw $a6,28(%1) \n\t" \ |
| "lw $a7,32(%1) \n\t" \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "move %0, $a0 \n\t" \ |
| "addiu $sp, $sp, 16 \n\t" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ |
| arg6,arg7,arg8,arg9,arg10, \ |
| arg11,arg12) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long _argvec[13]; \ |
| volatile unsigned long _res; \ |
| _argvec[0] = (unsigned long)_orig.nraddr; \ |
| _argvec[1] = (unsigned long)(arg1); \ |
| _argvec[2] = (unsigned long)(arg2); \ |
| _argvec[3] = (unsigned long)(arg3); \ |
| _argvec[4] = (unsigned long)(arg4); \ |
| _argvec[5] = (unsigned long)(arg5); \ |
| _argvec[6] = (unsigned long)(arg6); \ |
| _argvec[7] = (unsigned long)(arg7); \ |
| _argvec[8] = (unsigned long)(arg8); \ |
| _argvec[9] = (unsigned long)(arg9); \ |
| _argvec[10] = (unsigned long)(arg10); \ |
| _argvec[11] = (unsigned long)(arg11); \ |
| _argvec[12] = (unsigned long)(arg12); \ |
| __asm__ volatile( \ |
| "addiu $sp, $sp, -16 \n\t" \ |
| "lw $t9,36(%1) \n\t" \ |
| "sw $t9, 0($sp) \n\t" \ |
| "lw $t9,40(%1) \n\t" \ |
| "sw $t9, 4($sp) \n\t" \ |
| "lw $t9,44(%1) \n\t" \ |
| "sw $t9, 8($sp) \n\t" \ |
| "lw $t9,48(%1) \n\t" \ |
| "sw $t9,12($sp) \n\t" \ |
| "lw $t9, 0(%1) \n\t" \ |
| "lw $a0, 4(%1) \n\t" \ |
| "lw $a1, 8(%1) \n\t" \ |
| "lw $a2,12(%1) \n\t" \ |
| "lw $a3,16(%1) \n\t" \ |
| "lw $a4,20(%1) \n\t" \ |
| "lw $a5,24(%1) \n\t" \ |
| "lw $a6,28(%1) \n\t" \ |
| "lw $a7,32(%1) \n\t" \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "move %0, $a0 \n\t" \ |
| "addiu $sp, $sp, 16 \n\t" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) _res; \ |
| } while (0) |
| |
| #endif /* PLAT_nanomips_linux */ |
| |
| /* ------------------------- mips64-linux ------------------------- */ |
| |
| #if defined(PLAT_mips64_linux) |
| |
| /* These regs are trashed by the hidden call. */ |
| #define __CALLER_SAVED_REGS "$2", "$3", "$4", "$5", "$6", \ |
| "$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ |
| "$25", "$31" |
| |
| /* These CALL_FN_ macros assume that on mips64-linux, |
| sizeof(long long) == 8. */ |
| |
| /* Sign-extend a possibly-32-bit value so it occupies a full 64-bit |
|    register slot.  Note the argument is parenthesised so that |
|    expression arguments cast correctly. */ |
| #define MIPS64_LONG2REG_CAST(x) ((long long)(long)(x)) |
| |
| #define CALL_FN_W_v(lval, orig) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long long _argvec[1]; \ |
| volatile unsigned long long _res; \ |
| _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ |
| __asm__ volatile( \ |
| "ld $25, 0(%1)\n\t" /* target->t9 */ \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "move %0, $2\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "0" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) (long)_res; \ |
| } while (0) |
| |
| #define CALL_FN_W_W(lval, orig, arg1) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long long _argvec[2]; \ |
| volatile unsigned long long _res; \ |
| _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ |
| _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ |
| __asm__ volatile( \ |
| "ld $4, 8(%1)\n\t" /* arg1*/ \ |
| "ld $25, 0(%1)\n\t" /* target->t9 */ \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "move %0, $2\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) (long)_res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WW(lval, orig, arg1,arg2) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long long _argvec[3]; \ |
| volatile unsigned long long _res; \ |
|       _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr);          \ |
| _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ |
| _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ |
| __asm__ volatile( \ |
| "ld $4, 8(%1)\n\t" \ |
| "ld $5, 16(%1)\n\t" \ |
| "ld $25, 0(%1)\n\t" /* target->t9 */ \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "move %0, $2\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) (long)_res; \ |
| } while (0) |
| |
| |
| #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long long _argvec[4]; \ |
| volatile unsigned long long _res; \ |
|       _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr);          \ |
| _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ |
| _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ |
| _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ |
| __asm__ volatile( \ |
| "ld $4, 8(%1)\n\t" \ |
| "ld $5, 16(%1)\n\t" \ |
| "ld $6, 24(%1)\n\t" \ |
| "ld $25, 0(%1)\n\t" /* target->t9 */ \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "move %0, $2\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) (long)_res; \ |
| } while (0) |
| |
| #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long long _argvec[5]; \ |
| volatile unsigned long long _res; \ |
| _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ |
| _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ |
| _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ |
| _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ |
| _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ |
| __asm__ volatile( \ |
| "ld $4, 8(%1)\n\t" \ |
| "ld $5, 16(%1)\n\t" \ |
| "ld $6, 24(%1)\n\t" \ |
| "ld $7, 32(%1)\n\t" \ |
| "ld $25, 0(%1)\n\t" /* target->t9 */ \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "move %0, $2\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) (long)_res; \ |
| } while (0) |
| |
| #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long long _argvec[6]; \ |
| volatile unsigned long long _res; \ |
| _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ |
| _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ |
| _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ |
| _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ |
| _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ |
| _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ |
| __asm__ volatile( \ |
| "ld $4, 8(%1)\n\t" \ |
| "ld $5, 16(%1)\n\t" \ |
| "ld $6, 24(%1)\n\t" \ |
| "ld $7, 32(%1)\n\t" \ |
| "ld $8, 40(%1)\n\t" \ |
| "ld $25, 0(%1)\n\t" /* target->t9 */ \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "move %0, $2\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) (long)_res; \ |
| } while (0) |
| |
| #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long long _argvec[7]; \ |
| volatile unsigned long long _res; \ |
| _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ |
| _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ |
| _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ |
| _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ |
| _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ |
| _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ |
| _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ |
| __asm__ volatile( \ |
| "ld $4, 8(%1)\n\t" \ |
| "ld $5, 16(%1)\n\t" \ |
| "ld $6, 24(%1)\n\t" \ |
| "ld $7, 32(%1)\n\t" \ |
| "ld $8, 40(%1)\n\t" \ |
| "ld $9, 48(%1)\n\t" \ |
| "ld $25, 0(%1)\n\t" /* target->t9 */ \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "move %0, $2\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) (long)_res; \ |
| } while (0) |
| |
| #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long long _argvec[8]; \ |
| volatile unsigned long long _res; \ |
| _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ |
| _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ |
| _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ |
| _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ |
| _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ |
| _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ |
| _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ |
| _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ |
| __asm__ volatile( \ |
| "ld $4, 8(%1)\n\t" \ |
| "ld $5, 16(%1)\n\t" \ |
| "ld $6, 24(%1)\n\t" \ |
| "ld $7, 32(%1)\n\t" \ |
| "ld $8, 40(%1)\n\t" \ |
| "ld $9, 48(%1)\n\t" \ |
| "ld $10, 56(%1)\n\t" \ |
| "ld $25, 0(%1) \n\t" /* target->t9 */ \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "move %0, $2\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) (long)_res; \ |
| } while (0) |
| |
| #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long long _argvec[9]; \ |
| volatile unsigned long long _res; \ |
| _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ |
| _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ |
| _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ |
| _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ |
| _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ |
| _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ |
| _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ |
| _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ |
| _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \ |
| __asm__ volatile( \ |
| "ld $4, 8(%1)\n\t" \ |
| "ld $5, 16(%1)\n\t" \ |
| "ld $6, 24(%1)\n\t" \ |
| "ld $7, 32(%1)\n\t" \ |
| "ld $8, 40(%1)\n\t" \ |
| "ld $9, 48(%1)\n\t" \ |
| "ld $10, 56(%1)\n\t" \ |
| "ld $11, 64(%1)\n\t" \ |
| "ld $25, 0(%1) \n\t" /* target->t9 */ \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "move %0, $2\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) (long)_res; \ |
| } while (0) |
| |
| #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long long _argvec[10]; \ |
| volatile unsigned long long _res; \ |
| _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ |
| _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ |
| _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ |
| _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ |
| _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ |
| _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ |
| _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ |
| _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ |
| _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \ |
| _argvec[9] = MIPS64_LONG2REG_CAST(arg9); \ |
| __asm__ volatile( \ |
| "dsubu $29, $29, 8\n\t" \ |
| "ld $4, 72(%1)\n\t" \ |
| "sd $4, 0($29)\n\t" \ |
| "ld $4, 8(%1)\n\t" \ |
| "ld $5, 16(%1)\n\t" \ |
| "ld $6, 24(%1)\n\t" \ |
| "ld $7, 32(%1)\n\t" \ |
| "ld $8, 40(%1)\n\t" \ |
| "ld $9, 48(%1)\n\t" \ |
| "ld $10, 56(%1)\n\t" \ |
| "ld $11, 64(%1)\n\t" \ |
| "ld $25, 0(%1)\n\t" /* target->t9 */ \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "daddu $29, $29, 8\n\t" \ |
| "move %0, $2\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) (long)_res; \ |
| } while (0) |
| |
| #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ |
| arg7,arg8,arg9,arg10) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long long _argvec[11]; \ |
| volatile unsigned long long _res; \ |
| _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ |
| _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ |
| _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ |
| _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ |
| _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ |
| _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ |
| _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ |
| _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ |
| _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \ |
| _argvec[9] = MIPS64_LONG2REG_CAST(arg9); \ |
| _argvec[10] = MIPS64_LONG2REG_CAST(arg10); \ |
| __asm__ volatile( \ |
| "dsubu $29, $29, 16\n\t" \ |
| "ld $4, 72(%1)\n\t" \ |
| "sd $4, 0($29)\n\t" \ |
| "ld $4, 80(%1)\n\t" \ |
| "sd $4, 8($29)\n\t" \ |
| "ld $4, 8(%1)\n\t" \ |
| "ld $5, 16(%1)\n\t" \ |
| "ld $6, 24(%1)\n\t" \ |
| "ld $7, 32(%1)\n\t" \ |
| "ld $8, 40(%1)\n\t" \ |
| "ld $9, 48(%1)\n\t" \ |
| "ld $10, 56(%1)\n\t" \ |
| "ld $11, 64(%1)\n\t" \ |
| "ld $25, 0(%1)\n\t" /* target->t9 */ \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "daddu $29, $29, 16\n\t" \ |
| "move %0, $2\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) (long)_res; \ |
| } while (0) |
| |
| #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ |
| arg6,arg7,arg8,arg9,arg10, \ |
| arg11) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long long _argvec[12]; \ |
| volatile unsigned long long _res; \ |
| _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ |
| _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ |
| _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ |
| _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ |
| _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ |
| _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ |
| _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ |
| _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ |
| _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \ |
| _argvec[9] = MIPS64_LONG2REG_CAST(arg9); \ |
| _argvec[10] = MIPS64_LONG2REG_CAST(arg10); \ |
| _argvec[11] = MIPS64_LONG2REG_CAST(arg11); \ |
| __asm__ volatile( \ |
| "dsubu $29, $29, 24\n\t" \ |
| "ld $4, 72(%1)\n\t" \ |
| "sd $4, 0($29)\n\t" \ |
| "ld $4, 80(%1)\n\t" \ |
| "sd $4, 8($29)\n\t" \ |
| "ld $4, 88(%1)\n\t" \ |
| "sd $4, 16($29)\n\t" \ |
| "ld $4, 8(%1)\n\t" \ |
| "ld $5, 16(%1)\n\t" \ |
| "ld $6, 24(%1)\n\t" \ |
| "ld $7, 32(%1)\n\t" \ |
| "ld $8, 40(%1)\n\t" \ |
| "ld $9, 48(%1)\n\t" \ |
| "ld $10, 56(%1)\n\t" \ |
| "ld $11, 64(%1)\n\t" \ |
| "ld $25, 0(%1)\n\t" /* target->t9 */ \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "daddu $29, $29, 24\n\t" \ |
| "move %0, $2\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) (long)_res; \ |
| } while (0) |
| |
| #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ |
| arg6,arg7,arg8,arg9,arg10, \ |
| arg11,arg12) \ |
| do { \ |
| volatile OrigFn _orig = (orig); \ |
| volatile unsigned long long _argvec[13]; \ |
| volatile unsigned long long _res; \ |
| _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ |
| _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ |
| _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ |
| _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ |
| _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ |
| _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ |
| _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ |
| _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ |
| _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \ |
| _argvec[9] = MIPS64_LONG2REG_CAST(arg9); \ |
| _argvec[10] = MIPS64_LONG2REG_CAST(arg10); \ |
| _argvec[11] = MIPS64_LONG2REG_CAST(arg11); \ |
| _argvec[12] = MIPS64_LONG2REG_CAST(arg12); \ |
| __asm__ volatile( \ |
| "dsubu $29, $29, 32\n\t" \ |
| "ld $4, 72(%1)\n\t" \ |
| "sd $4, 0($29)\n\t" \ |
| "ld $4, 80(%1)\n\t" \ |
| "sd $4, 8($29)\n\t" \ |
| "ld $4, 88(%1)\n\t" \ |
| "sd $4, 16($29)\n\t" \ |
| "ld $4, 96(%1)\n\t" \ |
| "sd $4, 24($29)\n\t" \ |
| "ld $4, 8(%1)\n\t" \ |
| "ld $5, 16(%1)\n\t" \ |
| "ld $6, 24(%1)\n\t" \ |
| "ld $7, 32(%1)\n\t" \ |
| "ld $8, 40(%1)\n\t" \ |
| "ld $9, 48(%1)\n\t" \ |
| "ld $10, 56(%1)\n\t" \ |
| "ld $11, 64(%1)\n\t" \ |
| "ld $25, 0(%1)\n\t" /* target->t9 */ \ |
| VALGRIND_CALL_NOREDIR_T9 \ |
| "daddu $29, $29, 32\n\t" \ |
| "move %0, $2\n" \ |
| : /*out*/ "=r" (_res) \ |
| : /*in*/ "r" (&_argvec[0]) \ |
| : /*trash*/ "memory", __CALLER_SAVED_REGS \ |
| ); \ |
| lval = (__typeof__(lval)) (long)_res; \ |
| } while (0) |
| |
| #endif /* PLAT_mips64_linux */ |
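| |
| /* Note: client code does not normally use these CALL_FN_* macros |
|    directly; they are invoked from function wrappers built with the |
|    wrapping macros defined earlier in this file.  A minimal sketch, |
|    following the scheme in the Valgrind manual (the wrapped function |
|    "foo" is hypothetical): |
| |
|       int I_WRAP_SONAME_FNNAME_ZU(NONE, foo)(int x, int y) |
|       { |
|          int    result; |
|          OrigFn fn; |
|          VALGRIND_GET_ORIG_FN(fn); |
|          CALL_FN_W_WW(result, fn, x, y); |
|          return result; |
|       } |
| */ |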
| |
| /* ------------------------------------------------------------------ */ |
| /* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS. */ |
| /* */ |
| /* ------------------------------------------------------------------ */ |
| |
| /* Some request codes. There are many more of these, but most are not |
| exposed to end-user view. These are the public ones, all of the |
| form 0x1000 + small_number. |
| |
| Core ones are in the range 0x00000000--0x0000ffff. The non-public |
| ones start at 0x2000. |
| */ |
| |
| /* These macros are used by tools -- they must be public, but don't |
| embed them into other programs. */ |
| #define VG_USERREQ_TOOL_BASE(a,b) \ |
| ((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16)) |
| #define VG_IS_TOOL_USERREQ(a, b, v) \ |
| (VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000)) |
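| |
| /* For example (a sketch only; the tool code 'X','T' is hypothetical), |
|    a tool could define one of its request codes as |
| |
|       #define XT_USERREQ__DO_SOMETHING \ |
|               (VG_USERREQ_TOOL_BASE('X','T') + 1) |
| |
|    and recognise incoming requests with |
|    VG_IS_TOOL_USERREQ('X','T', req).  Real tools such as Memcheck |
|    follow this same scheme. */ |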
| |
| /* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !! |
| This enum comprises an ABI exported by Valgrind to programs |
| which use client requests. DO NOT CHANGE THE NUMERIC VALUES OF THESE |
| ENTRIES, NOR DELETE ANY -- add new ones at the end of the most |
| relevant group. */ |
| typedef |
| enum { VG_USERREQ__RUNNING_ON_VALGRIND = 0x1001, |
| VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002, |
| |
| /* These allow any function to be called from the simulated |
| CPU but run on the real CPU. Nb: the first arg passed to |
| the function is always the ThreadId of the running |
| thread! So CLIENT_CALL0 actually requires a 1 arg |
| function, etc. */ |
| VG_USERREQ__CLIENT_CALL0 = 0x1101, |
| VG_USERREQ__CLIENT_CALL1 = 0x1102, |
| VG_USERREQ__CLIENT_CALL2 = 0x1103, |
| VG_USERREQ__CLIENT_CALL3 = 0x1104, |
| |
| /* Can be useful in regression testing suites -- eg. can |
| send Valgrind's output to /dev/null and still count |
| errors. */ |
| VG_USERREQ__COUNT_ERRORS = 0x1201, |
| |
| /* Allows the client program and/or gdbserver to execute a monitor |
| command. */ |
| VG_USERREQ__GDB_MONITOR_COMMAND = 0x1202, |
| |
| /* Allows the client program to change a dynamic command line |
| option. */ |
| VG_USERREQ__CLO_CHANGE = 0x1203, |
| |
| /* These are useful and can be interpreted by any tool that |
| tracks malloc() et al, by using vg_replace_malloc.c. */ |
| VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301, |
| VG_USERREQ__RESIZEINPLACE_BLOCK = 0x130b, |
| VG_USERREQ__FREELIKE_BLOCK = 0x1302, |
| /* Memory pool support. */ |
| VG_USERREQ__CREATE_MEMPOOL = 0x1303, |
| VG_USERREQ__DESTROY_MEMPOOL = 0x1304, |
| VG_USERREQ__MEMPOOL_ALLOC = 0x1305, |
| VG_USERREQ__MEMPOOL_FREE = 0x1306, |
| VG_USERREQ__MEMPOOL_TRIM = 0x1307, |
| VG_USERREQ__MOVE_MEMPOOL = 0x1308, |
| VG_USERREQ__MEMPOOL_CHANGE = 0x1309, |
| VG_USERREQ__MEMPOOL_EXISTS = 0x130a, |
| |
| /* Allow printfs to the Valgrind log. */ |
| /* The first two pass the va_list argument by value, which |
| assumes it is the same size as or smaller than a UWord, |
| which generally isn't the case.  Hence they are deprecated. |
| The latter two pass the vargs by reference and so are |
| immune to this problem. */ |
| /* both :: char* fmt, va_list vargs (DEPRECATED) */ |
| VG_USERREQ__PRINTF = 0x1401, |
| VG_USERREQ__PRINTF_BACKTRACE = 0x1402, |
| /* both :: char* fmt, va_list* vargs */ |
| VG_USERREQ__PRINTF_VALIST_BY_REF = 0x1403, |
| VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF = 0x1404, |
| |
| /* Stack support. */ |
| VG_USERREQ__STACK_REGISTER = 0x1501, |
| VG_USERREQ__STACK_DEREGISTER = 0x1502, |
| VG_USERREQ__STACK_CHANGE = 0x1503, |
| |
| /* Wine support */ |
| VG_USERREQ__LOAD_PDB_DEBUGINFO = 0x1601, |
| |
| /* Querying of debug info. */ |
| VG_USERREQ__MAP_IP_TO_SRCLOC = 0x1701, |
| |
| /* Disable/enable error reporting level. Takes a single |
| Word arg which is the delta to this thread's error |
| disablement indicator. Hence 1 disables or further |
| disables errors, and -1 moves back towards enablement. |
| Other values are not allowed. */ |
| VG_USERREQ__CHANGE_ERR_DISABLEMENT = 0x1801, |
| |
| /* Some requests used internally by Valgrind, such as |
| self-testing or self-hosting. */ |
| /* Initialise IR injection */ |
| VG_USERREQ__VEX_INIT_FOR_IRI = 0x1901, |
| /* Used by Inner Valgrind to inform Outer Valgrind where to |
| find the list of inner guest threads */ |
| VG_USERREQ__INNER_THREADS = 0x1902 |
| } Vg_ClientRequest; |
| |
| #if !defined(__GNUC__) |
| # define __extension__ /* */ |
| #endif |
| |
| |
| /* Returns the number of Valgrinds this code is running under. That |
| is, 0 if running natively, 1 if running under Valgrind, 2 if |
| running under Valgrind which is running under another Valgrind, |
| etc. */ |
| #define RUNNING_ON_VALGRIND \ |
| (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* if not */, \ |
| VG_USERREQ__RUNNING_ON_VALGRIND, \ |
| 0, 0, 0, 0, 0) \ |
| |
| |
| /* Discard translation of code in the range [_qzz_addr .. _qzz_addr + |
| _qzz_len - 1]. Useful if you are debugging a JITter or some such, |
| since it provides a way to make sure valgrind will retranslate the |
| invalidated area. Returns no value. */ |
| #define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len) \ |
| VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DISCARD_TRANSLATIONS, \ |
| _qzz_addr, _qzz_len, 0, 0, 0) |
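| |
| /* For example, a JIT that regenerates code in a buffer might do the |
|    following ("regenerate_code", "buf" and "len" are hypothetical): |
| |
|       regenerate_code(buf, len); |
|       VALGRIND_DISCARD_TRANSLATIONS(buf, len); |
| */ |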
| |
| #define VALGRIND_INNER_THREADS(_qzz_addr) \ |
| VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__INNER_THREADS, \ |
| _qzz_addr, 0, 0, 0, 0) |
| |
| |
| /* These requests are for getting Valgrind itself to print something. |
| Possibly with a backtrace. This is a really ugly hack. The return value |
| is the number of characters printed, excluding the "**<pid>** " part at the |
| start and the backtrace (if present). */ |
| |
| #if (defined(__GNUC__) || defined(__INTEL_COMPILER)) && !defined(_MSC_VER) |
| /* Modern GCC will optimize the static routine out if unused, |
| and unused attribute will shut down warnings about it. */ |
| static int VALGRIND_PRINTF(const char *format, ...) |
| __attribute__((format(__printf__, 1, 2), __unused__)); |
| #endif |
| static int |
| #if defined(_MSC_VER) |
| __inline |
| #endif |
| VALGRIND_PRINTF(const char *format, ...) |
| { |
| #if !IS_ENABLED(CONFIG_VALGRIND) |
| (void)format; |
| return 0; |
| #else /* CONFIG_VALGRIND */ |
| #if defined(_MSC_VER) || defined(__MINGW64__) |
| uintptr_t _qzz_res; |
| #else |
| unsigned long _qzz_res; |
| #endif |
| va_list vargs; |
| va_start(vargs, format); |
| #if defined(_MSC_VER) || defined(__MINGW64__) |
| _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, |
| VG_USERREQ__PRINTF_VALIST_BY_REF, |
| (uintptr_t)format, |
| (uintptr_t)&vargs, |
| 0, 0, 0); |
| #else |
| _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, |
| VG_USERREQ__PRINTF_VALIST_BY_REF, |
| (unsigned long)format, |
| (unsigned long)&vargs, |
| 0, 0, 0); |
| #endif |
| va_end(vargs); |
| return (int)_qzz_res; |
| #endif /* CONFIG_VALGRIND */ |
| } |
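| |
| /* Example (illustrative only): the message goes to the Valgrind log, |
|    and the call is effectively a no-op when not running on Valgrind: |
| |
|       VALGRIND_PRINTF("reached checkpoint %d\n", checkpoint); |
| */ |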
| |
| #if (defined(__GNUC__) || defined(__INTEL_COMPILER)) && !defined(_MSC_VER) |
| static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...) |
| __attribute__((format(__printf__, 1, 2), __unused__)); |
| #endif |
| static int |
| #if defined(_MSC_VER) |
| __inline |
| #endif |
| VALGRIND_PRINTF_BACKTRACE(const char *format, ...) |
| { |
| #if !IS_ENABLED(CONFIG_VALGRIND) |
| (void)format; |
| return 0; |
| #else /* CONFIG_VALGRIND */ |
| #if defined(_MSC_VER) || defined(__MINGW64__) |
| uintptr_t _qzz_res; |
| #else |
| unsigned long _qzz_res; |
| #endif |
| va_list vargs; |
| va_start(vargs, format); |
| #if defined(_MSC_VER) || defined(__MINGW64__) |
| _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, |
| VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF, |
| (uintptr_t)format, |
| (uintptr_t)&vargs, |
| 0, 0, 0); |
| #else |
| _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, |
| VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF, |
| (unsigned long)format, |
| (unsigned long)&vargs, |
| 0, 0, 0); |
| #endif |
| va_end(vargs); |
| return (int)_qzz_res; |
| #endif /* CONFIG_VALGRIND */ |
| } |
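| |
| /* For example (illustrative only; 'n_entries' is a hypothetical |
|    variable), to tag Valgrind's log output with program state, or to |
|    see how a rare code path was reached: |
| |
|       VALGRIND_PRINTF("cache rebuilt, %d entries\n", n_entries); |
|       VALGRIND_PRINTF_BACKTRACE("unexpected fallback path\n"); |
| */ |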
| |
| |
| /* These requests allow control to move from the simulated CPU to the |
| real CPU, calling an arbitrary function. |
| |
| Note that the current ThreadId is inserted as the first argument. |
| So this call: |
| |
| VALGRIND_NON_SIMD_CALL2(f, arg1, arg2) |
| |
| requires f to have this signature: |
| |
| Word f(Word tid, Word arg1, Word arg2) |
| |
| where "Word" is a word-sized type. |
| |
| Note that these client requests are not entirely reliable. For example, |
| if you call a function with them that subsequently calls printf(), |
|    there's a high chance Valgrind will crash.  Generally, these are more |
|    likely to work if the called function does not refer to any global |
|    variables, and does not call any libc or other functions |
| (printf et al). Any kind of entanglement with libc or dynamic linking is |
| likely to have a bad outcome, for tricky reasons which we've grappled |
| with a lot in the past. |
| */ |
| #define VALGRIND_NON_SIMD_CALL0(_qyy_fn) \ |
| VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ |
| VG_USERREQ__CLIENT_CALL0, \ |
| _qyy_fn, \ |
| 0, 0, 0, 0) |
| |
| #define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1) \ |
| VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ |
| VG_USERREQ__CLIENT_CALL1, \ |
| _qyy_fn, \ |
| _qyy_arg1, 0, 0, 0) |
| |
| #define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \ |
| VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ |
| VG_USERREQ__CLIENT_CALL2, \ |
| _qyy_fn, \ |
| _qyy_arg1, _qyy_arg2, 0, 0) |
| |
| #define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \ |
| VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ |
| VG_USERREQ__CLIENT_CALL3, \ |
| _qyy_fn, \ |
| _qyy_arg1, _qyy_arg2, \ |
| _qyy_arg3, 0) |
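| |
| /* For example (an illustrative sketch; 'my_probe' is a hypothetical |
|    function, and long stands in for Word on LP64 targets).  Note the |
|    extra leading tid argument described above: |
| |
|       static long my_probe(long tid, long a, long b) |
|       { |
|          return a + b; |
|       } |
| |
|       long r = VALGRIND_NON_SIMD_CALL2(my_probe, 10, 32); |
| |
|    The probe avoids libc calls and global variables, per the advice |
|    above. */ |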
| |
| |
| /* Counts the number of errors that have been recorded by a tool. Nb: |
| the tool must record the errors with VG_(maybe_record_error)() or |
| VG_(unique_error)() for them to be counted. */ |
| #define VALGRIND_COUNT_ERRORS \ |
| (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR( \ |
| 0 /* default return */, \ |
| VG_USERREQ__COUNT_ERRORS, \ |
| 0, 0, 0, 0, 0) |
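| |
| /* For example (illustrative only; 'run_phase' is a hypothetical |
|    function), a test can check that a phase of execution introduced no |
|    new tool errors: |
| |
|       unsigned before = VALGRIND_COUNT_ERRORS; |
|       run_phase(); |
|       assert(VALGRIND_COUNT_ERRORS == before); |
| */ |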
| |
| /* Several Valgrind tools (Memcheck, Massif, Helgrind, DRD) rely on knowing |
| when heap blocks are allocated in order to give accurate results. This |
| happens automatically for the standard allocator functions such as |
| malloc(), calloc(), realloc(), memalign(), new, new[], free(), delete, |
| delete[], etc. |
| |
| But if your program uses a custom allocator, this doesn't automatically |
| happen, and Valgrind will not do as well. For example, if you allocate |
|    superblocks with mmap() and then allocate chunks out of them, all |
| Valgrind's observations will be at the mmap() level and it won't know that |
| the chunks should be considered separate entities. In Memcheck's case, |
| that means you probably won't get heap block overrun detection (because |
| there won't be redzones marked as unaddressable) and you definitely won't |
| get any leak detection. |
| |
| The following client requests allow a custom allocator to be annotated so |
| that it can be handled accurately by Valgrind. |
| |
| VALGRIND_MALLOCLIKE_BLOCK marks a region of memory as having been allocated |
| by a malloc()-like function. For Memcheck (an illustrative case), this |
| does two things: |
| |
| - It records that the block has been allocated. This means any addresses |
| within the block mentioned in error messages will be |
| identified as belonging to the block. It also means that if the block |
| isn't freed it will be detected by the leak checker. |
| |
| - It marks the block as being addressable and undefined (if 'is_zeroed' is |
| not set), or addressable and defined (if 'is_zeroed' is set). This |
| controls how accesses to the block by the program are handled. |
| |
| 'addr' is the start of the usable block (ie. after any |
| redzone), 'sizeB' is its size. 'rzB' is the redzone size if the allocator |
| can apply redzones -- these are blocks of padding at the start and end of |
| each block. Adding redzones is recommended as it makes it much more likely |
|    Valgrind will spot block overruns.  'is_zeroed' indicates if the memory is |
| zeroed (or filled with another predictable value), as is the case for |
| calloc(). |
| |
| VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a |
| heap block -- that will be used by the client program -- is allocated. |
| It's best to put it at the outermost level of the allocator if possible; |
| for example, if you have a function my_alloc() which calls |
| internal_alloc(), and the client request is put inside internal_alloc(), |
| stack traces relating to the heap block will contain entries for both |
| my_alloc() and internal_alloc(), which is probably not what you want. |
| |
| For Memcheck users: if you use VALGRIND_MALLOCLIKE_BLOCK to carve out |
| custom blocks from within a heap block, B, that has been allocated with |
| malloc/calloc/new/etc, then block B will be *ignored* during leak-checking |
| -- the custom blocks will take precedence. |
| |
| VALGRIND_FREELIKE_BLOCK is the partner to VALGRIND_MALLOCLIKE_BLOCK. For |
| Memcheck, it does two things: |
| |
| - It records that the block has been deallocated. This assumes that the |
| block was annotated as having been allocated via |
| VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued. |
| |
| - It marks the block as being unaddressable. |
| |
| VALGRIND_FREELIKE_BLOCK should be put immediately after the point where a |
| heap block is deallocated. |
| |
| VALGRIND_RESIZEINPLACE_BLOCK informs a tool about reallocation. For |
| Memcheck, it does four things: |
| |
| - It records that the size of a block has been changed. This assumes that |
| the block was annotated as having been allocated via |
| VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued. |
| |
|    - If the block shrank, it marks the freed memory as being unaddressable. |
| |
| - If the block grew, it marks the new area as undefined and defines a red |
| zone past the end of the new block. |
| |
| - The V-bits of the overlap between the old and the new block are preserved. |
| |
| VALGRIND_RESIZEINPLACE_BLOCK should be put after allocation of the new block |
| and before deallocation of the old block. |
| |
| In many cases, these three client requests will not be enough to get your |
| allocator working well with Memcheck. More specifically, if your allocator |
| writes to freed blocks in any way then a VALGRIND_MAKE_MEM_UNDEFINED call |
|    will be necessary to mark the memory as addressable just before the write |
|    occurs, otherwise you'll get a lot of invalid write errors.  For example, |
| you'll need to do this if your allocator recycles freed blocks, but it |
| zeroes them before handing them back out (via VALGRIND_MALLOCLIKE_BLOCK). |
| Alternatively, if your allocator reuses freed blocks for allocator-internal |
| data structures, VALGRIND_MAKE_MEM_UNDEFINED calls will also be necessary. |
| |
| Really, what's happening is a blurring of the lines between the client |
| program and the allocator... after VALGRIND_FREELIKE_BLOCK is called, the |
| memory should be considered unaddressable to the client program, but the |
| allocator knows more than the rest of the client program and so may be able |
| to safely access it. Extra client requests are necessary for Valgrind to |
| understand the distinction between the allocator and the rest of the |
| program. |
| |
| Ignored if addr == 0. |
| */ |
| #define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \ |
| VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MALLOCLIKE_BLOCK, \ |
| addr, sizeB, rzB, is_zeroed, 0) |
| |
| /* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details. |
| Ignored if addr == 0. |
| */ |
| #define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \ |
| VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__RESIZEINPLACE_BLOCK, \ |
| addr, oldSizeB, newSizeB, rzB, 0) |
| |
| /* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details. |
| Ignored if addr == 0. |
| */ |
| #define VALGRIND_FREELIKE_BLOCK(addr, rzB) \ |
| VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__FREELIKE_BLOCK, \ |
| addr, rzB, 0, 0, 0) |
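| |
| /* For example (an illustrative sketch of annotating a custom |
|    allocator; the my_* and pool_* names and RZ are hypothetical), with |
|    RZ bytes of redzone on each side of every block.  The final 0 in |
|    the MALLOCLIKE request means is_zeroed is false: |
| |
|       void *my_alloc(size_t n) |
|       { |
|          char *p = pool_carve(n + 2 * RZ); |
|          VALGRIND_MALLOCLIKE_BLOCK(p + RZ, n, RZ, 0); |
|          return p + RZ; |
|       } |
| |
|       void my_free(void *p) |
|       { |
|          VALGRIND_FREELIKE_BLOCK(p, RZ); |
|          pool_release((char *)p - RZ); |
|       } |
| */ |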
| |
| /* Create a memory pool. */ |
| #define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) \ |
| VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CREATE_MEMPOOL, \ |
| pool, rzB, is_zeroed, 0, 0) |
| |
| /* Create a memory pool with some flags specifying extended behaviour. |
| When flags is zero, the behaviour is identical to VALGRIND_CREATE_MEMPOOL. |
| |
|    The flag VALGRIND_MEMPOOL_METAPOOL specifies that the pieces of memory |
|    associated with the pool using VALGRIND_MEMPOOL_ALLOC will be used |
|    by the application as superblocks from which it doles out smaller |
|    blocks marked with VALGRIND_MALLOCLIKE_BLOCK.  In other words, a meta |
|    pool is a two-level pool: the first level is the blocks described by |
|    VALGRIND_MEMPOOL_ALLOC; the second level blocks are described using |
|    VALGRIND_MALLOCLIKE_BLOCK.  Note that the association between the pool |
|    and the second level blocks is implicit: second level blocks will be |
|    located inside first level blocks.  The VALGRIND_MEMPOOL_METAPOOL flag |
|    is necessary for such two-level pools, as otherwise valgrind will |
|    detect overlapping memory blocks, and will abort execution |
|    (e.g. during leak search). |
| |
|    Such a meta pool can also be marked as an 'auto free' pool using the |
|    flag VALGRIND_MEMPOOL_AUTO_FREE, which must be OR-ed together with |
|    VALGRIND_MEMPOOL_METAPOOL.  For an 'auto free' pool, |
|    VALGRIND_MEMPOOL_FREE will automatically free the second level blocks |
|    that are contained inside the first level block freed with |
|    VALGRIND_MEMPOOL_FREE.  In other words, calling VALGRIND_MEMPOOL_FREE |
|    will cause implicit calls to VALGRIND_FREELIKE_BLOCK for all the |
|    second level blocks included in the first level block. |
|    Note: it is an error to use the VALGRIND_MEMPOOL_AUTO_FREE flag |
|    without the VALGRIND_MEMPOOL_METAPOOL flag. |
| */ |
| #define VALGRIND_MEMPOOL_AUTO_FREE 1 |
| #define VALGRIND_MEMPOOL_METAPOOL 2 |
| #define VALGRIND_CREATE_MEMPOOL_EXT(pool, rzB, is_zeroed, flags) \ |
| VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CREATE_MEMPOOL, \ |
| pool, rzB, is_zeroed, flags, 0) |
| |
| /* Destroy a memory pool. */ |
| #define VALGRIND_DESTROY_MEMPOOL(pool) \ |
| VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DESTROY_MEMPOOL, \ |
| pool, 0, 0, 0, 0) |
| |
| /* Associate a piece of memory with a memory pool. */ |
| #define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) \ |
| VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_ALLOC, \ |
| pool, addr, size, 0, 0) |
| |
| /* Disassociate a piece of memory from a memory pool. */ |
| #define VALGRIND_MEMPOOL_FREE(pool, addr) \ |
| VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_FREE, \ |
| pool, addr, 0, 0, 0) |
| |
| /* Disassociate any pieces outside a particular range. */ |
| #define VALGRIND_MEMPOOL_TRIM(pool, addr, size) \ |
| VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_TRIM, \ |
| pool, addr, size, 0, 0) |
| |
| /* Move a memory pool: tell Valgrind that the pool anchored at poolA |
|    has moved to anchor address poolB. */ |
| #define VALGRIND_MOVE_MEMPOOL(poolA, poolB) \ |
| VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MOVE_MEMPOOL, \ |
| poolA, poolB, 0, 0, 0) |
| |
| /* Resize and/or move a piece associated with a memory pool. */ |
| #define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size) \ |
| VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_CHANGE, \ |
| pool, addrA, addrB, size, 0) |
| |
| /* Return 1 if a mempool exists, else 0. */ |
| #define VALGRIND_MEMPOOL_EXISTS(pool) \ |
| (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ |
| VG_USERREQ__MEMPOOL_EXISTS, \ |
| pool, 0, 0, 0, 0) |
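| |
| /* For example (an illustrative sketch; the backing store and offsets |
|    are hypothetical), a typical pool lifetime.  The pool handle is any |
|    address that uniquely identifies the pool, often the address of its |
|    control structure: |
| |
|       static char storage[4096]; |
|       void *anchor = storage; |
| |
|       VALGRIND_CREATE_MEMPOOL(anchor, 0, 0); |
|       VALGRIND_MEMPOOL_ALLOC(anchor, storage + 64, 128); |
|       VALGRIND_MEMPOOL_FREE(anchor, storage + 64); |
|       VALGRIND_DESTROY_MEMPOOL(anchor); |
| */ |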
| |
| /* Mark a piece of memory as being a stack. Returns a stack id. |
| start is the lowest addressable stack byte, end is the highest |
| addressable stack byte. */ |
| #define VALGRIND_STACK_REGISTER(start, end) \ |
| (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ |
| VG_USERREQ__STACK_REGISTER, \ |
| start, end, 0, 0, 0) |
| |
| /* Unmark the piece of memory associated with a stack id as being a |
| stack. */ |
| #define VALGRIND_STACK_DEREGISTER(id) \ |
| VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STACK_DEREGISTER, \ |
| id, 0, 0, 0, 0) |
| |
| /* Change the start and end address of the stack id. |
| start is the new lowest addressable stack byte, end is the new highest |
| addressable stack byte. */ |
| #define VALGRIND_STACK_CHANGE(id, start, end) \ |
| VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STACK_CHANGE, \ |
| id, start, end, 0, 0) |
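| |
| /* For example (illustrative only; STK_SZ is a hypothetical size), when |
|    running a coroutine on a manually allocated stack: |
| |
|       char *stk = malloc(STK_SZ); |
|       unsigned id = VALGRIND_STACK_REGISTER(stk, stk + STK_SZ - 1); |
|       ... switch to the coroutine stack and run ... |
|       VALGRIND_STACK_DEREGISTER(id); |
|       free(stk); |
| */ |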
| |
| /* Load PDB debug info for Wine PE image_map. */ |
| #define VALGRIND_LOAD_PDB_DEBUGINFO(fd, ptr, total_size, delta) \ |
| VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__LOAD_PDB_DEBUGINFO, \ |
| fd, ptr, total_size, delta, 0) |
| |
| /* Map a code address to a source file name and line number.  buf64 |
|    must point to a 64-byte buffer in the caller's address space.  The |
|    result is written into it and is guaranteed to be zero terminated. |
|    If no info is found, the first byte is set to zero. */ |
| #define VALGRIND_MAP_IP_TO_SRCLOC(addr, buf64) \ |
| (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ |
| VG_USERREQ__MAP_IP_TO_SRCLOC, \ |
| addr, buf64, 0, 0, 0) |
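| |
| /* For example (illustrative only; 'some_code_addr' is a hypothetical |
|    code address): |
| |
|       char buf64[64]; |
|       VALGRIND_MAP_IP_TO_SRCLOC(some_code_addr, buf64); |
|       if (buf64[0] != 0) |
|          VALGRIND_PRINTF("at %s\n", buf64); |
| */ |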
| |
| /* Disable error reporting for this thread.  Behaves in a stack-like |
|    way, so you can safely call this multiple times provided that |
| VALGRIND_ENABLE_ERROR_REPORTING is called the same number of times |
| to re-enable reporting. The first call of this macro disables |
| reporting. Subsequent calls have no effect except to increase the |
| number of VALGRIND_ENABLE_ERROR_REPORTING calls needed to re-enable |
| reporting. Child threads do not inherit this setting from their |
| parents -- they are always created with reporting enabled. */ |
| #define VALGRIND_DISABLE_ERROR_REPORTING \ |
| VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CHANGE_ERR_DISABLEMENT, \ |
| 1, 0, 0, 0, 0) |
| |
| /* Re-enable error reporting, as per comments on |
| VALGRIND_DISABLE_ERROR_REPORTING. */ |
| #define VALGRIND_ENABLE_ERROR_REPORTING \ |
| VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CHANGE_ERR_DISABLEMENT, \ |
| -1, 0, 0, 0, 0) |
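| |
| /* For example (illustrative only; the callee is hypothetical), to |
|    suppress reports from a known-noisy third-party routine: |
| |
|       VALGRIND_DISABLE_ERROR_REPORTING; |
|       call_noisy_third_party_code(); |
|       VALGRIND_ENABLE_ERROR_REPORTING; |
| */ |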
| |
| /* Execute a monitor command from the client program. |
| If a connection is opened with GDB, the output will be sent |
| according to the output mode set for vgdb. |
| If no connection is opened, output will go to the log output. |
|    Returns 1 if the command was not recognised, 0 otherwise. */ |
| #define VALGRIND_MONITOR_COMMAND(command) \ |
| VALGRIND_DO_CLIENT_REQUEST_EXPR(0, VG_USERREQ__GDB_MONITOR_COMMAND, \ |
| command, 0, 0, 0, 0) |
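| |
| /* For example (illustrative only; available commands depend on the |
|    tool in use -- see the tool's monitor command documentation), under |
|    Memcheck: |
| |
|       VALGRIND_MONITOR_COMMAND("leak_check summary"); |
| */ |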
| |
| |
| /* Change the value of a dynamic command line option. |
|    Note that unknown options, or options that are not dynamically |
|    changeable, will cause a warning message to be output. */ |
| #define VALGRIND_CLO_CHANGE(option) \ |
| VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CLO_CHANGE, \ |
| option, 0, 0, 0, 0) |
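| |
| /* For example (illustrative only; whether a given option is |
|    dynamically changeable depends on the Valgrind version -- see |
|    valgrind --help-dyn-options): |
| |
|       VALGRIND_CLO_CHANGE("--stats=yes"); |
| */ |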
| |
| |
| #undef PLAT_x86_darwin |
| #undef PLAT_amd64_darwin |
| #undef PLAT_x86_win32 |
| #undef PLAT_amd64_win64 |
| #undef PLAT_x86_linux |
| #undef PLAT_amd64_linux |
| #undef PLAT_ppc32_linux |
| #undef PLAT_ppc64be_linux |
| #undef PLAT_ppc64le_linux |
| #undef PLAT_arm_linux |
| #undef PLAT_arm64_linux |
| #undef PLAT_s390x_linux |
| #undef PLAT_mips32_linux |
| #undef PLAT_mips64_linux |
| #undef PLAT_nanomips_linux |
| #undef PLAT_x86_solaris |
| #undef PLAT_amd64_solaris |
| |
| #endif /* __VALGRIND_H */ |