X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FAdjustor.c;h=3d111b5d2e91e0b2e26825c57c2616b2ecbb4f00;hb=b5dbb387d42da93c3fa2976dd70475a9d6c03475;hp=fc4781c443ab1c697c5b047610360944b06cef45;hpb=b18ec0c58ef70ab531a2f4948827a543e4a48b69;p=ghc-hetmet.git diff --git a/ghc/rts/Adjustor.c b/ghc/rts/Adjustor.c index fc4781c..3d111b5 100644 --- a/ghc/rts/Adjustor.c +++ b/ghc/rts/Adjustor.c @@ -34,8 +34,8 @@ An adjustor thunk differs from a C function pointer in one respect: when the code is through with it, it has to be freed in order to release Haskell and C resources. Failure to do so result in memory leaks on both the C and Haskell side. - */ + #include "PosixSource.h" #include "Rts.h" #include "RtsExternal.h" @@ -46,46 +46,52 @@ Haskell side. #include #endif -/* Heavily arch-specific, I'm afraid.. */ +#if defined(openbsd_TARGET_OS) +#include +#include +#include + +/* no C99 header stdint.h on OpenBSD? */ +typedef unsigned long my_uintptr_t; +#endif + +#if defined(powerpc_TARGET_ARCH) && defined(linux_TARGET_OS) +#include +#endif -typedef enum { - pageExecuteRead, - pageExecuteReadWrite -} pageMode; +/* Heavily arch-specific, I'm afraid.. */ /* - * Function: execPage() + * Allocate len bytes which are readable, writable, and executable. * - * Set the executable bit on page containing addr. - * - * TODO: Can the code span more than one page? If yes, we need to make two - * pages executable! + * ToDo: If this turns out to be a performance bottleneck, one could + * e.g. cache the last VirtualProtect/mprotect-ed region and do + * nothing in case of a cache hit. */ -static rtsBool -execPage (void* addr, pageMode mode) +static void* +mallocBytesRWX(int len) { -#if defined(i386_TARGET_ARCH) && defined(_WIN32) && 0 - SYSTEM_INFO sInfo; - DWORD dwOldProtect = 0; - - /* doesn't return a result, so presumably it can't fail... */ - GetSystemInfo(&sInfo); - - if ( VirtualProtect ( (void*)((unsigned long)addr & (sInfo.dwPageSize - 1)), - sInfo.dwPageSize, - ( mode == pageExecuteReadWrite ? PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ), - &dwOldProtect) == 0 ) { -# if 1 - DWORD rc = GetLastError(); - prog_belch("execPage: failed to protect 0x%p; error=%lu; old protection: %lu\n", addr, rc, dwOldProtect); -# endif - return rtsFalse; - } - return rtsTrue; -#else - (void)addr; (void)mode; /* keep gcc -Wall happy */ - return rtsTrue; + void *addr = stgMallocBytes(len, "mallocBytesRWX"); +#if defined(i386_TARGET_ARCH) && defined(_WIN32) + /* This could be necessary for processors which distinguish between READ and + EXECUTE memory accesses, e.g. Itaniums. 
*/ + DWORD dwOldProtect = 0; + if (VirtualProtect (addr, len, PAGE_EXECUTE_READWRITE, &dwOldProtect) == 0) { + barf("mallocBytesRWX: failed to protect 0x%p; error=%lu; old protection: %lu\n", + addr, (unsigned long)GetLastError(), (unsigned long)dwOldProtect); + } +#elif defined(openbsd_TARGET_OS) + /* malloced memory isn't executable by default on OpenBSD */ + my_uintptr_t pageSize = sysconf(_SC_PAGESIZE); + my_uintptr_t mask = ~(pageSize - 1); + my_uintptr_t startOfFirstPage = ((my_uintptr_t)addr ) & mask; + my_uintptr_t startOfLastPage = ((my_uintptr_t)addr + len - 1) & mask; + my_uintptr_t size = startOfLastPage - startOfFirstPage + pageSize; + if (mprotect((void*)startOfFirstPage, (size_t)size, PROT_EXEC | PROT_READ | PROT_WRITE) != 0) { + barf("mallocBytesRWX: failed to protect 0x%p\n", addr); + } #endif + return addr; } #if defined(i386_TARGET_ARCH) @@ -132,8 +138,67 @@ stgAllocStable(size_t size_in_bytes, StgStablePtr *stable) } #endif +#if defined(powerpc_TARGET_ARCH) && defined(linux_TARGET_OS) +__asm__("obscure_ccall_ret_code:\n\t" + "lwz 1,0(1)\n\t" + "lwz 0,4(1)\n\t" + "mtlr 0\n\t" + "blr"); +extern void obscure_ccall_ret_code(void); +#endif + +#if defined(powerpc_TARGET_ARCH) || defined(powerpc64_TARGET_ARCH) +#if !(defined(powerpc_TARGET_ARCH) && defined(linux_TARGET_OS)) + +/* !!! !!! WARNING: !!! !!! + * This structure is accessed from AdjustorAsm.s + * Any changes here have to be mirrored in the offsets there. + */ + +typedef struct AdjustorStub { +#if defined(powerpc_TARGET_ARCH) && defined(darwin_TARGET_OS) + unsigned lis; + unsigned ori; + unsigned lwz; + unsigned mtctr; + unsigned bctr; + StgFunPtr code; +#elif defined(powerpc64_TARGET_ARCH) && defined(darwin_TARGET_OS) + /* powerpc64-darwin: just guessing that it won't use fundescs. */ + unsigned lis; + unsigned ori; + unsigned rldimi; + unsigned oris; + unsigned ori2; + unsigned lwz; + unsigned mtctr; + unsigned bctr; + StgFunPtr code; +#else + /* fundesc-based ABIs */ +#define FUNDESCS + StgFunPtr code; + struct AdjustorStub + *toc; + void *env; +#endif + StgStablePtr hptr; + StgFunPtr wptr; + StgInt negative_framesize; + StgInt extrawords_plus_one; +} AdjustorStub; + +#endif +#endif + void* -createAdjustor(int cconv, StgStablePtr hptr, StgFunPtr wptr) +createAdjustor(int cconv, StgStablePtr hptr, + StgFunPtr wptr, + char *typeString +#if !defined(powerpc_TARGET_ARCH) && !defined(powerpc64_TARGET_ARCH) + STG_UNUSED +#endif + ) { void *adjustor = NULL; @@ -153,22 +218,22 @@ createAdjustor(int cconv, StgStablePtr hptr, StgFunPtr wptr) : ff e0 jmp %eax # and jump to it. 
# the callee cleans up the stack */ - adjustor = stgMallocBytes(14, "createAdjustor"); - unsigned char *const adj_code = (unsigned char *)adjustor; - adj_code[0x00] = (unsigned char)0x58; /* popl %eax */ + adjustor = mallocBytesRWX(14); + { + unsigned char *const adj_code = (unsigned char *)adjustor; + adj_code[0x00] = (unsigned char)0x58; /* popl %eax */ - adj_code[0x01] = (unsigned char)0x68; /* pushl hptr (which is a dword immediate ) */ - *((StgStablePtr*)(adj_code + 0x02)) = (StgStablePtr)hptr; + adj_code[0x01] = (unsigned char)0x68; /* pushl hptr (which is a dword immediate ) */ + *((StgStablePtr*)(adj_code + 0x02)) = (StgStablePtr)hptr; - adj_code[0x06] = (unsigned char)0x50; /* pushl %eax */ + adj_code[0x06] = (unsigned char)0x50; /* pushl %eax */ - adj_code[0x07] = (unsigned char)0xb8; /* movl $wptr, %eax */ - *((StgFunPtr*)(adj_code + 0x08)) = (StgFunPtr)wptr; + adj_code[0x07] = (unsigned char)0xb8; /* movl $wptr, %eax */ + *((StgFunPtr*)(adj_code + 0x08)) = (StgFunPtr)wptr; - adj_code[0x0c] = (unsigned char)0xff; /* jmp %eax */ - adj_code[0x0d] = (unsigned char)0xe0; - - execPage(adjustor, pageExecuteReadWrite); + adj_code[0x0c] = (unsigned char)0xff; /* jmp %eax */ + adj_code[0x0d] = (unsigned char)0xe0; + } #endif break; @@ -198,22 +263,22 @@ createAdjustor(int cconv, StgStablePtr hptr, StgFunPtr wptr) That's (thankfully) the case here with the restricted set of return types that we support. */ - adjustor = stgMallocBytes(17, "createAdjustor"); - unsigned char *const adj_code = (unsigned char *)adjustor; - - adj_code[0x00] = (unsigned char)0x68; /* pushl hptr (which is a dword immediate ) */ - *((StgStablePtr*)(adj_code+0x01)) = (StgStablePtr)hptr; + adjustor = mallocBytesRWX(17); + { + unsigned char *const adj_code = (unsigned char *)adjustor; - adj_code[0x05] = (unsigned char)0xb8; /* movl $wptr, %eax */ - *((StgFunPtr*)(adj_code + 0x06)) = (StgFunPtr)wptr; + adj_code[0x00] = (unsigned char)0x68; /* pushl hptr (which is a dword immediate ) */ + *((StgStablePtr*)(adj_code+0x01)) = (StgStablePtr)hptr; - adj_code[0x0a] = (unsigned char)0x68; /* pushl obscure_ccall_ret_code */ - *((StgFunPtr*)(adj_code + 0x0b)) = (StgFunPtr)obscure_ccall_ret_code; + adj_code[0x05] = (unsigned char)0xb8; /* movl $wptr, %eax */ + *((StgFunPtr*)(adj_code + 0x06)) = (StgFunPtr)wptr; - adj_code[0x0f] = (unsigned char)0xff; /* jmp *%eax */ - adj_code[0x10] = (unsigned char)0xe0; + adj_code[0x0a] = (unsigned char)0x68; /* pushl obscure_ccall_ret_code */ + *((StgFunPtr*)(adj_code + 0x0b)) = (StgFunPtr)obscure_ccall_ret_code; - execPage(adjustor, pageExecuteReadWrite); + adj_code[0x0f] = (unsigned char)0xff; /* jmp *%eax */ + adj_code[0x10] = (unsigned char)0xe0; + } #elif defined(sparc_TARGET_ARCH) /* Magic constant computed by inspecting the code length of the following assembly language snippet (offset and machine code prefixed): @@ -244,39 +309,41 @@ createAdjustor(int cconv, StgStablePtr hptr, StgFunPtr wptr) similarly, and local variables should be accessed via %fp, not %sp. In a nutshell: This should work! (Famous last words! 
:-) */ - adjustor = stgMallocBytes(4*(11+1), "createAdjustor"); - unsigned long *const adj_code = (unsigned long *)adjustor; - - adj_code[ 0] = 0x9C23A008UL; /* sub %sp, 8, %sp */ - adj_code[ 1] = 0xDA23A060UL; /* st %o5, [%sp + 96] */ - adj_code[ 2] = 0xD823A05CUL; /* st %o4, [%sp + 92] */ - adj_code[ 3] = 0x9A10000BUL; /* mov %o3, %o5 */ - adj_code[ 4] = 0x9810000AUL; /* mov %o2, %o4 */ - adj_code[ 5] = 0x96100009UL; /* mov %o1, %o3 */ - adj_code[ 6] = 0x94100008UL; /* mov %o0, %o2 */ - adj_code[ 7] = 0x13000000UL; /* sethi %hi(wptr), %o1 */ - adj_code[ 7] |= ((unsigned long)wptr) >> 10; - adj_code[ 8] = 0x11000000UL; /* sethi %hi(hptr), %o0 */ - adj_code[ 8] |= ((unsigned long)hptr) >> 10; - adj_code[ 9] = 0x81C26000UL; /* jmp %o1 + %lo(wptr) */ - adj_code[ 9] |= ((unsigned long)wptr) & 0x000003FFUL; - adj_code[10] = 0x90122000UL; /* or %o0, %lo(hptr), %o0 */ - adj_code[10] |= ((unsigned long)hptr) & 0x000003FFUL; - - adj_code[11] = (unsigned long)hptr; - - /* flush cache */ - asm("flush %0" : : "r" (adj_code )); - asm("flush %0" : : "r" (adj_code + 2)); - asm("flush %0" : : "r" (adj_code + 4)); - asm("flush %0" : : "r" (adj_code + 6)); - asm("flush %0" : : "r" (adj_code + 10)); - - /* max. 5 instructions latency, and we need at >= 1 for returning */ - asm("nop"); - asm("nop"); - asm("nop"); - asm("nop"); + adjustor = mallocBytesRWX(4*(11+1)); + { + unsigned long *const adj_code = (unsigned long *)adjustor; + + adj_code[ 0] = 0x9C23A008UL; /* sub %sp, 8, %sp */ + adj_code[ 1] = 0xDA23A060UL; /* st %o5, [%sp + 96] */ + adj_code[ 2] = 0xD823A05CUL; /* st %o4, [%sp + 92] */ + adj_code[ 3] = 0x9A10000BUL; /* mov %o3, %o5 */ + adj_code[ 4] = 0x9810000AUL; /* mov %o2, %o4 */ + adj_code[ 5] = 0x96100009UL; /* mov %o1, %o3 */ + adj_code[ 6] = 0x94100008UL; /* mov %o0, %o2 */ + adj_code[ 7] = 0x13000000UL; /* sethi %hi(wptr), %o1 */ + adj_code[ 7] |= ((unsigned long)wptr) >> 10; + adj_code[ 8] = 0x11000000UL; /* sethi %hi(hptr), %o0 */ + adj_code[ 8] |= ((unsigned long)hptr) >> 10; + adj_code[ 9] = 0x81C26000UL; /* jmp %o1 + %lo(wptr) */ + adj_code[ 9] |= ((unsigned long)wptr) & 0x000003FFUL; + adj_code[10] = 0x90122000UL; /* or %o0, %lo(hptr), %o0 */ + adj_code[10] |= ((unsigned long)hptr) & 0x000003FFUL; + + adj_code[11] = (unsigned long)hptr; + + /* flush cache */ + asm("flush %0" : : "r" (adj_code )); + asm("flush %0" : : "r" (adj_code + 2)); + asm("flush %0" : : "r" (adj_code + 4)); + asm("flush %0" : : "r" (adj_code + 6)); + asm("flush %0" : : "r" (adj_code + 10)); + + /* max. 5 instructions latency, and we need at >= 1 for returning */ + asm("nop"); + asm("nop"); + asm("nop"); + asm("nop"); + } #elif defined(alpha_TARGET_ARCH) /* Magic constant computed by inspecting the code length of the following assembly language snippet @@ -319,81 +386,359 @@ TODO: Depending on how much allocation overhead stgMallocBytes uses for 4 bytes (getting rid of the nop), hence saving memory. 
[ccshan] */ ASSERT(((StgWord64)wptr & 3) == 0); - adjustor = stgMallocBytes(48, "createAdjustor"); - StgWord64 *const code = (StgWord64 *)adjustor; + adjustor = mallocBytesRWX(48); + { + StgWord64 *const code = (StgWord64 *)adjustor; - code[0] = 0x4610041246520414L; - code[1] = 0x46730415a61b0020L; - code[2] = 0x46310413a77b0028L; - code[3] = 0x000000006bfb0000L - | (((StgWord32*)(wptr) - (StgWord32*)(code) - 3) & 0x3fff); + code[0] = 0x4610041246520414L; + code[1] = 0x46730415a61b0020L; + code[2] = 0x46310413a77b0028L; + code[3] = 0x000000006bfb0000L + | (((StgWord32*)(wptr) - (StgWord32*)(code) - 3) & 0x3fff); - code[4] = (StgWord64)hptr; - code[5] = (StgWord64)wptr; + code[4] = (StgWord64)hptr; + code[5] = (StgWord64)wptr; - /* Ensure that instruction cache is consistent with our new code */ - __asm__ volatile("call_pal %0" : : "i" (PAL_imb)); -#elif defined(powerpc_TARGET_ARCH) -/* - For PowerPC, the following code is used: - - mr r10,r8 - mr r9,r7 - mr r8,r6 - mr r7,r5 - mr r6,r4 - mr r5,r3 - lis r0,0xDEAD ;hi(wptr) - lis r3,0xDEAF ;hi(hptr) - ori r0,r0,0xBEEF ; lo(wptr) - ori r3,r3,0xFACE ; lo(hptr) - mtctr r0 - bctr - - The arguments (passed in registers r3 - r10) are shuffled along by two to - make room for hptr and a dummy argument. As r9 and r10 are overwritten by - this code, it only works for up to 6 arguments (when floating point arguments - are involved, this may be more or less, depending on the exact situation). -*/ - adjustor = stgMallocBytes(4*13, "createAdjustor"); - unsigned long *const adj_code = (unsigned long *)adjustor; - - // make room for extra arguments - adj_code[0] = 0x7d0a4378; //mr r10,r8 - adj_code[1] = 0x7ce93b78; //mr r9,r7 - adj_code[2] = 0x7cc83378; //mr r8,r6 - adj_code[3] = 0x7ca72b78; //mr r7,r5 - adj_code[4] = 0x7c862378; //mr r6,r4 - adj_code[5] = 0x7c651b78; //mr r5,r3 - - adj_code[6] = 0x3c000000; //lis r0,hi(wptr) - adj_code[6] |= ((unsigned long)wptr) >> 16; - - adj_code[7] = 0x3c600000; //lis r3,hi(hptr) - adj_code[7] |= ((unsigned long)hptr) >> 16; - - adj_code[8] = 0x60000000; //ori r0,r0,lo(wptr) - adj_code[8] |= ((unsigned long)wptr) & 0xFFFF; - - adj_code[9] = 0x60630000; //ori r3,r3,lo(hptr) - adj_code[9] |= ((unsigned long)hptr) & 0xFFFF; - - adj_code[10] = 0x7c0903a6; //mtctr r0 - adj_code[11] = 0x4e800420; //bctr - adj_code[12] = (unsigned long)hptr; - - /* Flush the Instruction cache: */ - /* MakeDataExecutable(adjustor,4*13); */ - /* This would require us to link with CoreServices.framework */ - { /* this should do the same: */ - int n = 13; - unsigned long *p = adj_code; - while (n--) { - __asm__ volatile ("dcbf 0,%0\n\tsync\n\ticbi 0,%0" : : "r" (p)); - p++; - } - __asm__ volatile ("sync\n\tisync"); + /* Ensure that instruction cache is consistent with our new code */ + __asm__ volatile("call_pal %0" : : "i" (PAL_imb)); + } +#elif defined(powerpc_TARGET_ARCH) && defined(linux_TARGET_OS) + +#define OP_LO(op,lo) ((((unsigned)(op)) << 16) | (((unsigned)(lo)) & 0xFFFF)) +#define OP_HI(op,hi) ((((unsigned)(op)) << 16) | (((unsigned)(hi)) >> 16)) + { + /* The PowerPC Linux (32-bit) calling convention is annoyingly complex. + We need to calculate all the details of the stack frame layout, + taking into account the types of all the arguments, and then + generate code on the fly. */ + + int src_gpr = 3, dst_gpr = 5; + int fpr = 3; + int src_offset = 0, dst_offset = 0; + int n = strlen(typeString),i; + int src_locs[n], dst_locs[n]; + int frameSize; + unsigned *code; + + /* Step 1: + Calculate where the arguments should go. 
+ src_locs[] will contain the locations of the arguments in the + original stack frame passed to the adjustor. + dst_locs[] will contain the locations of the arguments after the + adjustor runs, on entry to the wrapper proc pointed to by wptr. + + This algorithm is based on the one described on page 3-19 of the + System V ABI PowerPC Processor Supplement. + */ + for(i=0;typeString[i];i++) + { + char t = typeString[i]; + if((t == 'f' || t == 'd') && fpr <= 8) + src_locs[i] = dst_locs[i] = -32-(fpr++); + else + { + if(t == 'l' && src_gpr <= 9) + { + if((src_gpr & 1) == 0) + src_gpr++; + src_locs[i] = -src_gpr; + src_gpr += 2; + } + else if(t == 'i' && src_gpr <= 10) + { + src_locs[i] = -(src_gpr++); + } + else + { + if(t == 'l' || t == 'd') + { + if(src_offset % 8) + src_offset += 4; + } + src_locs[i] = src_offset; + src_offset += (t == 'l' || t == 'd') ? 8 : 4; + } + + if(t == 'l' && dst_gpr <= 9) + { + if((dst_gpr & 1) == 0) + dst_gpr++; + dst_locs[i] = -dst_gpr; + dst_gpr += 2; + } + else if(t == 'i' && dst_gpr <= 10) + { + dst_locs[i] = -(dst_gpr++); + } + else + { + if(t == 'l' || t == 'd') + { + if(dst_offset % 8) + dst_offset += 4; + } + dst_locs[i] = dst_offset; + dst_offset += (t == 'l' || t == 'd') ? 8 : 4; + } + } + } + + frameSize = dst_offset + 8; + frameSize = (frameSize+15) & ~0xF; + + /* Step 2: + Build the adjustor. + */ + // allocate space for at most 4 insns per parameter + // plus 14 more instructions. + adjustor = mallocBytesRWX(4 * (4*n + 14)); + code = (unsigned*)adjustor; + + *code++ = 0x48000008; // b *+8 + // * Put the hptr in a place where freeHaskellFunctionPtr + // can get at it. + *code++ = (unsigned) hptr; + + // * save the link register + *code++ = 0x7c0802a6; // mflr r0; + *code++ = 0x90010004; // stw r0, 4(r1); + // * and build a new stack frame + *code++ = OP_LO(0x9421, -frameSize); // stwu r1, -frameSize(r1) + + // * now generate instructions to copy arguments + // from the old stack frame into the new stack frame. + for(i=n-1;i>=0;i--) + { + if(src_locs[i] < -32) + ASSERT(dst_locs[i] == src_locs[i]); + else if(src_locs[i] < 0) + { + // source in GPR. + ASSERT(typeString[i] != 'f' && typeString[i] != 'd'); + if(dst_locs[i] < 0) + { + ASSERT(dst_locs[i] > -32); + // dst is in GPR, too. + + if(typeString[i] == 'l') + { + // mr dst+1, src+1 + *code++ = 0x7c000378 + | ((-dst_locs[i]+1) << 16) + | ((-src_locs[i]+1) << 11) + | ((-src_locs[i]+1) << 21); + } + // mr dst, src + *code++ = 0x7c000378 + | ((-dst_locs[i]) << 16) + | ((-src_locs[i]) << 11) + | ((-src_locs[i]) << 21); + } + else + { + if(typeString[i] == 'l') + { + // stw src+1, dst_offset+4(r1) + *code++ = 0x90010000 + | ((-src_locs[i]+1) << 21) + | (dst_locs[i] + 4); + } + + // stw src, dst_offset(r1) + *code++ = 0x90010000 + | ((-src_locs[i]) << 21) + | (dst_locs[i] + 8); + } + } + else + { + ASSERT(dst_locs[i] >= 0); + ASSERT(typeString[i] != 'f' && typeString[i] != 'd'); + + if(typeString[i] == 'l') + { + // lwz r0, src_offset(r1) + *code++ = 0x80010000 + | (src_locs[i] + frameSize + 8 + 4); + // stw r0, dst_offset(r1) + *code++ = 0x90010000 + | (dst_locs[i] + 8 + 4); + } + // lwz r0, src_offset(r1) + *code++ = 0x80010000 + | (src_locs[i] + frameSize + 8); + // stw r0, dst_offset(r1) + *code++ = 0x90010000 + | (dst_locs[i] + 8); + } + } + + // * hptr will be the new first argument. + // lis r3, hi(hptr) + *code++ = OP_HI(0x3c60, hptr); + // ori r3,r3,lo(hptr) + *code++ = OP_LO(0x6063, hptr); + + // * we need to return to a piece of code + // which will tear down the stack frame. 
+ // lis r11,hi(obscure_ccall_ret_code) + *code++ = OP_HI(0x3d60, obscure_ccall_ret_code); + // ori r11,r11,lo(obscure_ccall_ret_code) + *code++ = OP_LO(0x616b, obscure_ccall_ret_code); + // mtlr r11 + *code++ = 0x7d6803a6; + + // * jump to wptr + // lis r11,hi(wptr) + *code++ = OP_HI(0x3d60, wptr); + // ori r11,r11,lo(wptr) + *code++ = OP_LO(0x616b, wptr); + // mtctr r11 + *code++ = 0x7d6903a6; + // bctr + *code++ = 0x4e800420; + + // Flush the Instruction cache: + { + unsigned *p = adjustor; + while(p < code) + { + __asm__ volatile ("dcbf 0,%0\n\tsync\n\ticbi 0,%0" + : : "r" (p)); + p++; + } + __asm__ volatile ("sync\n\tisync"); + } + } + +#elif defined(powerpc_TARGET_ARCH) || defined(powerpc64_TARGET_ARCH) + +#define OP_LO(op,lo) ((((unsigned)(op)) << 16) | (((unsigned)(lo)) & 0xFFFF)) +#define OP_HI(op,hi) ((((unsigned)(op)) << 16) | (((unsigned)(hi)) >> 16)) + { + /* The following code applies to all PowerPC and PowerPC64 platforms + whose stack layout is based on the AIX ABI. + + Besides (obviously) AIX, this includes + Mac OS 9 and BeOS/PPC (may they rest in peace), + which use the 32-bit AIX ABI + powerpc64-linux, + which uses the 64-bit AIX ABI + and Darwin (Mac OS X), + which uses the same stack layout as AIX, + but no function descriptors. + + The actual stack-frame shuffling is implemented out-of-line + in the function adjustorCode, in AdjustorAsm.S. + Here, we set up an AdjustorStub structure, which + is a function descriptor (on platforms that have function + descriptors) or a short piece of stub code (on Darwin) to call + adjustorCode with a pointer to the AdjustorStub struct loaded + into register r2. + + One nice thing about this is that there is _no_ code generated at + runtime on the platforms that have function descriptors. + */ + AdjustorStub *adjustorStub; + int sz = 0, extra_sz, total_sz; + + // from AdjustorAsm.s + // not declared as a function so that AIX-style + // fundescs can never get in the way. + extern void *adjustorCode; + +#ifdef FUNDESCS + adjustorStub = stgMallocBytes(sizeof(AdjustorStub), "createAdjustor"); +#else + adjustorStub = mallocBytesRWX(sizeof(AdjustorStub)); +#endif + adjustor = adjustorStub; + + adjustorStub->code = (void*) &adjustorCode; + +#ifdef FUNDESCS + // function descriptors are a cool idea. + // We don't need to generate any code at runtime. + adjustorStub->toc = adjustorStub; +#else + + // no function descriptors :-( + // We need to do things "by hand". +#if defined(powerpc_TARGET_ARCH) + // lis r2, hi(adjustorStub) + adjustorStub->lis = OP_HI(0x3c40, adjustorStub); + // ori r2, r2, lo(adjustorStub) + adjustorStub->ori = OP_LO(0x6042, adjustorStub); + // lwz r0, code(r2) + adjustorStub->lwz = OP_LO(0x8002, (char*)(&adjustorStub->code) + - (char*)adjustorStub); + // mtctr r0 + adjustorStub->mtctr = 0x7c0903a6; + // bctr + adjustorStub->bctr = 0x4e800420; +#else + barf("adjustor creation not supported on this platform"); +#endif + + // Flush the Instruction cache: + { + int n = sizeof(AdjustorStub)/sizeof(unsigned); + unsigned *p = (unsigned*)adjustor; + while(n--) + { + __asm__ volatile ("dcbf 0,%0\n\tsync\n\ticbi 0,%0" + : : "r" (p)); + p++; + } + __asm__ volatile ("sync\n\tisync"); + } +#endif + + // Calculate the size of the stack frame, in words. + while(*typeString) + { + char t = *typeString++; + + switch(t) + { +#if defined(powerpc_TARGET_ARCH) + // on 32-bit platforms, Double and Int64 occupy two words. + case 'd': + case 'l': + sz += 2; + break; +#endif + // everything else is one word. 
+ default: + sz += 1; + } + } + // The first eight words of the parameter area + // are just "backing store" for the parameters passed in + // the GPRs. extra_sz is the number of words beyond those first + // 8 words. + extra_sz = sz - 8; + if(extra_sz < 0) + extra_sz = 0; + + // Calculate the total size of the stack frame. + total_sz = (6 /* linkage area */ + + 8 /* minimum parameter area */ + + 2 /* two extra arguments */ + + extra_sz)*sizeof(StgWord); + + // align to 16 bytes. + // AIX only requires 8 bytes, but who cares? + total_sz = (total_sz+15) & ~0xF; + + // Fill in the information that adjustorCode in AdjustorAsm.S + // will use to create a new stack frame with the additional args. + adjustorStub->hptr = hptr; + adjustorStub->wptr = wptr; + adjustorStub->negative_framesize = -total_sz; + adjustorStub->extrawords_plus_one = extra_sz + 1; } + #elif defined(ia64_TARGET_ARCH) /* Up to 8 inputs are passed in registers. We flush the last two inputs to @@ -494,7 +839,7 @@ freeHaskellFunctionPtr(void* ptr) #if defined(i386_TARGET_ARCH) if ( *(unsigned char*)ptr != 0x68 && *(unsigned char*)ptr != 0x58 ) { - prog_belch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr); + errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr); return; } @@ -506,7 +851,7 @@ freeHaskellFunctionPtr(void* ptr) } #elif defined(sparc_TARGET_ARCH) if ( *(unsigned long*)ptr != 0x9C23A008UL ) { - prog_belch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr); + errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr); return; } @@ -514,24 +859,31 @@ freeHaskellFunctionPtr(void* ptr) freeStablePtr(*((StgStablePtr*)((unsigned long*)ptr + 11))); #elif defined(alpha_TARGET_ARCH) if ( *(StgWord64*)ptr != 0xa77b0018a61b0010L ) { - prog_belch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr); + errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr); return; } /* Free the stable pointer first..*/ freeStablePtr(*((StgStablePtr*)((unsigned char*)ptr + 0x10))); -#elif defined(powerpc_TARGET_ARCH) - if ( *(StgWord*)ptr != 0x7d0a4378 ) { - prog_belch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr); +#elif defined(powerpc_TARGET_ARCH) && defined(linux_TARGET_OS) + if ( *(StgWord*)ptr != 0x48000008 ) { + errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr); return; } - freeStablePtr(*((StgStablePtr*)((unsigned char*)ptr + 4*12))); + freeStablePtr(((StgStablePtr*)ptr)[1]); +#elif defined(powerpc_TARGET_ARCH) || defined(powerpc64_TARGET_ARCH) + extern void* adjustorCode; + if ( ((AdjustorStub*)ptr)->code != (StgFunPtr) &adjustorCode ) { + errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr); + return; + } + freeStablePtr(((AdjustorStub*)ptr)->hptr); #elif defined(ia64_TARGET_ARCH) IA64FunDesc *fdesc = (IA64FunDesc *)ptr; StgWord64 *code = (StgWord64 *)(fdesc+1); if (fdesc->ip != (StgWord64)code) { - prog_belch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr); + errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr); return; } freeStablePtr((StgStablePtr)code[16]); @@ -551,7 +903,7 @@ freeHaskellFunctionPtr(void* ptr) * * Perform initialisation of adjustor thunk layer (if needed.) */ -rtsBool +void initAdjustor(void) { #if defined(i386_TARGET_ARCH) @@ -572,15 +924,12 @@ initAdjustor(void) to return to it before tail jumping from the adjustor thunk. 
*/ - obscure_ccall_ret_code = stgMallocBytes(4, "initAdjustor"); + obscure_ccall_ret_code = mallocBytesRWX(4); obscure_ccall_ret_code[0x00] = (unsigned char)0x83; /* addl $0x4, %esp */ obscure_ccall_ret_code[0x01] = (unsigned char)0xc4; obscure_ccall_ret_code[0x02] = (unsigned char)0x04; obscure_ccall_ret_code[0x03] = (unsigned char)0xc3; /* ret */ - - execPage(obscure_ccall_ret_code, pageExecuteRead); #endif - return rtsTrue; }
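
For reference, the page-rounding that the new mallocBytesRWX() performs in the OpenBSD branch can be reproduced on any POSIX system that provides mprotect(2) and sysconf(_SC_PAGESIZE). The following is a minimal standalone sketch, not RTS code: alloc_exec_sketch and its error handling are illustrative names only, and calling mprotect on malloc'd storage carries the same portability caveat as in the patch itself.

/* Sketch of page-granular RWX protection over a malloc'd block,
   assuming POSIX mprotect(2) and sysconf(_SC_PAGESIZE).
   Names are illustrative; this is not the RTS's own allocator. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

static void *alloc_exec_sketch(size_t len)
{
    void *addr = malloc(len);
    if (addr == NULL)
        return NULL;

    uintptr_t pageSize         = (uintptr_t)sysconf(_SC_PAGESIZE);
    uintptr_t mask             = ~(pageSize - 1);
    uintptr_t startOfFirstPage = ((uintptr_t)addr)           & mask;
    uintptr_t startOfLastPage  = ((uintptr_t)addr + len - 1) & mask;
    uintptr_t size             = startOfLastPage - startOfFirstPage + pageSize;

    /* Make every page the block touches readable, writable and executable.
       Note that this also affects unrelated allocations sharing those pages. */
    if (mprotect((void *)startOfFirstPage, (size_t)size,
                 PROT_READ | PROT_WRITE | PROT_EXEC) != 0) {
        perror("mprotect");
        free(addr);
        return NULL;
    }
    return addr;
}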
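
The 14-byte i386 stdcall thunk built in createAdjustor has a fixed layout (popl %eax; pushl hptr; pushl %eax; movl $wptr,%eax; jmp *%eax). The sketch below writes the same byte sequence from a standalone helper; emit_stdcall_thunk is a hypothetical name, and 32-bit pointers are assumed, as on i386.

/* Sketch of the 14-byte stdcall adjustor layout shown in the patch.
   Assumes 32-bit pointers and an executable buffer of at least 14 bytes. */
#include <string.h>

typedef void (*fun_ptr)(void);

static void emit_stdcall_thunk(unsigned char *code, void *hptr, fun_ptr wptr)
{
    code[0x00] = 0x58;                 /* popl %eax   (grab the return address)  */
    code[0x01] = 0x68;                 /* pushl hptr  (32-bit immediate)         */
    memcpy(code + 0x02, &hptr, 4);
    code[0x06] = 0x50;                 /* pushl %eax  (put the return addr back) */
    code[0x07] = 0xb8;                 /* movl $wptr, %eax                       */
    memcpy(code + 0x08, &wptr, 4);
    code[0x0c] = 0xff;                 /* jmp *%eax                              */
    code[0x0d] = 0xe0;
}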
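
Both PowerPC code generators load 32-bit constants with a lis/ori pair built by the OP_HI and OP_LO macros. A small sketch of that idiom follows, using the same macro shapes as the patch; emit_load_r3 is an illustrative helper, not an RTS function.

/* Sketch of the lis/ori immediate-loading idiom: split a 32-bit constant
   into its high and low half-words and emit two instructions. */
#define OP_LO(op,lo)  ((((unsigned)(op)) << 16) | (((unsigned)(lo)) & 0xFFFF))
#define OP_HI(op,hi)  ((((unsigned)(op)) << 16) | (((unsigned)(hi)) >> 16))

/* Emit "lis r3,hi(val); ori r3,r3,lo(val)"; returns the number of words written. */
static int emit_load_r3(unsigned *code, unsigned val)
{
    code[0] = OP_HI(0x3c60, val);   /* lis r3, hi(val)     */
    code[1] = OP_LO(0x6063, val);   /* ori r3, r3, lo(val) */
    return 2;
}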
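
Finally, the stack-frame sizing done for the AIX-style ABI (sz, extra_sz, total_sz) can be summarised as a standalone calculation. This is a sketch for the 32-bit case only; aix_frame_size_bytes and WORD_BYTES are illustrative names, with WORD_BYTES standing in for sizeof(StgWord).

/* Sketch of the AIX-ABI frame sizing for 32-bit PowerPC: 'd' (double) and
   'l' (int64) occupy two words each, everything else one word; only words
   beyond the 8-word GPR backing store enlarge the frame. */
#include <stddef.h>

#define WORD_BYTES 4   /* sizeof(StgWord) on 32-bit PowerPC */

static size_t aix_frame_size_bytes(const char *typeString)
{
    int sz = 0;
    for (const char *p = typeString; *p; p++)
        sz += (*p == 'd' || *p == 'l') ? 2 : 1;

    int extra_sz = sz - 8;              /* words beyond the first 8 parameter words */
    if (extra_sz < 0)
        extra_sz = 0;

    size_t total_sz = (6                /* linkage area           */
                     + 8                /* minimum parameter area */
                     + 2                /* two extra arguments    */
                     + (size_t)extra_sz) * WORD_BYTES;

    return (total_sz + 15) & ~(size_t)0xF;   /* round up to a 16-byte boundary */
}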