the code is through with it, it has to be freed in order to release Haskell
and C resources. Failure to do so results in memory leaks on both the C and
Haskell sides.
-
*/
+
#include "PosixSource.h"
#include "Rts.h"
#include "RtsExternal.h"
#include <windows.h>
#endif
-/* Heavily arch-specific, I'm afraid.. */
+#if defined(openbsd_HOST_OS)
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/mman.h>
-typedef enum {
- pageExecuteRead,
- pageExecuteReadWrite
-} pageMode;
+/* OpenBSD (at least some releases) lacks the C99 stdint.h header, so define our own uintptr_t. */
+typedef unsigned long my_uintptr_t;
+#endif
+
+#if defined(powerpc_HOST_ARCH) && defined(linux_HOST_OS)
+#include <string.h>
+#endif
+
+/* Heavily arch-specific, I'm afraid.. */
/*
- * Function: execPage()
- *
- * Set the executable bit on page containing addr.
+ * Allocate len bytes which are readable, writable, and executable.
*
- * TODO: Can the code span more than one page? If yes, we need to make two
- * pages executable!
+ * ToDo: If this turns out to be a performance bottleneck, one could
+ * e.g. cache the last VirtualProtect/mprotect-ed region and do
+ * nothing in case of a cache hit.
*/
-static rtsBool
-execPage (void* addr, pageMode mode)
+static void*
+mallocBytesRWX(int len)
{
-#if defined(i386_TARGET_ARCH) && defined(_WIN32) && 0
- SYSTEM_INFO sInfo;
- DWORD dwOldProtect = 0;
-
- /* doesn't return a result, so presumably it can't fail... */
- GetSystemInfo(&sInfo);
-
- if ( VirtualProtect ( (void*)((unsigned long)addr & (sInfo.dwPageSize - 1)),
- sInfo.dwPageSize,
- ( mode == pageExecuteReadWrite ? PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ),
- &dwOldProtect) == 0 ) {
-# if 1
- DWORD rc = GetLastError();
- prog_belch("execPage: failed to protect 0x%p; error=%lu; old protection: %lu\n", addr, rc, dwOldProtect);
-# endif
- return rtsFalse;
- }
- return rtsTrue;
-#else
- (void)addr; (void)mode; /* keep gcc -Wall happy */
- return rtsTrue;
+ void *addr = stgMallocBytes(len, "mallocBytesRWX");
+#if defined(i386_HOST_ARCH) && defined(_WIN32)
+  /* This may be necessary on processors that distinguish between READ and
+     EXECUTE memory accesses, e.g. the Itanium. */
+ DWORD dwOldProtect = 0;
+ if (VirtualProtect (addr, len, PAGE_EXECUTE_READWRITE, &dwOldProtect) == 0) {
+ barf("mallocBytesRWX: failed to protect 0x%p; error=%lu; old protection: %lu\n",
+ addr, (unsigned long)GetLastError(), (unsigned long)dwOldProtect);
+ }
+#elif defined(openbsd_HOST_OS)
+ /* malloced memory isn't executable by default on OpenBSD */
+ my_uintptr_t pageSize = sysconf(_SC_PAGESIZE);
+ my_uintptr_t mask = ~(pageSize - 1);
+ my_uintptr_t startOfFirstPage = ((my_uintptr_t)addr ) & mask;
+ my_uintptr_t startOfLastPage = ((my_uintptr_t)addr + len - 1) & mask;
+ my_uintptr_t size = startOfLastPage - startOfFirstPage + pageSize;
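+  /* Worked example (illustrative only, assuming 4 KiB pages): for
+     addr = 0x1fc0 and len = 0x100 the block ends at 0x20bf, so
+     startOfFirstPage = 0x1000, startOfLastPage = 0x2000 and size = 0x2000,
+     i.e. both pages touched by the allocation are made executable. */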
+ if (mprotect((void*)startOfFirstPage, (size_t)size, PROT_EXEC | PROT_READ | PROT_WRITE) != 0) {
+ barf("mallocBytesRWX: failed to protect 0x%p\n", addr);
+ }
#endif
+ return addr;
}
-
-static unsigned char __obscure_ccall_ret_code [] =
-#if defined(i386_TARGET_ARCH)
-/* Now here's something obscure for you:
-
- When generating an adjustor thunk that uses the C calling
- convention, we have to make sure that the thunk kicks off
- the process of jumping into Haskell with a tail jump. Why?
- Because as a result of jumping in into Haskell we may end
- up freeing the very adjustor thunk we came from using
- freeHaskellFunctionPtr(). Hence, we better not return to
- the adjustor code on our way out, since it could by then
- point to junk.
-
- The fix is readily at hand, just include the opcodes
- for the C stack fixup code that we need to perform when
- returning in some static piece of memory and arrange
- to return to it before tail jumping from the adjustor thunk.
-
- For this to work we make the assumption that bytes in .data
- are considered executable.
-*/
- { 0x83, 0xc4, 0x04 /* addl $0x4, %esp */
- , 0xc3 /* ret */
- };
-#else
-/* No such mind-twisters on non-Intel platforms */
- { };
+#if defined(i386_HOST_ARCH)
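+/* Allocated and filled in by initAdjustor() below: a few bytes of executable
+   memory holding the C stack fixup code ("addl $0x4,%esp; ret") that ccall
+   adjustors push as their return address. */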
+static unsigned char *obscure_ccall_ret_code;
#endif
-#if defined(alpha_TARGET_ARCH)
+#if defined(alpha_HOST_ARCH)
/* To get the definition of PAL_imb: */
-# if defined(linux_TARGET_OS)
+# if defined(linux_HOST_OS)
# include <asm/pal.h>
# else
# include <machine/pal.h>
# endif
#endif
-#if defined(ia64_TARGET_ARCH)
+#if defined(ia64_HOST_ARCH)
#include "Storage.h"
/* Layout of a function descriptor */
}
#endif
+#if defined(powerpc_HOST_ARCH) && defined(linux_HOST_OS)
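+/* Return stub that ccall adjustors on powerpc-linux branch back to: it pops
+   the frame the adjustor built (via the back chain at 0(r1)), reloads the
+   caller's link register from the old frame's LR save word at 4(r1), and
+   returns to the original caller. */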
+__asm__("obscure_ccall_ret_code:\n\t"
+ "lwz 1,0(1)\n\t"
+ "lwz 0,4(1)\n\t"
+ "mtlr 0\n\t"
+ "blr");
+extern void obscure_ccall_ret_code(void);
+#endif
+
+#if defined(powerpc_HOST_ARCH) || defined(powerpc64_HOST_ARCH)
+#if !(defined(powerpc_HOST_ARCH) && defined(linux_HOST_OS))
+
+/* !!! !!! WARNING: !!! !!!
+ * This structure is accessed from AdjustorAsm.s
+ * Any changes here have to be mirrored in the offsets there.
+ */
+
+typedef struct AdjustorStub {
+#if defined(powerpc_HOST_ARCH) && defined(darwin_HOST_OS)
+ unsigned lis;
+ unsigned ori;
+ unsigned lwz;
+ unsigned mtctr;
+ unsigned bctr;
+ StgFunPtr code;
+#elif defined(powerpc64_HOST_ARCH) && defined(darwin_HOST_OS)
+ /* powerpc64-darwin: just guessing that it won't use fundescs. */
+ unsigned lis;
+ unsigned ori;
+ unsigned rldimi;
+ unsigned oris;
+ unsigned ori2;
+ unsigned lwz;
+ unsigned mtctr;
+ unsigned bctr;
+ StgFunPtr code;
+#else
+ /* fundesc-based ABIs */
+#define FUNDESCS
+ StgFunPtr code;
+ struct AdjustorStub
+ *toc;
+ void *env;
+#endif
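+    /* hptr is the StgStablePtr keeping the Haskell closure alive and wptr
+       the C entry point the adjustor eventually jumps to; the last two
+       fields are read by adjustorCode in AdjustorAsm.S when it builds the
+       new stack frame (they are filled in by createAdjustor below). */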
+ StgStablePtr hptr;
+ StgFunPtr wptr;
+ StgInt negative_framesize;
+ StgInt extrawords_plus_one;
+} AdjustorStub;
+
+#endif
+#endif
+
void*
-createAdjustor(int cconv, StgStablePtr hptr, StgFunPtr wptr)
+createAdjustor(int cconv, StgStablePtr hptr,
+ StgFunPtr wptr,
+ char *typeString
+#if !defined(powerpc_HOST_ARCH) && !defined(powerpc64_HOST_ARCH)
+ STG_UNUSED
+#endif
+ )
{
void *adjustor = NULL;
switch (cconv)
{
case 0: /* _stdcall */
-#if defined(i386_TARGET_ARCH)
+#if defined(i386_HOST_ARCH)
/* Magic constant computed by inspecting the code length of
the following assembly language snippet
(offset and machine code prefixed):
<c>: ff e0 jmp %eax # and jump to it.
# the callee cleans up the stack
*/
- if ((adjustor = stgMallocBytes(14, "createAdjustor")) != NULL) {
+ adjustor = mallocBytesRWX(14);
+ {
unsigned char *const adj_code = (unsigned char *)adjustor;
adj_code[0x00] = (unsigned char)0x58; /* popl %eax */
adj_code[0x0c] = (unsigned char)0xff; /* jmp %eax */
adj_code[0x0d] = (unsigned char)0xe0;
-
- execPage(adjustor, pageExecuteReadWrite);
}
#endif
break;
case 1: /* _ccall */
-#if defined(i386_TARGET_ARCH)
+#if defined(i386_HOST_ARCH)
/* Magic constant computed by inspecting the code length of
the following assembly language snippet
(offset and machine code prefixed):
<00>: 68 ef be ad de pushl $0xdeadbeef # constant is large enough to
# hold a StgStablePtr
<05>: b8 fa ef ff 00 movl $0x00ffeffa, %eax # load up wptr
- <0a>: 68 ef be ad de pushl $__obscure_ccall_ret_code # push the return address
+ <0a>: 68 ef be ad de pushl $obscure_ccall_ret_code # push the return address
<0f>: ff e0 jmp *%eax # jump to wptr
The ccall'ing version is a tad different, passing in the return
via the stable pointer.) (The auto-generated C stub is in on this
game, don't worry :-)
- See the comment next to __obscure_ccall_ret_code why we need to
+ See the comment next to obscure_ccall_ret_code why we need to
perform a tail jump instead of a call, followed by some C stack
fixup.
That's (thankfully) the case here with the restricted set of
return types that we support.
*/
- if ((adjustor = stgMallocBytes(17, "createAdjustor")) != NULL) {
+ adjustor = mallocBytesRWX(17);
+ {
unsigned char *const adj_code = (unsigned char *)adjustor;
adj_code[0x00] = (unsigned char)0x68; /* pushl hptr (which is a dword immediate ) */
adj_code[0x05] = (unsigned char)0xb8; /* movl $wptr, %eax */
*((StgFunPtr*)(adj_code + 0x06)) = (StgFunPtr)wptr;
- adj_code[0x0a] = (unsigned char)0x68; /* pushl __obscure_ccall_ret_code */
- *((StgFunPtr*)(adj_code + 0x0b)) = (StgFunPtr)__obscure_ccall_ret_code;
+ adj_code[0x0a] = (unsigned char)0x68; /* pushl obscure_ccall_ret_code */
+ *((StgFunPtr*)(adj_code + 0x0b)) = (StgFunPtr)obscure_ccall_ret_code;
adj_code[0x0f] = (unsigned char)0xff; /* jmp *%eax */
adj_code[0x10] = (unsigned char)0xe0;
-
- execPage(adjustor, pageExecuteReadWrite);
}
-#elif defined(sparc_TARGET_ARCH)
+#elif defined(sparc_HOST_ARCH)
/* Magic constant computed by inspecting the code length of the following
assembly language snippet (offset and machine code prefixed):
similarly, and local variables should be accessed via %fp, not %sp. In a
nutshell: This should work! (Famous last words! :-)
*/
- if ((adjustor = stgMallocBytes(4*(11+1), "createAdjustor")) != NULL) {
+ adjustor = mallocBytesRWX(4*(11+1));
+ {
unsigned long *const adj_code = (unsigned long *)adjustor;
adj_code[ 0] = 0x9C23A008UL; /* sub %sp, 8, %sp */
asm("nop");
asm("nop");
}
-#elif defined(alpha_TARGET_ARCH)
+#elif defined(alpha_HOST_ARCH)
/* Magic constant computed by inspecting the code length of
the following assembly language snippet
(offset and machine code prefixed; note that the machine code
divided by 4, taking the lowest 14 bits.
We only support passing 4 or fewer argument words, for the same
- reason described under sparc_TARGET_ARCH above by JRS, 21 Aug 01.
+ reason described under sparc_HOST_ARCH above by JRS, 21 Aug 01.
On the Alpha the first 6 integer arguments are in a0 through a5,
and the rest on the stack. Hence we want to shuffle the original
caller's arguments by two.
4 bytes (getting rid of the nop), hence saving memory. [ccshan]
*/
ASSERT(((StgWord64)wptr & 3) == 0);
- if ((adjustor = stgMallocBytes(48, "createAdjustor")) != NULL) {
+ adjustor = mallocBytesRWX(48);
+ {
StgWord64 *const code = (StgWord64 *)adjustor;
code[0] = 0x4610041246520414L;
/* Ensure that instruction cache is consistent with our new code */
__asm__ volatile("call_pal %0" : : "i" (PAL_imb));
}
-#elif defined(powerpc_TARGET_ARCH)
-/*
- For PowerPC, the following code is used:
-
- mr r10,r8
- mr r9,r7
- mr r8,r6
- mr r7,r5
- mr r6,r4
- mr r5,r3
- lis r0,0xDEAD ;hi(wptr)
- lis r3,0xDEAF ;hi(hptr)
- ori r0,r0,0xBEEF ; lo(wptr)
- ori r3,r3,0xFACE ; lo(hptr)
- mtctr r0
- bctr
-
- The arguments (passed in registers r3 - r10) are shuffled along by two to
- make room for hptr and a dummy argument. As r9 and r10 are overwritten by
- this code, it only works for up to 6 arguments (when floating point arguments
- are involved, this may be more or less, depending on the exact situation).
-*/
- if ((adjustor = stgMallocBytes(4*13, "createAdjustor")) != NULL) {
- unsigned long *const adj_code = (unsigned long *)adjustor;
-
- // make room for extra arguments
- adj_code[0] = 0x7d0a4378; //mr r10,r8
- adj_code[1] = 0x7ce93b78; //mr r9,r7
- adj_code[2] = 0x7cc83378; //mr r8,r6
- adj_code[3] = 0x7ca72b78; //mr r7,r5
- adj_code[4] = 0x7c862378; //mr r6,r4
- adj_code[5] = 0x7c651b78; //mr r5,r3
-
- adj_code[6] = 0x3c000000; //lis r0,hi(wptr)
- adj_code[6] |= ((unsigned long)wptr) >> 16;
-
- adj_code[7] = 0x3c600000; //lis r3,hi(hptr)
- adj_code[7] |= ((unsigned long)hptr) >> 16;
-
- adj_code[8] = 0x60000000; //ori r0,r0,lo(wptr)
- adj_code[8] |= ((unsigned long)wptr) & 0xFFFF;
-
- adj_code[9] = 0x60630000; //ori r3,r3,lo(hptr)
- adj_code[9] |= ((unsigned long)hptr) & 0xFFFF;
-
- adj_code[10] = 0x7c0903a6; //mtctr r0
- adj_code[11] = 0x4e800420; //bctr
- adj_code[12] = (unsigned long)hptr;
-
- // Flush the Instruction cache:
- // MakeDataExecutable(adjustor,4*13);
- /* This would require us to link with CoreServices.framework */
- { /* this should do the same: */
- int n = 13;
- unsigned long *p = adj_code;
- while(n--)
- {
- __asm__ volatile ("dcbf 0,%0\n\tsync\n\ticbi 0,%0"
- : : "r" (p));
- p++;
- }
- __asm__ volatile ("sync\n\tisync");
- }
- }
-#elif defined(ia64_TARGET_ARCH)
+#elif defined(powerpc_HOST_ARCH) && defined(linux_HOST_OS)
+
+#define OP_LO(op,lo) ((((unsigned)(op)) << 16) | (((unsigned)(lo)) & 0xFFFF))
+#define OP_HI(op,hi) ((((unsigned)(op)) << 16) | (((unsigned)(hi)) >> 16))
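+/* OP_HI/OP_LO splice a 32-bit value into a lis/ori pair: the first argument
+   is the top halfword of the instruction (opcode plus register fields), the
+   second supplies the 16-bit immediate.  E.g. OP_HI(0x3c60, hptr) assembles
+   "lis r3,hi16(hptr)" and OP_LO(0x6063, hptr) assembles
+   "ori r3,r3,lo16(hptr)", exactly as used below. */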
+ {
+ /* The PowerPC Linux (32-bit) calling convention is annoyingly complex.
+ We need to calculate all the details of the stack frame layout,
+ taking into account the types of all the arguments, and then
+ generate code on the fly. */
+
+ int src_gpr = 3, dst_gpr = 5;
+ int fpr = 3;
+ int src_offset = 0, dst_offset = 0;
+ int n = strlen(typeString),i;
+ int src_locs[n], dst_locs[n];
+ int frameSize;
+ unsigned *code;
+
+ /* Step 1:
+ Calculate where the arguments should go.
+ src_locs[] will contain the locations of the arguments in the
+ original stack frame passed to the adjustor.
+ dst_locs[] will contain the locations of the arguments after the
+ adjustor runs, on entry to the wrapper proc pointed to by wptr.
+
+ This algorithm is based on the one described on page 3-19 of the
+ System V ABI PowerPC Processor Supplement.
+ */
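+    /* Encoding used in src_locs[]/dst_locs[] (a reading aid, inferred from
+       the assignments below):
+         loc < -32        argument lives in FPR f(-loc - 32)
+         -32 < loc < 0    argument lives in GPR r(-loc)
+         loc >= 0         argument lives at byte offset loc in the stack
+                          parameter area */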
+ for(i=0;typeString[i];i++)
+ {
+ char t = typeString[i];
+ if((t == 'f' || t == 'd') && fpr <= 8)
+ src_locs[i] = dst_locs[i] = -32-(fpr++);
+ else
+ {
+ if(t == 'l' && src_gpr <= 9)
+ {
+ if((src_gpr & 1) == 0)
+ src_gpr++;
+ src_locs[i] = -src_gpr;
+ src_gpr += 2;
+ }
+ else if(t == 'i' && src_gpr <= 10)
+ {
+ src_locs[i] = -(src_gpr++);
+ }
+ else
+ {
+ if(t == 'l' || t == 'd')
+ {
+ if(src_offset % 8)
+ src_offset += 4;
+ }
+ src_locs[i] = src_offset;
+ src_offset += (t == 'l' || t == 'd') ? 8 : 4;
+ }
+
+ if(t == 'l' && dst_gpr <= 9)
+ {
+ if((dst_gpr & 1) == 0)
+ dst_gpr++;
+ dst_locs[i] = -dst_gpr;
+ dst_gpr += 2;
+ }
+ else if(t == 'i' && dst_gpr <= 10)
+ {
+ dst_locs[i] = -(dst_gpr++);
+ }
+ else
+ {
+ if(t == 'l' || t == 'd')
+ {
+ if(dst_offset % 8)
+ dst_offset += 4;
+ }
+ dst_locs[i] = dst_offset;
+ dst_offset += (t == 'l' || t == 'd') ? 8 : 4;
+ }
+ }
+ }
+
+ frameSize = dst_offset + 8;
+ frameSize = (frameSize+15) & ~0xF;
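+    /* frameSize is the 8-byte linkage area (back chain + LR save word) plus
+       the parameter area, rounded up to a multiple of 16 bytes; the stack
+       offsets in dst_locs[] are therefore biased by 8 in the stores below. */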
+
+ /* Step 2:
+ Build the adjustor.
+ */
+ // allocate space for at most 4 insns per parameter
+ // plus 14 more instructions.
+ adjustor = mallocBytesRWX(4 * (4*n + 14));
+ code = (unsigned*)adjustor;
+
+ *code++ = 0x48000008; // b *+8
+ // * Put the hptr in a place where freeHaskellFunctionPtr
+ // can get at it.
+ *code++ = (unsigned) hptr;
+
+ // * save the link register
+ *code++ = 0x7c0802a6; // mflr r0;
+ *code++ = 0x90010004; // stw r0, 4(r1);
+ // * and build a new stack frame
+ *code++ = OP_LO(0x9421, -frameSize); // stwu r1, -frameSize(r1)
+
+ // * now generate instructions to copy arguments
+ // from the old stack frame into the new stack frame.
+ for(i=n-1;i>=0;i--)
+ {
+ if(src_locs[i] < -32)
+ ASSERT(dst_locs[i] == src_locs[i]);
+ else if(src_locs[i] < 0)
+ {
+ // source in GPR.
+ ASSERT(typeString[i] != 'f' && typeString[i] != 'd');
+ if(dst_locs[i] < 0)
+ {
+ ASSERT(dst_locs[i] > -32);
+ // dst is in GPR, too.
+
+ if(typeString[i] == 'l')
+ {
+ // mr dst+1, src+1
+ *code++ = 0x7c000378
+ | ((-dst_locs[i]+1) << 16)
+ | ((-src_locs[i]+1) << 11)
+ | ((-src_locs[i]+1) << 21);
+ }
+ // mr dst, src
+ *code++ = 0x7c000378
+ | ((-dst_locs[i]) << 16)
+ | ((-src_locs[i]) << 11)
+ | ((-src_locs[i]) << 21);
+ }
+ else
+ {
+ if(typeString[i] == 'l')
+ {
+ // stw src+1, dst_offset+4(r1)
+ *code++ = 0x90010000
+ | ((-src_locs[i]+1) << 21)
+                            | (dst_locs[i] + 8 + 4);
+ }
+
+ // stw src, dst_offset(r1)
+ *code++ = 0x90010000
+ | ((-src_locs[i]) << 21)
+ | (dst_locs[i] + 8);
+ }
+ }
+ else
+ {
+ ASSERT(dst_locs[i] >= 0);
+ ASSERT(typeString[i] != 'f' && typeString[i] != 'd');
+
+ if(typeString[i] == 'l')
+ {
+ // lwz r0, src_offset(r1)
+ *code++ = 0x80010000
+ | (src_locs[i] + frameSize + 8 + 4);
+ // stw r0, dst_offset(r1)
+ *code++ = 0x90010000
+ | (dst_locs[i] + 8 + 4);
+ }
+ // lwz r0, src_offset(r1)
+ *code++ = 0x80010000
+ | (src_locs[i] + frameSize + 8);
+ // stw r0, dst_offset(r1)
+ *code++ = 0x90010000
+ | (dst_locs[i] + 8);
+ }
+ }
+
+ // * hptr will be the new first argument.
+ // lis r3, hi(hptr)
+ *code++ = OP_HI(0x3c60, hptr);
+ // ori r3,r3,lo(hptr)
+ *code++ = OP_LO(0x6063, hptr);
+
+ // * we need to return to a piece of code
+ // which will tear down the stack frame.
+ // lis r11,hi(obscure_ccall_ret_code)
+ *code++ = OP_HI(0x3d60, obscure_ccall_ret_code);
+ // ori r11,r11,lo(obscure_ccall_ret_code)
+ *code++ = OP_LO(0x616b, obscure_ccall_ret_code);
+ // mtlr r11
+ *code++ = 0x7d6803a6;
+
+ // * jump to wptr
+ // lis r11,hi(wptr)
+ *code++ = OP_HI(0x3d60, wptr);
+ // ori r11,r11,lo(wptr)
+ *code++ = OP_LO(0x616b, wptr);
+ // mtctr r11
+ *code++ = 0x7d6903a6;
+ // bctr
+ *code++ = 0x4e800420;
+
+ // Flush the Instruction cache:
+ {
+ unsigned *p = adjustor;
+ while(p < code)
+ {
+ __asm__ volatile ("dcbf 0,%0\n\tsync\n\ticbi 0,%0"
+ : : "r" (p));
+ p++;
+ }
+ __asm__ volatile ("sync\n\tisync");
+ }
+ }
+
+#elif defined(powerpc_HOST_ARCH) || defined(powerpc64_HOST_ARCH)
+
+#define OP_LO(op,lo) ((((unsigned)(op)) << 16) | (((unsigned)(lo)) & 0xFFFF))
+#define OP_HI(op,hi) ((((unsigned)(op)) << 16) | (((unsigned)(hi)) >> 16))
+ {
+ /* The following code applies to all PowerPC and PowerPC64 platforms
+ whose stack layout is based on the AIX ABI.
+
+ Besides (obviously) AIX, this includes
+ Mac OS 9 and BeOS/PPC (may they rest in peace),
+ which use the 32-bit AIX ABI
+ powerpc64-linux,
+ which uses the 64-bit AIX ABI
+ and Darwin (Mac OS X),
+ which uses the same stack layout as AIX,
+ but no function descriptors.
+
+ The actual stack-frame shuffling is implemented out-of-line
+ in the function adjustorCode, in AdjustorAsm.S.
+ Here, we set up an AdjustorStub structure, which
+ is a function descriptor (on platforms that have function
+ descriptors) or a short piece of stub code (on Darwin) to call
+ adjustorCode with a pointer to the AdjustorStub struct loaded
+ into register r2.
+
+ One nice thing about this is that there is _no_ code generated at
+ runtime on the platforms that have function descriptors.
+ */
+ AdjustorStub *adjustorStub;
+ int sz = 0, extra_sz, total_sz;
+
+ // from AdjustorAsm.s
+ // not declared as a function so that AIX-style
+ // fundescs can never get in the way.
+ extern void *adjustorCode;
+
+#ifdef FUNDESCS
+ adjustorStub = stgMallocBytes(sizeof(AdjustorStub), "createAdjustor");
+#else
+ adjustorStub = mallocBytesRWX(sizeof(AdjustorStub));
+#endif
+ adjustor = adjustorStub;
+
+ adjustorStub->code = (void*) &adjustorCode;
+
+#ifdef FUNDESCS
+ // function descriptors are a cool idea.
+ // We don't need to generate any code at runtime.
+ adjustorStub->toc = adjustorStub;
+#else
+
+ // no function descriptors :-(
+ // We need to do things "by hand".
+#if defined(powerpc_HOST_ARCH)
+ // lis r2, hi(adjustorStub)
+ adjustorStub->lis = OP_HI(0x3c40, adjustorStub);
+ // ori r2, r2, lo(adjustorStub)
+ adjustorStub->ori = OP_LO(0x6042, adjustorStub);
+ // lwz r0, code(r2)
+ adjustorStub->lwz = OP_LO(0x8002, (char*)(&adjustorStub->code)
+ - (char*)adjustorStub);
+ // mtctr r0
+ adjustorStub->mtctr = 0x7c0903a6;
+ // bctr
+ adjustorStub->bctr = 0x4e800420;
+#else
+ barf("adjustor creation not supported on this platform");
+#endif
+
+ // Flush the Instruction cache:
+ {
+ int n = sizeof(AdjustorStub)/sizeof(unsigned);
+ unsigned *p = (unsigned*)adjustor;
+ while(n--)
+ {
+ __asm__ volatile ("dcbf 0,%0\n\tsync\n\ticbi 0,%0"
+ : : "r" (p));
+ p++;
+ }
+ __asm__ volatile ("sync\n\tisync");
+ }
+#endif
+
+ // Calculate the size of the stack frame, in words.
+ while(*typeString)
+ {
+ char t = *typeString++;
+
+ switch(t)
+ {
+#if defined(powerpc_HOST_ARCH)
+ // on 32-bit platforms, Double and Int64 occupy two words.
+ case 'd':
+ case 'l':
+ sz += 2;
+ break;
+#endif
+ // everything else is one word.
+ default:
+ sz += 1;
+ }
+ }
+ // The first eight words of the parameter area
+ // are just "backing store" for the parameters passed in
+ // the GPRs. extra_sz is the number of words beyond those first
+ // 8 words.
+ extra_sz = sz - 8;
+ if(extra_sz < 0)
+ extra_sz = 0;
+
+ // Calculate the total size of the stack frame.
+ total_sz = (6 /* linkage area */
+ + 8 /* minimum parameter area */
+ + 2 /* two extra arguments */
+ + extra_sz)*sizeof(StgWord);
+
+ // align to 16 bytes.
+ // AIX only requires 8 bytes, but who cares?
+ total_sz = (total_sz+15) & ~0xF;
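+        // Illustrative example (32-bit PowerPC, 4-byte StgWord): ten words
+        // of arguments give sz = 10, so extra_sz = 2 and
+        // total_sz = (6+8+2+2)*4 = 72, rounded up to 80 bytes.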
+
+ // Fill in the information that adjustorCode in AdjustorAsm.S
+ // will use to create a new stack frame with the additional args.
+ adjustorStub->hptr = hptr;
+ adjustorStub->wptr = wptr;
+ adjustorStub->negative_framesize = -total_sz;
+ adjustorStub->extrawords_plus_one = extra_sz + 1;
+ }
+
+#elif defined(ia64_HOST_ARCH)
/*
Up to 8 inputs are passed in registers. We flush the last two inputs to
the stack, initially into the 16-byte scratch region left by the caller.
void
freeHaskellFunctionPtr(void* ptr)
{
-#if defined(i386_TARGET_ARCH)
+#if defined(i386_HOST_ARCH)
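+ /* 0x68 is the "pushl hptr" opcode that starts a ccall adjustor and 0x58
+    the "popl %eax" that starts a stdcall adjustor (see createAdjustor). */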
if ( *(unsigned char*)ptr != 0x68 &&
*(unsigned char*)ptr != 0x58 ) {
- prog_belch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
+ errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
return;
}
} else {
freeStablePtr(*((StgStablePtr*)((unsigned char*)ptr + 0x02)));
}
-#elif defined(sparc_TARGET_ARCH)
+#elif defined(sparc_HOST_ARCH)
if ( *(unsigned long*)ptr != 0x9C23A008UL ) {
- prog_belch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
+ errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
return;
}
/* Free the stable pointer first..*/
freeStablePtr(*((StgStablePtr*)((unsigned long*)ptr + 11)));
-#elif defined(alpha_TARGET_ARCH)
+#elif defined(alpha_HOST_ARCH)
if ( *(StgWord64*)ptr != 0xa77b0018a61b0010L ) {
- prog_belch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
+ errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
return;
}
/* Free the stable pointer first..*/
freeStablePtr(*((StgStablePtr*)((unsigned char*)ptr + 0x10)));
-#elif defined(powerpc_TARGET_ARCH)
- if ( *(StgWord*)ptr != 0x7d0a4378 ) {
- prog_belch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
+#elif defined(powerpc_HOST_ARCH) && defined(linux_HOST_OS)
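+ /* The first instruction createAdjustor emits for powerpc-linux is
+    "b *+8" (0x48000008), and hptr is stored in the word right after it;
+    hence the ((StgStablePtr*)ptr)[1] below. */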
+ if ( *(StgWord*)ptr != 0x48000008 ) {
+ errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
return;
}
- freeStablePtr(*((StgStablePtr*)((unsigned char*)ptr + 4*12)));
-#elif defined(ia64_TARGET_ARCH)
+ freeStablePtr(((StgStablePtr*)ptr)[1]);
+#elif defined(powerpc_HOST_ARCH) || defined(powerpc64_HOST_ARCH)
+ extern void* adjustorCode;
+ if ( ((AdjustorStub*)ptr)->code != (StgFunPtr) &adjustorCode ) {
+ errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
+ return;
+ }
+ freeStablePtr(((AdjustorStub*)ptr)->hptr);
+#elif defined(ia64_HOST_ARCH)
IA64FunDesc *fdesc = (IA64FunDesc *)ptr;
StgWord64 *code = (StgWord64 *)(fdesc+1);
if (fdesc->ip != (StgWord64)code) {
- prog_belch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
+ errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
return;
}
freeStablePtr((StgStablePtr)code[16]);
*
* Perform initialisation of adjustor thunk layer (if needed.)
*/
-rtsBool
+void
initAdjustor(void)
{
- return execPage(__obscure_ccall_ret_code, pageExecuteRead);
+#if defined(i386_HOST_ARCH)
+ /* Now here's something obscure for you:
+
+ When generating an adjustor thunk that uses the C calling
+ convention, we have to make sure that the thunk kicks off
+ the process of jumping into Haskell with a tail jump. Why?
+     Because as a result of jumping into Haskell we may end
+ up freeing the very adjustor thunk we came from using
+ freeHaskellFunctionPtr(). Hence, we better not return to
+ the adjustor code on our way out, since it could by then
+ point to junk.
+
+ The fix is readily at hand, just include the opcodes
+ for the C stack fixup code that we need to perform when
+ returning in some static piece of memory and arrange
+ to return to it before tail jumping from the adjustor thunk.
+ */
+
+ obscure_ccall_ret_code = mallocBytesRWX(4);
+
+ obscure_ccall_ret_code[0x00] = (unsigned char)0x83; /* addl $0x4, %esp */
+ obscure_ccall_ret_code[0x01] = (unsigned char)0xc4;
+ obscure_ccall_ret_code[0x02] = (unsigned char)0x04;
+
+ obscure_ccall_ret_code[0x03] = (unsigned char)0xc3; /* ret */
+#endif
}