#include <windows.h>
#endif
-#if defined(openbsd_TARGET_OS)
+#if defined(openbsd_HOST_OS)
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
typedef unsigned long my_uintptr_t;
#endif
+#if defined(powerpc_HOST_ARCH) && defined(linux_HOST_OS)
+#include <string.h>
+#endif
+
/* Heavily arch-specific, I'm afraid.. */
/*
mallocBytesRWX(int len)
{
void *addr = stgMallocBytes(len, "mallocBytesRWX");
-#if defined(i386_TARGET_ARCH) && defined(_WIN32)
+#if defined(i386_HOST_ARCH) && defined(_WIN32)
/* This could be necessary for processors which distinguish between READ and
EXECUTE memory accesses, e.g. Itaniums. */
DWORD dwOldProtect = 0;
barf("mallocBytesRWX: failed to protect 0x%p; error=%lu; old protection: %lu\n",
addr, (unsigned long)GetLastError(), (unsigned long)dwOldProtect);
}
-#elif defined(openbsd_TARGET_OS)
+#elif defined(openbsd_HOST_OS)
/* malloced memory isn't executable by default on OpenBSD */
my_uintptr_t pageSize = sysconf(_SC_PAGESIZE);
my_uintptr_t mask = ~(pageSize - 1);
return addr;
}
-#if defined(i386_TARGET_ARCH)
+#if defined(i386_HOST_ARCH)
static unsigned char *obscure_ccall_ret_code;
#endif
-#if defined(alpha_TARGET_ARCH)
+#if defined(alpha_HOST_ARCH)
/* To get the definition of PAL_imb: */
-# if defined(linux_TARGET_OS)
+# if defined(linux_HOST_OS)
# include <asm/pal.h>
# else
# include <machine/pal.h>
# endif
#endif
-#if defined(ia64_TARGET_ARCH)
+#if defined(ia64_HOST_ARCH)
#include "Storage.h"
/* Layout of a function descriptor */
}
#endif
+#if defined(powerpc_HOST_ARCH) && defined(linux_HOST_OS)
+__asm__("obscure_ccall_ret_code:\n\t"
+ "lwz 1,0(1)\n\t"
+ "lwz 0,4(1)\n\t"
+ "mtlr 0\n\t"
+ "blr");
+extern void obscure_ccall_ret_code(void);
+#endif
+
+#if defined(powerpc_HOST_ARCH) || defined(powerpc64_HOST_ARCH)
+#if !(defined(powerpc_HOST_ARCH) && defined(linux_HOST_OS))
+
+/* !!! !!! WARNING: !!! !!!
+ * This structure is accessed from AdjustorAsm.S
+ * Any changes here have to be mirrored in the offsets there.
+ */
+
+typedef struct AdjustorStub {
+#if defined(powerpc_HOST_ARCH) && defined(darwin_HOST_OS)
+ unsigned lis;
+ unsigned ori;
+ unsigned lwz;
+ unsigned mtctr;
+ unsigned bctr;
+ StgFunPtr code;
+#elif defined(powerpc64_HOST_ARCH) && defined(darwin_HOST_OS)
+ /* powerpc64-darwin: just guessing that it won't use fundescs. */
+ unsigned lis;
+ unsigned ori;
+ unsigned rldimi;
+ unsigned oris;
+ unsigned ori2;
+ unsigned lwz;
+ unsigned mtctr;
+ unsigned bctr;
+ StgFunPtr code;
+#else
+ /* fundesc-based ABIs */
+#define FUNDESCS
+ StgFunPtr code;
+ struct AdjustorStub
+ *toc;
+ void *env;
+#endif
+ StgStablePtr hptr;
+ StgFunPtr wptr;
+ StgInt negative_framesize;
+ StgInt extrawords_plus_one;
+} AdjustorStub;
+
+#endif
+#endif
+
void*
-createAdjustor(int cconv, StgStablePtr hptr, StgFunPtr wptr)
+createAdjustor(int cconv, StgStablePtr hptr,
+ StgFunPtr wptr,
+ char *typeString
+#if !defined(powerpc_HOST_ARCH) && !defined(powerpc64_HOST_ARCH)
+ STG_UNUSED
+#endif
+ )
{
void *adjustor = NULL;
switch (cconv)
{
case 0: /* _stdcall */
-#if defined(i386_TARGET_ARCH)
+#if defined(i386_HOST_ARCH)
/* Magic constant computed by inspecting the code length of
the following assembly language snippet
(offset and machine code prefixed):
break;
case 1: /* _ccall */
-#if defined(i386_TARGET_ARCH)
+#if defined(i386_HOST_ARCH)
/* Magic constant computed by inspecting the code length of
the following assembly language snippet
(offset and machine code prefixed):
adj_code[0x0f] = (unsigned char)0xff; /* jmp *%eax */
adj_code[0x10] = (unsigned char)0xe0;
}
-#elif defined(sparc_TARGET_ARCH)
+#elif defined(sparc_HOST_ARCH)
/* Magic constant computed by inspecting the code length of the following
assembly language snippet (offset and machine code prefixed):
asm("nop");
asm("nop");
}
-#elif defined(alpha_TARGET_ARCH)
+#elif defined(alpha_HOST_ARCH)
/* Magic constant computed by inspecting the code length of
the following assembly language snippet
(offset and machine code prefixed; note that the machine code
divided by 4, taking the lowest 14 bits.
We only support passing 4 or fewer argument words, for the same
- reason described under sparc_TARGET_ARCH above by JRS, 21 Aug 01.
+ reason described under sparc_HOST_ARCH above by JRS, 21 Aug 01.
On the Alpha the first 6 integer arguments are in a0 through a5,
and the rest on the stack. Hence we want to shuffle the original
caller's arguments by two.
/* Ensure that instruction cache is consistent with our new code */
__asm__ volatile("call_pal %0" : : "i" (PAL_imb));
}
-#elif defined(powerpc_TARGET_ARCH)
-/*
- For PowerPC, the following code is used:
-
- mr r10,r8
- mr r9,r7
- mr r8,r6
- mr r7,r5
- mr r6,r4
- mr r5,r3
- lis r0,0xDEAD ;hi(wptr)
- lis r3,0xDEAF ;hi(hptr)
- ori r0,r0,0xBEEF ; lo(wptr)
- ori r3,r3,0xFACE ; lo(hptr)
- mtctr r0
- bctr
-
- The arguments (passed in registers r3 - r10) are shuffled along by two to
- make room for hptr and a dummy argument. As r9 and r10 are overwritten by
- this code, it only works for up to 6 arguments (when floating point arguments
- are involved, this may be more or less, depending on the exact situation).
-*/
- adjustor = mallocBytesRWX(4*13);
- {
- unsigned long *const adj_code = (unsigned long *)adjustor;
-
- // make room for extra arguments
- adj_code[0] = 0x7d0a4378; //mr r10,r8
- adj_code[1] = 0x7ce93b78; //mr r9,r7
- adj_code[2] = 0x7cc83378; //mr r8,r6
- adj_code[3] = 0x7ca72b78; //mr r7,r5
- adj_code[4] = 0x7c862378; //mr r6,r4
- adj_code[5] = 0x7c651b78; //mr r5,r3
-
- adj_code[6] = 0x3c000000; //lis r0,hi(wptr)
- adj_code[6] |= ((unsigned long)wptr) >> 16;
-
- adj_code[7] = 0x3c600000; //lis r3,hi(hptr)
- adj_code[7] |= ((unsigned long)hptr) >> 16;
-
- adj_code[8] = 0x60000000; //ori r0,r0,lo(wptr)
- adj_code[8] |= ((unsigned long)wptr) & 0xFFFF;
-
- adj_code[9] = 0x60630000; //ori r3,r3,lo(hptr)
- adj_code[9] |= ((unsigned long)hptr) & 0xFFFF;
-
- adj_code[10] = 0x7c0903a6; //mtctr r0
- adj_code[11] = 0x4e800420; //bctr
- adj_code[12] = (unsigned long)hptr;
-
- // Flush the Instruction cache:
- // MakeDataExecutable(adjustor,4*13);
- /* This would require us to link with CoreServices.framework */
- { /* this should do the same: */
- int n = 13;
- unsigned long *p = adj_code;
- while(n--)
- {
- __asm__ volatile ("dcbf 0,%0\n\tsync\n\ticbi 0,%0"
- : : "r" (p));
- p++;
- }
- __asm__ volatile ("sync\n\tisync");
- }
- }
-#elif defined(ia64_TARGET_ARCH)
+#elif defined(powerpc_HOST_ARCH) && defined(linux_HOST_OS)
+
+#define OP_LO(op,lo) ((((unsigned)(op)) << 16) | (((unsigned)(lo)) & 0xFFFF))
+#define OP_HI(op,hi) ((((unsigned)(op)) << 16) | (((unsigned)(hi)) >> 16))
+ {
+ /* The PowerPC Linux (32-bit) calling convention is annoyingly complex.
+ We need to calculate all the details of the stack frame layout,
+ taking into account the types of all the arguments, and then
+ generate code on the fly. */
+
+ int src_gpr = 3, dst_gpr = 5;
+ int fpr = 3;
+ int src_offset = 0, dst_offset = 0;
+ int n = strlen(typeString),i;
+ int src_locs[n], dst_locs[n];
+ int frameSize;
+ unsigned *code;
+
+ /* Step 1:
+ Calculate where the arguments should go.
+ src_locs[] will contain the locations of the arguments in the
+ original stack frame passed to the adjustor.
+ dst_locs[] will contain the locations of the arguments after the
+ adjustor runs, on entry to the wrapper proc pointed to by wptr.
+
+ This algorithm is based on the one described on page 3-19 of the
+ System V ABI PowerPC Processor Supplement.
+ */
+ for(i=0;typeString[i];i++)
+ {
+ char t = typeString[i];
+ if((t == 'f' || t == 'd') && fpr <= 8)
+ src_locs[i] = dst_locs[i] = -32-(fpr++);
+ else
+ {
+ if(t == 'l' && src_gpr <= 9)
+ {
+ if((src_gpr & 1) == 0)
+ src_gpr++;
+ src_locs[i] = -src_gpr;
+ src_gpr += 2;
+ }
+ else if(t == 'i' && src_gpr <= 10)
+ {
+ src_locs[i] = -(src_gpr++);
+ }
+ else
+ {
+ if(t == 'l' || t == 'd')
+ {
+ if(src_offset % 8)
+ src_offset += 4;
+ }
+ src_locs[i] = src_offset;
+ src_offset += (t == 'l' || t == 'd') ? 8 : 4;
+ }
+
+ if(t == 'l' && dst_gpr <= 9)
+ {
+ if((dst_gpr & 1) == 0)
+ dst_gpr++;
+ dst_locs[i] = -dst_gpr;
+ dst_gpr += 2;
+ }
+ else if(t == 'i' && dst_gpr <= 10)
+ {
+ dst_locs[i] = -(dst_gpr++);
+ }
+ else
+ {
+ if(t == 'l' || t == 'd')
+ {
+ if(dst_offset % 8)
+ dst_offset += 4;
+ }
+ dst_locs[i] = dst_offset;
+ dst_offset += (t == 'l' || t == 'd') ? 8 : 4;
+ }
+ }
+ }
+
+ frameSize = dst_offset + 8;
+ frameSize = (frameSize+15) & ~0xF;
+
+ /* Step 2:
+ Build the adjustor.
+ */
+ // allocate space for at most 4 insns per parameter
+ // plus 14 more instructions.
+ adjustor = mallocBytesRWX(4 * (4*n + 14));
+ code = (unsigned*)adjustor;
+
+ *code++ = 0x48000008; // b *+8
+ // * Put the hptr in a place where freeHaskellFunctionPtr
+ // can get at it.
+ *code++ = (unsigned) hptr;
+
+ // * save the link register
+ *code++ = 0x7c0802a6; // mflr r0;
+ *code++ = 0x90010004; // stw r0, 4(r1);
+ // * and build a new stack frame
+ *code++ = OP_LO(0x9421, -frameSize); // stwu r1, -frameSize(r1)
+
+ // * now generate instructions to copy arguments
+ // from the old stack frame into the new stack frame.
+ for(i=n-1;i>=0;i--)
+ {
+ if(src_locs[i] < -32)
+ ASSERT(dst_locs[i] == src_locs[i]);
+ else if(src_locs[i] < 0)
+ {
+ // source in GPR.
+ ASSERT(typeString[i] != 'f' && typeString[i] != 'd');
+ if(dst_locs[i] < 0)
+ {
+ ASSERT(dst_locs[i] > -32);
+ // dst is in GPR, too.
+
+ if(typeString[i] == 'l')
+ {
+ // mr dst+1, src+1
+ *code++ = 0x7c000378
+ | ((-dst_locs[i]+1) << 16)
+ | ((-src_locs[i]+1) << 11)
+ | ((-src_locs[i]+1) << 21);
+ }
+ // mr dst, src
+ *code++ = 0x7c000378
+ | ((-dst_locs[i]) << 16)
+ | ((-src_locs[i]) << 11)
+ | ((-src_locs[i]) << 21);
+ }
+ else
+ {
+ if(typeString[i] == 'l')
+ {
+ // stw src+1, dst_offset+4(r1)
+ *code++ = 0x90010000
+ | ((-src_locs[i]+1) << 21)
+                                | (dst_locs[i] + 8 + 4);
+ }
+
+ // stw src, dst_offset(r1)
+ *code++ = 0x90010000
+ | ((-src_locs[i]) << 21)
+ | (dst_locs[i] + 8);
+ }
+ }
+ else
+ {
+ ASSERT(dst_locs[i] >= 0);
+ ASSERT(typeString[i] != 'f' && typeString[i] != 'd');
+
+ if(typeString[i] == 'l')
+ {
+ // lwz r0, src_offset(r1)
+ *code++ = 0x80010000
+ | (src_locs[i] + frameSize + 8 + 4);
+ // stw r0, dst_offset(r1)
+ *code++ = 0x90010000
+ | (dst_locs[i] + 8 + 4);
+ }
+ // lwz r0, src_offset(r1)
+ *code++ = 0x80010000
+ | (src_locs[i] + frameSize + 8);
+ // stw r0, dst_offset(r1)
+ *code++ = 0x90010000
+ | (dst_locs[i] + 8);
+ }
+ }
+
+ // * hptr will be the new first argument.
+ // lis r3, hi(hptr)
+ *code++ = OP_HI(0x3c60, hptr);
+ // ori r3,r3,lo(hptr)
+ *code++ = OP_LO(0x6063, hptr);
+
+ // * we need to return to a piece of code
+ // which will tear down the stack frame.
+ // lis r11,hi(obscure_ccall_ret_code)
+ *code++ = OP_HI(0x3d60, obscure_ccall_ret_code);
+ // ori r11,r11,lo(obscure_ccall_ret_code)
+ *code++ = OP_LO(0x616b, obscure_ccall_ret_code);
+ // mtlr r11
+ *code++ = 0x7d6803a6;
+
+ // * jump to wptr
+ // lis r11,hi(wptr)
+ *code++ = OP_HI(0x3d60, wptr);
+ // ori r11,r11,lo(wptr)
+ *code++ = OP_LO(0x616b, wptr);
+ // mtctr r11
+ *code++ = 0x7d6903a6;
+ // bctr
+ *code++ = 0x4e800420;
+
+ // Flush the Instruction cache:
+ {
+ unsigned *p = adjustor;
+ while(p < code)
+ {
+ __asm__ volatile ("dcbf 0,%0\n\tsync\n\ticbi 0,%0"
+ : : "r" (p));
+ p++;
+ }
+ __asm__ volatile ("sync\n\tisync");
+ }
+ }
+
+#elif defined(powerpc_HOST_ARCH) || defined(powerpc64_HOST_ARCH)
+
+#define OP_LO(op,lo) ((((unsigned)(op)) << 16) | (((unsigned)(lo)) & 0xFFFF))
+#define OP_HI(op,hi) ((((unsigned)(op)) << 16) | (((unsigned)(hi)) >> 16))
+ {
+ /* The following code applies to all PowerPC and PowerPC64 platforms
+ whose stack layout is based on the AIX ABI.
+
+ Besides (obviously) AIX, this includes
+ Mac OS 9 and BeOS/PPC (may they rest in peace),
+ which use the 32-bit AIX ABI
+ powerpc64-linux,
+ which uses the 64-bit AIX ABI
+ and Darwin (Mac OS X),
+ which uses the same stack layout as AIX,
+ but no function descriptors.
+
+ The actual stack-frame shuffling is implemented out-of-line
+ in the function adjustorCode, in AdjustorAsm.S.
+ Here, we set up an AdjustorStub structure, which
+ is a function descriptor (on platforms that have function
+ descriptors) or a short piece of stub code (on Darwin) to call
+ adjustorCode with a pointer to the AdjustorStub struct loaded
+ into register r2.
+
+ One nice thing about this is that there is _no_ code generated at
+ runtime on the platforms that have function descriptors.
+ */
+ AdjustorStub *adjustorStub;
+ int sz = 0, extra_sz, total_sz;
+
+    // from AdjustorAsm.S
+ // not declared as a function so that AIX-style
+ // fundescs can never get in the way.
+ extern void *adjustorCode;
+
+#ifdef FUNDESCS
+ adjustorStub = stgMallocBytes(sizeof(AdjustorStub), "createAdjustor");
+#else
+ adjustorStub = mallocBytesRWX(sizeof(AdjustorStub));
+#endif
+ adjustor = adjustorStub;
+
+ adjustorStub->code = (void*) &adjustorCode;
+
+#ifdef FUNDESCS
+ // function descriptors are a cool idea.
+ // We don't need to generate any code at runtime.
+ adjustorStub->toc = adjustorStub;
+#else
+
+ // no function descriptors :-(
+ // We need to do things "by hand".
+#if defined(powerpc_HOST_ARCH)
+ // lis r2, hi(adjustorStub)
+ adjustorStub->lis = OP_HI(0x3c40, adjustorStub);
+ // ori r2, r2, lo(adjustorStub)
+ adjustorStub->ori = OP_LO(0x6042, adjustorStub);
+ // lwz r0, code(r2)
+ adjustorStub->lwz = OP_LO(0x8002, (char*)(&adjustorStub->code)
+ - (char*)adjustorStub);
+ // mtctr r0
+ adjustorStub->mtctr = 0x7c0903a6;
+ // bctr
+ adjustorStub->bctr = 0x4e800420;
+#else
+ barf("adjustor creation not supported on this platform");
+#endif
+
+ // Flush the Instruction cache:
+ {
+ int n = sizeof(AdjustorStub)/sizeof(unsigned);
+ unsigned *p = (unsigned*)adjustor;
+ while(n--)
+ {
+ __asm__ volatile ("dcbf 0,%0\n\tsync\n\ticbi 0,%0"
+ : : "r" (p));
+ p++;
+ }
+ __asm__ volatile ("sync\n\tisync");
+ }
+#endif
+
+ // Calculate the size of the stack frame, in words.
+ while(*typeString)
+ {
+ char t = *typeString++;
+
+ switch(t)
+ {
+#if defined(powerpc_HOST_ARCH)
+ // on 32-bit platforms, Double and Int64 occupy two words.
+ case 'd':
+ case 'l':
+ sz += 2;
+ break;
+#endif
+ // everything else is one word.
+ default:
+ sz += 1;
+ }
+ }
+ // The first eight words of the parameter area
+ // are just "backing store" for the parameters passed in
+ // the GPRs. extra_sz is the number of words beyond those first
+ // 8 words.
+ extra_sz = sz - 8;
+ if(extra_sz < 0)
+ extra_sz = 0;
+
+ // Calculate the total size of the stack frame.
+ total_sz = (6 /* linkage area */
+ + 8 /* minimum parameter area */
+ + 2 /* two extra arguments */
+ + extra_sz)*sizeof(StgWord);
+
+ // align to 16 bytes.
+ // AIX only requires 8 bytes, but who cares?
+ total_sz = (total_sz+15) & ~0xF;
+
+ // Fill in the information that adjustorCode in AdjustorAsm.S
+ // will use to create a new stack frame with the additional args.
+ adjustorStub->hptr = hptr;
+ adjustorStub->wptr = wptr;
+ adjustorStub->negative_framesize = -total_sz;
+ adjustorStub->extrawords_plus_one = extra_sz + 1;
+ }
+
+#elif defined(ia64_HOST_ARCH)
/*
Up to 8 inputs are passed in registers. We flush the last two inputs to
the stack, initially into the 16-byte scratch region left by the caller.
void
freeHaskellFunctionPtr(void* ptr)
{
-#if defined(i386_TARGET_ARCH)
+#if defined(i386_HOST_ARCH)
if ( *(unsigned char*)ptr != 0x68 &&
*(unsigned char*)ptr != 0x58 ) {
errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
} else {
freeStablePtr(*((StgStablePtr*)((unsigned char*)ptr + 0x02)));
}
-#elif defined(sparc_TARGET_ARCH)
+#elif defined(sparc_HOST_ARCH)
if ( *(unsigned long*)ptr != 0x9C23A008UL ) {
errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
return;
/* Free the stable pointer first..*/
freeStablePtr(*((StgStablePtr*)((unsigned long*)ptr + 11)));
-#elif defined(alpha_TARGET_ARCH)
+#elif defined(alpha_HOST_ARCH)
if ( *(StgWord64*)ptr != 0xa77b0018a61b0010L ) {
errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
return;
/* Free the stable pointer first..*/
freeStablePtr(*((StgStablePtr*)((unsigned char*)ptr + 0x10)));
-#elif defined(powerpc_TARGET_ARCH)
- if ( *(StgWord*)ptr != 0x7d0a4378 ) {
+#elif defined(powerpc_HOST_ARCH) && defined(linux_HOST_OS)
+ if ( *(StgWord*)ptr != 0x48000008 ) {
+ errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
+ return;
+ }
+ freeStablePtr(((StgStablePtr*)ptr)[1]);
+#elif defined(powerpc_HOST_ARCH) || defined(powerpc64_HOST_ARCH)
+ extern void* adjustorCode;
+ if ( ((AdjustorStub*)ptr)->code != (StgFunPtr) &adjustorCode ) {
errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
return;
}
- freeStablePtr(*((StgStablePtr*)((unsigned char*)ptr + 4*12)));
-#elif defined(ia64_TARGET_ARCH)
+ freeStablePtr(((AdjustorStub*)ptr)->hptr);
+#elif defined(ia64_HOST_ARCH)
IA64FunDesc *fdesc = (IA64FunDesc *)ptr;
StgWord64 *code = (StgWord64 *)(fdesc+1);
void
initAdjustor(void)
{
-#if defined(i386_TARGET_ARCH)
+#if defined(i386_HOST_ARCH)
/* Now here's something obscure for you:
When generating an adjustor thunk that uses the C calling