#include <windows.h>
#endif
-#if defined(openbsd_TARGET_OS)
+#if defined(openbsd_HOST_OS) || defined(linux_HOST_OS)
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
/* no C99 header stdint.h on OpenBSD? */
+#if defined(openbsd_HOST_OS)
typedef unsigned long my_uintptr_t;
+#else
+#include <stdint.h>
+typedef uintptr_t my_uintptr_t;
+#endif
#endif
-#if defined(powerpc_TARGET_ARCH) && defined(linux_TARGET_OS)
+#if defined(powerpc_HOST_ARCH) && defined(linux_HOST_OS)
#include <string.h>
#endif
mallocBytesRWX(int len)
{
void *addr = stgMallocBytes(len, "mallocBytesRWX");
-#if defined(i386_TARGET_ARCH) && defined(_WIN32)
+#if defined(i386_HOST_ARCH) && defined(_WIN32)
/* This could be necessary for processors which distinguish between READ and
EXECUTE memory accesses, e.g. Itaniums. */
DWORD dwOldProtect = 0;
barf("mallocBytesRWX: failed to protect 0x%p; error=%lu; old protection: %lu\n",
addr, (unsigned long)GetLastError(), (unsigned long)dwOldProtect);
}
-#elif defined(openbsd_TARGET_OS)
+#elif defined(openbsd_HOST_OS) || defined(linux_HOST_OS)
/* malloced memory isn't executable by default on OpenBSD */
my_uintptr_t pageSize = sysconf(_SC_PAGESIZE);
my_uintptr_t mask = ~(pageSize - 1);
return addr;
}
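/* Illustrative sketch, not part of the patch: the elided body of the
   openbsd/linux branch above would typically round the malloc'd address
   down to a page boundary and mprotect the covering pages RWX. The helper
   name below is hypothetical; only sysconf and mprotect are real calls. */
#include <stddef.h>
#include <unistd.h>
#include <sys/mman.h>

static int make_executable(void *addr, size_t len)
{
    unsigned long pageSize = (unsigned long)sysconf(_SC_PAGESIZE);
    unsigned long mask     = ~(pageSize - 1);
    unsigned long start    = (unsigned long)addr & mask;          /* page base */
    unsigned long span     = ((unsigned long)addr - start) + len; /* bytes from page base */

    span = (span + pageSize - 1) & mask;                          /* round up to whole pages */
    return mprotect((void *)start, span, PROT_READ | PROT_WRITE | PROT_EXEC);
}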
-#if defined(i386_TARGET_ARCH)
-static unsigned char *obscure_ccall_ret_code;
+#ifdef LEADING_UNDERSCORE
+#define UNDERSCORE "_"
+#else
+#define UNDERSCORE ""
+#endif
+#if defined(i386_HOST_ARCH)
+/*
+ Now here's something obscure for you:
+
+ When generating an adjustor thunk that uses the C calling
+ convention, we have to make sure that the thunk kicks off
+ the process of jumping into Haskell with a tail jump. Why?
+  Because as a result of jumping into Haskell we may end
+  up freeing the very adjustor thunk we came from, using
+  freeHaskellFunctionPtr(). Hence, we had better not return to
+ the adjustor code on our way out, since it could by then
+ point to junk.
+
+  The fix is readily at hand: just include the opcodes
+  for the C stack fixup code that we need to perform when
+  returning, in some static piece of memory, and arrange
+  to return to it before tail jumping from the adjustor thunk.
+*/
+static void GNUC3_ATTRIBUTE(used) obscure_ccall_wrapper(void)
+{
+ __asm__ (
+ ".globl " UNDERSCORE "obscure_ccall_ret_code\n"
+ UNDERSCORE "obscure_ccall_ret_code:\n\t"
+ "addl $0x4, %esp\n\t"
+ "ret"
+ );
+}
+extern void obscure_ccall_ret_code(void);
+
+#if defined(openbsd_HOST_OS)
+static unsigned char *obscure_ccall_ret_code_dyn;
+#endif
+
+#endif
+
+#if defined(x86_64_HOST_ARCH)
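+/* As on i386, but the stack fixup pops the argument word that the
+   >= 6-integer-arg adjustor flushes to the stack (see createAdjustor
+   below) before returning to the original caller. */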
+static void GNUC3_ATTRIBUTE(used) obscure_ccall_wrapper(void)
+{
+ __asm__ (
+ ".globl " UNDERSCORE "obscure_ccall_ret_code\n"
+ UNDERSCORE "obscure_ccall_ret_code:\n\t"
+ "addq $0x8, %rsp\n\t"
+ "ret"
+ );
+}
+extern void obscure_ccall_ret_code(void);
#endif
-#if defined(alpha_TARGET_ARCH)
+#if defined(alpha_HOST_ARCH)
/* To get the definition of PAL_imb: */
-# if defined(linux_TARGET_OS)
+# if defined(linux_HOST_OS)
# include <asm/pal.h>
# else
# include <machine/pal.h>
# endif
#endif
-#if defined(ia64_TARGET_ARCH)
+#if defined(ia64_HOST_ARCH)
#include "Storage.h"
/* Layout of a function descriptor */
*stable = getStablePtr((StgPtr)arr);
/* and return a ptr to the goods inside the array */
- return(BYTE_ARR_CTS(arr));
+ return(&(arr->payload));
}
#endif
-#if defined(powerpc_TARGET_ARCH) && defined(linux_TARGET_OS)
+#if defined(powerpc_HOST_ARCH) && defined(linux_HOST_OS)
__asm__("obscure_ccall_ret_code:\n\t"
"lwz 1,0(1)\n\t"
"lwz 0,4(1)\n\t"
extern void obscure_ccall_ret_code(void);
#endif
-#if defined(powerpc_TARGET_ARCH) || defined(powerpc64_TARGET_ARCH)
-#if !(defined(powerpc_TARGET_ARCH) && defined(linux_TARGET_OS))
+#if defined(powerpc_HOST_ARCH) || defined(powerpc64_HOST_ARCH)
+#if !(defined(powerpc_HOST_ARCH) && defined(linux_HOST_OS))
/* !!! !!! WARNING: !!! !!!
* This structure is accessed from AdjustorAsm.s
*/
typedef struct AdjustorStub {
-#if defined(powerpc_TARGET_ARCH) && defined(darwin_TARGET_OS)
+#if defined(powerpc_HOST_ARCH) && defined(darwin_HOST_OS)
unsigned lis;
unsigned ori;
unsigned lwz;
unsigned mtctr;
unsigned bctr;
StgFunPtr code;
-#elif defined(powerpc64_TARGET_ARCH) && defined(darwin_TARGET_OS)
+#elif defined(powerpc64_HOST_ARCH) && defined(darwin_HOST_OS)
/* powerpc64-darwin: just guessing that it won't use fundescs. */
unsigned lis;
unsigned ori;
#endif
void*
-createAdjustor(int cconv, StgStablePtr hptr, StgFunPtr wptr, char *typeString)
+createAdjustor(int cconv, StgStablePtr hptr,
+ StgFunPtr wptr,
+ char *typeString
+#if !defined(powerpc_HOST_ARCH) && !defined(powerpc64_HOST_ARCH) && !defined(x86_64_HOST_ARCH)
+ STG_UNUSED
+#endif
+ )
{
void *adjustor = NULL;
switch (cconv)
{
case 0: /* _stdcall */
-#if defined(i386_TARGET_ARCH)
+#if defined(i386_HOST_ARCH)
/* Magic constant computed by inspecting the code length of
the following assembly language snippet
(offset and machine code prefixed):
break;
case 1: /* _ccall */
-#if defined(i386_TARGET_ARCH)
+#if defined(i386_HOST_ARCH)
/* Magic constant computed by inspecting the code length of
the following assembly language snippet
(offset and machine code prefixed):
*((StgFunPtr*)(adj_code + 0x06)) = (StgFunPtr)wptr;
adj_code[0x0a] = (unsigned char)0x68; /* pushl obscure_ccall_ret_code */
- *((StgFunPtr*)(adj_code + 0x0b)) = (StgFunPtr)obscure_ccall_ret_code;
+ *((StgFunPtr*)(adj_code + 0x0b)) =
+#if !defined(openbsd_HOST_OS)
+ (StgFunPtr)obscure_ccall_ret_code;
+#else
+ (StgFunPtr)obscure_ccall_ret_code_dyn;
+#endif
adj_code[0x0f] = (unsigned char)0xff; /* jmp *%eax */
adj_code[0x10] = (unsigned char)0xe0;
}
-#elif defined(sparc_TARGET_ARCH)
+#elif defined(x86_64_HOST_ARCH)
+ /*
+ stack at call:
+ argn
+ ...
+ arg7
+ return address
+ %rdi,%rsi,%rdx,%rcx,%r8,%r9 = arg0..arg6
+
+   if there are <6 integer args, then we can just put the
+   StablePtr into %rdi and shuffle the other args up.
+
+ If there are >=6 integer args, then we have to flush one arg
+ to the stack, and arrange to adjust the stack ptr on return.
+ The stack will be rearranged to this:
+
+ argn
+ ...
+ arg7
+ return address *** <-- dummy arg in stub fn.
+ arg6
+ obscure_ccall_ret_code
+
+ This unfortunately means that the type of the stub function
+ must have a dummy argument for the original return address
+ pointer inserted just after the 6th integer argument.
+
+ Code for the simple case:
+
+ 0: 4d 89 c1 mov %r8,%r9
+ 3: 49 89 c8 mov %rcx,%r8
+ 6: 48 89 d1 mov %rdx,%rcx
+ 9: 48 89 f2 mov %rsi,%rdx
+ c: 48 89 fe mov %rdi,%rsi
+ f: 48 8b 3d 0a 00 00 00 mov 10(%rip),%rdi
+ 16: e9 00 00 00 00 jmpq stub_function
+ ...
+ 20: .quad 0 # aligned on 8-byte boundary
+
+
+ And the version for >=6 integer arguments:
+
+ 0: 41 51 push %r9
+ 2: 68 00 00 00 00 pushq $obscure_ccall_ret_code
+ 7: 4d 89 c1 mov %r8,%r9
+ a: 49 89 c8 mov %rcx,%r8
+ d: 48 89 d1 mov %rdx,%rcx
+ 10: 48 89 f2 mov %rsi,%rdx
+ 13: 48 89 fe mov %rdi,%rsi
+ 16: 48 8b 3d 0b 00 00 00 mov 11(%rip),%rdi
+ 1d: e9 00 00 00 00 jmpq stub_function
+ ...
+ 28: .quad 0 # aligned on 8-byte boundary
+ */
+
+    /* we assume the small code model (gcc -mcmodel=small), where
+     * all symbols are < 2^32; hence wptr should fit into 32 bits.
+     */
+ ASSERT(((long)wptr >> 32) == 0);
+
+ {
+ int i = 0;
+ char *c;
+
+ // determine whether we have 6 or more integer arguments,
+ // and therefore need to flush one to the stack.
+ for (c = typeString; *c != '\0'; c++) {
+ if (*c == 'i' || *c == 'l') i++;
+ if (i == 6) break;
+ }
+
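+      /* The 32-bit stores below pack the opcode bytes from the listings
+         above four at a time, little-endian (e.g. 0x49c1894d is the byte
+         sequence 4d 89 c1 49). */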
+ if (i < 6) {
+ adjustor = mallocBytesRWX(40);
+
+ *(StgInt32 *)adjustor = 0x49c1894d;
+ *(StgInt32 *)(adjustor+4) = 0x8948c889;
+ *(StgInt32 *)(adjustor+8) = 0xf28948d1;
+ *(StgInt32 *)(adjustor+12) = 0x48fe8948;
+ *(StgInt32 *)(adjustor+16) = 0x000a3d8b;
+ *(StgInt32 *)(adjustor+20) = 0x00e90000;
+
+ *(StgInt32 *)(adjustor+23) =
+ (StgInt32)((StgInt64)wptr - (StgInt64)adjustor - 27);
+ *(StgInt64 *)(adjustor+32) = (StgInt64)hptr;
+ }
+ else
+ {
+ adjustor = mallocBytesRWX(48);
+
+ *(StgInt32 *)adjustor = 0x00685141;
+ *(StgInt32 *)(adjustor+4) = 0x4d000000;
+ *(StgInt32 *)(adjustor+8) = 0x8949c189;
+ *(StgInt32 *)(adjustor+12) = 0xd18948c8;
+ *(StgInt32 *)(adjustor+16) = 0x48f28948;
+ *(StgInt32 *)(adjustor+20) = 0x8b48fe89;
+ *(StgInt32 *)(adjustor+24) = 0x00000b3d;
+ *(StgInt32 *)(adjustor+28) = 0x0000e900;
+
+ *(StgInt32 *)(adjustor+3) =
+ (StgInt32)(StgInt64)obscure_ccall_ret_code;
+ *(StgInt32 *)(adjustor+30) =
+ (StgInt32)((StgInt64)wptr - (StgInt64)adjustor - 34);
+ *(StgInt64 *)(adjustor+40) = (StgInt64)hptr;
+ }
+ }
+#elif defined(sparc_HOST_ARCH)
/* Magic constant computed by inspecting the code length of the following
assembly language snippet (offset and machine code prefixed):
asm("nop");
asm("nop");
}
-#elif defined(alpha_TARGET_ARCH)
+#elif defined(alpha_HOST_ARCH)
/* Magic constant computed by inspecting the code length of
the following assembly language snippet
(offset and machine code prefixed; note that the machine code
divided by 4, taking the lowest 14 bits.
We only support passing 4 or fewer argument words, for the same
- reason described under sparc_TARGET_ARCH above by JRS, 21 Aug 01.
+ reason described under sparc_HOST_ARCH above by JRS, 21 Aug 01.
On the Alpha the first 6 integer arguments are in a0 through a5,
and the rest on the stack. Hence we want to shuffle the original
caller's arguments by two.
/* Ensure that instruction cache is consistent with our new code */
__asm__ volatile("call_pal %0" : : "i" (PAL_imb));
}
-#elif defined(powerpc_TARGET_ARCH) && defined(linux_TARGET_OS)
+#elif defined(powerpc_HOST_ARCH) && defined(linux_HOST_OS)
#define OP_LO(op,lo) ((((unsigned)(op)) << 16) | (((unsigned)(lo)) & 0xFFFF))
#define OP_HI(op,hi) ((((unsigned)(op)) << 16) | (((unsigned)(hi)) >> 16))
}
}
-#elif defined(powerpc_TARGET_ARCH) || defined(powerpc64_TARGET_ARCH)
+#elif defined(powerpc_HOST_ARCH) || defined(powerpc64_HOST_ARCH)
#define OP_LO(op,lo) ((((unsigned)(op)) << 16) | (((unsigned)(lo)) & 0xFFFF))
#define OP_HI(op,hi) ((((unsigned)(op)) << 16) | (((unsigned)(hi)) >> 16))
// no function descriptors :-(
// We need to do things "by hand".
-#if defined(powerpc_TARGET_ARCH)
+#if defined(powerpc_HOST_ARCH)
// lis r2, hi(adjustorStub)
adjustorStub->lis = OP_HI(0x3c40, adjustorStub);
// ori r2, r2, lo(adjustorStub)
switch(t)
{
-#if defined(powerpc_TARGET_ARCH)
+#if defined(powerpc_HOST_ARCH)
// on 32-bit platforms, Double and Int64 occupy two words.
case 'd':
case 'l':
adjustorStub->extrawords_plus_one = extra_sz + 1;
}
-#elif defined(ia64_TARGET_ARCH)
+#elif defined(ia64_HOST_ARCH)
/*
Up to 8 inputs are passed in registers. We flush the last two inputs to
the stack, initially into the 16-byte scratch region left by the caller.
void
freeHaskellFunctionPtr(void* ptr)
{
-#if defined(i386_TARGET_ARCH)
+#if defined(i386_HOST_ARCH)
if ( *(unsigned char*)ptr != 0x68 &&
*(unsigned char*)ptr != 0x58 ) {
errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
freeStablePtr(*((StgStablePtr*)((unsigned char*)ptr + 0x01)));
} else {
freeStablePtr(*((StgStablePtr*)((unsigned char*)ptr + 0x02)));
- }
-#elif defined(sparc_TARGET_ARCH)
+ }
+#elif defined(x86_64_HOST_ARCH)
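+    /* 0x894d and 0x5141 are the first two (little-endian) bytes of the two
+       x86_64 adjustor variants built in createAdjustor above:
+       "4d 89 ..." (mov %r8,%r9) and "41 51" (push %r9) respectively. */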
+ if ( *(StgWord16 *)ptr == 0x894d ) {
+ freeStablePtr(*(StgStablePtr*)(ptr+32));
+ } else if ( *(StgWord16 *)ptr == 0x5141 ) {
+ freeStablePtr(*(StgStablePtr*)(ptr+40));
+ } else {
+ errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
+ return;
+ }
+#elif defined(sparc_HOST_ARCH)
if ( *(unsigned long*)ptr != 0x9C23A008UL ) {
errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
return;
/* Free the stable pointer first..*/
freeStablePtr(*((StgStablePtr*)((unsigned long*)ptr + 11)));
-#elif defined(alpha_TARGET_ARCH)
+#elif defined(alpha_HOST_ARCH)
if ( *(StgWord64*)ptr != 0xa77b0018a61b0010L ) {
errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
return;
/* Free the stable pointer first..*/
freeStablePtr(*((StgStablePtr*)((unsigned char*)ptr + 0x10)));
-#elif defined(powerpc_TARGET_ARCH) && defined(linux_TARGET_OS)
+#elif defined(powerpc_HOST_ARCH) && defined(linux_HOST_OS)
if ( *(StgWord*)ptr != 0x48000008 ) {
errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
return;
}
freeStablePtr(((StgStablePtr*)ptr)[1]);
-#elif defined(powerpc_TARGET_ARCH) || defined(powerpc64_TARGET_ARCH)
+#elif defined(powerpc_HOST_ARCH) || defined(powerpc64_HOST_ARCH)
extern void* adjustorCode;
if ( ((AdjustorStub*)ptr)->code != (StgFunPtr) &adjustorCode ) {
errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
return;
}
freeStablePtr(((AdjustorStub*)ptr)->hptr);
-#elif defined(ia64_TARGET_ARCH)
+#elif defined(ia64_HOST_ARCH)
IA64FunDesc *fdesc = (IA64FunDesc *)ptr;
StgWord64 *code = (StgWord64 *)(fdesc+1);
void
initAdjustor(void)
{
-#if defined(i386_TARGET_ARCH)
- /* Now here's something obscure for you:
-
- When generating an adjustor thunk that uses the C calling
- convention, we have to make sure that the thunk kicks off
- the process of jumping into Haskell with a tail jump. Why?
- Because as a result of jumping in into Haskell we may end
- up freeing the very adjustor thunk we came from using
- freeHaskellFunctionPtr(). Hence, we better not return to
- the adjustor code on our way out, since it could by then
- point to junk.
-
- The fix is readily at hand, just include the opcodes
- for the C stack fixup code that we need to perform when
- returning in some static piece of memory and arrange
- to return to it before tail jumping from the adjustor thunk.
- */
-
- obscure_ccall_ret_code = mallocBytesRWX(4);
-
- obscure_ccall_ret_code[0x00] = (unsigned char)0x83; /* addl $0x4, %esp */
- obscure_ccall_ret_code[0x01] = (unsigned char)0xc4;
- obscure_ccall_ret_code[0x02] = (unsigned char)0x04;
-
- obscure_ccall_ret_code[0x03] = (unsigned char)0xc3; /* ret */
+#if defined(i386_HOST_ARCH) && defined(openbsd_HOST_OS)
+ obscure_ccall_ret_code_dyn = mallocBytesRWX(4);
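+  /* copy the four opcode bytes of obscure_ccall_ret_code
+     (addl $0x4,%esp; ret) into the freshly allocated executable buffer */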
+ obscure_ccall_ret_code_dyn[0] = ((unsigned char *)obscure_ccall_ret_code)[0];
+ obscure_ccall_ret_code_dyn[1] = ((unsigned char *)obscure_ccall_ret_code)[1];
+ obscure_ccall_ret_code_dyn[2] = ((unsigned char *)obscure_ccall_ret_code)[2];
+ obscure_ccall_ret_code_dyn[3] = ((unsigned char *)obscure_ccall_ret_code)[3];
#endif
}
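/* Illustrative sketch, not part of the patch: how a client of this module
   might use the two entry points above. The wrapper names, the stable and
   entry pointers, and the "ii" type string are placeholder assumptions;
   only createAdjustor and freeHaskellFunctionPtr come from this file. */
typedef void (*HsCallback)(int, int);

static HsCallback
wrapHaskellCallback(StgStablePtr hptr, StgFunPtr wptr)
{
    /* cconv 1 selects the _ccall path; the type string is assumed to
       describe the argument words so the x86_64 code can count integer
       arguments */
    return (HsCallback)createAdjustor(1, hptr, wptr, "ii");
}

static void
releaseHaskellCallback(HsCallback cb)
{
    /* frees the stable pointer captured by the thunk and the thunk itself */
    freeHaskellFunctionPtr((void *)cb);
}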