the code is through with it, it has to be freed in order to release Haskell
and C resources. Failure to do so result in memory leaks on both the C and
Haskell side.
-
*/
+
#include "PosixSource.h"
#include "Rts.h"
#include "RtsExternal.h"
#include <windows.h>
#endif
-/* Heavily arch-specific, I'm afraid.. */
+#if defined(openbsd_TARGET_OS)
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+
+/* OpenBSD (at this time) ships no C99 <stdint.h> header, so define our own
+   stand-in for uintptr_t. */
+typedef unsigned long my_uintptr_t;
+#endif
-typedef enum {
- pageExecuteRead,
- pageExecuteReadWrite
-} pageMode;
+/* Heavily arch-specific, I'm afraid.. */
/*
- * Function: execPage()
- *
- * Set the executable bit on page containing addr.
+ * Allocate len bytes which are readable, writable, and executable.
*
- * TODO: Can the code span more than one page? If yes, we need to make two
- * pages executable!
+ * ToDo: If this turns out to be a performance bottleneck, one could
+ * e.g. cache the last VirtualProtect/mprotect-ed region and do
+ * nothing in case of a cache hit.
*/
-static rtsBool
-execPage (void* addr, pageMode mode)
+static void*
+mallocBytesRWX(int len)
{
-#if defined(i386_TARGET_ARCH) && defined(_WIN32) && 0
- SYSTEM_INFO sInfo;
- DWORD dwOldProtect = 0;
-
- /* doesn't return a result, so presumably it can't fail... */
- GetSystemInfo(&sInfo);
-
- if ( VirtualProtect ( (void*)((unsigned long)addr & (sInfo.dwPageSize - 1)),
- sInfo.dwPageSize,
- ( mode == pageExecuteReadWrite ? PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ),
- &dwOldProtect) == 0 ) {
-# if 1
- DWORD rc = GetLastError();
- prog_belch("execPage: failed to protect 0x%p; error=%lu; old protection: %lu\n", addr, rc, dwOldProtect);
-# endif
- return rtsFalse;
- }
- return rtsTrue;
-#else
- (void)addr; (void)mode; /* keep gcc -Wall happy */
- return rtsTrue;
+ /* NOTE(review): assumes stgMallocBytes aborts (rather than returning NULL)
+    on out-of-memory, so addr is never checked here -- confirm against the
+    RTS allocator's contract. */
+ void *addr = stgMallocBytes(len, "mallocBytesRWX");
+#if defined(i386_TARGET_ARCH) && defined(_WIN32)
+ /* This could be necessary for processors which distinguish between READ and
+ EXECUTE memory accesses, e.g. Itaniums. */
+ DWORD dwOldProtect = 0;
+ if (VirtualProtect (addr, len, PAGE_EXECUTE_READWRITE, &dwOldProtect) == 0) {
+ barf("mallocBytesRWX: failed to protect 0x%p; error=%lu; old protection: %lu\n",
+ addr, (unsigned long)GetLastError(), (unsigned long)dwOldProtect);
+ }
+#elif defined(openbsd_TARGET_OS)
+ /* malloced memory isn't executable by default on OpenBSD */
+ /* mprotect(2) requires a page-aligned address, so widen the malloc'd
+    range [addr, addr+len) out to whole pages before changing protection. */
+ my_uintptr_t pageSize = sysconf(_SC_PAGESIZE);
+ my_uintptr_t mask = ~(pageSize - 1);
+ my_uintptr_t startOfFirstPage = ((my_uintptr_t)addr ) & mask;
+ my_uintptr_t startOfLastPage = ((my_uintptr_t)addr + len - 1) & mask;
+ my_uintptr_t size = startOfLastPage - startOfFirstPage + pageSize;
+ if (mprotect((void*)startOfFirstPage, (size_t)size, PROT_EXEC | PROT_READ | PROT_WRITE) != 0) {
+ barf("mallocBytesRWX: failed to protect 0x%p\n", addr);
+ }
+ /* On all other platforms, malloc'd memory is presumably executable
+    already, so nothing further is done here. */
#endif
+ return addr;
}
#if defined(i386_TARGET_ARCH)
}
#endif
+#if defined(powerpc64_TARGET_ARCH)
+// We don't need to generate dynamic code on powerpc64-[linux|AIX],
+// but we do need a piece of (static) inline assembly code:
+
+// This function is never called; it exists only so the assembler emits the
+// instructions below, with the global label 'adjustorCode', into the object
+// file. createAdjustor's powerpc64 case takes that label's address via
+// 'extern void *adjustorCode' and stores it in a function descriptor.
+static void
+adjustorCodeWrittenInAsm()
+{
+ __asm__ volatile (
+ "adjustorCode:\n\t"
+ // Shift the integer argument registers r3..r8 up to r5..r10, making
+ // room for new leading arguments.
+ "mr 10,8\n\t"
+ "mr 9,7\n\t"
+ "mr 8,6\n\t"
+ "mr 7,5\n\t"
+ "mr 6,4\n\t"
+ "mr 5,3\n\t"
+ // r11 is expected to hold the descriptor's environment word -- which
+ // createAdjustor sets to hptr -- and becomes the first argument (r3).
+ // NOTE(review): relies on the caller having loaded r11 from desc->env
+ // per the 64-bit PowerPC ELF ABI descriptor-call convention; confirm.
+ "mr 3,11\n\t"
+ // r2 was loaded from desc->toc, which createAdjustor sets to wptr,
+ // i.e. r2 points at wptr's own function descriptor: fetch its entry
+ // point (offset 0), environment (16) and TOC (8), then tail-call it.
+ "ld 0,0(2)\n\t"
+ "ld 11,16(2)\n\t"
+ "mtctr 0\n\t"
+ "ld 2,8(2)\n\t"
+ "bctr"
+ : : );
+}
+#endif
+
void*
createAdjustor(int cconv, StgStablePtr hptr, StgFunPtr wptr)
{
<c>: ff e0 jmp %eax # and jump to it.
# the callee cleans up the stack
*/
- adjustor = stgMallocBytes(14, "createAdjustor");
- unsigned char *const adj_code = (unsigned char *)adjustor;
- adj_code[0x00] = (unsigned char)0x58; /* popl %eax */
+ adjustor = mallocBytesRWX(14);
+ {
+ unsigned char *const adj_code = (unsigned char *)adjustor;
+ adj_code[0x00] = (unsigned char)0x58; /* popl %eax */
- adj_code[0x01] = (unsigned char)0x68; /* pushl hptr (which is a dword immediate ) */
- *((StgStablePtr*)(adj_code + 0x02)) = (StgStablePtr)hptr;
+ adj_code[0x01] = (unsigned char)0x68; /* pushl hptr (which is a dword immediate ) */
+ *((StgStablePtr*)(adj_code + 0x02)) = (StgStablePtr)hptr;
- adj_code[0x06] = (unsigned char)0x50; /* pushl %eax */
+ adj_code[0x06] = (unsigned char)0x50; /* pushl %eax */
- adj_code[0x07] = (unsigned char)0xb8; /* movl $wptr, %eax */
- *((StgFunPtr*)(adj_code + 0x08)) = (StgFunPtr)wptr;
+ adj_code[0x07] = (unsigned char)0xb8; /* movl $wptr, %eax */
+ *((StgFunPtr*)(adj_code + 0x08)) = (StgFunPtr)wptr;
- adj_code[0x0c] = (unsigned char)0xff; /* jmp %eax */
- adj_code[0x0d] = (unsigned char)0xe0;
-
- execPage(adjustor, pageExecuteReadWrite);
+ adj_code[0x0c] = (unsigned char)0xff; /* jmp %eax */
+ adj_code[0x0d] = (unsigned char)0xe0;
+ }
#endif
break;
That's (thankfully) the case here with the restricted set of
return types that we support.
*/
- adjustor = stgMallocBytes(17, "createAdjustor");
- unsigned char *const adj_code = (unsigned char *)adjustor;
-
- adj_code[0x00] = (unsigned char)0x68; /* pushl hptr (which is a dword immediate ) */
- *((StgStablePtr*)(adj_code+0x01)) = (StgStablePtr)hptr;
+ adjustor = mallocBytesRWX(17);
+ {
+ unsigned char *const adj_code = (unsigned char *)adjustor;
- adj_code[0x05] = (unsigned char)0xb8; /* movl $wptr, %eax */
- *((StgFunPtr*)(adj_code + 0x06)) = (StgFunPtr)wptr;
+ adj_code[0x00] = (unsigned char)0x68; /* pushl hptr (which is a dword immediate ) */
+ *((StgStablePtr*)(adj_code+0x01)) = (StgStablePtr)hptr;
- adj_code[0x0a] = (unsigned char)0x68; /* pushl obscure_ccall_ret_code */
- *((StgFunPtr*)(adj_code + 0x0b)) = (StgFunPtr)obscure_ccall_ret_code;
+ adj_code[0x05] = (unsigned char)0xb8; /* movl $wptr, %eax */
+ *((StgFunPtr*)(adj_code + 0x06)) = (StgFunPtr)wptr;
- adj_code[0x0f] = (unsigned char)0xff; /* jmp *%eax */
- adj_code[0x10] = (unsigned char)0xe0;
+ adj_code[0x0a] = (unsigned char)0x68; /* pushl obscure_ccall_ret_code */
+ *((StgFunPtr*)(adj_code + 0x0b)) = (StgFunPtr)obscure_ccall_ret_code;
- execPage(adjustor, pageExecuteReadWrite);
+ adj_code[0x0f] = (unsigned char)0xff; /* jmp *%eax */
+ adj_code[0x10] = (unsigned char)0xe0;
+ }
#elif defined(sparc_TARGET_ARCH)
/* Magic constant computed by inspecting the code length of the following
assembly language snippet (offset and machine code prefixed):
similarly, and local variables should be accessed via %fp, not %sp. In a
nutshell: This should work! (Famous last words! :-)
*/
- adjustor = stgMallocBytes(4*(11+1), "createAdjustor");
- unsigned long *const adj_code = (unsigned long *)adjustor;
-
- adj_code[ 0] = 0x9C23A008UL; /* sub %sp, 8, %sp */
- adj_code[ 1] = 0xDA23A060UL; /* st %o5, [%sp + 96] */
- adj_code[ 2] = 0xD823A05CUL; /* st %o4, [%sp + 92] */
- adj_code[ 3] = 0x9A10000BUL; /* mov %o3, %o5 */
- adj_code[ 4] = 0x9810000AUL; /* mov %o2, %o4 */
- adj_code[ 5] = 0x96100009UL; /* mov %o1, %o3 */
- adj_code[ 6] = 0x94100008UL; /* mov %o0, %o2 */
- adj_code[ 7] = 0x13000000UL; /* sethi %hi(wptr), %o1 */
- adj_code[ 7] |= ((unsigned long)wptr) >> 10;
- adj_code[ 8] = 0x11000000UL; /* sethi %hi(hptr), %o0 */
- adj_code[ 8] |= ((unsigned long)hptr) >> 10;
- adj_code[ 9] = 0x81C26000UL; /* jmp %o1 + %lo(wptr) */
- adj_code[ 9] |= ((unsigned long)wptr) & 0x000003FFUL;
- adj_code[10] = 0x90122000UL; /* or %o0, %lo(hptr), %o0 */
- adj_code[10] |= ((unsigned long)hptr) & 0x000003FFUL;
-
- adj_code[11] = (unsigned long)hptr;
-
- /* flush cache */
- asm("flush %0" : : "r" (adj_code ));
- asm("flush %0" : : "r" (adj_code + 2));
- asm("flush %0" : : "r" (adj_code + 4));
- asm("flush %0" : : "r" (adj_code + 6));
- asm("flush %0" : : "r" (adj_code + 10));
-
- /* max. 5 instructions latency, and we need at >= 1 for returning */
- asm("nop");
- asm("nop");
- asm("nop");
- asm("nop");
+ adjustor = mallocBytesRWX(4*(11+1));
+ {
+ unsigned long *const adj_code = (unsigned long *)adjustor;
+
+ adj_code[ 0] = 0x9C23A008UL; /* sub %sp, 8, %sp */
+ adj_code[ 1] = 0xDA23A060UL; /* st %o5, [%sp + 96] */
+ adj_code[ 2] = 0xD823A05CUL; /* st %o4, [%sp + 92] */
+ adj_code[ 3] = 0x9A10000BUL; /* mov %o3, %o5 */
+ adj_code[ 4] = 0x9810000AUL; /* mov %o2, %o4 */
+ adj_code[ 5] = 0x96100009UL; /* mov %o1, %o3 */
+ adj_code[ 6] = 0x94100008UL; /* mov %o0, %o2 */
+ adj_code[ 7] = 0x13000000UL; /* sethi %hi(wptr), %o1 */
+ adj_code[ 7] |= ((unsigned long)wptr) >> 10;
+ adj_code[ 8] = 0x11000000UL; /* sethi %hi(hptr), %o0 */
+ adj_code[ 8] |= ((unsigned long)hptr) >> 10;
+ adj_code[ 9] = 0x81C26000UL; /* jmp %o1 + %lo(wptr) */
+ adj_code[ 9] |= ((unsigned long)wptr) & 0x000003FFUL;
+ adj_code[10] = 0x90122000UL; /* or %o0, %lo(hptr), %o0 */
+ adj_code[10] |= ((unsigned long)hptr) & 0x000003FFUL;
+
+ adj_code[11] = (unsigned long)hptr;
+
+ /* flush cache */
+ asm("flush %0" : : "r" (adj_code ));
+ asm("flush %0" : : "r" (adj_code + 2));
+ asm("flush %0" : : "r" (adj_code + 4));
+ asm("flush %0" : : "r" (adj_code + 6));
+ asm("flush %0" : : "r" (adj_code + 10));
+
+ /* max. 5 instructions latency, and we need at >= 1 for returning */
+ asm("nop");
+ asm("nop");
+ asm("nop");
+ asm("nop");
+ }
#elif defined(alpha_TARGET_ARCH)
/* Magic constant computed by inspecting the code length of
the following assembly language snippet
4 bytes (getting rid of the nop), hence saving memory. [ccshan]
*/
ASSERT(((StgWord64)wptr & 3) == 0);
- adjustor = stgMallocBytes(48, "createAdjustor");
- StgWord64 *const code = (StgWord64 *)adjustor;
+ adjustor = mallocBytesRWX(48);
+ {
+ StgWord64 *const code = (StgWord64 *)adjustor;
- code[0] = 0x4610041246520414L;
- code[1] = 0x46730415a61b0020L;
- code[2] = 0x46310413a77b0028L;
- code[3] = 0x000000006bfb0000L
- | (((StgWord32*)(wptr) - (StgWord32*)(code) - 3) & 0x3fff);
+ code[0] = 0x4610041246520414L;
+ code[1] = 0x46730415a61b0020L;
+ code[2] = 0x46310413a77b0028L;
+ code[3] = 0x000000006bfb0000L
+ | (((StgWord32*)(wptr) - (StgWord32*)(code) - 3) & 0x3fff);
- code[4] = (StgWord64)hptr;
- code[5] = (StgWord64)wptr;
+ code[4] = (StgWord64)hptr;
+ code[5] = (StgWord64)wptr;
- /* Ensure that instruction cache is consistent with our new code */
- __asm__ volatile("call_pal %0" : : "i" (PAL_imb));
+ /* Ensure that instruction cache is consistent with our new code */
+ __asm__ volatile("call_pal %0" : : "i" (PAL_imb));
+ }
#elif defined(powerpc_TARGET_ARCH)
/*
For PowerPC, the following code is used:
this code, it only works for up to 6 arguments (when floating point arguments
are involved, this may be more or less, depending on the exact situation).
*/
- adjustor = stgMallocBytes(4*13, "createAdjustor");
- unsigned long *const adj_code = (unsigned long *)adjustor;
-
- // make room for extra arguments
- adj_code[0] = 0x7d0a4378; //mr r10,r8
- adj_code[1] = 0x7ce93b78; //mr r9,r7
- adj_code[2] = 0x7cc83378; //mr r8,r6
- adj_code[3] = 0x7ca72b78; //mr r7,r5
- adj_code[4] = 0x7c862378; //mr r6,r4
- adj_code[5] = 0x7c651b78; //mr r5,r3
+ adjustor = mallocBytesRWX(4*13);
+ {
+ unsigned long *const adj_code = (unsigned long *)adjustor;
+
+ // make room for extra arguments
+ adj_code[0] = 0x7d0a4378; //mr r10,r8
+ adj_code[1] = 0x7ce93b78; //mr r9,r7
+ adj_code[2] = 0x7cc83378; //mr r8,r6
+ adj_code[3] = 0x7ca72b78; //mr r7,r5
+ adj_code[4] = 0x7c862378; //mr r6,r4
+ adj_code[5] = 0x7c651b78; //mr r5,r3
- adj_code[6] = 0x3c000000; //lis r0,hi(wptr)
- adj_code[6] |= ((unsigned long)wptr) >> 16;
+ adj_code[6] = 0x3c000000; //lis r0,hi(wptr)
+ adj_code[6] |= ((unsigned long)wptr) >> 16;
- adj_code[7] = 0x3c600000; //lis r3,hi(hptr)
- adj_code[7] |= ((unsigned long)hptr) >> 16;
+ adj_code[7] = 0x3c600000; //lis r3,hi(hptr)
+ adj_code[7] |= ((unsigned long)hptr) >> 16;
- adj_code[8] = 0x60000000; //ori r0,r0,lo(wptr)
- adj_code[8] |= ((unsigned long)wptr) & 0xFFFF;
+ adj_code[8] = 0x60000000; //ori r0,r0,lo(wptr)
+ adj_code[8] |= ((unsigned long)wptr) & 0xFFFF;
- adj_code[9] = 0x60630000; //ori r3,r3,lo(hptr)
- adj_code[9] |= ((unsigned long)hptr) & 0xFFFF;
+ adj_code[9] = 0x60630000; //ori r3,r3,lo(hptr)
+ adj_code[9] |= ((unsigned long)hptr) & 0xFFFF;
- adj_code[10] = 0x7c0903a6; //mtctr r0
- adj_code[11] = 0x4e800420; //bctr
- adj_code[12] = (unsigned long)hptr;
+ adj_code[10] = 0x7c0903a6; //mtctr r0
+ adj_code[11] = 0x4e800420; //bctr
+ adj_code[12] = (unsigned long)hptr;
- /* Flush the Instruction cache: */
- /* MakeDataExecutable(adjustor,4*13); */
- /* This would require us to link with CoreServices.framework */
- { /* this should do the same: */
- int n = 13;
- unsigned long *p = adj_code;
- while (n--) {
- __asm__ volatile ("dcbf 0,%0\n\tsync\n\ticbi 0,%0" : : "r" (p));
- p++;
- }
- __asm__ volatile ("sync\n\tisync");
- }
+ // Flush the Instruction cache:
+ // MakeDataExecutable(adjustor,4*13);
+ /* This would require us to link with CoreServices.framework */
+ { /* this should do the same: */
+ int n = 13;
+ unsigned long *p = adj_code;
+ while(n--)
+ {
+ __asm__ volatile ("dcbf 0,%0\n\tsync\n\ticbi 0,%0"
+ : : "r" (p));
+ p++;
+ }
+ __asm__ volatile ("sync\n\tisync");
+ }
+ }
+#elif defined(powerpc64_TARGET_ARCH)
+ // This is for powerpc64 linux and powerpc64 AIX.
+ // It probably won't apply to powerpc64-darwin.
+
+ {
+ // Layout of a 64-bit PowerPC ELF function descriptor: entry point,
+ // TOC pointer, environment pointer. The toc/env slots are hijacked
+ // here to smuggle wptr and hptr through to the adjustorCode stub,
+ // so no executable code needs to be generated at runtime.
+ typedef struct {
+ StgFunPtr code;
+ void* toc;
+ void* env;
+ } FunDesc;
+
+ // NOTE(review): raw malloc result is not checked for NULL, and every
+ // other branch in this file allocates with stgMallocBytes/mallocBytesRWX
+ // -- confirm this is intentional and that the matching release path
+ // (freeHaskellFunctionPtr) frees this with plain free().
+ FunDesc *desc = malloc(sizeof(FunDesc));
+ extern void *adjustorCode;
+
+ desc->code = (void*) &adjustorCode;
+ desc->toc = (void*) wptr;
+ desc->env = (void*) hptr;
+
+ adjustor = (void*) desc;
+ }
+ break;
+
#elif defined(ia64_TARGET_ARCH)
/*
Up to 8 inputs are passed in registers. We flush the last two inputs to
#if defined(i386_TARGET_ARCH)
if ( *(unsigned char*)ptr != 0x68 &&
*(unsigned char*)ptr != 0x58 ) {
- prog_belch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
+ errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
return;
}
}
#elif defined(sparc_TARGET_ARCH)
if ( *(unsigned long*)ptr != 0x9C23A008UL ) {
- prog_belch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
+ errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
return;
}
freeStablePtr(*((StgStablePtr*)((unsigned long*)ptr + 11)));
#elif defined(alpha_TARGET_ARCH)
if ( *(StgWord64*)ptr != 0xa77b0018a61b0010L ) {
- prog_belch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
+ errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
return;
}
freeStablePtr(*((StgStablePtr*)((unsigned char*)ptr + 0x10)));
#elif defined(powerpc_TARGET_ARCH)
if ( *(StgWord*)ptr != 0x7d0a4378 ) {
- prog_belch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
+ errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
return;
}
freeStablePtr(*((StgStablePtr*)((unsigned char*)ptr + 4*12)));
StgWord64 *code = (StgWord64 *)(fdesc+1);
if (fdesc->ip != (StgWord64)code) {
- prog_belch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
+ errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
return;
}
freeStablePtr((StgStablePtr)code[16]);
*
* Perform initialisation of adjustor thunk layer (if needed.)
*/
-rtsBool
+void
initAdjustor(void)
{
#if defined(i386_TARGET_ARCH)
to return to it before tail jumping from the adjustor thunk.
*/
- obscure_ccall_ret_code = stgMallocBytes(4, "initAdjustor");
+ obscure_ccall_ret_code = mallocBytesRWX(4);
obscure_ccall_ret_code[0x00] = (unsigned char)0x83; /* addl $0x4, %esp */
obscure_ccall_ret_code[0x01] = (unsigned char)0xc4;
obscure_ccall_ret_code[0x02] = (unsigned char)0x04;
obscure_ccall_ret_code[0x03] = (unsigned char)0xc3; /* ret */
-
- execPage(obscure_ccall_ret_code, pageExecuteRead);
#endif
- return rtsTrue;
}