the code is through with it, it has to be freed in order to release Haskell
and C resources. Failure to do so results in memory leaks on both the C and
Haskell side.
-
*/
+
#include "PosixSource.h"
#include "Rts.h"
+#include "RtsExternal.h"
#include "RtsUtils.h"
-#include "RtsFlags.h"
-
#include <stdlib.h>
+#if defined(_WIN32)
+#include <windows.h>
+#endif
+
+#if defined(openbsd_TARGET_OS)
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+
+/* OpenBSD lacks the C99 header stdint.h, so define a local stand-in for uintptr_t */
+typedef unsigned long my_uintptr_t;
+#endif
+
/* Heavily arch-specific, I'm afraid.. */
+/*
+ * Allocate len bytes which are readable, writable, and executable.
+ *
+ * The memory comes from stgMallocBytes(), which handles the
+ * out-of-memory case itself (NOTE(review): confirm against RtsUtils
+ * that stgMallocBytes barfs rather than returning NULL), so callers
+ * need not check the result.
+ *
+ * ToDo: If this turns out to be a performance bottleneck, one could
+ * e.g. cache the last VirtualProtect/mprotect-ed region and do
+ * nothing in case of a cache hit.
+ */
+static void*
+mallocBytesRWX(int len)
+{
+ void *addr = stgMallocBytes(len, "mallocBytesRWX");
+#if defined(i386_TARGET_ARCH) && defined(_WIN32)
+ /* This could be necessary for processors which distinguish between READ and
+ EXECUTE memory accesses, e.g. Itaniums. */
+ DWORD dwOldProtect = 0;
+ if (VirtualProtect (addr, len, PAGE_EXECUTE_READWRITE, &dwOldProtect) == 0) {
+ barf("mallocBytesRWX: failed to protect 0x%p; error=%lu; old protection: %lu\n",
+ addr, (unsigned long)GetLastError(), (unsigned long)dwOldProtect);
+ }
+#elif defined(openbsd_TARGET_OS)
+ /* malloced memory isn't executable by default on OpenBSD */
+ /* Round [addr, addr+len) out to whole pages before mprotect-ing.
+    The mask arithmetic assumes the page size is a power of two. */
+ my_uintptr_t pageSize = sysconf(_SC_PAGESIZE);
+ my_uintptr_t mask = ~(pageSize - 1);
+ my_uintptr_t startOfFirstPage = ((my_uintptr_t)addr ) & mask;
+ my_uintptr_t startOfLastPage = ((my_uintptr_t)addr + len - 1) & mask;
+ my_uintptr_t size = startOfLastPage - startOfFirstPage + pageSize;
+ /* Note: this widens protection for every page the allocation touches,
+    including bytes of neighbouring heap objects sharing those pages. */
+ if (mprotect((void*)startOfFirstPage, (size_t)size, PROT_EXEC | PROT_READ | PROT_WRITE) != 0) {
+ barf("mallocBytesRWX: failed to protect 0x%p\n", addr);
+ }
+#endif
+ return addr;
+}
+
#if defined(i386_TARGET_ARCH)
-/* Now here's something obscure for you:
-
- When generating an adjustor thunk that uses the C calling
- convention, we have to make sure that the thunk kicks off
- the process of jumping into Haskell with a tail jump. Why?
- Because as a result of jumping in into Haskell we may end
- up freeing the very adjustor thunk we came from using
- freeHaskellFunctionPtr(). Hence, we better not return to
- the adjustor code on our way out, since it could by then
- point to junk.
-
- The fix is readily at hand, just include the opcodes
- for the C stack fixup code that we need to perform when
- returning in some static piece of memory and arrange
- to return to it before tail jumping from the adjustor thunk.
-
- For this to work we make the assumption that bytes in .data
- are considered executable.
-*/
-static unsigned char __obscure_ccall_ret_code [] =
- { 0x83, 0xc4, 0x04 /* addl $0x4, %esp */
- , 0xc3 /* ret */
- };
+static unsigned char *obscure_ccall_ret_code;
#endif
#if defined(alpha_TARGET_ARCH)
}
#endif
+#if defined(powerpc64_TARGET_ARCH)
+// We don't need to generate dynamic code on powerpc64-[linux|AIX],
+// but we do need a piece of (static) inline assembly code:
+
+/*
+ * adjustorCodeWrittenInAsm() is never called as a C function; it exists
+ * only so the assembler emits the global label "adjustorCode", which
+ * createAdjustor() installs as the code word of each powerpc64 FunDesc.
+ *
+ * On entry the calling convention's descriptor glue has loaded r2 with
+ * the FunDesc's toc word (set to wptr, itself a function descriptor)
+ * and r11 with its env word (set to hptr) — see the FunDesc layout in
+ * createAdjustor: offsets 0 = code, 8 = toc, 16 = env.
+ * NOTE(review): args are shifted up by two registers but only r3 is
+ * written here; confirm what the callee expects in r4.
+ */
+static void
+adjustorCodeWrittenInAsm()
+{
+ __asm__ volatile (
+ "adjustorCode:\n\t"
+ "mr 10,8\n\t" /* shift the GPR arguments r3..r8 up into r5..r10.. */
+ "mr 9,7\n\t"
+ "mr 8,6\n\t"
+ "mr 7,5\n\t"
+ "mr 6,4\n\t"
+ "mr 5,3\n\t"
+ "mr 3,11\n\t" /* ..and pass the env word (hptr) as the first argument */
+ "ld 0,0(2)\n\t" /* r2 = wptr's descriptor: load its code address.. */
+ "ld 11,16(2)\n\t" /* ..its env word.. */
+ "mtctr 0\n\t"
+ "ld 2,8(2)\n\t" /* ..and its TOC, then tail-jump (no return here) */
+ "bctr"
+ : : );
+}
+#endif
+
void*
createAdjustor(int cconv, StgStablePtr hptr, StgFunPtr wptr)
{
<c>: ff e0 jmp %eax # and jump to it.
# the callee cleans up the stack
*/
- if ((adjustor = stgMallocBytes(14, "createAdjustor")) != NULL) {
+ adjustor = mallocBytesRWX(14);
+ {
unsigned char *const adj_code = (unsigned char *)adjustor;
adj_code[0x00] = (unsigned char)0x58; /* popl %eax */
<00>: 68 ef be ad de pushl $0xdeadbeef # constant is large enough to
# hold a StgStablePtr
<05>: b8 fa ef ff 00 movl $0x00ffeffa, %eax # load up wptr
- <0a>: 68 ef be ad de pushl $__obscure_ccall_ret_code # push the return address
+ <0a>: 68 ef be ad de pushl $obscure_ccall_ret_code # push the return address
<0f>: ff e0 jmp *%eax # jump to wptr
The ccall'ing version is a tad different, passing in the return
via the stable pointer.) (The auto-generated C stub is in on this
game, don't worry :-)
- See the comment next to __obscure_ccall_ret_code why we need to
+ See the comment next to obscure_ccall_ret_code why we need to
perform a tail jump instead of a call, followed by some C stack
fixup.
That's (thankfully) the case here with the restricted set of
return types that we support.
*/
- if ((adjustor = stgMallocBytes(17, "createAdjustor")) != NULL) {
+ adjustor = mallocBytesRWX(17);
+ {
unsigned char *const adj_code = (unsigned char *)adjustor;
adj_code[0x00] = (unsigned char)0x68; /* pushl hptr (which is a dword immediate ) */
adj_code[0x05] = (unsigned char)0xb8; /* movl $wptr, %eax */
*((StgFunPtr*)(adj_code + 0x06)) = (StgFunPtr)wptr;
- adj_code[0x0a] = (unsigned char)0x68; /* pushl __obscure_ccall_ret_code */
- *((StgFunPtr*)(adj_code + 0x0b)) = (StgFunPtr)__obscure_ccall_ret_code;
+ adj_code[0x0a] = (unsigned char)0x68; /* pushl obscure_ccall_ret_code */
+ *((StgFunPtr*)(adj_code + 0x0b)) = (StgFunPtr)obscure_ccall_ret_code;
adj_code[0x0f] = (unsigned char)0xff; /* jmp *%eax */
adj_code[0x10] = (unsigned char)0xe0;
similarly, and local variables should be accessed via %fp, not %sp. In a
nutshell: This should work! (Famous last words! :-)
*/
- if ((adjustor = stgMallocBytes(4*(11+1), "createAdjustor")) != NULL) {
+ adjustor = mallocBytesRWX(4*(11+1));
+ {
unsigned long *const adj_code = (unsigned long *)adjustor;
adj_code[ 0] = 0x9C23A008UL; /* sub %sp, 8, %sp */
4 bytes (getting rid of the nop), hence saving memory. [ccshan]
*/
ASSERT(((StgWord64)wptr & 3) == 0);
- if ((adjustor = stgMallocBytes(48, "createAdjustor")) != NULL) {
+ adjustor = mallocBytesRWX(48);
+ {
StgWord64 *const code = (StgWord64 *)adjustor;
code[0] = 0x4610041246520414L;
this code, it only works for up to 6 arguments (when floating point arguments
are involved, this may be more or less, depending on the exact situation).
*/
- if ((adjustor = stgMallocBytes(4*13, "createAdjustor")) != NULL) {
+ adjustor = mallocBytesRWX(4*13);
+ {
unsigned long *const adj_code = (unsigned long *)adjustor;
// make room for extra arguments
__asm__ volatile ("sync\n\tisync");
}
}
+#elif defined(powerpc64_TARGET_ARCH)
+ // This is for powerpc64 linux and powerpc64 AIX.
+ // It probably won't apply to powerpc64-darwin.
+
+ {
+ typedef struct {
+ StgFunPtr code;
+ void* toc;
+ void* env;
+ } FunDesc;
+
+ FunDesc *desc = malloc(sizeof(FunDesc));
+ extern void *adjustorCode;
+
+ desc->code = (void*) &adjustorCode;
+ desc->toc = (void*) wptr;
+ desc->env = (void*) hptr;
+
+ adjustor = (void*) desc;
+ }
+ break;
+
#elif defined(ia64_TARGET_ARCH)
/*
Up to 8 inputs are passed in registers. We flush the last two inputs to
#if defined(i386_TARGET_ARCH)
if ( *(unsigned char*)ptr != 0x68 &&
*(unsigned char*)ptr != 0x58 ) {
- fprintf(stderr, "freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
+ errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
return;
}
}
#elif defined(sparc_TARGET_ARCH)
if ( *(unsigned long*)ptr != 0x9C23A008UL ) {
- fprintf(stderr, "freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
+ errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
return;
}
freeStablePtr(*((StgStablePtr*)((unsigned long*)ptr + 11)));
#elif defined(alpha_TARGET_ARCH)
if ( *(StgWord64*)ptr != 0xa77b0018a61b0010L ) {
- fprintf(stderr, "freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
+ errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
return;
}
freeStablePtr(*((StgStablePtr*)((unsigned char*)ptr + 0x10)));
#elif defined(powerpc_TARGET_ARCH)
if ( *(StgWord*)ptr != 0x7d0a4378 ) {
- fprintf(stderr, "freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
+ errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
return;
}
freeStablePtr(*((StgStablePtr*)((unsigned char*)ptr + 4*12)));
StgWord64 *code = (StgWord64 *)(fdesc+1);
if (fdesc->ip != (StgWord64)code) {
- fprintf(stderr, "freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
+ errorBelch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
return;
}
freeStablePtr((StgStablePtr)code[16]);
stgFree(ptr);
}
+
+/*
+ * Function: initAdjustor()
+ *
+ * Perform initialisation of adjustor thunk layer (if needed.)
+ *
+ * On i386 this must run before the first createAdjustor() call, since
+ * the thunks generated there embed the address of
+ * obscure_ccall_ret_code.  NOTE(review): presumably invoked once from
+ * RTS startup — confirm the call site runs early enough.
+ */
+void
+initAdjustor(void)
+{
+#if defined(i386_TARGET_ARCH)
+ /* Now here's something obscure for you:
+
+ When generating an adjustor thunk that uses the C calling
+ convention, we have to make sure that the thunk kicks off
+ the process of jumping into Haskell with a tail jump. Why?
+ Because as a result of jumping in into Haskell we may end
+ up freeing the very adjustor thunk we came from using
+ freeHaskellFunctionPtr(). Hence, we better not return to
+ the adjustor code on our way out, since it could by then
+ point to junk.
+
+ The fix is readily at hand, just include the opcodes
+ for the C stack fixup code that we need to perform when
+ returning in some static piece of memory and arrange
+ to return to it before tail jumping from the adjustor thunk.
+ */
+
+ /* 4 bytes: 3-byte addl + 1-byte ret; RWX because it is executed. */
+ obscure_ccall_ret_code = mallocBytesRWX(4);
+
+ obscure_ccall_ret_code[0x00] = (unsigned char)0x83; /* addl $0x4, %esp */
+ obscure_ccall_ret_code[0x01] = (unsigned char)0xc4;
+ obscure_ccall_ret_code[0x02] = (unsigned char)0x04;
+
+ obscure_ccall_ret_code[0x03] = (unsigned char)0xc3; /* ret */
+#endif
+}