/* -----------------------------------------------------------------------------
 * Foreign export adjustor thunks
 * ---------------------------------------------------------------------------*/
/* A little bit of background...

   An adjustor thunk is a dynamically allocated code snippet that allows
   Haskell closures to be viewed as C function pointers.

   Stable pointers provide a way for the outside world to get access to,
   and evaluate, Haskell heap objects, with the RTS providing a small
   range of ops for doing so. So, assuming we've got a stable pointer in
   our hand in C, we can jump into the Haskell world and evaluate a callback
   procedure, say. This works OK in some cases where callbacks are used, but
   does require the external code to know about stable pointers and how to deal
   with them. We'd like to hide the Haskell-nature of a callback and have it
   be invoked just like any other C function pointer.

   Enter adjustor thunks. An adjustor thunk is a little piece of code
   that's generated on-the-fly (one per Haskell closure being exported)
   that, when entered using some 'universal' calling convention (e.g., the
   C calling convention on platform X), pushes an implicit stable pointer
   (to the Haskell callback) before calling another (static) C function stub
   which takes care of entering the Haskell code via its stable pointer.

   An adjustor thunk is allocated on the C heap, and is called from within
   Haskell just before handing out the function pointer to the Haskell (IO)
   action. User code should never have to invoke it explicitly.

   An adjustor thunk differs from a C function pointer in one respect: when
   the code is through with it, it has to be freed in order to release Haskell
   and C resources. Failure to do so results in memory leaks on both the C and
   Haskell sides.
*/
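
/* For illustration only (not part of the RTS): seen from C, the thunk is just
   an ordinary function pointer. Assuming the Haskell side created it via a
   'foreign import ccall "wrapper"' declaration (which is what ends up calling
   createAdjustor() below), a C client might look like this -- the names here
   are hypothetical:

       typedef int (*haskell_callback)(int);

       void run_callback(haskell_callback cb)
       {
           int r = cb(42);               // enters Haskell via the adjustor
           printf("callback said %d\n", r);
           freeHaskellFunctionPtr(cb);   // release both the thunk and the
                                         // stable pointer behind it
       }
*/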
#include "PosixSource.h"
#include "Rts.h"
#include "RtsExternal.h"
#include "RtsUtils.h"
/* Heavily arch-specific, I'm afraid.. */
/* -----------------------------------------------------------------------------
 * Function: execPage()
 *
 * Set the executable bit on the page containing addr.
 *
 * TODO: Can the code span more than one page? If yes, we need to make two
 * pages executable!
 * ---------------------------------------------------------------------------*/
rtsBool
execPage (void* addr, pageMode mode)
{
#if defined(i386_TARGET_ARCH) && defined(_WIN32) && 0
  SYSTEM_INFO sInfo;
  DWORD dwOldProtect = 0;

  /* doesn't return a result, so presumably it can't fail... */
  GetSystemInfo(&sInfo);

  /* note the ~: we must round addr *down* to its page base, not take
     the offset within the page */
  if ( VirtualProtect ( (void*)((unsigned long)addr & ~(sInfo.dwPageSize - 1)),
                        sInfo.dwPageSize,
                        ( mode == pageExecuteReadWrite ? PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ),
                        &dwOldProtect) == 0 ) {
    DWORD rc = GetLastError();
    prog_belch("execPage: failed to protect 0x%p; error=%lu; old protection: %lu\n",
               addr, rc, dwOldProtect);
    return rtsFalse;
  }
  return rtsTrue;
#else
  (void)addr; (void)mode;   /* keep gcc -Wall happy */
  return rtsTrue;
#endif
}
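
/* For illustration only: on a POSIX system, the moral equivalent of the
   (disabled) Win32 code above would round addr down to its page base and
   call mprotect(2). A minimal sketch, assuming getpagesize() is available:

       #include <sys/mman.h>
       #include <unistd.h>

       static rtsBool execPagePosix(void *addr, pageMode mode)
       {
           long size  = getpagesize();
           void *page = (void *)((unsigned long)addr & ~(size - 1));
           int  prot  = PROT_READ | PROT_EXEC
                      | (mode == pageExecuteReadWrite ? PROT_WRITE : 0);
           return mprotect(page, size, prot) == 0 ? rtsTrue : rtsFalse;
       }

   Note the ~(size - 1) mask: forgetting the complement yields the offset
   *within* the page rather than the page base (the bug fixed above).
*/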
static unsigned char __obscure_ccall_ret_code [] =
#if defined(i386_TARGET_ARCH)
/* Now here's something obscure for you:

   When generating an adjustor thunk that uses the C calling
   convention, we have to make sure that the thunk kicks off
   the process of jumping into Haskell with a tail jump. Why?
   Because as a result of jumping into Haskell we may end
   up freeing the very adjustor thunk we came from using
   freeHaskellFunctionPtr(). Hence, we better not return to
   the adjustor code on our way out, since it could by then
   have been freed.

   The fix is readily at hand: just include the opcodes
   for the C stack fixup code that we need to perform when
   returning in some static piece of memory, and arrange
   to return to it before tail jumping from the adjustor thunk.

   For this to work we make the assumption that bytes in .data
   are considered executable.
*/
  { 0x83, 0xc4, 0x04 /* addl $0x4, %esp */
  , 0xc3             /* ret */
  };
#else
/* No such mind-twisters on non-Intel platforms */
  { 0 };
#endif
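
/* To make the ccall trick concrete, here is the life of one call, for
   illustration only. On entry to the C stub the x86 stack looks like:

       [ &__obscure_ccall_ret_code ]  <- acts as the stub's return address
       [ hptr                      ]  <- first argument: the stable pointer
       [ caller's return address   ]
       [ caller's arguments ...    ]

   When the stub returns, it therefore lands in the code above, whose
   'addl $0x4, %esp' drops hptr and whose 'ret' pops the caller's return
   address -- so control flows straight back to the original C caller
   without ever re-entering the (possibly already freed) adjustor. */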
#if defined(alpha_TARGET_ARCH)
/* To get the definition of PAL_imb: */
# if defined(linux_TARGET_OS)
#  include <asm/pal.h>
# else
#  include <machine/pal.h>
# endif
#endif
#if defined(ia64_TARGET_ARCH)

/* Layout of a function descriptor */
typedef struct _IA64FunDesc {
    StgWord64 ip;
    StgWord64 gp;
} IA64FunDesc;
static void *
stgAllocStable(size_t size_in_bytes, StgStablePtr *stable)
{
  StgArrWords* arr;
  nat data_size_in_words, total_size_in_words;

  /* round up to a whole number of words */
  data_size_in_words  = (size_in_bytes + sizeof(W_) - 1) / sizeof(W_);
  total_size_in_words = sizeofW(StgArrWords) + data_size_in_words;

  /* allocate and fill it in */
  arr = (StgArrWords *)allocate(total_size_in_words);
  SET_ARR_HDR(arr, &stg_ARR_WORDS_info, CCCS, data_size_in_words);

  /* obtain a stable ptr */
  *stable = getStablePtr((StgPtr)arr);

  /* and return a ptr to the goods inside the array */
  return(BYTE_ARR_CTS(arr));
}
#endif
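
/* For illustration only, a typical use of stgAllocStable() (mirroring the
   ia64 case in createAdjustor() below):

       StgStablePtr stable;
       void *buf = stgAllocStable(18*8, &stable);  // executable scratch space
       ...fill buf with code...
       freeStablePtr(stable);  // later: allow the GC to reclaim the array
*/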
void*
createAdjustor(int cconv, StgStablePtr hptr, StgFunPtr wptr)
{
  void *adjustor = NULL;

  switch (cconv)
  {
  case 0: /* _stdcall */
#if defined(i386_TARGET_ARCH)
    /* Magic constant computed by inspecting the code length of
       the following assembly language snippet
       (offset and machine code prefixed):

     <0>: 58                popl   %eax              # temp. remove ret addr..
     <1>: 68 fd fc fe fa    pushl  0xfafefcfd        # constant is large enough to
                                                     # hold a StgStablePtr
     <6>: 50                pushl  %eax              # put back ret. addr
     <7>: b8 fa ef ff 00    movl   $0x00ffeffa, %eax # load up wptr
     <c>: ff e0             jmp    %eax              # and jump to it.
          # the callee cleans up the stack
    */
    if ((adjustor = stgMallocBytes(14, "createAdjustor")) != NULL) {
        unsigned char *const adj_code = (unsigned char *)adjustor;
        adj_code[0x00] = (unsigned char)0x58;  /* popl %eax  */

        adj_code[0x01] = (unsigned char)0x68;  /* pushl hptr (which is a dword immediate ) */
        *((StgStablePtr*)(adj_code + 0x02)) = (StgStablePtr)hptr;

        adj_code[0x06] = (unsigned char)0x50;  /* pushl %eax */

        adj_code[0x07] = (unsigned char)0xb8;  /* movl $wptr, %eax */
        *((StgFunPtr*)(adj_code + 0x08)) = (StgFunPtr)wptr;

        adj_code[0x0c] = (unsigned char)0xff;  /* jmp %eax */
        adj_code[0x0d] = (unsigned char)0xe0;

        execPage(adjustor, pageExecuteReadWrite);
    }
#endif
    break;

  case 1: /* _ccall */
#if defined(i386_TARGET_ARCH)
    /* Magic constant computed by inspecting the code length of
       the following assembly language snippet
       (offset and machine code prefixed):

     <00>: 68 ef be ad de    pushl $0xdeadbeef        # constant is large enough to
                                                      # hold a StgStablePtr
     <05>: b8 fa ef ff 00    movl  $0x00ffeffa, %eax  # load up wptr
     <0a>: 68 ef be ad de    pushl $__obscure_ccall_ret_code # push the return address
     <0f>: ff e0             jmp   *%eax              # jump to wptr

       The ccall'ing version is a tad different, passing in the return
       address of the caller to the auto-generated C stub (which enters
       via the stable pointer.) (The auto-generated C stub is in on this
       game, don't worry :-)

       See the comment next to __obscure_ccall_ret_code why we need to
       perform a tail jump instead of a call, followed by some C stack
       fixup.

       Note: The adjustor makes the assumption that any return value
       coming back from the C stub is not stored on the stack.
       That's (thankfully) the case here with the restricted set of
       return types that we support.
    */
    if ((adjustor = stgMallocBytes(17, "createAdjustor")) != NULL) {
        unsigned char *const adj_code = (unsigned char *)adjustor;

        adj_code[0x00] = (unsigned char)0x68;  /* pushl hptr (which is a dword immediate ) */
        *((StgStablePtr*)(adj_code+0x01)) = (StgStablePtr)hptr;

        adj_code[0x05] = (unsigned char)0xb8;  /* movl $wptr, %eax */
        *((StgFunPtr*)(adj_code + 0x06)) = (StgFunPtr)wptr;

        adj_code[0x0a] = (unsigned char)0x68;  /* pushl __obscure_ccall_ret_code */
        *((StgFunPtr*)(adj_code + 0x0b)) = (StgFunPtr)__obscure_ccall_ret_code;

        adj_code[0x0f] = (unsigned char)0xff;  /* jmp *%eax */
        adj_code[0x10] = (unsigned char)0xe0;

        execPage(adjustor, pageExecuteReadWrite);
    }
#elif defined(sparc_TARGET_ARCH)
    /* Magic constant computed by inspecting the code length of the following
       assembly language snippet (offset and machine code prefixed):

     <00>: 9C23A008   sub   %sp, 8, %sp         ! make room for %o4/%o5 in caller's frame
     <04>: DA23A060   st    %o5, [%sp + 96]     ! shift registers by 2 positions
     <08>: D823A05C   st    %o4, [%sp + 92]
     <0C>: 9A10000B   mov   %o3, %o5
     <10>: 9810000A   mov   %o2, %o4
     <14>: 96100009   mov   %o1, %o3
     <18>: 94100008   mov   %o0, %o2
     <1C>: 13000000   sethi %hi(wptr), %o1      ! load up wptr (1 of 2)
     <20>: 11000000   sethi %hi(hptr), %o0      ! load up hptr (1 of 2)
     <24>: 81C26000   jmp   %o1 + %lo(wptr)     ! jump to wptr (load 2 of 2)
     <28>: 90122000   or    %o0, %lo(hptr), %o0 ! load up hptr (2 of 2, delay slot)
     <2C>: 00000000                             ! place for getting hptr back easily

       ccall'ing on SPARC is easy, because we are quite lucky to push a
       multiple of 8 bytes (1 word hptr + 1 word dummy arg) in front of the
       existing arguments (note that %sp must stay double-word aligned at
       all times, see ABI spec at http://www.sparc.org/standards/psABI3rd.pdf).
       To do this, we extend the *caller's* stack frame by 2 words and shift
       the output registers used for argument passing (%o0 - %o5, we are a *leaf*
       procedure because of the tail-jump) by 2 positions. This makes room in
       %o0 and %o1 for the additional arguments, namely hptr and a dummy (used
       for destination addr of jump on SPARC, return address on x86, ...). This
       shouldn't cause any problems for a C-like caller: alloca is implemented
       similarly, and local variables should be accessed via %fp, not %sp. In a
       nutshell: This should work! (Famous last words! :-)
    */
    if ((adjustor = stgMallocBytes(4*(11+1), "createAdjustor")) != NULL) {
        unsigned long *const adj_code = (unsigned long *)adjustor;

        adj_code[ 0]  = 0x9C23A008UL;   /* sub   %sp, 8, %sp         */
        adj_code[ 1]  = 0xDA23A060UL;   /* st    %o5, [%sp + 96]     */
        adj_code[ 2]  = 0xD823A05CUL;   /* st    %o4, [%sp + 92]     */
        adj_code[ 3]  = 0x9A10000BUL;   /* mov   %o3, %o5            */
        adj_code[ 4]  = 0x9810000AUL;   /* mov   %o2, %o4            */
        adj_code[ 5]  = 0x96100009UL;   /* mov   %o1, %o3            */
        adj_code[ 6]  = 0x94100008UL;   /* mov   %o0, %o2            */
        adj_code[ 7]  = 0x13000000UL;   /* sethi %hi(wptr), %o1      */
        adj_code[ 7] |= ((unsigned long)wptr) >> 10;
        adj_code[ 8]  = 0x11000000UL;   /* sethi %hi(hptr), %o0      */
        adj_code[ 8] |= ((unsigned long)hptr) >> 10;
        adj_code[ 9]  = 0x81C26000UL;   /* jmp   %o1 + %lo(wptr)     */
        adj_code[ 9] |= ((unsigned long)wptr) & 0x000003FFUL;
        adj_code[10]  = 0x90122000UL;   /* or    %o0, %lo(hptr), %o0 */
        adj_code[10] |= ((unsigned long)hptr) & 0x000003FFUL;

        adj_code[11]  = (unsigned long)hptr;

        /* flush cache */
        asm("flush %0" : : "r" (adj_code     ));
        asm("flush %0" : : "r" (adj_code +  2));
        asm("flush %0" : : "r" (adj_code +  4));
        asm("flush %0" : : "r" (adj_code +  6));
        asm("flush %0" : : "r" (adj_code + 10));

        /* max. 5 instructions latency, and we need at least 1 for returning */
        asm("nop");
        asm("nop");
        asm("nop");
        asm("nop");
    }
#elif defined(alpha_TARGET_ARCH)
    /* Magic constant computed by inspecting the code length of
       the following assembly language snippet
       (offset and machine code prefixed; note that the machine code
       shown is longwords stored in little-endian order):

     <00>: 46520414   mov  a2, a4
     <04>: 46100412   mov  a0, a2
     <08>: a61b0020   ldq  a0, 0x20(pv)   # load up hptr
     <0c>: 46730415   mov  a3, a5
     <10>: a77b0028   ldq  pv, 0x28(pv)   # load up wptr
     <14>: 46310413   mov  a1, a3
     <18>: 6bfb----   jmp  (pv), <hint>   # jump to wptr (with hint)
     <1c>: 00000000                       # padding for alignment
     <20>: [8 bytes for hptr quadword]
     <28>: [8 bytes for wptr quadword]

       The "computed" jump at <08> above is really a jump to a fixed
       location. Accordingly, we place an always-correct hint in the
       jump instruction, namely the address offset from <0c> to wptr,
       divided by 4, taking the lowest 14 bits.

       We only support passing 4 or fewer argument words, for the same
       reason described under sparc_TARGET_ARCH above by JRS, 21 Aug 01.
       On the Alpha the first 6 integer arguments are in a0 through a5,
       and the rest on the stack. Hence we want to shuffle the original
       caller's arguments by two.

       On the Alpha the calling convention is so complex and dependent
       on the callee's signature -- for example, the stack pointer has
       to be a multiple of 16 -- that it seems impossible to me [ccshan]
       to handle the general case correctly without changing how the
       adjustor is called from C. For now, our solution of shuffling
       registers only and ignoring the stack only works if the original
       caller passed 4 or fewer argument words.

     TODO: Depending on how much allocation overhead stgMallocBytes uses for
       header information (more precisely, if the overhead is no more than
       4 bytes), we should move the first three instructions above down by
       4 bytes (getting rid of the nop), hence saving memory. [ccshan]
    */
    ASSERT(((StgWord64)wptr & 3) == 0);
    if ((adjustor = stgMallocBytes(48, "createAdjustor")) != NULL) {
        StgWord64 *const code = (StgWord64 *)adjustor;

        code[0] = 0x4610041246520414L;
        code[1] = 0x46730415a61b0020L;
        code[2] = 0x46310413a77b0028L;
        code[3] = 0x000000006bfb0000L
            | (((StgWord32*)(wptr) - (StgWord32*)(code) - 3) & 0x3fff);

        code[4] = (StgWord64)hptr;
        code[5] = (StgWord64)wptr;

        /* Ensure that instruction cache is consistent with our new code */
        __asm__ volatile("call_pal %0" : : "i" (PAL_imb));
    }
#elif defined(powerpc_TARGET_ARCH)
/*
    For PowerPC, the following code is used:

        mr r10,r8
        mr r9,r7
        mr r8,r6
        mr r7,r5
        mr r6,r4
        mr r5,r3
        lis r0,0xDEAD    ; hi(wptr)
        lis r3,0xDEAF    ; hi(hptr)
        ori r0,r0,0xBEEF ; lo(wptr)
        ori r3,r3,0xFACE ; lo(hptr)
        mtctr r0
        bctr

    The arguments (passed in registers r3 - r10) are shuffled along by two to
    make room for hptr and a dummy argument. As r9 and r10 are overwritten by
    this code, it only works for up to 6 arguments (when floating point arguments
    are involved, this may be more or less, depending on the exact situation).
*/
    if ((adjustor = stgMallocBytes(4*13, "createAdjustor")) != NULL) {
        unsigned long *const adj_code = (unsigned long *)adjustor;

        // make room for extra arguments
        adj_code[0] = 0x7d0a4378;       //mr r10,r8
        adj_code[1] = 0x7ce93b78;       //mr r9,r7
        adj_code[2] = 0x7cc83378;       //mr r8,r6
        adj_code[3] = 0x7ca72b78;       //mr r7,r5
        adj_code[4] = 0x7c862378;       //mr r6,r4
        adj_code[5] = 0x7c651b78;       //mr r5,r3

        adj_code[6] = 0x3c000000;       //lis r0,hi(wptr)
        adj_code[6] |= ((unsigned long)wptr) >> 16;

        adj_code[7] = 0x3c600000;       //lis r3,hi(hptr)
        adj_code[7] |= ((unsigned long)hptr) >> 16;

        adj_code[8] = 0x60000000;       //ori r0,r0,lo(wptr)
        adj_code[8] |= ((unsigned long)wptr) & 0xFFFF;

        adj_code[9] = 0x60630000;       //ori r3,r3,lo(hptr)
        adj_code[9] |= ((unsigned long)hptr) & 0xFFFF;

        adj_code[10] = 0x7c0903a6;      //mtctr r0
        adj_code[11] = 0x4e800420;      //bctr
        adj_code[12] = (unsigned long)hptr;

        // Flush the Instruction cache:
        //      MakeDataExecutable(adjustor,4*13);
        /* This would require us to link with CoreServices.framework */
        { /* this should do the same: */
            int n = 13;
            unsigned long *p = adj_code;
            while (n--) {
                __asm__ volatile ("dcbf 0,%0\n\tsync\n\ticbi 0,%0"
                                  : : "r" (p));
                p++;
            }
            __asm__ volatile ("sync\n\tisync");
        }
    }
#elif defined(ia64_TARGET_ARCH)
/*
    Up to 8 inputs are passed in registers. We flush the last two inputs to
    the stack, initially into the 16-byte scratch region left by the caller.
    We then shuffle the others along by 4 (taking 2 registers for ourselves
    to save return address and previous function state - we need to come back
    here on the way out to restore the stack, so this is a real function
    rather than just a trampoline).

    The function descriptor we create contains the gp of the target function
    so gp is already loaded correctly.

        [MLX]   alloc r16=ar.pfs,10,2,0
        [MII]   st8.spill [r12]=r38,8   // spill in6 (out4)
                mov r41=r37             // out7 = in5 (out3)
                mov r40=r36;;           // out6 = in4 (out2)
        [MII]   st8.spill [r12]=r39     // spill in7 (out5)
                mov r38=r34;;           // out4 = in2 (out0)
        [MII]   mov r39=r35             // out5 = in3 (out1)
                mov r37=r33             // out3 = in1 (loc1)
                mov r36=r32             // out2 = in0 (loc0)
        [MLX]   adds r12=-24,r12        // update sp
                movl r34=hptr;;         // out0 = hptr
        [MIB]   mov r33=r16             // loc1 = ar.pfs
                mov r32=b0              // loc0 = retaddr
                br.call.sptk.many b0=b6;;
        [MII]   adds r12=-16,r12
                br.ret.sptk.many b0;;
*/
/* These macros distribute a long constant into the two words of an MLX bundle */
#define BITS(val,start,count)   (((val) >> (start)) & ((1 << (count))-1))
#define MOVL_LOWORD(val)        (BITS(val,22,18) << 46)
#define MOVL_HIWORD(val)        (BITS(val,40,23) | (BITS(val,0,7) << 36) | (BITS(val,7,9) << 50) \
                                | (BITS(val,16,5) << 55) | (BITS(val,21,1) << 44) | BITS(val,63,1) << 59)
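
/* For example, BITS(0xABCD, 4, 8) == 0xBC: shift right by 4, keep 8 bits.
   MOVL_LOWORD/MOVL_HIWORD use this to scatter the pieces of a 64-bit
   immediate into the bit positions that the ia64 movl instruction expects
   across the two slots of an MLX bundle. */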
    {
        StgStablePtr stable;
        IA64FunDesc *wdesc = (IA64FunDesc *)wptr;
        StgWord64 wcode = wdesc->ip;
        IA64FunDesc *fdesc;
        StgWord64 *code;

        /* we allocate on the Haskell heap since malloc'd memory isn't executable - argh */
        adjustor = stgAllocStable(sizeof(IA64FunDesc)+18*8, &stable);
        fdesc = (IA64FunDesc *)adjustor;
        code = (StgWord64 *)(fdesc + 1);
        fdesc->ip = (StgWord64)code;
        fdesc->gp = wdesc->gp;

        code[0]  = 0x0000058004288004 | MOVL_LOWORD(wcode);
        code[1]  = 0x6000000220000000 | MOVL_HIWORD(wcode);
        code[2]  = 0x029015d818984001;
        code[3]  = 0x8401200500420094;
        code[4]  = 0x886011d8189c0001;
        code[5]  = 0x84011004c00380c0;
        code[6]  = 0x0250210046013800;
        code[7]  = 0x8401000480420084;
        code[8]  = 0x0000233f19a06005 | MOVL_LOWORD((StgWord64)hptr);
        code[9]  = 0x6000000440000000 | MOVL_HIWORD((StgWord64)hptr);
        code[10] = 0x0200210020010811;
        code[11] = 0x1080006800006200;
        code[12] = 0x0000210018406000;
        code[13] = 0x00aa021000038005;
        code[14] = 0x000000010000001d;
        code[15] = 0x0084000880000200;

        /* save stable pointers in convenient form */
        code[16] = (StgWord64)hptr;
        code[17] = (StgWord64)stable;
    }
#else
    barf("adjustor creation not supported on this platform");
#endif
    break;
  }

  return adjustor;
}
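
/* For illustration only: user code never calls createAdjustor() directly.
   The code generated for a 'foreign import ccall "wrapper"' effectively does
   something like the following (names hypothetical, error handling elided):

       StgStablePtr sp  = getStablePtr((StgPtr)haskell_fun);
       void        *thk = createAdjustor(1, sp, (StgFunPtr)generated_c_stub);
       // 1 selects the ccall convention; 0 would select stdcall.
       // thk is handed back to Haskell as a FunPtr.
*/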
void
freeHaskellFunctionPtr(void* ptr)
{
#if defined(i386_TARGET_ARCH)
 if ( *(unsigned char*)ptr != 0x68 &&
      *(unsigned char*)ptr != 0x58 ) {
   prog_belch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
   return;
 }

 /* Free the stable pointer first..*/
 if (*(unsigned char*)ptr == 0x68) { /* Aha, a ccall adjustor! */
   freeStablePtr(*((StgStablePtr*)((unsigned char*)ptr + 0x01)));
 } else {
   freeStablePtr(*((StgStablePtr*)((unsigned char*)ptr + 0x02)));
 }
#elif defined(sparc_TARGET_ARCH)
 if ( *(unsigned long*)ptr != 0x9C23A008UL ) {
   prog_belch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
   return;
 }

 /* Free the stable pointer first..*/
 freeStablePtr(*((StgStablePtr*)((unsigned long*)ptr + 11)));
#elif defined(alpha_TARGET_ARCH)
 /* The first quadword written by createAdjustor() above; the hptr
    quadword lives at offset 0x20 (see the layout comment there). */
 if ( *(StgWord64*)ptr != 0x4610041246520414L ) {
   prog_belch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
   return;
 }

 /* Free the stable pointer first..*/
 freeStablePtr(*((StgStablePtr*)((unsigned char*)ptr + 0x20)));
#elif defined(powerpc_TARGET_ARCH)
 if ( *(StgWord*)ptr != 0x7d0a4378 ) {
   prog_belch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
   return;
 }

 freeStablePtr(*((StgStablePtr*)((unsigned char*)ptr + 4*12)));
#elif defined(ia64_TARGET_ARCH)
 IA64FunDesc *fdesc = (IA64FunDesc *)ptr;
 StgWord64 *code = (StgWord64 *)(fdesc+1);

 if (fdesc->ip != (StgWord64)code) {
   prog_belch("freeHaskellFunctionPtr: not for me, guv! %p\n", ptr);
   return;
 }
 freeStablePtr((StgStablePtr)code[16]);
 freeStablePtr((StgStablePtr)code[17]);
 /* the adjustor lives on the Haskell heap (see stgAllocStable() above);
    the GC reclaims it once its stable pointer is gone, so there is
    nothing to free here */
 return;
#else
 ASSERT(0);
#endif
 *((unsigned char*)ptr) = '\0';

 stgFree(ptr);
}
/* -----------------------------------------------------------------------------
 * Function: initAdjustor()
 *
 * Perform initialisation of adjustor thunk layer (if needed.)
 * ---------------------------------------------------------------------------*/
rtsBool
initAdjustor(void)
{
  return execPage(__obscure_ccall_ret_code, pageExecuteRead);
}