Getting this right requires three extremely @MAGIC@ macros, no doubt
chock full of assembly gook for the current platform. These are
-@MAGIC_CALL_SETUP@, which gets ready for one of these magic calls,
+@MAGIC_CALL_SETUP@, which gets ready for one of these magic calls,
@MAGIC_CALL@, which performs the call and stashes away all possible
results, and @MAGIC_RETURN@, which collects all possible results back
up again.
#define WRAPPER_NAME(f) /* nothing */
+/*
+ Threaded code needs to be able to grab the return address, in case we have
+ an intervening context switch.
+ */
+
+#define SET_RETADDR(loc) { register StgFunPtr ra __asm__ ("$31"); loc = ra; }
+
#define WRAPPER_SETUP(f,ignore1,ignore2) SaveAllStgContext();
#define WRAPPER_RETURN(x) \
%************************************************************************
\begin{code}
-#if powerpc_TARGET_ARCH
-
-/* shift 4 arg registers down one */
+#if powerpc_TARGET_ARCH || rs6000_TARGET_ARCH
#define MAGIC_CALL_SETUP \
register void (*f)() __asm__("$2"); \
__asm__ volatile ( \
- "move $2,$4\n" \
- "\tmove $4,$5\n" \
- "\tmove $5,$6\n" \
- "\tmove $6,$7\n" \
- "\tlw $7,16($sp)\n" \
- "\taddu $sp,$sp,4\n" \
+ "not used!!!????\n" \
: : : "$2" );
#define MAGIC_CALL \
(*f)(); \
__asm__ volatile ( \
- "subu $sp,$sp,4\n" \
- "\ts.d $f0, -8($sp)\n" \
- "\tsw $2, -12($sp)");
+ "not used!!!????\n");
#define MAGIC_RETURN \
__asm__ volatile ( \
- "l.d $f0, -8($sp)\n" \
- "\tlw $2, -12($sp)");
+ "not used!!!????\n");
#define WRAPPER_NAME(f) /* nothing */
+#define SET_RETADDR(loc) \
+ __asm__ volatile ( \
+ "mflr 0\n" \
+ "\tst 0,%0" \
+ :"=m" (loc) :: "0");
+/* __asm__ volatile ("st %%r0, %0" : "=m" ((void *)(loc))); */
+
#define WRAPPER_SETUP(f,ignore1,ignore2) SaveAllStgContext();
+/* we have to make sure the STG registers are restored.
+GCC tries to restore the value the registers had in
+the beginning of the current call, which we don't want.
+We defeat it by saving the registers in the stack again. :-( */
+
#define WRAPPER_RETURN(x) \
- do {RestoreAllStgRegs(); if(x) JMP_(EnterNodeCode);} while(0);
+ do {RestoreAllStgRegs(); if(x) JMP_(EnterNodeCode);} while(0); \
+ __asm__ volatile ( \
+ "cal 1,136(1)\n" \
+ "\tstm 14,-72(1)\n" \
+ "\tstu 1,-136(1)");
#define SEPARATE_WRAPPER_RESTORE /* none */
"\tstd %i2,[%fp-32]\n" \
"\tstd %i4,[%fp-24]");
-/* We leave nothing to chance here; we have seen
- GCC stick "unwanted" code in the branch delay
- slot, causing mischief (WDP 96/05)
+/* Lest GCC attempt to stick something in
+   the delay slot: we compile with
+   -fno-delayed-branch.  A weak solution. WDP 96/07
+*/
-#ifdef GRAN
#define MAGIC_CALL \
__asm__ volatile ( \
"ld [%%fp-40],%%o5\n" \
__asm__ volatile ( \
"std %f0,[%fp-40]\n" \
"\tstd %o0,[%fp-32]");
-#else
+#if 0
+/* We leave nothing to chance here; we have seen
+ GCC stick "unwanted" code in the branch delay
+ slot, causing mischief (WDP 96/05)
+*/
+/* the problem with this one: GCC has no way of
+ knowing there is a "call" in there, so it
+ does not do any calling-convention stuff
+ (e.g., saving used regs). WDP 96/07
+*/
#define MAGIC_CALL \
__asm__ volatile ( \
"ld [%%fp-40],%%o5\n" \
"\tnop\n" \
"\tstd %%f0,[%%fp-40]\n"\
"\tstd %%o0,[%%fp-32]" \
- : : : "%o0", "%o1", "%o2", "%o3", "%o4", "%o5", "%f0", "%g1", "%g2", "%g3", "%g4", "memory");
-#endif
+ : : : "%o0", "%o1", "%o2", "%o3", "%o4", "%o5", "%o7", "%f0", "memory");
+#endif /* 0 */
#define MAGIC_RETURN \
__asm__ volatile ( \