break;
case BCO:
- ASSERT(BCO_ARITY(obj) > 0);
+ ASSERT(((StgBCO *)obj)->arity > 0);
break;
case AP: /* Copied from stg_AP_entry. */
nat arity, i;
Sp++;
- arity = BCO_ARITY(obj);
+ arity = ((StgBCO *)obj)->arity;
ASSERT(arity > 0);
if (arity < n) {
// n must be greater than 1, and the only kinds of
{
register int bciPtr = 1; /* instruction pointer */
register StgBCO* bco = (StgBCO*)obj;
- register StgWord16* instrs = (StgWord16*)(BCO_INSTRS(bco));
+ register StgWord16* instrs = (StgWord16*)(bco->instrs->payload);
register StgWord* literals = (StgWord*)(&bco->literals->payload[0]);
register StgPtr* ptrs = (StgPtr*)(&bco->ptrs->payload[0]);
register StgInfoTable** itbls = (StgInfoTable**)
case bci_ALLOC_AP: {
StgAP* ap;
- int n_payload = BCO_NEXT - 1;
+ int n_payload = BCO_NEXT;
int request = PAP_sizeW(n_payload);
ap = (StgAP*)allocate_UPD(request);
Sp[-1] = (W_)ap;
case bci_ALLOC_PAP: {
StgPAP* pap;
int arity = BCO_NEXT;
- int n_payload = BCO_NEXT - 1;
+ int n_payload = BCO_NEXT;
int request = PAP_sizeW(n_payload);
pap = (StgPAP*)allocate_NONUPD(request);
Sp[-1] = (W_)pap;
case bci_MKAP: {
int i;
int stkoff = BCO_NEXT;
- int n_payload = BCO_NEXT - 1;
+ int n_payload = BCO_NEXT;
StgAP* ap = (StgAP*)Sp[stkoff];
ASSERT((int)ap->n_args == n_payload);
ap->fun = (StgClosure*)Sp[0];
int stk_offset = BCO_NEXT;
int o_itbl = BCO_NEXT;
void(*marshall_fn)(void*) = (void (*)(void*))BCO_LIT(o_itbl);
- StgTSO *tso = cap->r.rCurrentTSO;
+ int ret_dyn_size =
+ RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE
+ + sizeofW(StgRetDyn);
+
+#ifdef RTS_SUPPORTS_THREADS
+ // Threaded RTS:
+ // Arguments on the TSO stack are not good, because garbage
+ // collection might move the TSO as soon as we call
+ // suspendThread below.
+
+ W_ arguments[stk_offset];
+
+ memcpy(arguments, Sp, sizeof(W_) * stk_offset);
+#endif
// There are a bunch of non-ptr words on the stack (the
// ccall args, the ccall fun address and space for the
// CCALL instruction). So we build a RET_DYN stack frame
// on top of the stack to describe this chunk of stack.
//
- Sp -= RET_DYN_SIZE + sizeofW(StgRetDyn);
+ Sp -= ret_dyn_size;
((StgRetDyn *)Sp)->liveness = ALL_NON_PTRS | N_NONPTRS(stk_offset);
((StgRetDyn *)Sp)->info = (StgInfoTable *)&stg_gc_gen_info;
SAVE_STACK_POINTERS;
tok = suspendThread(&cap->r,rtsFalse);
+#ifndef RTS_SUPPORTS_THREADS
// Careful:
// suspendThread might have shifted the stack
// around (stack squeezing), so we have to grab the real
// Sp out of the TSO to find the ccall args again.
- // We don't own the capability anymore, so we mustn't use it.
- // Instead, we have to save the TSO ptr beforehand.
- // Also note that GC may strike at any time now (from another thread).
- // FIXME - DANGER!! Can GC move our TSO?
- // If so, we have to copy the args elsewhere!
- marshall_fn ( (void*)(tso->sp + RET_DYN_SIZE + sizeofW(StgRetDyn)) );
-
+
+ marshall_fn ( (void*)(cap->r.rCurrentTSO->sp + ret_dyn_size) );
+#else
+ // Threaded RTS:
+ // We already made a stack-allocated copy of the arguments above.
+
+ marshall_fn ( arguments );
+#endif
+
// And restart the thread again, popping the RET_DYN frame.
cap = (Capability *)((void *)resumeThread(tok,rtsFalse) - sizeof(StgFunTable));
LOAD_STACK_POINTERS;
- Sp += RET_DYN_SIZE + sizeofW(StgRetDyn);
+ Sp += ret_dyn_size;
+
+#ifdef RTS_SUPPORTS_THREADS
+ // Threaded RTS:
+ // Copy the "arguments", which might include a return value,
+ // back to the TSO stack. It would of course be enough to
+ // just copy the return value, but we don't know the offset.
+ memcpy(Sp, arguments, sizeof(W_) * stk_offset);
+#endif
+
goto nextInsn;
}