StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
SET_HDR(p, I8zh_con_info, CCS_SYSTEM);
/* Make sure we mask out the bits above the lowest 8 */
- p->payload[0] = (StgClosure *)(StgInt)((unsigned)i & 0xff);
+ p->payload[0] = (StgClosure *)(StgInt)i;
return p;
}
StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
SET_HDR(p, I16zh_con_info, CCS_SYSTEM);
/* Make sure we mask out the relevant bits */
- p->payload[0] = (StgClosure *)(StgInt)((unsigned)i & 0xffff);
+ p->payload[0] = (StgClosure *)(StgInt)i;
return p;
}
{
StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
SET_HDR(p, I32zh_con_info, CCS_SYSTEM);
- p->payload[0] = (StgClosure *)(StgInt)((unsigned)i & 0xffffffff);
+ p->payload[0] = (StgClosure *)(StgInt)i;
return p;
}
+
+#ifdef sparc_HOST_ARCH
+/* The closures returned by allocateLocal are only guaranteed to be 32 bit
+ aligned, because that's the size of pointers. SPARC v9 can't do
+ misaligned loads/stores, so we have to write the 64bit word in chunks */
+
+HaskellObj
+rts_mkInt64 (Capability *cap, HsInt64 i_)
+{
+    StgInt64  i = (StgInt64)i_;
+    StgInt32 *tmp;
+
+    StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,2));
+    SET_HDR(p, I64zh_con_info, CCS_SYSTEM);
+
+    /* Write the payload as two aligned 32-bit stores; SPARC is
+       big-endian, so the high word is stored first. */
+    tmp = (StgInt32*)&(p->payload[0]);
+    tmp[0] = (StgInt32)(i >> 32);  /* high 32 bits */
+    tmp[1] = (StgInt32)i;          /* low 32 bits (high bits truncated) */
+
+    return p;
+}
+
+#else
+
HaskellObj
rts_mkInt64 (Capability *cap, HsInt64 i)
{
return p;
}
+#endif /* sparc_HOST_ARCH */
+
+
HaskellObj
rts_mkWord (Capability *cap, HsWord i)
{
return (HsInt32)(HsInt)(UNTAG_CLOSURE(p)->payload[0]);
}
+
+#ifdef sparc_HOST_ARCH
+/* The closures returned by allocateLocal are only guaranteed to be 32 bit
+ aligned, because that's the size of pointers. SPARC v9 can't do
+ misaligned loads/stores, so we have to read the 64bit word in chunks */
+
+HsInt64
+rts_getInt64 (HaskellObj p)
+{
+    HsInt32* tmp;
+    // See comment above:
+    // ASSERT(p->header.info == I64zh_con_info ||
+    //        p->header.info == I64zh_static_info);
+    tmp = (HsInt32*)&(UNTAG_CLOSURE(p)->payload[0]);
+
+    /* Widen the high word to 64 bits BEFORE shifting: shifting a
+       32-bit value by 32 is undefined behaviour in C, and even where
+       it happens to "work" the subsequent cast would be too late to
+       recover the high bits.  Cast the low word through an unsigned
+       32-bit type so it is zero-extended, not sign-extended, before
+       being OR'd into the result. */
+    HsInt64 i = ((HsInt64)tmp[0] << 32) | (HsInt64)(HsWord32)tmp[1];
+    return i;
+}
+
+#else
+
HsInt64
rts_getInt64 (HaskellObj p)
{
tmp = (HsInt64*)&(UNTAG_CLOSURE(p)->payload[0]);
return *tmp;
}
+
+#endif /* sparc_HOST_ARCH */
+
+
HsWord
rts_getWord (HaskellObj p)
{
task = cap->running_task;
ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
- // slightly delicate ordering of operations below, pay attention!
-
- // We are no longer a bound task/thread. This is important,
- // because the GC can run when we release the Capability below,
- // and we don't want it to treat this as a live TSO pointer.
- task->tso = NULL;
-
// Now release the Capability. With the capability released, GC
// may happen. NB. does not try to put the current Task on the
// worker queue.
- releaseCapability(cap);
+ // NB. keep cap->lock held while we call boundTaskExiting(). This
+ // is necessary during shutdown, where we want the invariant that
+ // after shutdownCapability(), all the Tasks associated with the
+ // Capability have completed their shutdown too. Otherwise we
+ // could have boundTaskExiting()/workerTaskStop() running at some
+ // random point in the future, which causes problems for
+ // freeTaskManager().
+ ACQUIRE_LOCK(&cap->lock);
+ releaseCapability_(cap,rtsFalse);
// Finally, we can release the Task to the free list.
boundTaskExiting(task);
+ RELEASE_LOCK(&cap->lock);
}