X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2FRtsAPI.c;h=911e703b75d005a7c195556243b900eb955aefca;hb=de75026f5a48d3d052135a973ab4dff76c5b20f5;hp=716b4a2f2b028fb9c699f25df89bdd985cca7090;hpb=6015a94f9108a502150565577b66c23650796639;p=ghc-hetmet.git

diff --git a/rts/RtsAPI.c b/rts/RtsAPI.c
index 716b4a2..911e703 100644
--- a/rts/RtsAPI.c
+++ b/rts/RtsAPI.c
@@ -51,7 +51,7 @@ rts_mkInt8 (Capability *cap, HsInt8 i)
     StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
     SET_HDR(p, I8zh_con_info, CCS_SYSTEM);
     /* Make sure we mask out the bits above the lowest 8 */
-    p->payload[0] = (StgClosure *)(StgInt)((unsigned)i & 0xff);
+    p->payload[0] = (StgClosure *)(StgInt)i;
     return p;
 }
 
@@ -61,7 +61,7 @@ rts_mkInt16 (Capability *cap, HsInt16 i)
     StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
     SET_HDR(p, I16zh_con_info, CCS_SYSTEM);
     /* Make sure we mask out the relevant bits */
-    p->payload[0] = (StgClosure *)(StgInt)((unsigned)i & 0xffff);
+    p->payload[0] = (StgClosure *)(StgInt)i;
     return p;
 }
 
@@ -70,10 +70,35 @@ rts_mkInt32 (Capability *cap, HsInt32 i)
 {
     StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
     SET_HDR(p, I32zh_con_info, CCS_SYSTEM);
-    p->payload[0] = (StgClosure *)(StgInt)((unsigned)i & 0xffffffff);
+    p->payload[0] = (StgClosure *)(StgInt)i;
+    return p;
+}
+
+
+#ifdef sparc_HOST_ARCH
+/* The closures returned by allocateLocal are only guaranteed to be 32 bit
+   aligned, because that's the size of pointers. SPARC v9 can't do
+   misaligned loads/stores, so we have to write the 64bit word in chunks */
+
+HaskellObj
+rts_mkInt64 (Capability *cap, HsInt64 i_)
+{
+    StgInt64 i = (StgInt64)i_;
+    StgInt32 *tmp;
+
+    StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,2));
+    SET_HDR(p, I64zh_con_info, CCS_SYSTEM);
+
+    tmp = (StgInt32*)&(p->payload[0]);
+
+    tmp[0] = (StgInt32)((StgInt64)i >> 32);
+    tmp[1] = (StgInt32)i;               /* truncate high 32 bits */
+
     return p;
 }
 
+#else
+
 HaskellObj
 rts_mkInt64 (Capability *cap, HsInt64 i)
 {
@@ -85,6 +110,9 @@ rts_mkInt64 (Capability *cap, HsInt64 i)
     return p;
 }
 
+#endif /* sparc_HOST_ARCH */
+
+
 HaskellObj
 rts_mkWord (Capability *cap, HsWord i)
 {
@@ -124,6 +152,31 @@ rts_mkWord32 (Capability *cap, HsWord32 w)
     return p;
 }
 
+
+#ifdef sparc_HOST_ARCH
+/* The closures returned by allocateLocal are only guaranteed to be 32 bit
+   aligned, because that's the size of pointers. SPARC v9 can't do
+   misaligned loads/stores, so we have to write the 64bit word in chunks */
+
+HaskellObj
+rts_mkWord64 (Capability *cap, HsWord64 w_)
+{
+    StgWord64 w = (StgWord64)w_;
+    StgWord32 *tmp;
+
+    StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,2));
+    /* see mk_Int8 comment */
+    SET_HDR(p, W64zh_con_info, CCS_SYSTEM);
+
+    tmp = (StgWord32*)&(p->payload[0]);
+
+    tmp[0] = (StgWord32)((StgWord64)w >> 32);
+    tmp[1] = (StgWord32)w;              /* truncate high 32 bits */
+    return p;
+}
+
+#else
+
 HaskellObj
 rts_mkWord64 (Capability *cap, HsWord64 w)
 {
@@ -137,6 +190,9 @@ rts_mkWord64 (Capability *cap, HsWord64 w)
     return p;
 }
 
+#endif
+
+
 HaskellObj
 rts_mkFloat (Capability *cap, HsFloat f)
 {
@@ -264,6 +320,27 @@ rts_getInt32 (HaskellObj p)
     return (HsInt32)(HsInt)(UNTAG_CLOSURE(p)->payload[0]);
 }
 
+
+#ifdef sparc_HOST_ARCH
+/* The closures returned by allocateLocal are only guaranteed to be 32 bit
+   aligned, because that's the size of pointers. SPARC v9 can't do
+   misaligned loads/stores, so we have to read the 64bit word in chunks */
+
+HsInt64
+rts_getInt64 (HaskellObj p)
+{
+    HsInt32* tmp;
+    // See comment above:
+    // ASSERT(p->header.info == I64zh_con_info ||
+    //        p->header.info == I64zh_static_info);
+    tmp = (HsInt32*)&(UNTAG_CLOSURE(p)->payload[0]);
+
+    HsInt64 i = (HsInt64)((HsInt64)(tmp[0]) << 32) | (HsInt64)tmp[1];
+    return i;
+}
+
+#else
+
 HsInt64
 rts_getInt64 (HaskellObj p)
 {
@@ -274,6 +351,10 @@ rts_getInt64 (HaskellObj p)
     tmp = (HsInt64*)&(UNTAG_CLOSURE(p)->payload[0]);
     return *tmp;
 }
+
+#endif /* sparc_HOST_ARCH */
+
+
 HsWord
 rts_getWord (HaskellObj p)
 {
@@ -311,6 +392,26 @@ rts_getWord32 (HaskellObj p)
 }
 
 
+#ifdef sparc_HOST_ARCH
+/* The closures returned by allocateLocal are only guaranteed to be 32 bit
+   aligned, because that's the size of pointers. SPARC v9 can't do
+   misaligned loads/stores, so we have to write the 64bit word in chunks */
+
+HsWord64
+rts_getWord64 (HaskellObj p)
+{
+    HsInt32* tmp;
+    // See comment above:
+    // ASSERT(p->header.info == I64zh_con_info ||
+    //        p->header.info == I64zh_static_info);
+    tmp = (HsInt32*)&(UNTAG_CLOSURE(p)->payload[0]);
+
+    HsInt64 i = (HsWord64)((HsWord64)(tmp[0]) << 32) | (HsWord64)tmp[1];
+    return i;
+}
+
+#else
+
 HsWord64
 rts_getWord64 (HaskellObj p)
 {
@@ -322,6 +423,9 @@ rts_getWord64 (HaskellObj p)
     return *tmp;
 }
 
+#endif
+
+
 HsFloat
 rts_getFloat (HaskellObj p)
 {
@@ -491,6 +595,9 @@ rts_evalStableIO (Capability *cap, HsStablePtr s, /*out*/HsStablePtr *ret)
     p = (StgClosure *)deRefStablePtr(s);
     tso = createStrictIOThread(cap, RtsFlags.GcFlags.initialStkSize, p);
+    // async exceptions are always blocked by default in the created
+    // thread. See #1048.
+    tso->flags |= TSO_BLOCKEX | TSO_INTERRUPTIBLE;
     cap = scheduleWaitThread(tso,&r,cap);
     stat = rts_getSchedStatus(cap);
 
@@ -584,18 +691,20 @@ rts_unlock (Capability *cap)
     task = cap->running_task;
     ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
 
-    // slightly delicate ordering of operations below, pay attention!
-
-    // We are no longer a bound task/thread. This is important,
-    // because the GC can run when we release the Capability below,
-    // and we don't want it to treat this as a live TSO pointer.
-    task->tso = NULL;
-
     // Now release the Capability. With the capability released, GC
     // may happen. NB. does not try to put the current Task on the
     // worker queue.
-    releaseCapability(cap);
+    // NB. keep cap->lock held while we call boundTaskExiting(). This
+    // is necessary during shutdown, where we want the invariant that
+    // after shutdownCapability(), all the Tasks associated with the
+    // Capability have completed their shutdown too. Otherwise we
+    // could have boundTaskExiting()/workerTaskStop() running at some
+    // random point in the future, which causes problems for
+    // freeTaskManager().
+    ACQUIRE_LOCK(&cap->lock);
+    releaseCapability_(cap,rtsFalse);
 
     // Finally, we can release the Task to the free list.
     boundTaskExiting(task);
+    RELEASE_LOCK(&cap->lock);
 }