diff --git a/rts/RtsAPI.c b/rts/RtsAPI.c
index b1b1d9c..911e703 100644
--- a/rts/RtsAPI.c
+++ b/rts/RtsAPI.c
@@ -9,7 +9,6 @@
 #include "PosixSource.h"
 #include "Rts.h"
 #include "OSThreads.h"
-#include "Storage.h"
 #include "RtsAPI.h"
 #include "SchedAPI.h"
 #include "RtsFlags.h"
@@ -17,11 +16,16 @@
 #include "Prelude.h"
 #include "Schedule.h"
 #include "Capability.h"
+#include "Stable.h"
 
 #include <stdlib.h>
 
 /* ----------------------------------------------------------------------------
    Building Haskell objects from C datatypes.
+
+   TODO: Currently this code does not tag created pointers;
+   however, it is not unsafe (the constructor code will do it),
+   just inefficient.
   ------------------------------------------------------------------------- */
 HaskellObj
 rts_mkChar (Capability *cap, HsChar c)
@@ -47,7 +51,7 @@ rts_mkInt8 (Capability *cap, HsInt8 i)
   StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
   SET_HDR(p, I8zh_con_info, CCS_SYSTEM);
   /* Make sure we mask out the bits above the lowest 8 */
-  p->payload[0]  = (StgClosure *)(StgInt)((unsigned)i & 0xff);
+  p->payload[0]  = (StgClosure *)(StgInt)i;
   return p;
 }
 
@@ -57,7 +61,7 @@ rts_mkInt16 (Capability *cap, HsInt16 i)
   StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
   SET_HDR(p, I16zh_con_info, CCS_SYSTEM);
   /* Make sure we mask out the relevant bits */
-  p->payload[0]  = (StgClosure *)(StgInt)((unsigned)i & 0xffff);
+  p->payload[0]  = (StgClosure *)(StgInt)i;
   return p;
 }
 
@@ -66,10 +70,35 @@ rts_mkInt32 (Capability *cap, HsInt32 i)
 {
   StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
   SET_HDR(p, I32zh_con_info, CCS_SYSTEM);
-  p->payload[0]  = (StgClosure *)(StgInt)((unsigned)i & 0xffffffff);
+  p->payload[0]  = (StgClosure *)(StgInt)i;
+  return p;
+}
+
+
+#ifdef sparc_HOST_ARCH
+/* The closures returned by allocateLocal are only guaranteed to be 32 bit
+   aligned, because that's the size of pointers.  SPARC v9 can't do
+   misaligned loads/stores, so we have to write the 64bit word in chunks */
+
+HaskellObj
+rts_mkInt64 (Capability *cap, HsInt64 i_)
+{
+  StgInt64 i = (StgInt64)i_;
+  StgInt32 *tmp;
+
+  StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,2));
+  SET_HDR(p, I64zh_con_info, CCS_SYSTEM);
+
+  tmp = (StgInt32*)&(p->payload[0]);
+
+  tmp[0] = (StgInt32)((StgInt64)i >> 32);
+  tmp[1] = (StgInt32)i;		/* truncate high 32 bits */
+
   return p;
 }
+#else
+
 
 HaskellObj
 rts_mkInt64 (Capability *cap, HsInt64 i)
 {
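Note: the chunked 64-bit store above is independent of the RTS. A minimal standalone sketch of the same technique (payload and store64_chunked are illustrative names, not RTS types; payload stands in for the 32-bit-aligned closure payload):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t payload[2];   /* stand-in for a 32-bit-aligned closure payload */

    /* Store a 64-bit value as two naturally aligned 32-bit halves,
       high word first, matching the layout rts_mkInt64 writes above. */
    static void store64_chunked (int64_t i)
    {
        payload[0] = (uint32_t)((uint64_t)i >> 32);  /* high 32 bits */
        payload[1] = (uint32_t)i;                    /* low 32 bits, truncated */
    }

    int main (void)
    {
        store64_chunked(0x0123456789abcdefLL);
        printf("%08x %08x\n", (unsigned)payload[0], (unsigned)payload[1]);
        /* prints: 01234567 89abcdef */
        return 0;
    }

Each half is written with an aligned 32-bit access, so no misaligned 64-bit store is ever issued.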
@@ -81,6 +110,9 @@ rts_mkInt64 (Capability *cap, HsInt64 i)
   return p;
 }
+#endif /* sparc_HOST_ARCH */
+
+
 
 HaskellObj
 rts_mkWord (Capability *cap, HsWord i)
 {
@@ -120,6 +152,31 @@ rts_mkWord32 (Capability *cap, HsWord32 w)
   return p;
 }
 
+
+#ifdef sparc_HOST_ARCH
+/* The closures returned by allocateLocal are only guaranteed to be 32 bit
+   aligned, because that's the size of pointers.  SPARC v9 can't do
+   misaligned loads/stores, so we have to write the 64bit word in chunks */
+
+HaskellObj
+rts_mkWord64 (Capability *cap, HsWord64 w_)
+{
+  StgWord64 w = (StgWord64)w_;
+  StgWord32 *tmp;
+
+  StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,2));
+  /* see mk_Int8 comment */
+  SET_HDR(p, W64zh_con_info, CCS_SYSTEM);
+
+  tmp = (StgWord32*)&(p->payload[0]);
+
+  tmp[0] = (StgWord32)((StgWord64)w >> 32);
+  tmp[1] = (StgWord32)w;	/* truncate high 32 bits */
+  return p;
+}
+
+#else
+
 HaskellObj
 rts_mkWord64 (Capability *cap, HsWord64 w)
 {
@@ -133,6 +190,9 @@ rts_mkWord64 (Capability *cap, HsWord64 w)
   return p;
 }
+#endif
+
+
 
 HaskellObj
 rts_mkFloat (Capability *cap, HsFloat f)
 {
@@ -221,7 +281,7 @@ rts_getChar (HaskellObj p)
     // See comment above:
     // ASSERT(p->header.info == Czh_con_info ||
     //        p->header.info == Czh_static_info);
-    return (StgChar)(StgWord)(p->payload[0]);
+    return (StgChar)(StgWord)(UNTAG_CLOSURE(p)->payload[0]);
 }
 
 HsInt
@@ -230,7 +290,7 @@ rts_getInt (HaskellObj p)
     // See comment above:
    // ASSERT(p->header.info == Izh_con_info ||
    //        p->header.info == Izh_static_info);
-    return (HsInt)(p->payload[0]);
+    return (HsInt)(UNTAG_CLOSURE(p)->payload[0]);
 }
 
 HsInt8
@@ -239,7 +299,7 @@ rts_getInt8 (HaskellObj p)
     // See comment above:
     // ASSERT(p->header.info == I8zh_con_info ||
     //        p->header.info == I8zh_static_info);
-    return (HsInt8)(HsInt)(p->payload[0]);
+    return (HsInt8)(HsInt)(UNTAG_CLOSURE(p)->payload[0]);
 }
 
 HsInt16
@@ -248,7 +308,7 @@ rts_getInt16 (HaskellObj p)
     // See comment above:
     // ASSERT(p->header.info == I16zh_con_info ||
     //        p->header.info == I16zh_static_info);
-    return (HsInt16)(HsInt)(p->payload[0]);
+    return (HsInt16)(HsInt)(UNTAG_CLOSURE(p)->payload[0]);
 }
 
 HsInt32
@@ -257,9 +317,30 @@ rts_getInt32 (HaskellObj p)
     // See comment above:
     // ASSERT(p->header.info == I32zh_con_info ||
     //        p->header.info == I32zh_static_info);
-    return (HsInt32)(HsInt)(p->payload[0]);
+    return (HsInt32)(HsInt)(UNTAG_CLOSURE(p)->payload[0]);
+}
+
+
+#ifdef sparc_HOST_ARCH
+/* The closures returned by allocateLocal are only guaranteed to be 32 bit
+   aligned, because that's the size of pointers.  SPARC v9 can't do
+   misaligned loads/stores, so we have to read the 64bit word in chunks */
+
+HsInt64
+rts_getInt64 (HaskellObj p)
+{
+    HsInt32* tmp;
+    // See comment above:
+    // ASSERT(p->header.info == I64zh_con_info ||
+    //        p->header.info == I64zh_static_info);
+    tmp = (HsInt32*)&(UNTAG_CLOSURE(p)->payload[0]);
+
+    HsInt64 i = ((HsInt64)tmp[0] << 32) | (HsInt64)(HsWord32)tmp[1]; /* zero-extend the low word */
+    return i;
 }
+#else
+
 
 HsInt64
 rts_getInt64 (HaskellObj p)
 {
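Note: the chunked read above widens the low word through HsWord32 because ORing in a sign-extended low half corrupts the high word whenever the low half has its top bit set. A standalone demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main (void)
    {
        int32_t hi = 1;
        int32_t lo = (int32_t)0x80000000;   /* low half with its top bit set */

        /* Sign-extending lo smears 1-bits across the high word: */
        int64_t bad  = ((int64_t)hi << 32) | (int64_t)lo;
        /* Widening via an unsigned 32-bit type zero-extends instead: */
        int64_t good = ((int64_t)hi << 32) | (int64_t)(uint32_t)lo;

        printf("bad  = %016llx\n", (unsigned long long)bad);   /* ffffffff80000000 */
        printf("good = %016llx\n", (unsigned long long)good);  /* 0000000180000000 */
        return 0;
    }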
@@ -267,16 +348,20 @@ rts_getInt64 (HaskellObj p)
     HsInt64* tmp;
     // See comment above:
     // ASSERT(p->header.info == I64zh_con_info ||
     //        p->header.info == I64zh_static_info);
-    tmp = (HsInt64*)&(p->payload[0]);
+    tmp = (HsInt64*)&(UNTAG_CLOSURE(p)->payload[0]);
     return *tmp;
 }
+
+#endif /* sparc_HOST_ARCH */
+
+
 
 HsWord
 rts_getWord (HaskellObj p)
 {
     // See comment above:
     // ASSERT(p->header.info == Wzh_con_info ||
     //        p->header.info == Wzh_static_info);
-    return (HsWord)(p->payload[0]);
+    return (HsWord)(UNTAG_CLOSURE(p)->payload[0]);
 }
@@ -285,7 +370,7 @@ rts_getWord8 (HaskellObj p)
     // See comment above:
     // ASSERT(p->header.info == W8zh_con_info ||
     //        p->header.info == W8zh_static_info);
-    return (HsWord8)(HsWord)(p->payload[0]);
+    return (HsWord8)(HsWord)(UNTAG_CLOSURE(p)->payload[0]);
 }
 
 HsWord16
@@ -294,7 +379,7 @@ rts_getWord16 (HaskellObj p)
     // See comment above:
     // ASSERT(p->header.info == W16zh_con_info ||
     //        p->header.info == W16zh_static_info);
-    return (HsWord16)(HsWord)(p->payload[0]);
+    return (HsWord16)(HsWord)(UNTAG_CLOSURE(p)->payload[0]);
 }
 
 HsWord32
@@ -303,10 +388,30 @@ rts_getWord32 (HaskellObj p)
     // See comment above:
     // ASSERT(p->header.info == W32zh_con_info ||
     //        p->header.info == W32zh_static_info);
-    return (HsWord32)(HsWord)(p->payload[0]);
+    return (HsWord32)(HsWord)(UNTAG_CLOSURE(p)->payload[0]);
 }
 
 
+#ifdef sparc_HOST_ARCH
+/* The closures returned by allocateLocal are only guaranteed to be 32 bit
+   aligned, because that's the size of pointers.  SPARC v9 can't do
+   misaligned loads/stores, so we have to read the 64bit word in chunks */
+
+HsWord64
+rts_getWord64 (HaskellObj p)
+{
+    HsWord32* tmp;
+    // See comment above:
+    // ASSERT(p->header.info == W64zh_con_info ||
+    //        p->header.info == W64zh_static_info);
+    tmp = (HsWord32*)&(UNTAG_CLOSURE(p)->payload[0]);
+
+    HsWord64 w = ((HsWord64)tmp[0] << 32) | (HsWord64)tmp[1]; /* tmp is unsigned: the low word zero-extends */
+    return w;
+}
+
+#else
+
 HsWord64
 rts_getWord64 (HaskellObj p)
 {
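Note: every getter in this file now calls UNTAG_CLOSURE before touching the payload. With pointer tagging, the low bits of a closure pointer may carry the constructor tag (closures are word-aligned, so those bits are otherwise unused), and they must be masked off before the pointer is dereferenced. A sketch with illustrative macros, not the RTS definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define TAG_BITS   2   /* 2 tag bits on 32-bit targets, 3 on 64-bit */
    #define TAG_MASK   ((uintptr_t)((1 << TAG_BITS) - 1))
    #define UNTAG(p)   ((uintptr_t)(p) & ~TAG_MASK)
    #define GET_TAG(p) ((uintptr_t)(p) & TAG_MASK)

    int main (void)
    {
        uintptr_t closure = 0x1000;        /* pretend heap address */
        uintptr_t tagged  = closure | 1;   /* tagged with constructor 1 */

        printf("addr = %#lx, tag = %lu\n",
               (unsigned long)UNTAG(tagged), (unsigned long)GET_TAG(tagged));
        /* prints: addr = 0x1000, tag = 1 */
        return 0;
    }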
@@ -314,17 +419,20 @@ rts_getWord64 (HaskellObj p)
     HsWord64* tmp;
     // See comment above:
     // ASSERT(p->header.info == W64zh_con_info ||
     //        p->header.info == W64zh_static_info);
-    tmp = (HsWord64*)&(p->payload[0]);
+    tmp = (HsWord64*)&(UNTAG_CLOSURE(p)->payload[0]);
     return *tmp;
 }
+#endif
+
+
 
 
 HsFloat
 rts_getFloat (HaskellObj p)
 {
     // See comment above:
     // ASSERT(p->header.info == Fzh_con_info ||
     //        p->header.info == Fzh_static_info);
-    return (float)(PK_FLT((P_)p->payload));
+    return (float)(PK_FLT((P_)UNTAG_CLOSURE(p)->payload));
 }
@@ -333,7 +441,7 @@ rts_getDouble (HaskellObj p)
     // See comment above:
     // ASSERT(p->header.info == Dzh_con_info ||
     //        p->header.info == Dzh_static_info);
-    return (double)(PK_DBL((P_)p->payload));
+    return (double)(PK_DBL((P_)UNTAG_CLOSURE(p)->payload));
 }
 
 HsStablePtr
@@ -342,7 +450,7 @@ rts_getStablePtr (HaskellObj p)
     // See comment above:
     // ASSERT(p->header.info == StablePtr_con_info ||
     //        p->header.info == StablePtr_static_info);
-    return (StgStablePtr)(p->payload[0]);
+    return (StgStablePtr)(UNTAG_CLOSURE(p)->payload[0]);
 }
 
 HsPtr
@@ -351,7 +459,7 @@ rts_getPtr (HaskellObj p)
     // See comment above:
     // ASSERT(p->header.info == Ptr_con_info ||
     //        p->header.info == Ptr_static_info);
-    return (Capability *)(p->payload[0]);
+    return (Capability *)(UNTAG_CLOSURE(p)->payload[0]);
 }
 
 HsFunPtr
@@ -360,7 +468,7 @@ rts_getFunPtr (HaskellObj p)
     // See comment above:
     // ASSERT(p->header.info == FunPtr_con_info ||
     //        p->header.info == FunPtr_static_info);
-    return (void *)(p->payload[0]);
+    return (void *)(UNTAG_CLOSURE(p)->payload[0]);
 }
 
 HsBool
@@ -368,7 +476,7 @@ rts_getBool (HaskellObj p)
 {
     StgInfoTable *info;
 
-    info = get_itbl((StgClosure *)p);
+    info = get_itbl((StgClosure *)UNTAG_CLOSURE(p));
     if (info->srt_bitmap == 0) { // srt_bitmap is the constructor tag
         return 0;
     } else {
@@ -487,6 +595,9 @@ rts_evalStableIO (Capability *cap, HsStablePtr s, /*out*/HsStablePtr *ret)
 
     p = (StgClosure *)deRefStablePtr(s);
     tso = createStrictIOThread(cap, RtsFlags.GcFlags.initialStkSize, p);
+    // async exceptions are always blocked by default in the created
+    // thread.  See #1048.
+    tso->flags |= TSO_BLOCKEX | TSO_INTERRUPTIBLE;
     cap = scheduleWaitThread(tso,&r,cap);
     stat = rts_getSchedStatus(cap);
 
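Note: for context, a typical caller of rts_evalStableIO looks roughly like this. This is a sketch assuming the rts_lock/rts_evalStableIO/rts_unlock signatures of this era's RtsAPI.h; run_exported_action is an illustrative name, not part of the RTS. With the change above, the IO action starts with async exceptions blocked.

    #include "Rts.h"
    #include "RtsAPI.h"

    /* Run an IO action held behind a stable pointer. */
    void run_exported_action (HsStablePtr action)
    {
        Capability *cap = rts_lock();       /* check out a Capability */
        HsStablePtr ret = NULL;

        cap = rts_evalStableIO(cap, action, &ret);
        if (rts_getSchedStatus(cap) != Success) {
            /* the action was killed or interrupted; handle it here */
        }
        rts_unlock(cap);                    /* hand the Capability back */
    }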
@@ -580,18 +691,20 @@ rts_unlock (Capability *cap)
     task = cap->running_task;
     ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
 
-    // slightly delicate ordering of operations below, pay attention!
-
-    // We are no longer a bound task/thread.  This is important,
-    // because the GC can run when we release the Capability below,
-    // and we don't want it to treat this as a live TSO pointer.
-    task->tso = NULL;
-
     // Now release the Capability.  With the capability released, GC
     // may happen.  NB. does not try to put the current Task on the
     // worker queue.
-    releaseCapability(cap);
+    // NB. keep cap->lock held while we call boundTaskExiting().  This
+    // is necessary during shutdown, where we want the invariant that
+    // after shutdownCapability(), all the Tasks associated with the
+    // Capability have completed their shutdown too.  Otherwise we
+    // could have boundTaskExiting()/workerTaskStop() running at some
+    // random point in the future, which causes problems for
+    // freeTaskManager().
+    ACQUIRE_LOCK(&cap->lock);
+    releaseCapability_(cap,rtsFalse);
 
     // Finally, we can release the Task to the free list.
     boundTaskExiting(task);
+    RELEASE_LOCK(&cap->lock);
 }
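Note: the invariant being protected here is easier to see in a reduced model. A sketch in plain pthreads (illustrative names; the RTS uses its own Mutex wrappers): if the "task has exited" bookkeeping runs outside the lock, a shutdown path that has taken and released the lock can still race with a task's teardown; doing the bookkeeping under the lock closes that window.

    #include <pthread.h>

    typedef struct {
        pthread_mutex_t lock;
        int live_tasks;
    } TaskPool;

    /* Analogue of the fixed rts_unlock: the teardown step (cf.
       boundTaskExiting in the RTS) happens under the same lock that
       shutdown synchronizes on, so once shutdown has held and released
       the lock, no teardown can still be pending. */
    void pool_task_exit (TaskPool *pool)
    {
        pthread_mutex_lock(&pool->lock);
        /* release-the-capability work goes here, then: */
        pool->live_tasks--;                 /* ~ boundTaskExiting(task) */
        pthread_mutex_unlock(&pool->lock);
    }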