X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2FRtsAPI.c;h=0748871ca701b8580d9eebe193e8be41449e3f56;hb=1a77fd719aef44c2f91a25ddf312c70651bce1f1;hp=d0bdead1c9b36181ada4c01de354e1e4085e1a35;hpb=5cbe7adb6051a9d1738dfb5735c8c923b74c5945;p=ghc-hetmet.git

diff --git a/rts/RtsAPI.c b/rts/RtsAPI.c
index d0bdead..0748871 100644
--- a/rts/RtsAPI.c
+++ b/rts/RtsAPI.c
@@ -74,6 +74,31 @@ rts_mkInt32 (Capability *cap, HsInt32 i)
     return p;
 }
 
+
+#ifdef sparc_HOST_ARCH
+/* The closures returned by allocateLocal are only guaranteed to be 32 bit
+   aligned, because that's the size of pointers.  SPARC v9 can't do
+   misaligned loads/stores, so we have to write the 64 bit word in chunks */
+
+HaskellObj
+rts_mkInt64 (Capability *cap, HsInt64 i_)
+{
+  StgInt64 i = (StgInt64)i_;
+  StgInt32 *tmp;
+
+  StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,2));
+  SET_HDR(p, I64zh_con_info, CCS_SYSTEM);
+
+  tmp = (StgInt32*)&(p->payload[0]);
+
+  tmp[0] = (StgInt32)(i >> 32);   /* high word */
+  tmp[1] = (StgInt32)i;           /* low word; the cast drops the high 32 bits */
+
+  return p;
+}
+
+#else
+
 HaskellObj
 rts_mkInt64 (Capability *cap, HsInt64 i)
 {
@@ -85,6 +110,9 @@ rts_mkInt64 (Capability *cap, HsInt64 i)
     return p;
 }
 
+#endif /* sparc_HOST_ARCH */
+
+
 HaskellObj
 rts_mkWord (Capability *cap, HsWord i)
 {
@@ -124,6 +152,31 @@ rts_mkWord32 (Capability *cap, HsWord32 w)
     return p;
 }
 
+
+#ifdef sparc_HOST_ARCH
+/* The closures returned by allocateLocal are only guaranteed to be 32 bit
+   aligned, because that's the size of pointers.  SPARC v9 can't do
+   misaligned loads/stores, so we have to write the 64 bit word in chunks */
+
+HaskellObj
+rts_mkWord64 (Capability *cap, HsWord64 w_)
+{
+  StgWord64 w = (StgWord64)w_;
+  StgWord32 *tmp;
+
+  StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,2));
+  /* for the truncation below, see the rts_mkInt8 comment */
+  SET_HDR(p, W64zh_con_info, CCS_SYSTEM);
+
+  tmp = (StgWord32*)&(p->payload[0]);
+
+  tmp[0] = (StgWord32)(w >> 32);  /* high word */
+  tmp[1] = (StgWord32)w;          /* low word; the cast drops the high 32 bits */
+  return p;
+}
+
+#else
+
 HaskellObj
 rts_mkWord64 (Capability *cap, HsWord64 w)
 {
@@ -137,6 +190,9 @@ rts_mkWord64 (Capability *cap, HsWord64 w)
     return p;
 }
 
+#endif
+
+
 HaskellObj
 rts_mkFloat (Capability *cap, HsFloat f)
 {
@@ -264,6 +320,27 @@ rts_getInt32 (HaskellObj p)
     return (HsInt32)(HsInt)(UNTAG_CLOSURE(p)->payload[0]);
 }
 
+
+#ifdef sparc_HOST_ARCH
+/* The closures returned by allocateLocal are only guaranteed to be 32 bit
+   aligned, because that's the size of pointers.  SPARC v9 can't do
+   misaligned loads/stores, so we have to read the 64 bit word in chunks */
+
+HsInt64
+rts_getInt64 (HaskellObj p)
+{
+    HsInt32* tmp;
+    // See comment above:
+    // ASSERT(p->header.info == I64zh_con_info ||
+    //        p->header.info == I64zh_static_info);
+    tmp = (HsInt32*)&(UNTAG_CLOSURE(p)->payload[0]);
+
+    HsInt64 i = ((HsInt64)tmp[0] << 32) | (HsInt64)(HsWord32)tmp[1];  /* zero-extend low word */
+    return i;
+}
+
+#else
+
 HsInt64
 rts_getInt64 (HaskellObj p)
 {
@@ -274,6 +351,10 @@ rts_getInt64 (HaskellObj p)
     tmp = (HsInt64*)&(UNTAG_CLOSURE(p)->payload[0]);
     return *tmp;
 }
+
+#endif /* sparc_HOST_ARCH */
+
+
 HsWord
 rts_getWord (HaskellObj p)
 {
@@ -311,6 +392,26 @@ rts_getWord32 (HaskellObj p)
 }
 
 
+#ifdef sparc_HOST_ARCH
+/* The closures returned by allocateLocal are only guaranteed to be 32 bit
+   aligned, because that's the size of pointers.
+   SPARC v9 can't do misaligned loads/stores, so we have to read the 64 bit word in chunks */
+
+HsWord64
+rts_getWord64 (HaskellObj p)
+{
+    HsWord32* tmp;
+    // See comment above:
+    // ASSERT(p->header.info == W64zh_con_info ||
+    //        p->header.info == W64zh_static_info);
+    tmp = (HsWord32*)&(UNTAG_CLOSURE(p)->payload[0]);
+
+    HsWord64 w = ((HsWord64)tmp[0] << 32) | (HsWord64)tmp[1];
+    return w;
+}
+
+#else
+
 HsWord64
 rts_getWord64 (HaskellObj p)
 {
@@ -322,6 +423,9 @@ rts_getWord64 (HaskellObj p)
     return *tmp;
 }
 
+#endif
+
+
 HsFloat
 rts_getFloat (HaskellObj p)
 {
@@ -393,11 +497,7 @@ StgTSO *
 createGenThread (Capability *cap, nat stack_size, StgClosure *closure)
 {
   StgTSO *t;
-#if defined(GRAN)
-  t = createThread (cap, stack_size, NO_PRI);
-#else
   t = createThread (cap, stack_size);
-#endif
   pushClosure(t, (W_)closure);
   pushClosure(t, (W_)&stg_enter_info);
   return t;
@@ -407,11 +507,7 @@ StgTSO *
 createIOThread (Capability *cap, nat stack_size, StgClosure *closure)
 {
   StgTSO *t;
-#if defined(GRAN)
-  t = createThread (cap, stack_size, NO_PRI);
-#else
   t = createThread (cap, stack_size);
-#endif
   pushClosure(t, (W_)&stg_noforceIO_info);
   pushClosure(t, (W_)&stg_ap_v_info);
   pushClosure(t, (W_)closure);
@@ -428,11 +524,7 @@ StgTSO *
 createStrictIOThread(Capability *cap, nat stack_size, StgClosure *closure)
 {
   StgTSO *t;
-#if defined(GRAN)
-  t = createThread(cap, stack_size, NO_PRI);
-#else
   t = createThread(cap, stack_size);
-#endif
   pushClosure(t, (W_)&stg_forceIO_info);
   pushClosure(t, (W_)&stg_ap_v_info);
   pushClosure(t, (W_)closure);
@@ -560,13 +652,7 @@ rts_lock (void)
     Capability *cap;
     Task *task;
 
-    // ToDo: get rid of this lock in the common case.  We could store
-    // a free Task in thread-local storage, for example.  That would
-    // leave just one lock on the path into the RTS: cap->lock when
-    // acquiring the Capability.
-    ACQUIRE_LOCK(&sched_mutex);
     task = newBoundTask();
-    RELEASE_LOCK(&sched_mutex);
 
     cap = NULL;
     waitForReturnCapability(&cap, task);
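
Why the chunked accesses in this patch work: allocateLocal only guarantees pointer-size (32 bit) alignment for these payloads, so on 32 bit SPARC a single 64 bit load or store can land on a 4-byte boundary and trap. Splitting each access into two aligned 32 bit halves, high word first, avoids that. The subtle part is reassembly: the low half must be widened as an unsigned value, since a set top bit in a signed low word would otherwise sign-extend and corrupt the high word. A minimal standalone sketch of the pattern (plain C, not part of the patch; pack_u64/unpack_u64 are illustrative names only):

#include <stdint.h>
#include <assert.h>

/* Write a 64 bit value through a buffer that is only guaranteed to be
   4-byte aligned, as two aligned 32 bit stores (high word first, as in
   the patch). */
static void pack_u64 (uint32_t *dst, uint64_t v)
{
    dst[0] = (uint32_t)(v >> 32);   /* high word */
    dst[1] = (uint32_t)v;           /* low word; the cast drops the rest */
}

/* Read it back.  The halves are recombined as unsigned quantities: if
   the low word were widened through a signed type, a set top bit would
   sign-extend and corrupt the high word of the result. */
static uint64_t unpack_u64 (const uint32_t *src)
{
    return ((uint64_t)src[0] << 32) | (uint64_t)src[1];
}

int main (void)
{
    uint32_t buf[2];                     /* 4-byte alignment suffices */
    uint64_t v = 0xDEADBEEF80000001ULL;  /* low word has its top bit set */

    pack_u64(buf, v);
    assert(unpack_u64(buf) == v);        /* exact round trip */
    return 0;
}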
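
For callers none of this is visible: the SPARC and generic paths sit behind the same RtsAPI entry points and behave identically. A hedged sketch of a foreign C caller round-tripping an HsInt64 through these functions, assuming the standard HsFFI.h/RtsAPI.h declarations and a program built along the lines of ghc -no-hs-main (not part of the patch):

/* Sketch only: exercising the boxed 64 bit path via the public RTS API. */
#include "HsFFI.h"
#include "RtsAPI.h"

static HsInt64 roundtrip (HsInt64 i)
{
    Capability *cap = rts_lock();           /* enter the RTS */
    HaskellObj box  = rts_mkInt64(cap, i);  /* allocate the I64zh closure */
    HsInt64 back    = rts_getInt64(box);    /* read the payload back */
    rts_unlock(cap);                        /* leave the RTS */
    return back;                            /* equal to i on every platform */
}

int main (int argc, char *argv[])
{
    hs_init(&argc, &argv);                  /* start the RTS */
    HsInt64 v  = ((HsInt64)1 << 40) | 0x80000001u;  /* low word top bit set */
    int     ok = (roundtrip(v) == v);
    hs_exit();
    return ok ? 0 : 1;
}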