X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2Fsm%2FStorage.c;h=9320232e97dd1c2ce9f480613455914c3da10b40;hb=0f10e070774287eb7cb07689207777a64cc31414;hp=6f6d59101ea6a1db2f4c6762a4a8ccd635c35322;hpb=6a405b1efd138a4af4ed93ce4ff173a4c5704512;p=ghc-hetmet.git

diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index 6f6d591..9320232 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -69,20 +69,9 @@ step *nurseries = NULL; /* array of nurseries, >1 only if THREADED_RTS *
  * simultaneous access by two STG threads.
  */
 Mutex sm_mutex;
-/*
- * This mutex is used by atomicModifyMutVar# only
- */
-Mutex atomic_modify_mutvar_mutex;
 #endif
 
-/*
- * Forward references
- */
-static void *stgAllocForGMP (size_t size_in_bytes);
-static void *stgReallocForGMP (void *ptr, size_t old_size, size_t new_size);
-static void stgDeallocForGMP (void *ptr, size_t size);
-
 static void
 initStep (step *stp, int g, int s)
 {
@@ -104,7 +93,6 @@ initStep (step *stp, int g, int s)
     stp->compact = 0;
     stp->bitmap = NULL;
 #ifdef THREADED_RTS
-    initSpinLock(&stp->sync_todo);
     initSpinLock(&stp->sync_large_objects);
 #endif
     stp->threads = END_TSO_QUEUE;
@@ -149,7 +137,6 @@ initStorage( void )
 
 #if defined(THREADED_RTS)
     initMutex(&sm_mutex);
-    initMutex(&atomic_modify_mutvar_mutex);
 #endif
 
     ACQUIRE_SM_LOCK;
@@ -267,9 +254,6 @@ initStorage( void )
 
     exec_block = NULL;
 
-    /* Tell GNU multi-precision pkg about our custom alloc functions */
-    mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);
-
 #ifdef THREADED_RTS
     initSpinLock(&gc_alloc_block_sync);
     whitehole_spin = 0;
@@ -298,7 +282,6 @@ freeStorage (void)
     freeAllMBlocks();
 #if defined(THREADED_RTS)
     closeMutex(&sm_mutex);
-    closeMutex(&atomic_modify_mutvar_mutex);
 #endif
     stgFree(nurseries);
 }
@@ -808,7 +791,9 @@ allocatePinned( lnat n )
     // If the request is for a large object, then allocate()
     // will give us a pinned object anyway.
     if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
-        return allocate(n);
+        p = allocate(n);
+        Bdescr(p)->flags |= BF_PINNED;
+        return p;
     }
 
     ACQUIRE_SM_LOCK;
@@ -816,13 +801,6 @@ allocatePinned( lnat n )
     TICK_ALLOC_HEAP_NOCTR(n);
     CCS_ALLOC(CCCS,n);
 
-    // we always return 8-byte aligned memory. bd->free must be
-    // 8-byte aligned to begin with, so we just round up n to
-    // the nearest multiple of 8 bytes.
-    if (sizeof(StgWord) == 4) {
-        n = (n+1) & ~1;
-    }
-
     // If we don't have a block of pinned objects yet, or the current
     // one isn't large enough to hold the new object, allocate a new one.
     if (bd == NULL || (bd->free + n) > (bd->start + BLOCK_SIZE_W)) {
@@ -911,63 +889,6 @@ dirty_MVAR(StgRegTable *reg, StgClosure *p)
 }
 
 /* -----------------------------------------------------------------------------
-   Allocation functions for GMP.
-
-   These all use the allocate() interface - we can't have any garbage
-   collection going on during a gmp operation, so we use allocate()
-   which always succeeds. The gmp operations which might need to
-   allocate will ask the storage manager (via doYouWantToGC()) whether
-   a garbage collection is required, in case we get into a loop doing
-   only allocate() style allocation.
-   -------------------------------------------------------------------------- */
-
-static void *
-stgAllocForGMP (size_t size_in_bytes)
-{
-  StgArrWords* arr;
-  nat data_size_in_words, total_size_in_words;
-
-  /* round up to a whole number of words */
-  data_size_in_words = (size_in_bytes + sizeof(W_) + 1) / sizeof(W_);
-  total_size_in_words = sizeofW(StgArrWords) + data_size_in_words;
-
-  /* allocate and fill it in. */
-#if defined(THREADED_RTS)
-  arr = (StgArrWords *)allocateLocal(myTask()->cap, total_size_in_words);
-#else
-  arr = (StgArrWords *)allocateLocal(&MainCapability, total_size_in_words);
-#endif
-  SET_ARR_HDR(arr, &stg_ARR_WORDS_info, CCCS, data_size_in_words);
-
-  /* and return a ptr to the goods inside the array */
-  return arr->payload;
-}
-
-static void *
-stgReallocForGMP (void *ptr, size_t old_size, size_t new_size)
-{
-    size_t min_size;
-    void *new_stuff_ptr = stgAllocForGMP(new_size);
-    nat i = 0;
-    char *p = (char *) ptr;
-    char *q = (char *) new_stuff_ptr;
-
-    min_size = old_size < new_size ? old_size : new_size;
-    for (; i < min_size; i++, p++, q++) {
-        *q = *p;
-    }
-
-    return(new_stuff_ptr);
-}
-
-static void
-stgDeallocForGMP (void *ptr STG_UNUSED,
-                  size_t size STG_UNUSED)
-{
-    /* easy for us: the garbage collector does the dealloc'n */
-}
-
-/* -----------------------------------------------------------------------------
  * Stats and stuff
  * -------------------------------------------------------------------------- */
 
@@ -1471,7 +1392,7 @@ checkSanity( void )
 
     if (RtsFlags.GcFlags.generations == 1) {
         checkHeap(g0s0->blocks);
-        checkChain(g0s0->large_objects);
+        checkLargeObjects(g0s0->large_objects);
     } else {
 
         for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
@@ -1482,10 +1403,7 @@ checkSanity( void )
             ASSERT(countBlocks(generations[g].steps[s].large_objects)
                 == generations[g].steps[s].n_large_blocks);
             checkHeap(generations[g].steps[s].blocks);
-            checkChain(generations[g].steps[s].large_objects);
-            if (g > 0) {
-                checkMutableList(generations[g].mut_list, g);
-            }
+            checkLargeObjects(generations[g].steps[s].large_objects);
         }
     }
 
@@ -1502,9 +1420,9 @@ checkSanity( void )
 #if defined(THREADED_RTS)
     // check the stacks too in threaded mode, because we don't do a
     // full heap sanity check in this case (see checkHeap())
-    checkGlobalTSOList(rtsTrue);
+    checkMutableLists(rtsTrue);
 #else
-    checkGlobalTSOList(rtsFalse);
+    checkMutableLists(rtsFalse);
 #endif
 }
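
The most substantive behavioural change in the diff above is in allocatePinned(): a request at or above the large-object threshold is still served by allocate(), but the block descriptor of the returned object is now explicitly flagged BF_PINNED before the pointer is handed back. The sketch below restates just that branch for clarity; it is not the full function. allocatePinned_large_sketch is a hypothetical name, and the identifiers it uses (lnat, StgPtr, allocate, Bdescr, BF_PINNED, LARGE_OBJECT_THRESHOLD, W_) are assumed to be in scope via the same storage-manager headers that rts/sm/Storage.c itself includes.

/* Illustrative sketch only: a paraphrase of the patched large-object branch
 * of allocatePinned().  Small requests are served from the current pinned
 * block under the SM lock in the real code; that path is omitted here. */
#include "Rts.h"   /* assumed to pull in the storage-manager declarations */

StgPtr
allocatePinned_large_sketch (lnat n)    /* n is a request size in words */
{
    StgPtr p;

    /* A request of at least LARGE_OBJECT_THRESHOLD bytes (the comparison is
     * done in words) goes through the ordinary allocate() path, which places
     * it in a large-object block that the GC never moves. */
    if (n >= LARGE_OBJECT_THRESHOLD / sizeof(W_)) {
        p = allocate(n);
        /* New in this patch: mark the block as pinned explicitly, so the
         * rest of the storage manager can recognise it as such. */
        Bdescr(p)->flags |= BF_PINNED;
        return p;
    }

    /* Smaller requests fall through to the bump allocator for the current
     * pinned block (taken under ACQUIRE_SM_LOCK in the code above); that
     * path is not reproduced in this sketch. */
    return NULL;
}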