X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2Fsm%2FStorage.c;h=ebb33f4cd4640dfea434c4d045a626e4ccb0fc3a;hb=e562d3a5cefc282213f64f2a3111007ef7987c8b;hp=bf7c452d9be185b4954e37cb567a032e40845524;hpb=3ebcd3deb769a03f4ded0fca2cf38201048c0214;p=ghc-hetmet.git

diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index bf7c452..ebb33f4 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -69,10 +69,6 @@ step *nurseries = NULL; /* array of nurseries, >1 only if THREADED_RTS *
  * simultaneous access by two STG threads.
  */
 Mutex sm_mutex;
-/*
- * This mutex is used by atomicModifyMutVar# only
- */
-Mutex atomic_modify_mutvar_mutex;
 
 #endif
 
@@ -104,7 +100,6 @@ initStep (step *stp, int g, int s)
     stp->compact = 0;
     stp->bitmap = NULL;
 #ifdef THREADED_RTS
-    initSpinLock(&stp->sync_todo);
     initSpinLock(&stp->sync_large_objects);
 #endif
     stp->threads = END_TSO_QUEUE;
@@ -149,7 +144,6 @@ initStorage( void )
 
 #if defined(THREADED_RTS)
   initMutex(&sm_mutex);
-  initMutex(&atomic_modify_mutvar_mutex);
 #endif
 
   ACQUIRE_SM_LOCK;
@@ -272,7 +266,6 @@ initStorage( void )
 
 #ifdef THREADED_RTS
   initSpinLock(&gc_alloc_block_sync);
-  initSpinLock(&recordMutableGen_sync);
   whitehole_spin = 0;
 #endif
 
@@ -299,7 +292,6 @@ freeStorage (void)
   freeAllMBlocks();
 #if defined(THREADED_RTS)
   closeMutex(&sm_mutex);
-  closeMutex(&atomic_modify_mutvar_mutex);
 #endif
   stgFree(nurseries);
 }
@@ -376,7 +368,7 @@ newCAF(StgClosure* caf)
      * any more and can use it as a STATIC_LINK.
      */
     ((StgIndStatic *)caf)->saved_info = NULL;
-    recordMutableGen(caf, oldest_gen);
+    recordMutableGen(caf, oldest_gen->no);
   }
 
   RELEASE_SM_LOCK;
@@ -617,6 +609,13 @@ allocateInGen (generation *g, lnat n)
     if (RtsFlags.GcFlags.maxHeapSize > 0 &&
         req_blocks >= RtsFlags.GcFlags.maxHeapSize) {
         heapOverflow();
+        // heapOverflow() doesn't exit (see #2592), but we aren't
+        // in a position to do a clean shutdown here: we
+        // either have to allocate the memory or exit now.
+        // Allocating the memory would be bad, because the user
+        // has requested that we not exceed maxHeapSize, so we
+        // just exit.
+        stg_exit(EXIT_HEAPOVERFLOW);
     }
 
     bd = allocGroup(req_blocks);
@@ -802,7 +801,9 @@ allocatePinned( lnat n )
     // If the request is for a large object, then allocate()
    // will give us a pinned object anyway.
     if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
-        return allocate(n);
+        p = allocate(n);
+        Bdescr(p)->flags |= BF_PINNED;
+        return p;
     }
 
     ACQUIRE_SM_LOCK;
@@ -810,13 +811,6 @@ allocatePinned( lnat n )
     TICK_ALLOC_HEAP_NOCTR(n);
     CCS_ALLOC(CCCS,n);
 
-    // we always return 8-byte aligned memory.  bd->free must be
-    // 8-byte aligned to begin with, so we just round up n to
-    // the nearest multiple of 8 bytes.
-    if (sizeof(StgWord) == 4) {
-        n = (n+1) & ~1;
-    }
-
     // If we don't have a block of pinned objects yet, or the current
     // one isn't large enough to hold the new object, allocate a new one.
     if (bd == NULL || (bd->free + n) > (bd->start + BLOCK_SIZE_W)) {
@@ -1465,7 +1459,7 @@ checkSanity( void )
 
     if (RtsFlags.GcFlags.generations == 1) {
         checkHeap(g0s0->blocks);
-        checkChain(g0s0->large_objects);
+        checkLargeObjects(g0s0->large_objects);
     } else {
 
         for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
@@ -1476,10 +1470,7 @@ checkSanity( void )
                 ASSERT(countBlocks(generations[g].steps[s].large_objects)
                        == generations[g].steps[s].n_large_blocks);
                 checkHeap(generations[g].steps[s].blocks);
-                checkChain(generations[g].steps[s].large_objects);
-                if (g > 0) {
-                    checkMutableList(generations[g].mut_list, g);
-                }
+                checkLargeObjects(generations[g].steps[s].large_objects);
             }
         }
 
@@ -1496,9 +1487,9 @@ checkSanity( void )
 
 #if defined(THREADED_RTS)
     // check the stacks too in threaded mode, because we don't do a
     // full heap sanity check in this case (see checkHeap())
-    checkGlobalTSOList(rtsTrue);
+    checkMutableLists(rtsTrue);
 #else
-    checkGlobalTSOList(rtsFalse);
+    checkMutableLists(rtsFalse);
 #endif
 }
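
Note on the @@ -617,6 +609,13 @@ hunk: heapOverflow() only reports the condition (see #2592), it does not terminate, and silently allocating past the user-requested maxHeapSize would break the -M contract, so allocateInGen has to exit itself. Below is a minimal standalone sketch of that policy, not RTS code: BLOCK_SIZE, alloc_blocks, heap_overflow_report, and the EXIT_HEAPOVERFLOW value are assumptions for illustration.

#include <stdio.h>
#include <stdlib.h>

#define EXIT_HEAPOVERFLOW 251   /* assumed value of GHC's exit code */
#define BLOCK_SIZE 4096         /* hypothetical block size in bytes */

/* Stand-in for heapOverflow(): reports the overflow but, as in the
 * RTS (see #2592), does not exit -- the caller must do that. */
static void heap_overflow_report(void)
{
    fprintf(stderr, "Heap exhausted; use `+RTS -M<size>' to raise the limit.\n");
}

/* Hypothetical allocator mirroring the allocateInGen guard above:
 * once the request reaches the user-requested maximum, report and
 * exit immediately rather than allocate past the limit. */
static void *alloc_blocks(size_t req_blocks, size_t max_heap_blocks)
{
    if (max_heap_blocks > 0 && req_blocks >= max_heap_blocks) {
        heap_overflow_report();
        exit(EXIT_HEAPOVERFLOW);   /* mirrors stg_exit(EXIT_HEAPOVERFLOW) */
    }
    return malloc(req_blocks * BLOCK_SIZE);
}

int main(void)
{
    void *p = alloc_blocks(8, 1024);   /* under the limit: succeeds */
    printf("allocated %p\n", p);
    free(p);
    alloc_blocks(4096, 1024);          /* at/over the limit: exits */
    return 0;
}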
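
Note on the @@ -802,7 +801,9 @@ hunk: the large-object path of allocatePinned() used to return allocate(n) directly, leaving the block descriptor unmarked; the fix also sets BF_PINNED so the "pinned memory never moves" invariant is recorded explicitly. A toy sketch of the idea follows; every type and name is a hypothetical stand-in (GHC's real Bdescr() maps an address to its block descriptor, whereas here one descriptor is threaded in by hand).

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BF_LARGE  (1u << 0)    /* illustrative flag values */
#define BF_PINNED (1u << 1)

typedef struct { uint32_t flags; void *start; } bdescr_t;

/* toy large-object allocator: marks the block BF_LARGE only */
static void *alloc_large(size_t bytes, bdescr_t *bd)
{
    bd->start = malloc(bytes);
    bd->flags = BF_LARGE;
    return bd->start;
}

/* the fix, in miniature: a pinned request served by the large-object
 * path must also flag the block pinned, not merely rely on large
 * objects happening not to move */
static void *alloc_pinned(size_t bytes, bdescr_t *bd)
{
    void *p = alloc_large(bytes, bd);
    bd->flags |= BF_PINNED;
    return p;
}

int main(void)
{
    bdescr_t bd;
    void *p = alloc_pinned(1 << 20, &bd);
    printf("large=%d pinned=%d p=%p\n",
           !!(bd.flags & BF_LARGE), !!(bd.flags & BF_PINNED), p);
    free(p);
    return 0;
}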