X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2Fsm%2FStorage.c;h=9dea30e193fc1fb3e4ee88ce9a10840783585e8d;hb=1b62aecee4a58f52999cfa53f1c6b7744b29b808;hp=8d237c15f3df66936374879210da4ac0678b7240;hpb=73f77be09896b041dc77ce31b349a89267e662c6;p=ghc-hetmet.git

diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index 8d237c1..9dea30e 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -35,6 +35,8 @@
 #include <stdlib.h>
 #include <string.h>
 
+#include "ffi.h"
+
 /*
  * All these globals require sm_mutex to access in THREADED_RTS mode.
  */
@@ -46,6 +48,8 @@ bdescr *pinned_object_block;   /* allocate pinned objects into this block */
 nat alloc_blocks;        /* number of allocate()d blocks since GC */
 nat alloc_blocks_lim;    /* approximate limit on alloc_blocks */
 
+static bdescr *exec_block;
+
 generation *generations = NULL; /* all the generations */
 generation *g0          = NULL; /* generation 0, for convenience */
 generation *oldest_gen  = NULL; /* oldest generation, for convenience */
@@ -261,15 +265,20 @@ initStorage( void )
   alloc_blocks = 0;
   alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
 
+  exec_block = NULL;
+
   /* Tell GNU multi-precision pkg about our custom alloc functions */
   mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);
 
 #ifdef THREADED_RTS
   initSpinLock(&gc_alloc_block_sync);
-  initSpinLock(&recordMutableGen_sync);
   whitehole_spin = 0;
 #endif
 
+  N = 0;
+
+  initGcThreads();
+
   IF_DEBUG(gc, statDescribeGens());
 
   RELEASE_SM_LOCK;
@@ -366,7 +375,7 @@ newCAF(StgClosure* caf)
       * any more and can use it as a STATIC_LINK.
       */
      ((StgIndStatic *)caf)->saved_info = NULL;
-     recordMutableGen(caf, oldest_gen);
+     recordMutableGen(caf, oldest_gen->no);
   }
 
   RELEASE_SM_LOCK;
@@ -607,11 +616,19 @@ allocateInGen (generation *g, lnat n)
     if (RtsFlags.GcFlags.maxHeapSize > 0 &&
         req_blocks >= RtsFlags.GcFlags.maxHeapSize) {
         heapOverflow();
+        // heapOverflow() doesn't exit (see #2592), but we aren't
+        // in a position to do a clean shutdown here: we
+        // either have to allocate the memory or exit now.
+        // Allocating the memory would be bad, because the user
+        // has requested that we not exceed maxHeapSize, so we
+        // just exit.
+        stg_exit(EXIT_HEAPOVERFLOW);
     }
 
     bd = allocGroup(req_blocks);
     dbl_link_onto(bd, &stp->large_objects);
     stp->n_large_blocks += bd->blocks; // might be larger than req_blocks
+    alloc_blocks += bd->blocks;
     bd->gen_no = g->no;
     bd->step = stp;
     bd->flags = BF_LARGE;
@@ -661,8 +678,8 @@ allocatedBytes( void )
     return allocated;
 }
 
-// split N blocks off the start of the given bdescr, returning the
-// remainder as a new block group.  We treat the remainder as if it
+// split N blocks off the front of the given bdescr, returning the
+// new block group.  We treat the remainder as if it
 // had been freshly allocated in generation 0.
 bdescr *
 splitLargeBlock (bdescr *bd, nat blocks)
@@ -680,6 +697,7 @@ splitLargeBlock (bdescr *bd, nat blocks)
     new_bd->step  = g0s0;
     new_bd->flags = BF_LARGE;
     new_bd->free  = bd->free;
+    ASSERT(new_bd->free <= new_bd->start + new_bd->blocks * BLOCK_SIZE_W);
 
     // add the new number of blocks to the counter.  Due to the gaps
     // for block descriptor, new_bd->blocks + bd->blocks might not be
@@ -687,7 +705,7 @@ splitLargeBlock (bdescr *bd, nat blocks)
     bd->step->n_large_blocks += bd->blocks;
 
     return new_bd;
-}
+}
 
 /* -----------------------------------------------------------------------------
    allocateLocal()
@@ -737,7 +755,9 @@ allocateLocal (Capability *cap, lnat n)
         bd->flags = 0;
         // NO: alloc_blocks++;
         // calcAllocated() uses the size of the nursery, and we've
-        // already bumpted nursery->n_blocks above.
+        // already bumpted nursery->n_blocks above.  We'll GC
+        // pretty quickly now anyway, because MAYBE_GC() will
+        // notice that CurrentNursery->link is NULL.
     } else {
         // we have a block in the nursery: take it and put
         // it at the *front* of the nursery list, and use it
@@ -788,7 +808,9 @@ allocatePinned( lnat n )
     // If the request is for a large object, then allocate()
     // will give us a pinned object anyway.
     if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
-        return allocate(n);
+        p = allocate(n);
+        Bdescr(p)->flags |= BF_PINNED;
+        return p;
     }
 
     ACQUIRE_SM_LOCK;
@@ -796,13 +818,6 @@ allocatePinned( lnat n )
     TICK_ALLOC_HEAP_NOCTR(n);
     CCS_ALLOC(CCCS,n);
 
-    // we always return 8-byte aligned memory.  bd->free must be
-    // 8-byte aligned to begin with, so we just round up n to
-    // the nearest multiple of 8 bytes.
-    if (sizeof(StgWord) == 4) {
-        n = (n+1) & ~1;
-    }
-
     // If we don't have a block of pinned objects yet, or the current
     // one isn't large enough to hold the new object, allocate a new one.
     if (bd == NULL || (bd->free + n) > (bd->start + BLOCK_SIZE_W)) {
@@ -1133,9 +1148,37 @@ calcNeeded(void)
    should be modified to use allocateExec instead of VirtualAlloc.
    ------------------------------------------------------------------------- */
 
-static bdescr *exec_block;
+#if defined(linux_HOST_OS)
+
+// On Linux we need to use libffi for allocating executable memory,
+// because it knows how to work around the restrictions put in place
+// by SELinux.
+
+void *allocateExec (nat bytes, void **exec_ret)
+{
+    void **ret, **exec;
+    ACQUIRE_SM_LOCK;
+    ret = ffi_closure_alloc (sizeof(void *) + (size_t)bytes, (void**)&exec);
+    RELEASE_SM_LOCK;
+    if (ret == NULL) return ret;
+    *ret = ret; // save the address of the writable mapping, for freeExec().
+    *exec_ret = exec + 1;
+    return (ret + 1);
+}
 
-void *allocateExec (nat bytes)
+// freeExec gets passed the executable address, not the writable address.
+void freeExec (void *addr)
+{
+    void *writable;
+    writable = *((void**)addr - 1);
+    ACQUIRE_SM_LOCK;
+    ffi_closure_free (writable);
+    RELEASE_SM_LOCK
+}
+
+#else
+
+void *allocateExec (nat bytes, void **exec_ret)
 {
     void *ret;
     nat n;
@@ -1171,6 +1214,7 @@ void *allocateExec (nat bytes)
     exec_block->free += n + 1;
 
     RELEASE_SM_LOCK
+    *exec_ret = ret;
     return ret;
 }
 
@@ -1208,6 +1252,8 @@ void freeExec (void *addr)
     RELEASE_SM_LOCK
 }
 
+#endif /* mingw32_HOST_OS */
+
 /* -----------------------------------------------------------------------------
    Debugging
 
@@ -1432,9 +1478,6 @@ checkSanity( void )
                  == generations[g].steps[s].n_large_blocks);
             checkHeap(generations[g].steps[s].blocks);
             checkChain(generations[g].steps[s].large_objects);
-            if (g > 0) {
-                checkMutableList(generations[g].mut_list, g);
-            }
         }
     }
 
@@ -1451,9 +1494,9 @@ checkSanity( void )
 
 #if defined(THREADED_RTS)
     // check the stacks too in threaded mode, because we don't do a
     // full heap sanity check in this case (see checkHeap())
-    checkGlobalTSOList(rtsTrue);
+    checkMutableLists(rtsTrue);
 #else
-    checkGlobalTSOList(rtsFalse);
+    checkMutableLists(rtsFalse);
 #endif
 }
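
The Linux allocateExec/freeExec pair above changes the contract: callers now receive a writable address as the return value and a separate executable address through exec_ret, and the address of the writable mapping is stashed in the word just before the returned payload so that freeExec can recover it from the executable address alone. The sketch below is not RTS code: it is a minimal, self-contained illustration of that header-word bookkeeping, using plain malloc as a stand-in for the writable/executable pair that ffi_closure_alloc would hand back, and the names alloc_exec_demo/free_exec_demo are invented for the example.

    #include <stdio.h>
    #include <stdlib.h>

    /* Allocate 'bytes' of payload plus one header word.  The header word
     * remembers the start of the writable block; the caller gets the
     * writable payload as the return value and the "executable" payload
     * through *exec_ret.  In this demo both views are the same malloc'd
     * memory, whereas ffi_closure_alloc may return two distinct mappings. */
    static void *alloc_exec_demo (size_t bytes, void **exec_ret)
    {
        void **ret = malloc(sizeof(void *) + bytes);
        if (ret == NULL) return NULL;
        *ret = ret;            /* word 0: the address to release later      */
        *exec_ret = ret + 1;   /* "executable" view of the payload          */
        return ret + 1;        /* writable view of the payload              */
    }

    /* Like freeExec, this takes the *executable* address and looks one
     * word back to find the writable block that must be released. */
    static void free_exec_demo (void *exec_addr)
    {
        void *writable = *((void **)exec_addr - 1);
        free(writable);
    }

    int main (void)
    {
        void *exec = NULL;
        unsigned char *w = alloc_exec_demo(16, &exec);
        if (w == NULL) return 1;
        w[0] = 0xC3;   /* pretend to emit code through the writable view */
        printf("writable = %p, exec = %p\n", (void *)w, exec);
        free_exec_demo(exec);
        return 0;
    }

The point of the translation step in freeExec is that, as the diff's comments note, on SELinux systems the writable and executable views can be two different mappings of the same memory, and ffi_closure_free expects the writable pointer originally returned by ffi_closure_alloc, not the executable one that callers jump through.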