X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2Fsm%2FStorage.c;h=6c45cbed594a94c702af06d44d3fec4a57b0990f;hb=ebfa6fde6d9797ad2434a2af73a4c85b2984e00a;hp=69e441de5f6fb7e9378c04eebe79d46e47d805d4;hpb=74ee9df9f9e79e7110e9d8541b84010f35c464c5;p=ghc-hetmet.git

diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index 69e441d..6c45cbe 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -35,6 +35,8 @@
 #include <stdlib.h>
 #include <string.h>
 
+#include "ffi.h"
+
 /*
  * All these globals require sm_mutex to access in THREADED_RTS mode.
  */
@@ -46,6 +48,8 @@
 bdescr *pinned_object_block;    /* allocate pinned objects into this block */
 nat alloc_blocks;               /* number of allocate()d blocks since GC */
 nat alloc_blocks_lim;           /* approximate limit on alloc_blocks */
 
+static bdescr *exec_block;
+
 generation *generations = NULL; /* all the generations */
 generation *g0          = NULL; /* generation 0, for convenience */
 generation *oldest_gen  = NULL; /* oldest generation, for convenience */
@@ -124,7 +128,7 @@ initStorage( void )
    * doing something reasonable.
    */
   /* We use the NOT_NULL variant or gcc warns that the test is always true */
-  ASSERT(LOOKS_LIKE_INFO_PTR_NOT_NULL(&stg_BLACKHOLE_info));
+  ASSERT(LOOKS_LIKE_INFO_PTR_NOT_NULL((StgWord)&stg_BLACKHOLE_info));
   ASSERT(LOOKS_LIKE_CLOSURE_PTR(&stg_dummy_ret_closure));
   ASSERT(!HEAP_ALLOCED(&stg_dummy_ret_closure));
 
@@ -261,6 +265,8 @@ initStorage( void )
   alloc_blocks = 0;
   alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
 
+  exec_block = NULL;
+
   /* Tell GNU multi-precision pkg about our custom alloc functions */
   mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);
 
@@ -585,7 +591,7 @@ move_TSO (StgTSO *src, StgTSO *dest)
    -------------------------------------------------------------------------- */
 
 StgPtr
-allocateInGen (generation *g, nat n)
+allocateInGen (generation *g, lnat n)
 {
     step *stp;
     bdescr *bd;
@@ -600,7 +606,7 @@ allocateInGen (generation *g, nat n)
 
     if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
 
-        nat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
+        lnat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
 
         // Attempting to allocate an object larger than maxHeapSize
         // should definitely be disallowed.  (bug #1791)
@@ -612,6 +618,7 @@ allocateInGen (generation *g, nat n)
         bd = allocGroup(req_blocks);
         dbl_link_onto(bd, &stp->large_objects);
         stp->n_large_blocks += bd->blocks; // might be larger than req_blocks
+        alloc_blocks += bd->blocks;
         bd->gen_no  = g->no;
         bd->step = stp;
         bd->flags = BF_LARGE;
@@ -642,7 +649,7 @@ allocateInGen (generation *g, nat n)
 }
 
 StgPtr
-allocate (nat n)
+allocate (lnat n)
 {
     return allocateInGen(g0,n);
 }
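The nat-to-lnat widening in these allocation entry points matters on 64-bit platforms, where nat is a 32-bit unsigned int but a word count can exceed 2^32. A minimal standalone sketch of the failure mode (plain C; the 4096-byte block size and 8-byte word are assumptions standing in for the RTS's real BLOCK_SIZE and sizeof(W_)):

    #include <stdio.h>
    #include <stdint.h>

    #define BLOCK_SIZE 4096   /* assumed stand-in for the RTS's BLOCK_SIZE */

    /* Round a byte count up to whole blocks, mirroring the
     * BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE arithmetic above. */
    static uint64_t bytes_to_blocks (uint64_t bytes)
    {
        return (bytes + BLOCK_SIZE - 1) / BLOCK_SIZE;
    }

    int main (void)
    {
        uint64_t n_words = (uint64_t)1 << 32;   /* a 32 GB-plus request */
        uint32_t as_nat  = (uint32_t)n_words;   /* truncates to 0 */

        /* prints 0 blocks: the request silently vanishes */
        printf("via 32-bit nat:     %llu blocks\n",
               (unsigned long long) bytes_to_blocks((uint64_t)as_nat * 8));
        /* prints 8388608 blocks: the request is preserved */
        printf("via word-sized lnat: %llu blocks\n",
               (unsigned long long) bytes_to_blocks(n_words * 8));
        return 0;
    }

Keeping the (lnat) cast inside BLOCK_ROUND_UP matters for the same reason: n*sizeof(W_) must be computed at word width before rounding.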
@@ -661,8 +668,8 @@
     return allocated;
 }
 
-// split N blocks off the start of the given bdescr, returning the
-// remainder as a new block group.  We treat the remainder as if it
+// split N blocks off the front of the given bdescr, returning the
+// new block group.  We treat the remainder as if it
 // had been freshly allocated in generation 0.
 bdescr *
 splitLargeBlock (bdescr *bd, nat blocks)
@@ -680,6 +687,7 @@ splitLargeBlock (bdescr *bd, nat blocks)
     new_bd->step    = g0s0;
     new_bd->flags   = BF_LARGE;
     new_bd->free    = bd->free;
+    ASSERT(new_bd->free <= new_bd->start + new_bd->blocks * BLOCK_SIZE_W);
 
     // add the new number of blocks to the counter.  Due to the gaps
     // for block descriptors, new_bd->blocks + bd->blocks might not be
@@ -687,7 +695,7 @@ splitLargeBlock (bdescr *bd, nat blocks)
     bd->step->n_large_blocks += bd->blocks;
 
     return new_bd;
-}
+}
 
 /* -----------------------------------------------------------------------------
    allocateLocal()
@@ -703,7 +711,7 @@ splitLargeBlock (bdescr *bd, nat blocks)
    -------------------------------------------------------------------------- */
 
 StgPtr
-allocateLocal (Capability *cap, nat n)
+allocateLocal (Capability *cap, lnat n)
 {
     bdescr *bd;
     StgPtr p;
@@ -737,7 +745,9 @@ allocateLocal (Capability *cap, nat n)
         bd->flags = 0;
         // NO: alloc_blocks++;
         // calcAllocated() uses the size of the nursery, and we've
-        // already bumpted nursery->n_blocks above.
+        // already bumped nursery->n_blocks above.  We'll GC
+        // pretty quickly now anyway, because MAYBE_GC() will
+        // notice that CurrentNursery->link is NULL.
     } else {
         // we have a block in the nursery: take it and put
         // it at the *front* of the nursery list, and use it
@@ -780,7 +790,7 @@
    ------------------------------------------------------------------------- */
 
 StgPtr
-allocatePinned( nat n )
+allocatePinned( lnat n )
 {
     StgPtr p;
     bdescr *bd = pinned_object_block;
@@ -926,12 +936,14 @@ stgAllocForGMP (size_t size_in_bytes)
 static void *
 stgReallocForGMP (void *ptr, size_t old_size, size_t new_size)
 {
+    size_t min_size;
     void *new_stuff_ptr = stgAllocForGMP(new_size);
     nat i = 0;
     char *p = (char *) ptr;
     char *q = (char *) new_stuff_ptr;
 
-    for (; i < old_size; i++, p++, q++) {
+    min_size = old_size < new_size ? old_size : new_size;
+    for (; i < min_size; i++, p++, q++) {
         *q = *p;
     }
 
@@ -1131,9 +1143,37 @@ calcNeeded(void)
    should be modified to use allocateExec instead of VirtualAlloc.
    ------------------------------------------------------------------------- */
 
-static bdescr *exec_block;
+#if defined(linux_HOST_OS)
 
-void *allocateExec (nat bytes)
+// On Linux we need to use libffi for allocating executable memory,
+// because it knows how to work around the restrictions put in place
+// by SELinux.
+
+void *allocateExec (nat bytes, void **exec_ret)
+{
+    void **ret, **exec;
+    ACQUIRE_SM_LOCK;
+    ret = ffi_closure_alloc (sizeof(void *) + (size_t)bytes, (void**)&exec);
+    RELEASE_SM_LOCK;
+    if (ret == NULL) return ret;
+    *ret = ret; // save the address of the writable mapping, for freeExec().
+    *exec_ret = exec + 1;
+    return (ret + 1);
+}
+
+// freeExec gets passed the executable address, not the writable address.
+void freeExec (void *addr)
+{
+    void *writable;
+    writable = *((void**)addr - 1);
+    ACQUIRE_SM_LOCK;
+    ffi_closure_free (writable);
+    RELEASE_SM_LOCK
+}
+
+#else
+
+void *allocateExec (nat bytes, void **exec_ret)
 {
     void *ret;
     nat n;
@@ -1169,6 +1209,7 @@ void *allocateExec (nat bytes)
     exec_block->free += n + 1;
 
     RELEASE_SM_LOCK
+    *exec_ret = ret;
     return ret;
 }
 
@@ -1206,6 +1247,8 @@ void freeExec (void *addr)
     RELEASE_SM_LOCK
 }
 
+#endif /* linux_HOST_OS */
+
 /* -----------------------------------------------------------------------------
    Debugging
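The signature change above is the heart of the patch: allocateExec now hands back two addresses, the writable mapping (its return value) and the executable mapping (via exec_ret). Under SELinux these may be distinct views of the same memory, so callers must write code bytes through one pointer and jump through the other, and freeExec takes the executable address. A sketch of the intended calling pattern inside the RTS (the installCode helper and its code/len arguments are made up for illustration):

    #include <string.h>
    #include "Rts.h"   /* assumed RTS-internal context: allocateExec, freeExec */

    /* Hypothetical helper: copy a finished machine-code fragment into
     * executable memory and return the address that may be jumped to. */
    static void *installCode (StgWord8 *code, nat len)
    {
        void *exec_addr;
        void *writable = allocateExec(len, &exec_addr);
        if (writable == NULL) return NULL;
        memcpy(writable, code, len);  /* write only through the writable view */
        return exec_addr;             /* jump through, and later free, this one */
    }

    /* ... and when the fragment is no longer needed:
     *     freeExec(exec_addr);       -- the executable address, as noted above */

Storing the writable address in the word just before the returned code (the *ret = ret line) is what lets freeExec recover it from the executable address alone.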
@@ -1270,6 +1313,51 @@ stepBlocks (step *stp)
     countAllocdBlocks(stp->large_objects);
 }
 
+// If memInventory() calculates that we have a memory leak, this
+// function will try to find the block(s) that are leaking by marking
+// all the ones that we know about, and searching through memory to find
+// blocks that are not marked.  In the debugger this can help to give
+// us a clue about what kind of block leaked.  In the future we might
+// annotate blocks with their allocation site to give more helpful
+// info.
+static void
+findMemoryLeak (void)
+{
+    nat g, s, i;
+    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+        for (i = 0; i < n_capabilities; i++) {
+            markBlocks(capabilities[i].mut_lists[g]);
+        }
+        markBlocks(generations[g].mut_list);
+        for (s = 0; s < generations[g].n_steps; s++) {
+            markBlocks(generations[g].steps[s].blocks);
+            markBlocks(generations[g].steps[s].large_objects);
+        }
+    }
+
+    for (i = 0; i < n_nurseries; i++) {
+        markBlocks(nurseries[i].blocks);
+        markBlocks(nurseries[i].large_objects);
+    }
+
+#ifdef PROFILING
+    // TODO:
+    // if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
+    //    markRetainerBlocks();
+    // }
+#endif
+
+    // count the blocks allocated by the arena allocator
+    // TODO:
+    // markArenaBlocks();
+
+    // count the blocks containing executable memory
+    markBlocks(exec_block);
+
+    reportUnmarkedBlocks();
+}
+
+
 void
 memInventory (rtsBool show)
 {
@@ -1327,8 +1415,6 @@ memInventory (rtsBool show)
 
   leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;
 
-  ASSERT(n_alloc_blocks == live_blocks);
-
   if (show || leak)
   {
       if (leak) {
@@ -1357,6 +1443,13 @@ memInventory (rtsBool show)
                  mblocks_allocated * BLOCKS_PER_MBLOCK, mblocks_allocated);
       }
   }
+
+  if (leak) {
+      debugBelch("\n");
+      findMemoryLeak();
+  }
+  ASSERT(n_alloc_blocks == live_blocks);
+  ASSERT(!leak);
 }
 
 
@@ -1395,6 +1488,14 @@ checkSanity( void )
 
         checkFreeListSanity();
     }
+
+#if defined(THREADED_RTS)
+    // check the stacks too in threaded mode, because we don't do a
+    // full heap sanity check in this case (see checkHeap())
+    checkGlobalTSOList(rtsTrue);
+#else
+    checkGlobalTSOList(rtsFalse);
+#endif
 }
 
 /* Nursery sanity check */
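findMemoryLeak leans on two block-allocator helpers, markBlocks and reportUnmarkedBlocks, which this diff does not show. Their likely shape is sketched below; the BF_KNOWN flag, the getFirstMBlock/getNextMBlock iterators, and LAST_BDESCR are assumptions for illustration, not necessarily the real names or semantics:

    /* Sketch: flag every block group on a bdescr chain as accounted for. */
    static void markBlocks (bdescr *bd)
    {
        for (; bd != NULL; bd = bd->link) {
            bd->flags |= BF_KNOWN;            /* assumed spare flag bit */
        }
    }

    /* Sketch: scan every allocated megablock and report block descriptors
     * that no marking pass reached -- these are the leak candidates.
     * (A real version would also have to skip blocks on the free list.) */
    static void reportUnmarkedBlocks (void)
    {
        void *mblock;
        bdescr *bd;

        for (mblock = getFirstMBlock();       /* assumed mblock iterators */
             mblock != NULL;
             mblock = getNextMBlock(mblock))
        {
            for (bd = FIRST_BDESCR(mblock);
                 bd <= LAST_BDESCR(mblock);
                 /* step over the whole group; non-head descriptors have
                  * blocks == 0, so advance by at least one */
                 bd += (bd->blocks == 0) ? 1 : bd->blocks)
            {
                if (!(bd->flags & BF_KNOWN)) {
                    debugBelch("block %p (descriptor %p) is not accounted for\n",
                               bd->start, bd);
                }
            }
        }
    }

Whatever the exact helpers look like, the moved ASSERT(n_alloc_blocks == live_blocks) now runs after this report, so a failing sanity check prints the offending blocks before the RTS aborts.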