diff --git a/ghc/rts/Storage.c b/ghc/rts/Storage.c
index f92e653..45d94ae 100644
--- a/ghc/rts/Storage.c
+++ b/ghc/rts/Storage.c
@@ -1,7 +1,6 @@
 /* -----------------------------------------------------------------------------
- * $Id: Storage.c,v 1.76 2003/02/01 09:10:17 mthomas Exp $
  *
- * (c) The GHC Team, 1998-1999
+ * (c) The GHC Team, 1998-2004
  *
  * Storage manager front end
  *
@@ -22,7 +21,6 @@
 #include "Storage.h"
 #include "Schedule.h"
 #include "OSThreads.h"
-#include "StoragePriv.h"
 #include "RetainerProfile.h"	// for counting memory blocks (memInventory)
@@ -30,6 +28,8 @@
 #include
 StgClosure *caf_list = NULL;
+StgClosure *revertible_caf_list = NULL;
+rtsBool keepCAFs;
 bdescr *small_alloc_list;	/* allocate()d small objects */
 bdescr *pinned_object_block;	/* allocate pinned objects into this block */
@@ -44,7 +44,7 @@ generation *g0 = NULL; /* generation 0, for convenience */
 generation *oldest_gen = NULL;	/* oldest generation, for convenience */
 step *g0s0 = NULL;		/* generation 0, step 0, for convenience */
-lnat total_allocated = 0;	/* total memory allocated during run */
+ullong total_allocated = 0;	/* total memory allocated during run */
 /*
  * Storage manager mutex: protects all the above state from
@@ -73,13 +73,13 @@ initStorage( void )
       return;
   }
-  /* Sanity check to make sure the LOOKS_LIKE_ macros appear to be
-   * doing something reasonable.
-   */
-  ASSERT(LOOKS_LIKE_INFO_PTR(&stg_BLACKHOLE_info));
-  ASSERT(LOOKS_LIKE_CLOSURE_PTR(&stg_dummy_ret_closure));
-  ASSERT(!HEAP_ALLOCED(&stg_dummy_ret_closure));
-
+  /* Sanity check to make sure the LOOKS_LIKE_ macros appear to be
+   * doing something reasonable.
+   */
+  ASSERT(LOOKS_LIKE_INFO_PTR(&stg_BLACKHOLE_info));
+  ASSERT(LOOKS_LIKE_CLOSURE_PTR(&stg_dummy_ret_closure));
+  ASSERT(!HEAP_ALLOCED(&stg_dummy_ret_closure));
+
   if (RtsFlags.GcFlags.maxHeapSize != 0 &&
       RtsFlags.GcFlags.heapSizeSuggestion >
       RtsFlags.GcFlags.maxHeapSize) {
@@ -89,14 +89,14 @@ initStorage( void )
   if (RtsFlags.GcFlags.maxHeapSize != 0 &&
       RtsFlags.GcFlags.minAllocAreaSize >
       RtsFlags.GcFlags.maxHeapSize) {
-      prog_belch("maximum heap size (-M) is smaller than minimum alloc area size (-A)");
+      errorBelch("maximum heap size (-M) is smaller than minimum alloc area size (-A)");
       exit(1);
   }
   initBlockAllocator();
 #if defined(SMP)
-  initCondition(&sm_mutex);
+  initMutex(&sm_mutex);
 #endif
   /* allocate generation info array */
@@ -108,8 +108,7 @@ initStorage( void )
   for(g = 0; g < RtsFlags.GcFlags.generations; g++) {
     gen = &generations[g];
     gen->no = g;
-    gen->mut_list = END_MUT_LIST;
-    gen->mut_once_list = END_MUT_LIST;
+    gen->mut_list = allocBlock();
     gen->collections = 0;
     gen->failed_promotions = 0;
     gen->max_blocks = 0;
@@ -178,7 +177,7 @@ initStorage( void )
   /* The oldest generation has one step and it is compacted.
    */
   if (RtsFlags.GcFlags.compact) {
       if (RtsFlags.GcFlags.generations == 1) {
-	  belch("WARNING: compaction is incompatible with -G1; disabled");
+	  errorBelch("WARNING: compaction is incompatible with -G1; disabled");
       } else {
	  oldest_gen->steps[0].is_compacted = 1;
       }
@@ -200,6 +199,7 @@ initStorage( void )
   weak_ptr_list = NULL;
   caf_list = NULL;
+  revertible_caf_list = NULL;
   /* initialise the allocate() interface */
   small_alloc_list = NULL;
@@ -209,10 +209,6 @@ initStorage( void )
   /* Tell GNU multi-precision pkg about our custom alloc functions */
   mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);
-#if defined(SMP)
-  initMutex(&sm_mutex);
-#endif
-
   IF_DEBUG(gc, statDescribeGens());
 }
@@ -266,25 +262,43 @@ exitStorage (void)
 void
 newCAF(StgClosure* caf)
 {
-  /* Put this CAF on the mutable list for the old generation.
-   * This is a HACK - the IND_STATIC closure doesn't really have
-   * a mut_link field, but we pretend it has - in fact we re-use
-   * the STATIC_LINK field for the time being, because when we
-   * come to do a major GC we won't need the mut_link field
-   * any more and can use it as a STATIC_LINK.
-   */
   ACQUIRE_SM_LOCK;
-  ((StgIndStatic *)caf)->saved_info = NULL;
-  ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_once_list;
-  oldest_gen->mut_once_list = (StgMutClosure *)caf;
-
+  if(keepCAFs)
+  {
+    // HACK:
+    // If we are in GHCi _and_ we are using dynamic libraries,
+    // then we can't redirect newCAF calls to newDynCAF (see below),
+    // so we make newCAF behave almost like newDynCAF.
+    // The dynamic libraries might be used by both the interpreted
+    // program and GHCi itself, so they must not be reverted.
+    // This also means that in GHCi with dynamic libraries, CAFs are not
+    // garbage collected. If this turns out to be a problem, we could
+    // do another hack here and do an address range test on caf to figure
+    // out whether it is from a dynamic library.
+    ((StgIndStatic *)caf)->saved_info = (StgInfoTable *)caf->header.info;
+    ((StgIndStatic *)caf)->static_link = caf_list;
+    caf_list = caf;
+  }
+  else
+  {
+    /* Put this CAF on the mutable list for the old generation.
+     * This is a HACK - the IND_STATIC closure doesn't really have
+     * a mut_link field, but we pretend it has - in fact we re-use
+     * the STATIC_LINK field for the time being, because when we
+     * come to do a major GC we won't need the mut_link field
+     * any more and can use it as a STATIC_LINK.
+     */
+    ((StgIndStatic *)caf)->saved_info = NULL;
+    recordMutableGen(caf, oldest_gen);
+  }
+
   RELEASE_SM_LOCK;
 #ifdef PAR
   /* If we are PAR or DIST then we never forget a CAF */
   { globalAddr *newGA;
-    //belch("<##> Globalising CAF %08x %s",caf,info_type(caf));
+    //debugBelch("<##> Globalising CAF %08x %s",caf,info_type(caf));
     newGA=makeGlobal(caf,rtsTrue); /*given full weight*/
     ASSERT(newGA);
   }
@@ -295,6 +309,8 @@ newCAF(StgClosure* caf)
 // object code in GHCi. In this case we want to retain *all* CAFs in
 // the object code, because they might be demanded at any time from an
 // expression evaluated on the command line.
+// Also, GHCi might want to revert CAFs, so we add these to the
+// revertible_caf_list.
 //
 // The linker hackily arranges that references to newCaf from dynamic
 // code end up pointing to newDynCAF.
@@ -304,8 +320,8 @@ newDynCAF(StgClosure *caf)
   ACQUIRE_SM_LOCK;
   ((StgIndStatic *)caf)->saved_info = (StgInfoTable *)caf->header.info;
-  ((StgIndStatic *)caf)->static_link = caf_list;
-  caf_list = caf;
+  ((StgIndStatic *)caf)->static_link = revertible_caf_list;
+  revertible_caf_list = caf;
   RELEASE_SM_LOCK;
 }
@@ -318,22 +334,20 @@ void
 allocNurseries( void )
 {
 #ifdef SMP
-  {
-    Capability *cap;
-    bdescr *bd;
-
-    g0s0->blocks = NULL;
-    g0s0->n_blocks = 0;
-    for (cap = free_capabilities; cap != NULL; cap = cap->link) {
-      cap->r.rNursery = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
-      cap->r.rCurrentNursery = cap->r.rNursery;
-      for (bd = cap->r.rNursery; bd != NULL; bd = bd->link) {
-	bd->u.back = (bdescr *)cap;
-      }
-    }
+  Capability *cap;
+  bdescr *bd;
+
+  g0s0->blocks = NULL;
+  g0s0->n_blocks = 0;
+  for (cap = free_capabilities; cap != NULL; cap = cap->link) {
+    cap->r.rNursery = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
+    cap->r.rCurrentNursery = cap->r.rNursery;
     /* Set the back links to be equal to the Capability,
      * so we can do slightly better informed locking.
      */
+    for (bd = cap->r.rNursery; bd != NULL; bd = bd->link) {
+      bd->u.back = (bdescr *)cap;
+    }
   }
 #else /* SMP */
   g0s0->blocks = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
@@ -425,7 +439,7 @@ resizeNursery ( nat blocks )
   }
   else if (nursery_blocks < blocks) {
-    IF_DEBUG(gc, fprintf(stderr, "Increasing size of nursery to %d blocks\n",
+    IF_DEBUG(gc, debugBelch("Increasing size of nursery to %d blocks\n",
			 blocks));
     g0s0->blocks = allocNursery(g0s0->blocks, blocks-nursery_blocks);
   }
@@ -433,7 +447,7 @@ resizeNursery ( nat blocks )
   else {
     bdescr *next_bd;
-    IF_DEBUG(gc, fprintf(stderr, "Decreasing size of nursery to %d blocks\n",
+    IF_DEBUG(gc, debugBelch("Decreasing size of nursery to %d blocks\n",
			 blocks));
     bd = g0s0->blocks;
@@ -481,15 +495,11 @@ allocate( nat n )
     nat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
     bd = allocGroup(req_blocks);
     dbl_link_onto(bd, &g0s0->large_objects);
+    g0s0->n_large_blocks += req_blocks;
     bd->gen_no = 0;
     bd->step = g0s0;
     bd->flags = BF_LARGE;
     bd->free = bd->start + n;
-    /* don't add these blocks to alloc_blocks, since we're assuming
-     * that large objects are likely to remain live for quite a while
-     * (eg. running threads), so garbage collecting early won't make
-     * much difference.
-     */
     alloc_blocks += req_blocks;
     RELEASE_SM_LOCK;
     return bd->start;
@@ -569,18 +579,17 @@ allocatePinned( nat n )
     StgPtr p;
     bdescr *bd = pinned_object_block;
-    ACQUIRE_SM_LOCK;
-
-    TICK_ALLOC_HEAP_NOCTR(n);
-    CCS_ALLOC(CCCS,n);
-
     // If the request is for a large object, then allocate()
     // will give us a pinned object anyway.
     if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
-	RELEASE_SM_LOCK;
	return allocate(n);
     }
+    ACQUIRE_SM_LOCK;
+
+    TICK_ALLOC_HEAP_NOCTR(n);
+    CCS_ALLOC(CCCS,n);
+
     // we always return 8-byte aligned memory. bd->free must be
     // 8-byte aligned to begin with, so we just round up n to
     // the nearest multiple of 8 bytes.
@@ -595,7 +604,7 @@ allocatePinned( nat n ) dbl_link_onto(bd, &g0s0->large_objects); bd->gen_no = 0; bd->step = g0s0; - bd->flags = BF_LARGE; + bd->flags = BF_PINNED | BF_LARGE; bd->free = bd->start; alloc_blocks++; } @@ -632,7 +641,7 @@ stgAllocForGMP (size_t size_in_bytes) SET_ARR_HDR(arr, &stg_ARR_WORDS_info, CCCS, data_size_in_words); /* and return a ptr to the goods inside the array */ - return(BYTE_ARR_CTS(arr)); + return arr->payload; } static void * @@ -804,25 +813,28 @@ memInventory(void) /* count the blocks we current have */ for (g = 0; g < RtsFlags.GcFlags.generations; g++) { - for (s = 0; s < generations[g].n_steps; s++) { - stp = &generations[g].steps[s]; - total_blocks += stp->n_blocks; - if (RtsFlags.GcFlags.generations == 1) { - /* two-space collector has a to-space too :-) */ - total_blocks += g0s0->n_to_blocks; + for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) { + total_blocks += bd->blocks; } - for (bd = stp->large_objects; bd; bd = bd->link) { - total_blocks += bd->blocks; - /* hack for megablock groups: they have an extra block or two in - the second and subsequent megablocks where the block - descriptors would normally go. - */ - if (bd->blocks > BLOCKS_PER_MBLOCK) { - total_blocks -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK) - * (bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE)); - } + for (s = 0; s < generations[g].n_steps; s++) { + stp = &generations[g].steps[s]; + total_blocks += stp->n_blocks; + if (RtsFlags.GcFlags.generations == 1) { + /* two-space collector has a to-space too :-) */ + total_blocks += g0s0->n_to_blocks; + } + for (bd = stp->large_objects; bd; bd = bd->link) { + total_blocks += bd->blocks; + /* hack for megablock groups: they have an extra block or two in + the second and subsequent megablocks where the block + descriptors would normally go. + */ + if (bd->blocks > BLOCKS_PER_MBLOCK) { + total_blocks -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK) + * (bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE)); + } + } } - } } /* any blocks held by allocate() */ @@ -832,8 +844,7 @@ memInventory(void) #ifdef PROFILING if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) { - for (bd = firstStack; bd != NULL; bd = bd->link) - total_blocks += bd->blocks; + total_blocks += retainerStackBlocks(); } #endif @@ -845,7 +856,7 @@ memInventory(void) if (total_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK) { - fprintf(stderr, "Blocks: %ld live + %ld free = %ld total (%ld around)\n", + debugBelch("Blocks: %ld live + %ld free = %ld total (%ld around)\n", total_blocks, free_blocks, total_blocks + free_blocks, mblocks_allocated * BLOCKS_PER_MBLOCK); } @@ -886,7 +897,6 @@ checkSanity( void ) checkChain(generations[g].steps[s].large_objects); if (g > 0) { checkMutableList(generations[g].mut_list, g); - checkMutOnceList(generations[g].mut_once_list, g); } } }
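
For context, not part of the patch above: newDynCAF stashes each closure's
original info pointer in saved_info and chains the closure onto
revertible_caf_list precisely so that GHCi can later undo the CAF updates.
A minimal sketch of that consumer, assuming only the StgIndStatic fields
used in the hunks above (the RTS's own revertCAFs amounts to essentially
this walk):

/* Revert every CAF recorded on revertible_caf_list: restore each
 * closure's original info table from saved_info, then clear the list.
 */
void
revertCAFs( void )
{
    StgIndStatic *c;

    for (c = (StgIndStatic *)revertible_caf_list; c != NULL;
	 c = (StgIndStatic *)c->static_link)
    {
	SET_INFO(c, c->saved_info);   /* undo the IND_STATIC update */
	c->saved_info = NULL;
    }
    revertible_caf_list = NULL;       /* list consumed; nothing left to revert */
}

CAFs registered through newCAF under keepCAFs go on caf_list instead, which
is why the patch keeps the two lists separate: dynamic-library CAFs must
survive such a reversion.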