X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;ds=sidebyside;f=ghc%2Frts%2FStorage.c;h=fb0c016f5ea6c99e12e5dae04afa3914a3c797b9;hb=182b16bccea2eab1a8af93a6246db3d391e436c7;hp=a1bfc5e6e64ac0288d4da8594150e58e11032c9a;hpb=f84bb9a2e577c9c1be83a806a4cf70fccfb45b54;p=ghc-hetmet.git

diff --git a/ghc/rts/Storage.c b/ghc/rts/Storage.c
index a1bfc5e..fb0c016 100644
--- a/ghc/rts/Storage.c
+++ b/ghc/rts/Storage.c
@@ -1,5 +1,5 @@
 /* -----------------------------------------------------------------------------
- * $Id: Storage.c,v 1.75 2003/01/29 10:28:56 simonmar Exp $
+ * $Id: Storage.c,v 1.82 2003/10/24 09:56:45 simonmar Exp $
  *
  * (c) The GHC Team, 1998-1999
  *
@@ -73,13 +73,13 @@ initStorage( void )
       return;
   }
 
-  /* Sanity check to make sure the LOOKS_LIKE_ macros appear to be
-   * doing something reasonable.
-   */
-  ASSERT(LOOKS_LIKE_INFO_PTR(&stg_BLACKHOLE_info));
-  ASSERT(LOOKS_LIKE_CLOSURE_PTR(&stg_dummy_ret_closure));
-  ASSERT(!HEAP_ALLOCED(&stg_dummy_ret_closure));
-  
+  /* Sanity check to make sure the LOOKS_LIKE_ macros appear to be
+   * doing something reasonable.
+   */
+  ASSERT(LOOKS_LIKE_INFO_PTR(&stg_BLACKHOLE_info));
+  ASSERT(LOOKS_LIKE_CLOSURE_PTR(&stg_dummy_ret_closure));
+  ASSERT(!HEAP_ALLOCED(&stg_dummy_ret_closure));
+
   if (RtsFlags.GcFlags.maxHeapSize != 0 &&
       RtsFlags.GcFlags.heapSizeSuggestion >
       RtsFlags.GcFlags.maxHeapSize) {
@@ -96,7 +96,7 @@ initStorage( void )
   initBlockAllocator();
 
 #if defined(SMP)
-  initCondition(&sm_mutex);
+  initMutex(&sm_mutex);
 #endif
 
   /* allocate generation info array */
@@ -148,6 +148,7 @@ initStorage( void )
       stp = &generations[g].steps[s];
       stp->no = s;
       stp->blocks = NULL;
+      stp->n_to_blocks = 0;
      stp->n_blocks = 0;
       stp->gen = &generations[g];
       stp->gen_no = g;
@@ -208,10 +209,6 @@ initStorage( void )
   /* Tell GNU multi-precision pkg about our custom alloc functions */
   mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP,
                           stgDeallocForGMP);
 
-#if defined(SMP)
-  initMutex(&sm_mutex);
-#endif
-
   IF_DEBUG(gc, statDescribeGens());
 }
@@ -317,22 +314,20 @@ void
 allocNurseries( void )
 {
 #ifdef SMP
-  {
-    Capability *cap;
-    bdescr *bd;
-
-    g0s0->blocks = NULL;
-    g0s0->n_blocks = 0;
-    for (cap = free_capabilities; cap != NULL; cap = cap->link) {
-      cap->r.rNursery = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
-      cap->r.rCurrentNursery = cap->r.rNursery;
-      for (bd = cap->r.rNursery; bd != NULL; bd = bd->link) {
-        bd->u.back = (bdescr *)cap;
-      }
-    }
+  Capability *cap;
+  bdescr *bd;
+
+  g0s0->blocks = NULL;
+  g0s0->n_blocks = 0;
+  for (cap = free_capabilities; cap != NULL; cap = cap->link) {
+    cap->r.rNursery = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
+    cap->r.rCurrentNursery = cap->r.rNursery;
     /* Set the back links to be equal to the Capability,
      * so we can do slightly better informed locking.
      */
+    for (bd = cap->r.rNursery; bd != NULL; bd = bd->link) {
+      bd->u.back = (bdescr *)cap;
+    }
   }
 #else /* SMP */
   g0s0->blocks = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
@@ -480,15 +475,11 @@ allocate( nat n )
     nat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
     bd = allocGroup(req_blocks);
     dbl_link_onto(bd, &g0s0->large_objects);
+    g0s0->n_large_blocks += req_blocks;
     bd->gen_no  = 0;
     bd->step    = g0s0;
     bd->flags   = BF_LARGE;
     bd->free    = bd->start + n;
-    /* don't add these blocks to alloc_blocks, since we're assuming
-     * that large objects are likely to remain live for quite a while
-     * (eg. running threads), so garbage collecting early won't make
-     * much difference.
-     */
     alloc_blocks += req_blocks;
     RELEASE_SM_LOCK;
     return bd->start;
@@ -568,18 +559,17 @@ allocatePinned( nat n )
     StgPtr p;
     bdescr *bd = pinned_object_block;
 
-    ACQUIRE_SM_LOCK;
-
-    TICK_ALLOC_HEAP_NOCTR(n);
-    CCS_ALLOC(CCCS,n);
-
     // If the request is for a large object, then allocate()
     // will give us a pinned object anyway.
     if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
-        RELEASE_SM_LOCK;
        return allocate(n);
     }
 
+    ACQUIRE_SM_LOCK;
+
+    TICK_ALLOC_HEAP_NOCTR(n);
+    CCS_ALLOC(CCCS,n);
+
     // we always return 8-byte aligned memory.  bd->free must be
     // 8-byte aligned to begin with, so we just round up n to
     // the nearest multiple of 8 bytes.
@@ -594,7 +584,7 @@ allocatePinned( nat n )
        dbl_link_onto(bd, &g0s0->large_objects);
        bd->gen_no = 0;
        bd->step   = g0s0;
-       bd->flags  = BF_LARGE;
+       bd->flags  = BF_PINNED | BF_LARGE;
        bd->free   = bd->start;
        alloc_blocks++;
     }
@@ -831,8 +821,7 @@ memInventory(void)
 
 #ifdef PROFILING
   if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
-      for (bd = firstStack; bd != NULL; bd = bd->link)
-          total_blocks += bd->blocks;
+      total_blocks += retainerStackBlocks();
   }
 #endif