#if defined(THREADED_RTS)
closeMutex(&sm_mutex);
closeMutex(&atomic_modify_mutvar_mutex);
+ stgFree(nurseries);
#endif
}
nat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
bd = allocGroup(req_blocks);
dbl_link_onto(bd, &g0s0->large_objects);
- g0s0->n_large_blocks += req_blocks;
+ g0s0->n_large_blocks += bd->blocks; // might be larger than req_blocks
bd->gen_no = 0;
bd->step = g0s0;
bd->flags = BF_LARGE;
ACQUIRE_SM_LOCK;
bd = allocGroup(req_blocks);
dbl_link_onto(bd, &g0s0->large_objects);
- g0s0->n_large_blocks += req_blocks;
+ g0s0->n_large_blocks += bd->blocks; // might be larger than req_blocks
bd->gen_no = 0;
bd->step = g0s0;
bd->flags = BF_LARGE;
in the page, and when the page is emptied (all objects on the page
are free) we free the page again, not forgetting to make it
non-executable.
+
+ TODO: The inability to handle objects bigger than BLOCK_SIZE_W means that
+ the linker cannot use allocateExec for loading object code files
+ on Windows. Once allocateExec can handle larger objects, the linker
+ should be modified to use allocateExec instead of VirtualAlloc.
------------------------------------------------------------------------- */
static bdescr *exec_block;
// the head of the queue.
if (bd->gen_no == 0 && bd != exec_block) {
debugTrace(DEBUG_gc, "free exec block %p", bd->start);
- if (bd->u.back) {
- bd->u.back->link = bd->link;
- } else {
- exec_block = bd->link;
- }
- if (bd->link) {
- bd->link->u.back = bd->u.back;
- }
+ dbl_link_remove(bd, &exec_block);
setExecutable(bd->start, bd->blocks * BLOCK_SIZE, rtsFalse);
freeGroup(bd);
}
#ifdef DEBUG
-static lnat
-stepBlocks (step *stp)
+nat
+countBlocks(bdescr *bd)
{
- lnat total_blocks;
- bdescr *bd;
+ nat n;
+ for (n=0; bd != NULL; bd=bd->link) {
+ n += bd->blocks;
+ }
+ return n;
+}
- total_blocks = stp->n_blocks;
- total_blocks += stp->n_old_blocks;
- for (bd = stp->large_objects; bd; bd = bd->link) {
- total_blocks += bd->blocks;
- /* hack for megablock groups: they have an extra block or two in
- the second and subsequent megablocks where the block
- descriptors would normally go.
- */
+// (*1) Just like countBlocks, except that we adjust the count for a
+// megablock group so that it doesn't include the extra few blocks
+// that would be taken up by block descriptors in the second and
+// subsequent megablocks.  This is so the count can be reconciled with
+// the number of blocks actually allocated in the system, for
+// memInventory().
+static nat
+countAllocdBlocks(bdescr *bd)
+{
+ nat n;
+ for (n=0; bd != NULL; bd=bd->link) {
+ n += bd->blocks;
+ // hack for megablock groups: see (*1) above
if (bd->blocks > BLOCKS_PER_MBLOCK) {
- total_blocks -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK)
+ n -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK)
* (bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE));
}
}
- return total_blocks;
+ return n;
+}
+
+static lnat
+stepBlocks (step *stp)
+{
+ ASSERT(countBlocks(stp->blocks) == stp->n_blocks);
+ ASSERT(countBlocks(stp->large_objects) == stp->n_large_blocks);
+ return stp->n_blocks + stp->n_old_blocks +
+ countAllocdBlocks(stp->large_objects);
}
void
{
nat g, s, i;
step *stp;
- bdescr *bd;
lnat gen_blocks[RtsFlags.GcFlags.generations];
lnat nursery_blocks, allocate_blocks, retainer_blocks,
arena_blocks, exec_blocks;
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
gen_blocks[g] = 0;
for (i = 0; i < n_capabilities; i++) {
- for (bd = capabilities[i].mut_lists[g]; bd != NULL; bd = bd->link) {
- gen_blocks[g] += bd->blocks;
- }
+ gen_blocks[g] += countBlocks(capabilities[i].mut_lists[g]);
}
- for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
- gen_blocks[g] += bd->blocks;
- }
+ gen_blocks[g] += countAllocdBlocks(generations[g].mut_list);
for (s = 0; s < generations[g].n_steps; s++) {
+#if !defined(THREADED_RTS)
+           // In the threaded RTS we put pinned object blocks in g0s0,
+           // so g0s0 must be counted by this loop.  In the non-threaded
+           // RTS, skip g0s0 here — its blocks are presumably already
+           // accounted for elsewhere (verify: nursery/allocate counts),
+           // so counting it again would double-count.
        if (g==0 && s==0) continue;
+#endif
stp = &generations[g].steps[s];
gen_blocks[g] += stepBlocks(stp);
}
for (i = 0; i < n_nurseries; i++) {
nursery_blocks += stepBlocks(&nurseries[i]);
}
-#ifdef THREADED_RTS
- // We put pinned object blocks in g0s0, so better count blocks there too.
- gen_blocks[0] += stepBlocks(g0s0);
-#endif
/* any blocks held by allocate() */
- allocate_blocks = 0;
- for (bd = small_alloc_list; bd; bd = bd->link) {
- allocate_blocks += bd->blocks;
- }
+ allocate_blocks = countAllocdBlocks(small_alloc_list);
retainer_blocks = 0;
#ifdef PROFILING
arena_blocks = arenaBlocks();
// count the blocks containing executable memory
- exec_blocks = 0;
- for (bd = exec_block; bd; bd = bd->link) {
- exec_blocks += bd->blocks;
- }
+ exec_blocks = countAllocdBlocks(exec_block);
/* count the blocks on the free list */
free_blocks = countFreeList();
debugBelch(" exec : %4lu\n", exec_blocks);
debugBelch(" free : %4lu\n", free_blocks);
debugBelch(" total : %4lu\n\n", live_blocks + free_blocks);
- debugBelch(" in system : %4lu\n", mblocks_allocated + BLOCKS_PER_MBLOCK);
+ debugBelch(" in system : %4lu\n", mblocks_allocated * BLOCKS_PER_MBLOCK);
ASSERT(0);
}
}
-nat
-countBlocks(bdescr *bd)
-{
- nat n;
- for (n=0; bd != NULL; bd=bd->link) {
- n += bd->blocks;
- }
- return n;
-}
-
/* Full heap sanity check. */
void
checkSanity( void )