#include "Rts.h"
#include "Storage.h"
+#include "GCThread.h"
#include "RtsUtils.h"
#include "Stats.h"
#include "BlockAlloc.h"
gen->large_objects = NULL;
gen->n_large_blocks = 0;
gen->n_new_large_words = 0;
- gen->mut_list = allocBlock();
gen->scavenged_large_objects = NULL;
gen->n_scavenged_large_blocks = 0;
gen->mark = 0;
gen->compact = 0;
gen->bitmap = NULL;
#ifdef THREADED_RTS
- initSpinLock(&gen->sync_large_objects);
+ initSpinLock(&gen->sync);
#endif
gen->threads = END_TSO_QUEUE;
gen->old_threads = END_TSO_QUEUE;
// If we don't have a block of pinned objects yet, or the current
// one isn't large enough to hold the new object, allocate a new one.
if (bd == NULL || (bd->free + n) > (bd->start + BLOCK_SIZE_W)) {
+ // The pinned_object_block remains attached to the capability
+ // until it is full, even if a GC occurs. We want this
+ // behaviour because otherwise the unallocated portion of the
+ // block would be forever slop, and under certain workloads
+ // (allocating a few ByteStrings per GC) we accumulate a lot
+ // of slop.
+ //
+ // So, the pinned_object_block is initially marked
+ // BF_EVACUATED so the GC won't touch it. When it is full,
+ // we place it on the large_objects list, and at the start of
+ // the next GC the BF_EVACUATED flag will be cleared, and the
+ // block will be promoted as usual (if anything in it is
+ // live).
ACQUIRE_SM_LOCK;
- cap->pinned_object_block = bd = allocBlock();
- dbl_link_onto(bd, &g0->large_objects);
- g0->n_large_blocks++;
+ if (bd != NULL) {
+ dbl_link_onto(bd, &g0->large_objects);
+ g0->n_large_blocks++;
+ g0->n_new_large_words += bd->free - bd->start;
+ }
+ cap->pinned_object_block = bd = allocBlock();
RELEASE_SM_LOCK;
initBdescr(bd, g0, g0);
- bd->flags = BF_PINNED | BF_LARGE;
+ bd->flags = BF_PINNED | BF_LARGE | BF_EVACUATED;
bd->free = bd->start;
}
- g0->n_new_large_words += n;
p = bd->free;
bd->free += n;
return p;
calcAllocated (rtsBool include_nurseries)
{
nat allocated = 0;
- bdescr *bd;
nat i;
// When called from GC.c, we already have the allocation count for
if (include_nurseries)
{
for (i = 0; i < n_capabilities; i++) {
- for (bd = nurseries[i].blocks; bd; bd = bd->link) {
- allocated += (lnat)(bd->free - bd->start);
- }
+ allocated += countOccupied(nurseries[i].blocks);
}
}
return allocated;
}
-/* Approximate the amount of live data in the heap. To be called just
- * after garbage collection (see GarbageCollect()).
- */
-lnat calcLiveBlocks (void)
-{
- nat g;
- lnat live = 0;
- generation *gen;
-
- for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
- /* approximate amount of live data (doesn't take into account slop
- * at end of each block).
- */
- gen = &generations[g];
- live += gen->n_large_blocks + gen->n_blocks;
- }
- return live;
-}
-
lnat countOccupied (bdescr *bd)
{
lnat words;
return words;
}
+// Words of live data in a generation: the generation's accumulated
+// word count (gen->n_words) plus the words occupied in its
+// large-object block list.
+lnat genLiveWords (generation *gen)
+{
+ return gen->n_words + countOccupied(gen->large_objects);
+}
+
+// Blocks of live data in a generation: ordinary blocks plus
+// large-object blocks (both counts are maintained elsewhere in the
+// storage manager).
+lnat genLiveBlocks (generation *gen)
+{
+ return gen->n_blocks + gen->n_large_blocks;
+}
+
+// Words of live data held in GC thread i's workspace for generation g:
+// the current todo block plus the partial and scavenged block lists.
+// All three are counted by walking the lists (countOccupied), so this
+// is exact but O(blocks).
+lnat gcThreadLiveWords (nat i, nat g)
+{
+ lnat words;
+
+ words = countOccupied(gc_threads[i]->gens[g].todo_bd);
+ words += countOccupied(gc_threads[i]->gens[g].part_list);
+ words += countOccupied(gc_threads[i]->gens[g].scavd_list);
+
+ return words;
+}
+
+// Blocks held in GC thread i's workspace for generation g: the todo
+// list is counted by walking it (countBlocks), while the part and
+// scavd lists use the cached per-workspace counters.
+lnat gcThreadLiveBlocks (nat i, nat g)
+{
+ lnat blocks;
+
+ blocks = countBlocks(gc_threads[i]->gens[g].todo_bd);
+ blocks += gc_threads[i]->gens[g].n_part_blocks;
+ blocks += gc_threads[i]->gens[g].n_scavd_blocks;
+
+ return blocks;
+}
+
// Return an accurate count of the live data in the heap, excluding
// generation 0.
+// NOTE(review): the header above says "excluding generation 0", but the
+// loop below starts at g == 0 — confirm which is intended.
lnat calcLiveWords (void)
{
nat g;
lnat live;
- generation *gen;
-
+
live = 0;
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
- gen = &generations[g];
- live += gen->n_words + countOccupied(gen->large_objects);
+ // Per-generation word accounting is factored into genLiveWords().
+ live += genLiveWords(&generations[g]);
+ }
+ return live;
+}
+
+/* Approximate the amount of live data in the heap, in blocks.  To be
+ * called just after garbage collection (see GarbageCollect()).
+ * Per-generation accounting is factored into genLiveBlocks().
+ */
+lnat calcLiveBlocks (void)
+{
+ nat g;
+ lnat live;
+
+ live = 0;
+ for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+ live += genLiveBlocks(&generations[g]);
+ }
+ return live;
+}