/* -----------------------------------------------------------------------------
- * $Id: ProfHeap.c,v 1.28 2001/11/27 15:30:06 simonmar Exp $
+ * $Id: ProfHeap.c,v 1.39 2002/11/01 11:05:46 simonmar Exp $
*
* (c) The GHC Team, 1998-2000
*
#include "RetainerProfile.h"
#include "LdvProfile.h"
#include "Arena.h"
-
-#ifdef DEBUG_HEAP_PROF
#include "Printer.h"
-static void fprint_data(FILE *fp);
-#endif
+
+#include <string.h>
/* -----------------------------------------------------------------------------
* era stores the current time period. It is the same as the
* RESTRICTION:
* era must be no longer than LDV_SHIFT (15 or 30) bits.
* Invariants:
- * era is initialized to 0 in initHeapProfiling().
+ * era is initialized to 1 in initHeapProfiling().
*
* max_era is initialized to 2^LDV_SHIFT in initHeapProfiling().
* When era reaches max_era, the profiling stops because a closure can
static nat max_era;
/* -----------------------------------------------------------------------------
- counters
- -------------------------------------------------------------------------- */
+ * Counters
+ *
+ * For most heap profiles each closure identity gets a simple count
+ * of live words in the heap at each census. However, if we're
+ * selecting by biography, then we have to keep the various
+ * lag/drag/void counters for each identity.
+ * -------------------------------------------------------------------------- */
typedef struct _counter {
void *identity;
union {
nat resid;
struct {
int prim; // total size of 'inherently used' closures
- int unused; // total size of 'never used' closures
+ int not_used; // total size of 'never used' closures
int used; // total size of 'used at least once' closures
- int void_new; // current total size of 'destroyed without being used' closures
- int drag_new; // current total size of 'used at least once and waiting to die'
+ int void_total; // current total size of 'destroyed without being used' closures
+ int drag_total; // current total size of 'used at least once and waiting to die'
} ldv;
} c;
struct _counter *next;
} counter;
+static inline void
+initLDVCtr( counter *ctr )
+{
+ ctr->c.ldv.prim = 0;
+ ctr->c.ldv.not_used = 0;
+ ctr->c.ldv.used = 0;
+ ctr->c.ldv.void_total = 0;
+ ctr->c.ldv.drag_total = 0;
+}
+
typedef struct {
double time; // the time in MUT time when the census is made
HashTable * hash;
int drag_total;
} Census;
-Census *censuses = NULL;
-nat n_censuses = 0;
+static Census *censuses = NULL;
+static nat n_censuses = 0;
+
+#ifdef PROFILING
+static void aggregateCensusInfo( void );
+#endif
+
+static void dumpCensus( Census *census );
+
+/* -----------------------------------------------------------------------------
+ Closure Type Profiling;
+
+ PROBABLY TOTALLY OUT OF DATE -- ToDo (SDM)
+ -------------------------------------------------------------------------- */
+
+#ifdef DEBUG_HEAP_PROF
+static char *type_names[] = {
+ "INVALID_OBJECT"
+ , "CONSTR"
+ , "CONSTR_INTLIKE"
+ , "CONSTR_CHARLIKE"
+ , "CONSTR_STATIC"
+ , "CONSTR_NOCAF_STATIC"
+
+ , "FUN"
+ , "FUN_STATIC"
+
+ , "THUNK"
+ , "THUNK_STATIC"
+ , "THUNK_SELECTOR"
+
+ , "BCO"
+ , "AP_UPD"
+
+ , "PAP"
+
+ , "IND"
+ , "IND_OLDGEN"
+ , "IND_PERM"
+ , "IND_OLDGEN_PERM"
+ , "IND_STATIC"
+
+ , "RET_BCO"
+ , "RET_SMALL"
+ , "RET_VEC_SMALL"
+ , "RET_BIG"
+ , "RET_VEC_BIG"
+ , "RET_DYN"
+ , "UPDATE_FRAME"
+ , "CATCH_FRAME"
+ , "STOP_FRAME"
+ , "SEQ_FRAME"
+
+ , "BLACKHOLE"
+ , "BLACKHOLE_BQ"
+ , "MVAR"
+
+ , "ARR_WORDS"
+
+ , "MUT_ARR_PTRS"
+ , "MUT_ARR_PTRS_FROZEN"
+ , "MUT_VAR"
+
+ , "WEAK"
+ , "FOREIGN"
+
+ , "TSO"
+
+ , "BLOCKED_FETCH"
+ , "FETCH_ME"
+
+ , "EVACUATED"
+};
+
+#endif /* DEBUG_HEAP_PROF */
+
+/* -----------------------------------------------------------------------------
+ * Find the "closure identity", which is a unique pointer representing
+ * the band to which this closure's heap space is attributed in the
+ * heap profile.
+ * ------------------------------------------------------------------------- */
+static inline void *
+closureIdentity( StgClosure *p )
+{
+ switch (RtsFlags.ProfFlags.doHeapProfile) {
+
+#ifdef PROFILING
+ case HEAP_BY_CCS:
+ return p->header.prof.ccs;
+ case HEAP_BY_MOD:
+ return p->header.prof.ccs->cc->module;
+ case HEAP_BY_DESCR:
+ return get_itbl(p)->prof.closure_desc;
+ case HEAP_BY_TYPE:
+ return get_itbl(p)->prof.closure_type;
+ case HEAP_BY_RETAINER:
+ // AFAIK, the only closures in the heap which might not have a
+ // valid retainer set are DEAD_WEAK closures.
+ if (isRetainerSetFieldValid(p))
+ return retainerSetOf(p);
+ else
+ return NULL;
+
+#else // DEBUG
+ case HEAP_BY_INFOPTR:
+ return (void *)((StgClosure *)p)->header.info;
+ case HEAP_BY_CLOSURE_TYPE:
+ return type_names[get_itbl(p)->type];
+
+#endif
+ default:
+ barf("closureIdentity");
+ }
+}
/* --------------------------------------------------------------------------
* Profiling type predicates
void
LDV_recordDead( StgClosure *c, nat size )
{
+ void *id;
+ nat t;
+ counter *ctr;
+
if (era > 0 && closureSatisfiesConstraints(c)) {
- nat t;
size -= sizeofW(StgProfHeader);
if ((LDVW((c)) & LDV_STATE_MASK) == LDV_STATE_CREATE) {
t = (LDVW((c)) & LDV_CREATE_MASK) >> LDV_SHIFT;
if (t < era) {
- censuses[t].void_total += (int)size;
- censuses[era].void_total -= (int)size;
+ if (RtsFlags.ProfFlags.bioSelector == NULL) {
+ censuses[t].void_total += (int)size;
+ censuses[era].void_total -= (int)size;
+ } else {
+ id = closureIdentity(c);
+ ctr = lookupHashTable(censuses[t].hash, (StgWord)id);
+ ASSERT( ctr != NULL );
+ ctr->c.ldv.void_total += (int)size;
+ ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
+ if (ctr == NULL) {
+ ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
+ initLDVCtr(ctr);
+ insertHashTable(censuses[era].hash, (StgWord)id, ctr);
+ ctr->identity = id;
+ ctr->next = censuses[era].ctrs;
+ censuses[era].ctrs = ctr;
+ }
+ ctr->c.ldv.void_total -= (int)size;
+ }
}
} else {
t = LDVW((c)) & LDV_LAST_MASK;
if (t + 1 < era) {
- censuses[t + 1].drag_total += size;
- censuses[era].drag_total -= size;
+ if (RtsFlags.ProfFlags.bioSelector == NULL) {
+ censuses[t+1].drag_total += size;
+ censuses[era].drag_total -= size;
+ } else {
+ void *id;
+ id = closureIdentity(c);
+ ctr = lookupHashTable(censuses[t+1].hash, (StgWord)id);
+ ASSERT( ctr != NULL );
+ ctr->c.ldv.drag_total += (int)size;
+ ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
+ if (ctr == NULL) {
+ ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
+ initLDVCtr(ctr);
+ insertHashTable(censuses[era].hash, (StgWord)id, ctr);
+ ctr->identity = id;
+ ctr->next = censuses[era].ctrs;
+ censuses[era].ctrs = ctr;
+ }
+ ctr->c.ldv.drag_total -= (int)size;
+ }
}
}
}
* Initialize censuses[era];
* ----------------------------------------------------------------------- */
static inline void
-initEra(void)
+initEra(Census *census)
{
- censuses[era].not_used = 0;
- censuses[era].used = 0;
- censuses[era].prim = 0;
- censuses[era].void_total = 0;
- censuses[era].drag_total = 0;
+ census->hash = allocHashTable();
+ census->ctrs = NULL;
+ census->arena = newArena();
+
+ census->not_used = 0;
+ census->used = 0;
+ census->prim = 0;
+ census->void_total = 0;
+ census->drag_total = 0;
}
/* --------------------------------------------------------------------------
}
#endif // PROFILING
- initEra();
+ initEra( &censuses[era] );
}
-/* -------------------------------------------------------------------------- */
+/* -----------------------------------------------------------------------------
+ * DEBUG heap profiling, by info table
+ * -------------------------------------------------------------------------- */
#ifdef DEBUG_HEAP_PROF
FILE *hp_file;
}
#endif /* DEBUG_HEAP_PROF */
+/* --------------------------------------------------------------------------
+ * Initialize the heap profiler
+ * ----------------------------------------------------------------------- */
nat
initHeapProfiling(void)
{
return 0;
}
+#ifdef PROFILING
+ if (doingLDVProfiling() && doingRetainerProfiling()) {
+ prog_belch("cannot mix -hb and -hr");
+ stg_exit(1);
+ }
+#endif
+
// we only count eras if we're doing LDV profiling. Otherwise era
// is fixed at zero.
#ifdef PROFILING
n_censuses = 32;
censuses = stgMallocBytes(sizeof(Census) * n_censuses, "initHeapProfiling");
+ initEra( &censuses[era] );
+
fprintf(hp_file, "JOB \"%s", prog_argv[0]);
#ifdef PROFILING
#endif
#ifdef PROFILING
- // Note:
- // We do not need to perform a major garbage collection because all the
- // closures created since the last census will not affect the profiling
- // statistics anyhow.
- if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV)
- LdvCensusKillAll();
-#endif
-
-#ifdef PROFILING
- // At last... we can output the census info for LDV profiling
- if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
+ if (doingLDVProfiling()) {
nat t;
- int sumVoidNew, sumDragNew;
-
- // Now we compute void_total and drag_total for each census
- sumVoidNew = 0;
- sumDragNew = 0;
- for (t = 1; t < era; t++) { // note: start at 1, not 0
- sumVoidNew += censuses[t].void_total;
- sumDragNew += censuses[t].drag_total;
- censuses[t].void_total = sumVoidNew;
- censuses[t].drag_total = sumDragNew;
- ASSERT( censuses[t].void_total < censuses[t].not_used );
- ASSERT( censuses[t].drag_total < censuses[t].used );
- }
-
- for (t = 1; t < era; t++) { // note: start at 1, not 0
- fprintf(hp_file, "MARK %f\n", censuses[t].time);
- fprintf(hp_file, "BEGIN_SAMPLE %f\n", censuses[t].time);
- fprintf(hp_file, "VOID\t%u\n", censuses[t].void_total * sizeof(W_));
- fprintf(hp_file, "LAG\t%u\n",
- (censuses[t].not_used - censuses[t].void_total) * sizeof(W_));
- fprintf(hp_file, "USE\t%u\n",
- (censuses[t].used - censuses[t].drag_total) * sizeof(W_));
- fprintf(hp_file, "INHERENT_USE\t%u\n",
- censuses[t].prim * sizeof(W_));
- fprintf(hp_file, "DRAG\t%u\n", censuses[t].drag_total * sizeof(W_));
- fprintf(hp_file, "END_SAMPLE %f\n", censuses[t].time);
+ LdvCensusKillAll();
+ aggregateCensusInfo();
+ for (t = 1; t < era; t++) {
+ dumpCensus( &censuses[t] );
}
}
#endif
fclose(hp_file);
}
-#ifdef DEBUG_HEAP_PROF
-/* -----------------------------------------------------------------------------
- Closure Type Profiling;
-
- PROBABLY TOTALLY OUT OF DATE -- ToDo (SDM)
- -------------------------------------------------------------------------- */
-
-static char *type_names[] = {
- "INVALID_OBJECT"
- , "CONSTR"
- , "CONSTR_INTLIKE"
- , "CONSTR_CHARLIKE"
- , "CONSTR_STATIC"
- , "CONSTR_NOCAF_STATIC"
-
- , "FUN"
- , "FUN_STATIC"
-
- , "THUNK"
- , "THUNK_STATIC"
- , "THUNK_SELECTOR"
-
- , "BCO"
- , "AP_UPD"
-
- , "PAP"
-
- , "IND"
- , "IND_OLDGEN"
- , "IND_PERM"
- , "IND_OLDGEN_PERM"
- , "IND_STATIC"
-
- , "RET_BCO"
- , "RET_SMALL"
- , "RET_VEC_SMALL"
- , "RET_BIG"
- , "RET_VEC_BIG"
- , "RET_DYN"
- , "UPDATE_FRAME"
- , "CATCH_FRAME"
- , "STOP_FRAME"
- , "SEQ_FRAME"
-
- , "BLACKHOLE"
- , "BLACKHOLE_BQ"
- , "MVAR"
-
- , "ARR_WORDS"
-
- , "MUT_ARR_PTRS"
- , "MUT_ARR_PTRS_FROZEN"
- , "MUT_VAR"
-
- , "WEAK"
- , "FOREIGN"
-
- , "TSO"
-
- , "BLOCKED_FETCH"
- , "FETCH_ME"
-
- , "EVACUATED"
-};
-
-#endif /* DEBUG_HEAP_PROF */
#ifdef PROFILING
return;
}
+ fprintf(fp, "(%d)", ccs->ccsID);
+
// keep printing components of the stack until we run out of space
// in the buffer. If we run out of space, end with "...".
for (; ccs != NULL && ccs != CCS_MAIN; ccs = ccs->prevStack) {
// CAF cost centres print as M.CAF, but we leave the module
// name out of all the others to save space.
if (!strcmp(ccs->cc->label,"CAF")) {
+#ifdef HAVE_SNPRINTF
written = snprintf(buf+next_offset,
(int)max_length-3-(int)next_offset,
"%s.CAF", ccs->cc->module);
+#else
+ written = sprintf(buf+next_offset,
+ "%s.CAF", ccs->cc->module);
+#endif
} else {
if (ccs->prevStack != NULL && ccs->prevStack != CCS_MAIN) {
template = "%s/";
} else {
template = "%s";
}
+#ifdef HAVE_SNPRINTF
written = snprintf(buf+next_offset,
(int)max_length-3-(int)next_offset,
template, ccs->cc->label);
+#else
+ written = sprintf(buf+next_offset,
+ template, ccs->cc->label);
+#endif
}
if (next_offset+written >= max_length-4) {
}
fprintf(fp, "%s", buf);
}
+#endif // PROFILING
-static rtsBool
-str_matches_selector( char* str, char* sel )
+rtsBool
+strMatchesSelector( char* str, char* sel )
{
char* p;
// fprintf(stderr, "str_matches_selector %s %s\n", str, sel);
}
}
-// Figure out whether a closure should be counted in this census, by
-// testing against all the specified constraints.
+/* -----------------------------------------------------------------------------
+ * Figure out whether a closure should be counted in this census, by
+ * testing against all the specified constraints.
+ * -------------------------------------------------------------------------- */
rtsBool
closureSatisfiesConstraints( StgClosure* p )
{
+#ifdef DEBUG_HEAP_PROF
+ return rtsTrue;
+#else
rtsBool b;
- if (RtsFlags.ProfFlags.modSelector) {
- b = str_matches_selector( ((StgClosure *)p)->header.prof.ccs->cc->module,
- RtsFlags.ProfFlags.modSelector );
- if (!b) return rtsFalse;
+
+ // The CCS has a selected field to indicate whether this closure is
+ // deselected by not being mentioned in the module, CC, or CCS
+ // selectors.
+ if (!p->header.prof.ccs->selected) {
+ return rtsFalse;
}
+
if (RtsFlags.ProfFlags.descrSelector) {
- b = str_matches_selector( (get_itbl((StgClosure *)p))->prof.closure_desc,
+ b = strMatchesSelector( (get_itbl((StgClosure *)p))->prof.closure_desc,
RtsFlags.ProfFlags.descrSelector );
if (!b) return rtsFalse;
}
if (RtsFlags.ProfFlags.typeSelector) {
- b = str_matches_selector( (get_itbl((StgClosure *)p))->prof.closure_type,
+ b = strMatchesSelector( (get_itbl((StgClosure *)p))->prof.closure_type,
RtsFlags.ProfFlags.typeSelector );
if (!b) return rtsFalse;
}
- if (RtsFlags.ProfFlags.ccSelector) {
- b = str_matches_selector( ((StgClosure *)p)->header.prof.ccs->cc->label,
- RtsFlags.ProfFlags.ccSelector );
- if (!b) return rtsFalse;
- }
if (RtsFlags.ProfFlags.retainerSelector) {
RetainerSet *rs;
nat i;
rs = retainerSetOf((StgClosure *)p);
if (rs != NULL) {
for (i = 0; i < rs->num; i++) {
- b = str_matches_selector( rs->element[i]->cc->label,
+ b = strMatchesSelector( rs->element[i]->cc->label,
RtsFlags.ProfFlags.retainerSelector );
if (b) return rtsTrue;
}
return rtsFalse;
}
return rtsTrue;
-}
#endif /* PROFILING */
+}
+
+/* -----------------------------------------------------------------------------
+ * Aggregate the heap census info for biographical profiling
+ * -------------------------------------------------------------------------- */
+#ifdef PROFILING
+static void
+aggregateCensusInfo( void )
+{
+ HashTable *acc;
+ nat t;
+ counter *c, *d, *ctrs;
+ Arena *arena;
+
+ if (!doingLDVProfiling()) return;
+
+ // Aggregate the LDV counters when displaying by biography.
+ if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
+ int void_total, drag_total;
+
+ // Now we compute void_total and drag_total for each census
+ void_total = 0;
+ drag_total = 0;
+ for (t = 1; t < era; t++) { // note: start at 1, not 0
+ void_total += censuses[t].void_total;
+ drag_total += censuses[t].drag_total;
+ censuses[t].void_total = void_total;
+ censuses[t].drag_total = drag_total;
+ ASSERT( censuses[t].void_total <= censuses[t].not_used );
+ ASSERT( censuses[t].drag_total <= censuses[t].used );
+ }
+
+ return;
+ }
+
+ // otherwise... we're doing a heap profile that is restricted to
+ // some combination of lag, drag, void or use. We've kept all the
+ // census info for all censuses so far, but we still need to
+ // aggregate the counters forwards.
+
+ arena = newArena();
+ acc = allocHashTable();
+ ctrs = NULL;
+
+ for (t = 1; t < era; t++) {
+
+ // first look through all the counters we're aggregating
+ for (c = ctrs; c != NULL; c = c->next) {
+ // if one of the totals is non-zero, then this closure
+ // type must be present in the heap at this census time...
+ d = lookupHashTable(censuses[t].hash, (StgWord)c->identity);
+
+ if (d == NULL) {
+ // if this closure identity isn't present in the
+ // census for this time period, then our running
+ // totals *must* be zero.
+ ASSERT(c->c.ldv.void_total == 0 && c->c.ldv.drag_total == 0);
+
+ // fprintCCS(stderr,c->identity);
+ // fprintf(stderr," census=%d void_total=%d drag_total=%d\n",
+ // t, c->c.ldv.void_total, c->c.ldv.drag_total);
+ } else {
+ d->c.ldv.void_total += c->c.ldv.void_total;
+ d->c.ldv.drag_total += c->c.ldv.drag_total;
+ c->c.ldv.void_total = d->c.ldv.void_total;
+ c->c.ldv.drag_total = d->c.ldv.drag_total;
+
+ ASSERT( c->c.ldv.void_total >= 0 );
+ ASSERT( c->c.ldv.drag_total >= 0 );
+ }
+ }
+
+ // now look through the counters in this census to find new ones
+ for (c = censuses[t].ctrs; c != NULL; c = c->next) {
+ d = lookupHashTable(acc, (StgWord)c->identity);
+ if (d == NULL) {
+ d = arenaAlloc( arena, sizeof(counter) );
+ initLDVCtr(d);
+ insertHashTable( acc, (StgWord)c->identity, d );
+ d->identity = c->identity;
+ d->next = ctrs;
+ ctrs = d;
+ d->c.ldv.void_total = c->c.ldv.void_total;
+ d->c.ldv.drag_total = c->c.ldv.drag_total;
+ }
+ ASSERT( c->c.ldv.void_total >= 0 );
+ ASSERT( c->c.ldv.drag_total >= 0 );
+ }
+ }
+
+ freeHashTable(acc, NULL);
+ arenaFree(arena);
+}
+#endif
/* -----------------------------------------------------------------------------
* Print out the results of a heap census.
dumpCensus( Census *census )
{
counter *ctr;
+ int count;
+
+ fprintf(hp_file, "BEGIN_SAMPLE %0.2f\n", census->time);
#ifdef PROFILING
- // We can't generate any info for LDV profiling until
- // the end of the run...
- if (doingLDVProfiling()) { return; }
+ if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
+ fprintf(hp_file, "VOID\t%u\n", census->void_total * sizeof(W_));
+ fprintf(hp_file, "LAG\t%u\n",
+ (census->not_used - census->void_total) * sizeof(W_));
+ fprintf(hp_file, "USE\t%u\n",
+ (census->used - census->drag_total) * sizeof(W_));
+ fprintf(hp_file, "INHERENT_USE\t%u\n",
+ census->prim * sizeof(W_));
+ fprintf(hp_file, "DRAG\t%u\n", census->drag_total *
+ sizeof(W_));
+ fprintf(hp_file, "END_SAMPLE %0.2f\n", census->time);
+ return;
+ }
#endif
- fprintf(hp_file, "BEGIN_SAMPLE %0.2f\n", census->time);
-
for (ctr = census->ctrs; ctr != NULL; ctr = ctr->next) {
+#ifdef PROFILING
+ if (RtsFlags.ProfFlags.bioSelector != NULL) {
+ count = 0;
+ if (strMatchesSelector("lag", RtsFlags.ProfFlags.bioSelector))
+ count += ctr->c.ldv.not_used - ctr->c.ldv.void_total;
+ if (strMatchesSelector("drag", RtsFlags.ProfFlags.bioSelector))
+ count += ctr->c.ldv.drag_total;
+ if (strMatchesSelector("void", RtsFlags.ProfFlags.bioSelector))
+ count += ctr->c.ldv.void_total;
+ if (strMatchesSelector("use", RtsFlags.ProfFlags.bioSelector))
+ count += ctr->c.ldv.used - ctr->c.ldv.drag_total;
+ } else
+#endif
+ {
+ count = ctr->c.resid;
+ }
+
+ ASSERT( count >= 0 );
+
+ if (count == 0) continue;
+
#ifdef DEBUG_HEAP_PROF
switch (RtsFlags.ProfFlags.doHeapProfile) {
case HEAP_BY_INFOPTR:
- fprint_data(hp_file);
+ fprintf(hp_file, "%s", lookupGHCName(ctr->identity));
break;
case HEAP_BY_CLOSURE_TYPE:
- fprint_closure_types(hp_file);
+ fprintf(hp_file, "%s", (char *)ctr->identity);
break;
}
#endif
#ifdef PROFILING
switch (RtsFlags.ProfFlags.doHeapProfile) {
case HEAP_BY_CCS:
- fprint_ccs(hp_file, (CostCentreStack *)ctr->identity, 30);
+ fprint_ccs(hp_file, (CostCentreStack *)ctr->identity, 25);
break;
case HEAP_BY_MOD:
case HEAP_BY_DESCR:
}
#endif
- fprintf(hp_file, "\t%d\n", ctr->c.resid * sizeof(W_));
+ fprintf(hp_file, "\t%d\n", count * sizeof(W_));
}
fprintf(hp_file, "END_SAMPLE %0.2f\n", census->time);
#ifdef DEBUG_HEAP_PROF
real_size = size;
- switch (RtsFlags.ProfFlags.doHeapProfile) {
- case HEAP_BY_INFOPTR:
- identity = (void *)((StgClosure *)p)->header.info;
- break;
- case HEAP_BY_CLOSURE_TYPE:
- identity = type_names[info->type];
- break;
- default:
- barf("heapCensus; doHeapProfile");
- }
-#endif
-
-#ifdef PROFILING
+#else
// subtract the profiling overhead
real_size = size - sizeofW(StgProfHeader);
+#endif
if (closureSatisfiesConstraints((StgClosure*)p)) {
- switch (RtsFlags.ProfFlags.doHeapProfile) {
- case HEAP_BY_CCS:
- identity = ((StgClosure *)p)->header.prof.ccs;
- break;
- case HEAP_BY_MOD:
- identity = ((StgClosure *)p)->header.prof.ccs->cc->module;
- break;
- case HEAP_BY_DESCR:
- identity = (get_itbl((StgClosure *)p))->prof.closure_desc;
- break;
- case HEAP_BY_TYPE:
- identity = (get_itbl((StgClosure *)p))->prof.closure_type;
- break;
- case HEAP_BY_RETAINER:
- identity = retainerSetOf((StgClosure *)p);
- break;
- case HEAP_BY_LDV:
+#ifdef PROFILING
+ if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
if (prim)
census->prim += real_size;
else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
census->not_used += real_size;
else
census->used += real_size;
- // NOTE: don't break here. We're not using the
- // hash table.
- p += size;
- continue;
- default:
- barf("heapCensus; doHeapProfile");
- }
- }
+ } else
#endif
+ {
+ identity = closureIdentity((StgClosure *)p);
- if (identity != NULL) {
- ctr = lookupHashTable( census->hash, (StgWord)identity );
- if (ctr != NULL) {
- ctr->c.resid += real_size;
- } else {
- ctr = arenaAlloc( census->arena, sizeof(counter) );
- insertHashTable( census->hash, (StgWord)identity, ctr );
- ctr->c.resid = real_size;
- ctr->identity = identity;
- ctr->next = census->ctrs;
- census->ctrs = ctr;
+ if (identity != NULL) {
+ ctr = lookupHashTable( census->hash, (StgWord)identity );
+ if (ctr != NULL) {
+#ifdef PROFILING
+ if (RtsFlags.ProfFlags.bioSelector != NULL) {
+ if (prim)
+ ctr->c.ldv.prim += real_size;
+ else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
+ ctr->c.ldv.not_used += real_size;
+ else
+ ctr->c.ldv.used += real_size;
+ } else
+#endif
+ {
+ ctr->c.resid += real_size;
+ }
+ } else {
+ ctr = arenaAlloc( census->arena, sizeof(counter) );
+ initLDVCtr(ctr);
+ insertHashTable( census->hash, (StgWord)identity, ctr );
+ ctr->identity = identity;
+ ctr->next = census->ctrs;
+ census->ctrs = ctr;
+
+#ifdef PROFILING
+ if (RtsFlags.ProfFlags.bioSelector != NULL) {
+ if (prim)
+ ctr->c.ldv.prim = real_size;
+ else if ((LDVW(p) & LDV_STATE_MASK) == LDV_STATE_CREATE)
+ ctr->c.ldv.not_used = real_size;
+ else
+ ctr->c.ldv.used = real_size;
+ } else
+#endif
+ {
+ ctr->c.resid = real_size;
+ }
+ }
+ }
}
}
nat g, s;
Census *census;
- stat_startHeapCensus();
-
census = &censuses[era];
census->time = mut_user_time();
- census->hash = allocHashTable();
- census->ctrs = NULL;
- census->arena = newArena();
// calculate retainer sets if necessary
#ifdef PROFILING
}
#endif
- // traverse the heap, collecting the census info
+#ifdef PROFILING
+ stat_startHeapCensus();
+#endif
+
+ // Traverse the heap, collecting the census info
+
+ // First the small_alloc_list: we have to fix the free pointer at
+ // the end by calling tidyAllocateLists() first.
+ tidyAllocateLists();
heapCensusChain( census, small_alloc_list );
+
+ // Now traverse the heap in each generation/step.
if (RtsFlags.GcFlags.generations == 1) {
heapCensusChain( census, g0s0->to_blocks );
} else {
}
// dump out the census info
- dumpCensus( census );
+#ifdef PROFILING
+ // We can't generate any info for LDV profiling until
+ // the end of the run...
+ if (!doingLDVProfiling())
+ dumpCensus( census );
+#else
+ dumpCensus( census );
+#endif
- // free our storage
- freeHashTable(census->hash, NULL/* don't free the elements */);
- arenaFree(census->arena);
+
+ // free our storage, unless we're keeping all the census info for
+ // future restriction by biography.
+#ifdef PROFILING
+ if (RtsFlags.ProfFlags.bioSelector == NULL)
+#endif
+ {
+ freeHashTable( census->hash, NULL/* don't free the elements */ );
+ arenaFree( census->arena );
+ census->hash = NULL;
+ census->arena = NULL;
+ }
// we're into the next time period now
nextEra();
+#ifdef PROFILING
stat_endHeapCensus();
+#endif
}
#endif /* PROFILING || DEBUG_HEAP_PROF */