/* -----------------------------------------------------------------------------
- * $Id: ProfHeap.c,v 1.30 2001/11/28 15:43:23 simonmar Exp $
+ * $Id: ProfHeap.c,v 1.50 2003/11/12 17:49:08 sof Exp $
*
- * (c) The GHC Team, 1998-2000
+ * (c) The GHC Team, 1998-2003
*
* Support for heap profiling
*
#include "Arena.h"
#include "Printer.h"
+#include <string.h>
+#include <stdlib.h>
+
/* -----------------------------------------------------------------------------
* era stores the current time period. It is the same as the
* number of censuses that have been performed.
* RESTRICTION:
* era must be no longer than LDV_SHIFT (15 or 30) bits.
* Invariants:
- * era is initialized to 0 in initHeapProfiling().
+ * era is initialized to 1 in initHeapProfiling().
*
* max_era is initialized to 2^LDV_SHIFT in initHeapProfiling().
* When era reaches max_era, the profiling stops because a closure can
struct _counter *next;
} counter;
-static inline void
+STATIC_INLINE void
initLDVCtr( counter *ctr )
{
ctr->c.ldv.prim = 0;
int drag_total;
} Census;
-Census *censuses = NULL;
-nat n_censuses = 0;
+static Census *censuses = NULL;
+static nat n_censuses = 0;
#ifdef PROFILING
static void aggregateCensusInfo( void );
, "THUNK_SELECTOR"
, "BCO"
- , "AP_UPD"
+ , "AP_STACK"
+ , "AP"
, "PAP"
, "UPDATE_FRAME"
, "CATCH_FRAME"
, "STOP_FRAME"
- , "SEQ_FRAME"
, "BLACKHOLE"
, "BLACKHOLE_BQ"
* the band to which this closure's heap space is attributed in the
* heap profile.
* ------------------------------------------------------------------------- */
-static inline void *
+STATIC_INLINE void *
closureIdentity( StgClosure *p )
{
switch (RtsFlags.ProfFlags.doHeapProfile) {
#ifdef PROFILING
case HEAP_BY_CCS:
- return ((StgClosure *)p)->header.prof.ccs;
+ return p->header.prof.ccs;
case HEAP_BY_MOD:
- return ((StgClosure *)p)->header.prof.ccs->cc->module;
+ return p->header.prof.ccs->cc->module;
case HEAP_BY_DESCR:
- return (get_itbl((StgClosure *)p))->prof.closure_desc;
+ return get_itbl(p)->prof.closure_desc;
case HEAP_BY_TYPE:
- return (get_itbl((StgClosure *)p))->prof.closure_type;
+ return get_itbl(p)->prof.closure_type;
case HEAP_BY_RETAINER:
- return retainerSetOf((StgClosure *)p);
+ // AFAIK, the only closures in the heap which might not have a
+ // valid retainer set are DEAD_WEAK closures.
+ if (isRetainerSetFieldValid(p))
+ return retainerSetOf(p);
+ else
+ return NULL;
+
#else // DEBUG
case HEAP_BY_INFOPTR:
return (void *)((StgClosure *)p)->header.info;
case HEAP_BY_CLOSURE_TYPE:
return type_names[get_itbl(p)->type];
+
#endif
default:
barf("closureIdentity");
* Profiling type predicates
* ----------------------------------------------------------------------- */
#ifdef PROFILING
-static inline rtsBool
+STATIC_INLINE rtsBool
doingLDVProfiling( void )
{
return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
|| RtsFlags.ProfFlags.bioSelector != NULL);
}
-static inline rtsBool
+STATIC_INLINE rtsBool
doingRetainerProfiling( void )
{
return (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER
/* --------------------------------------------------------------------------
* Initialize censuses[era];
* ----------------------------------------------------------------------- */
-static inline void
+STATIC_INLINE void
initEra(Census *census)
{
census->hash = allocHashTable();
era++;
if (era == max_era) {
- barf("maximum number of censuses reached; use +RTS -i to reduce");
+ prog_belch("maximum number of censuses reached; use +RTS -i to reduce");
+ stg_exit(EXIT_FAILURE);
}
if (era == n_censuses) {
}
}
#endif // PROFILING
-
+
initEra( &censuses[era] );
}
#ifdef DEBUG_HEAP_PROF
FILE *hp_file;
+static char *hp_filename;
void initProfiling1( void )
{
void initProfiling2( void )
{
+ if (RtsFlags.ProfFlags.doHeapProfile) {
+ /* Initialise the log file name */
+ hp_filename = stgMallocBytes(strlen(prog_name) + 6, "hpFileName");
+ sprintf(hp_filename, "%s.hp", prog_name);
+
+ /* open the log file */
+ if ((hp_file = fopen(hp_filename, "w")) == NULL) {
+ fprintf(stderr, "Can't open profiling report file %s\n",
+ hp_filename);
+ RtsFlags.ProfFlags.doHeapProfile = 0;
+ return;
+ }
+ }
+
initHeapProfiling();
}
return 0;
}
+#ifdef PROFILING
+ if (doingLDVProfiling() && doingRetainerProfiling()) {
+ prog_belch("cannot mix -hb and -hr");
+ stg_exit(1);
+ }
+#endif
+
// we only count eras if we're doing LDV profiling. Otherwise era
// is fixed at zero.
#ifdef PROFILING
initEra( &censuses[era] );
- fprintf(hp_file, "JOB \"%s", prog_argv[0]);
+ /* initProfilingLogFile(); */
+ fprintf(hp_file, "JOB \"%s", prog_name);
#ifdef PROFILING
{
fprintf(hp_file, "END_SAMPLE 0.00\n");
#ifdef DEBUG_HEAP_PROF
- DEBUG_LoadSymbols(prog_argv[0]);
+ DEBUG_LoadSymbols(prog_name);
#endif
#ifdef PROFILING
#endif
#ifdef PROFILING
- // Note:
- // We do not need to perform a major garbage collection because all the
- // closures created since the last census will not affect the profiling
- // statistics anyhow.
- if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV)
- LdvCensusKillAll();
-#endif
-
-#ifdef PROFILING
- if (RtsFlags.ProfFlags.bioSelector != NULL) {
+ if (doingLDVProfiling()) {
nat t;
+ LdvCensusKillAll();
aggregateCensusInfo();
for (t = 1; t < era; t++) {
dumpCensus( &censuses[t] );
#ifdef PROFILING
+static size_t
+buf_append(char *p, const char *q, char *end)
+{
+ int m;
+
+ for (m = 0; p < end; p++, q++, m++) {
+ *p = *q;
+ if (*q == '\0') { break; }
+ }
+ return m;
+}
+
static void
fprint_ccs(FILE *fp, CostCentreStack *ccs, nat max_length)
{
- char buf[max_length+1];
+ char buf[max_length+1], *p, *buf_end;
nat next_offset = 0;
nat written;
- char *template;
// MAIN on its own gets printed as "MAIN", otherwise we ignore MAIN.
if (ccs == CCS_MAIN) {
return;
}
+ fprintf(fp, "(%d)", ccs->ccsID);
+
+ p = buf;
+ buf_end = buf + max_length + 1;
+
// keep printing components of the stack until we run out of space
// in the buffer. If we run out of space, end with "...".
for (; ccs != NULL && ccs != CCS_MAIN; ccs = ccs->prevStack) {
// CAF cost centres print as M.CAF, but we leave the module
// name out of all the others to save space.
if (!strcmp(ccs->cc->label,"CAF")) {
- written = snprintf(buf+next_offset,
- (int)max_length-3-(int)next_offset,
- "%s.CAF", ccs->cc->module);
+ p += buf_append(p, ccs->cc->module, buf_end);
+ p += buf_append(p, ".CAF", buf_end);
} else {
if (ccs->prevStack != NULL && ccs->prevStack != CCS_MAIN) {
- template = "%s/";
- } else {
- template = "%s";
+ p += buf_append(p, "/", buf_end);
}
- written = snprintf(buf+next_offset,
- (int)max_length-3-(int)next_offset,
- template, ccs->cc->label);
+ p += buf_append(p, ccs->cc->label, buf_end);
}
-
- if (next_offset+written >= max_length-4) {
+
+ if (p >= buf_end) {
sprintf(buf+max_length-4, "...");
break;
} else {
}
fprintf(fp, "%s", buf);
}
+#endif // PROFILING
-static rtsBool
-str_matches_selector( char* str, char* sel )
+rtsBool
+strMatchesSelector( char* str, char* sel )
{
char* p;
// fprintf(stderr, "str_matches_selector %s %s\n", str, sel);
if (*sel == '\0') return rtsFalse;
}
}
-#endif // PROFILING
/* -----------------------------------------------------------------------------
* Figure out whether a closure should be counted in this census, by
return rtsTrue;
#else
rtsBool b;
- if (RtsFlags.ProfFlags.modSelector) {
- b = str_matches_selector( ((StgClosure *)p)->header.prof.ccs->cc->module,
- RtsFlags.ProfFlags.modSelector );
- if (!b) return rtsFalse;
+
+ // The CCS has a selected field to indicate whether this closure is
+ // deselected by not being mentioned in the module, CC, or CCS
+ // selectors.
+ if (!p->header.prof.ccs->selected) {
+ return rtsFalse;
}
+
if (RtsFlags.ProfFlags.descrSelector) {
- b = str_matches_selector( (get_itbl((StgClosure *)p))->prof.closure_desc,
+ b = strMatchesSelector( (get_itbl((StgClosure *)p))->prof.closure_desc,
RtsFlags.ProfFlags.descrSelector );
if (!b) return rtsFalse;
}
if (RtsFlags.ProfFlags.typeSelector) {
- b = str_matches_selector( (get_itbl((StgClosure *)p))->prof.closure_type,
+ b = strMatchesSelector( (get_itbl((StgClosure *)p))->prof.closure_type,
RtsFlags.ProfFlags.typeSelector );
if (!b) return rtsFalse;
}
- if (RtsFlags.ProfFlags.ccSelector) {
- b = str_matches_selector( ((StgClosure *)p)->header.prof.ccs->cc->label,
- RtsFlags.ProfFlags.ccSelector );
- if (!b) return rtsFalse;
- }
if (RtsFlags.ProfFlags.retainerSelector) {
RetainerSet *rs;
nat i;
- rs = retainerSetOf((StgClosure *)p);
- if (rs != NULL) {
- for (i = 0; i < rs->num; i++) {
- b = str_matches_selector( rs->element[i]->cc->label,
- RtsFlags.ProfFlags.retainerSelector );
- if (b) return rtsTrue;
+ // We must check that the retainer set is valid here. One
+ // reason it might not be valid is if this closure is a
+ // newly deceased weak pointer (i.e. a DEAD_WEAK), since
+ // these aren't reached by the retainer profiler's traversal.
+ if (isRetainerSetFieldValid((StgClosure *)p)) {
+ rs = retainerSetOf((StgClosure *)p);
+ if (rs != NULL) {
+ for (i = 0; i < rs->num; i++) {
+ b = strMatchesSelector( rs->element[i]->cc->label,
+ RtsFlags.ProfFlags.retainerSelector );
+ if (b) return rtsTrue;
+ }
}
}
return rtsFalse;
drag_total += censuses[t].drag_total;
censuses[t].void_total = void_total;
censuses[t].drag_total = drag_total;
- ASSERT( censuses[t].void_total < censuses[t].not_used );
- ASSERT( censuses[t].drag_total < censuses[t].used );
- }
-
- for (t = 1; t < era; t++) { // note: start at 1, not 0
- fprintf(hp_file, "MARK %f\n", censuses[t].time);
- fprintf(hp_file, "BEGIN_SAMPLE %f\n", censuses[t].time);
- fprintf(hp_file, "VOID\t%u\n", censuses[t].void_total * sizeof(W_));
- fprintf(hp_file, "LAG\t%u\n",
- (censuses[t].not_used - censuses[t].void_total)
- * sizeof(W_));
- fprintf(hp_file, "USE\t%u\n",
- (censuses[t].used - censuses[t].drag_total) * sizeof(W_));
- fprintf(hp_file, "INHERENT_USE\t%u\n",
- censuses[t].prim * sizeof(W_));
- fprintf(hp_file, "DRAG\t%u\n", censuses[t].drag_total * sizeof(W_));
- fprintf(hp_file, "END_SAMPLE %f\n", censuses[t].time);
+ ASSERT( censuses[t].void_total <= censuses[t].not_used );
+ ASSERT( censuses[t].drag_total <= censuses[t].used );
}
return;
fprintf(hp_file, "BEGIN_SAMPLE %0.2f\n", census->time);
+#ifdef PROFILING
+ if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
+ fprintf(hp_file, "VOID\t%u\n", census->void_total * sizeof(W_));
+ fprintf(hp_file, "LAG\t%u\n",
+ (census->not_used - census->void_total) * sizeof(W_));
+ fprintf(hp_file, "USE\t%u\n",
+ (census->used - census->drag_total) * sizeof(W_));
+ fprintf(hp_file, "INHERENT_USE\t%u\n",
+ census->prim * sizeof(W_));
+ fprintf(hp_file, "DRAG\t%u\n", census->drag_total *
+ sizeof(W_));
+ fprintf(hp_file, "END_SAMPLE %0.2f\n", census->time);
+ return;
+ }
+#endif
+
for (ctr = census->ctrs; ctr != NULL; ctr = ctr->next) {
#ifdef PROFILING
if (RtsFlags.ProfFlags.bioSelector != NULL) {
count = 0;
- if (str_matches_selector("lag", RtsFlags.ProfFlags.bioSelector))
+ if (strMatchesSelector("lag", RtsFlags.ProfFlags.bioSelector))
count += ctr->c.ldv.not_used - ctr->c.ldv.void_total;
- if (str_matches_selector("drag", RtsFlags.ProfFlags.bioSelector))
+ if (strMatchesSelector("drag", RtsFlags.ProfFlags.bioSelector))
count += ctr->c.ldv.drag_total;
- if (str_matches_selector("void", RtsFlags.ProfFlags.bioSelector))
+ if (strMatchesSelector("void", RtsFlags.ProfFlags.bioSelector))
count += ctr->c.ldv.void_total;
- if (str_matches_selector("use", RtsFlags.ProfFlags.bioSelector))
+ if (strMatchesSelector("use", RtsFlags.ProfFlags.bioSelector))
count += ctr->c.ldv.used - ctr->c.ldv.drag_total;
} else
#endif
#ifdef PROFILING
switch (RtsFlags.ProfFlags.doHeapProfile) {
case HEAP_BY_CCS:
- fprint_ccs(hp_file, (CostCentreStack *)ctr->identity, 30);
+ fprint_ccs(hp_file, (CostCentreStack *)ctr->identity, 25);
break;
case HEAP_BY_MOD:
case HEAP_BY_DESCR:
rtsBool prim;
for (; bd != NULL; bd = bd->link) {
+
+ // HACK: ignore pinned blocks, because they contain gaps.
+ // It's not clear exactly what we'd like to do here, since we
+ // can't tell which objects in the block are actually alive.
+ // Perhaps the whole block should be counted as SYSTEM memory.
+ if (bd->flags & BF_PINNED) {
+ continue;
+ }
+
p = bd->start;
while (p < bd->free) {
info = get_itbl((StgClosure *)p);
case FUN:
case THUNK:
case IND_PERM:
+ case IND_OLDGEN:
case IND_OLDGEN_PERM:
case CAF_BLACKHOLE:
case SE_CAF_BLACKHOLE:
break;
case BCO:
+ prim = rtsTrue;
+ size = bco_sizeW((StgBCO *)p);
+ break;
+
case MVAR:
case WEAK:
case FOREIGN:
size = sizeofW(StgHeader) + MIN_UPD_SIZE;
break;
+ case AP:
case PAP:
- case AP_UPD:
size = pap_sizeW((StgPAP *)p);
break;
+
+ case AP_STACK:
+ size = ap_stack_sizeW((StgAP_STACK *)p);
+ break;
case ARR_WORDS:
prim = rtsTrue;
case TSO:
prim = rtsTrue;
+#ifdef DEBUG_HEAP_PROF
size = tso_sizeW((StgTSO *)p);
break;
-
+#else
+ if (RtsFlags.ProfFlags.includeTSOs) {
+ size = tso_sizeW((StgTSO *)p);
+ break;
+ } else {
+ // Skip this TSO and move on to the next object
+ p += tso_sizeW((StgTSO *)p);
+ continue;
+ }
+#endif
+
default:
barf("heapCensus");
}
stat_startHeapCensus();
#endif
- // traverse the heap, collecting the census info
+ // Traverse the heap, collecting the census info
+
+ // First the small_alloc_list: we have to fix the free pointer at
+ // the end by calling tidyAllocateLists() first.
+ tidyAllocateLists();
heapCensusChain( census, small_alloc_list );
+
+ // Now traverse the heap in each generation/step.
if (RtsFlags.GcFlags.generations == 1) {
heapCensusChain( census, g0s0->to_blocks );
} else {
heapCensusChain( census, generations[g].steps[s].blocks );
// Are we interested in large objects? might be
// confusing to include the stack in a heap profile.
- // heapCensusChain( census, generations[g].steps[s].large_objects );
+ heapCensusChain( census, generations[g].steps[s].large_objects );
}
}
}