1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2005
5 * Statistics and timing-related functions.
7 * ---------------------------------------------------------------------------*/
9 #include "PosixSource.h"
15 #include "Profiling.h"
17 #include "sm/Storage.h"
18 #include "sm/GC.h" // gc_alloc_block_sync, whitehole_spin
19 #include "sm/GCThread.h"
20 #include "sm/BlockAlloc.h"
27 #define BIG_STRING_LEN 512
29 #define TICK_TO_DBL(t) ((double)(t) / TICKS_PER_SECOND)
32 start_init_cpu, start_init_elapsed,
33 end_init_cpu, end_init_elapsed,
34 start_exit_cpu, start_exit_elapsed,
35 end_exit_cpu, end_exit_elapsed;
// Cumulative CPU time spent in GC since program start.
37 static Ticks GC_tot_cpu = 0;
// Program-wide totals: words allocated / copied over all GCs, plus the
// per-GC max/avg copied per thread (feeds the parallel work-balance figure).
39 static StgWord64 GC_tot_alloc = 0;
40 static StgWord64 GC_tot_copied = 0;
42 static StgWord64 GC_par_max_copied = 0;
43 static StgWord64 GC_par_avg_copied = 0;
// Profiling-time accounting (retainer profiling and heap census).
46 static Ticks RP_start_time = 0, RP_tot_time = 0; // retainer prof user time
47 static Ticks RPe_start_time = 0, RPe_tot_time = 0; // retainer prof elap time
49 static Ticks HC_start_time, HC_tot_time = 0; // heap census prof user time
50 static Ticks HCe_start_time, HCe_tot_time = 0; // heap census prof elap time
// PROF_VAL(x) expands to its argument here; presumably a non-profiling
// build defines it as 0 elsewhere (the #if context is not visible) -- TODO confirm.
54 #define PROF_VAL(x) (x)
// Residency (live heap) statistics, in words, sampled at major GCs.
59 static lnat max_residency = 0; // in words; for stats only
60 static lnat avg_residency = 0;
61 static lnat residency_samples = 0; // for stats only
62 static lnat max_slop = 0;
// Page-fault count recorded at the end of the previous GC, used to split
// fault counts between mutator and GC in the verbose per-GC report.
64 static lnat GC_end_faults = 0;
// Per-generation GC timing arrays; allocated in initStats1(), freed in stat_exit().
66 static Ticks *GC_coll_cpu = NULL;
67 static Ticks *GC_coll_elapsed = NULL;
68 static Ticks *GC_coll_max_pause = NULL;
// Forward declarations for the stats-file helpers at the bottom of the file.
70 static void statsFlush( void );
71 static void statsClose( void );
73 /* -----------------------------------------------------------------------------
75 ------------------------------------------------------------------------- */
77 Ticks stat_getElapsedTime(void)
79 return getProcessElapsedTime() - start_init_elapsed;
82 /* ---------------------------------------------------------------------------
83 Measure the current MUT time, for profiling
84 ------------------------------------------------------------------------ */
90 cpu = getProcessCPUTime();
91 return TICK_TO_DBL(cpu - GC_tot_cpu - PROF_VAL(RP_tot_time + HC_tot_time));
#ifdef PROFILING
/*
  mut_user_time_during_RP() returns the MUT time during retainer profiling.
  The same is for mut_user_time_during_heap_census(): mutator CPU time up
  to the start of the current profiling pass, excluding GC and earlier
  profiling passes. (Fix: comment delimiters, signatures, braces and the
  PROFILING guard were lost in extraction.)
*/
double
mut_user_time_during_RP( void )
{
    return TICK_TO_DBL(RP_start_time - GC_tot_cpu - RP_tot_time - HC_tot_time);
}

double
mut_user_time_during_heap_census( void )
{
    return TICK_TO_DBL(HC_start_time - GC_tot_cpu - RP_tot_time - HC_tot_time);
}
#endif /* PROFILING */
112 /* ---------------------------------------------------------------------------
113 initStats0() has no dependencies, it can be called right at the beginning
114 ------------------------------------------------------------------------ */
120 start_init_elapsed = 0;
122 end_init_elapsed = 0;
125 start_exit_elapsed = 0;
127 end_exit_elapsed = 0;
131 GC_par_max_copied = 0;
132 GC_par_avg_copied = 0;
149 residency_samples = 0;
155 /* ---------------------------------------------------------------------------
156 initStats1() can be called after setupRtsFlags()
157 ------------------------------------------------------------------------ */
164 if (RtsFlags.GcFlags.giveStats >= VERBOSE_GC_STATS) {
165 statsPrintf(" Alloc Copied Live GC GC TOT TOT Page Flts\n");
166 statsPrintf(" bytes bytes bytes user elap user elap\n");
169 (Ticks *)stgMallocBytes(
170 sizeof(Ticks)*RtsFlags.GcFlags.generations,
173 (Ticks *)stgMallocBytes(
174 sizeof(Ticks)*RtsFlags.GcFlags.generations,
177 (Ticks *)stgMallocBytes(
178 sizeof(Ticks)*RtsFlags.GcFlags.generations,
180 for (i = 0; i < RtsFlags.GcFlags.generations; i++) {
182 GC_coll_elapsed[i] = 0;
183 GC_coll_max_pause[i] = 0;
187 /* -----------------------------------------------------------------------------
188 Initialisation time...
189 -------------------------------------------------------------------------- */
194 getProcessTimes(&start_init_cpu, &start_init_elapsed);
200 getProcessTimes(&end_init_cpu, &end_init_elapsed);
203 /* We start counting events for the mutator
204 * when garbage collection starts
205 * we switch to the GC event set. */
206 papi_start_mutator_count();
208 /* This flag is needed to avoid counting the last GC */
209 papi_is_reporting = 1;
214 /* -----------------------------------------------------------------------------
215 stat_startExit and stat_endExit
217 These two measure the time taken in shutdownHaskell().
218 -------------------------------------------------------------------------- */
223 getProcessTimes(&start_exit_cpu, &start_exit_elapsed);
226 /* We stop counting mutator events
227 * GC events are not being counted at this point */
228 papi_stop_mutator_count();
230 /* This flag is needed, because GC is run once more after this function */
231 papi_is_reporting = 0;
238 getProcessTimes(&end_exit_cpu, &end_exit_elapsed);
241 /* -----------------------------------------------------------------------------
242 Called at the beginning of each GC
243 -------------------------------------------------------------------------- */
245 static nat rub_bell = 0;
248 stat_startGC (gc_thread *gct)
250 nat bell = RtsFlags.GcFlags.ringBell;
262 if(papi_is_reporting) {
263 /* Switch to counting GC events */
264 papi_stop_mutator_count();
265 papi_start_gc_count();
269 getProcessTimes(&gct->gc_start_cpu, &gct->gc_start_elapsed);
270 gct->gc_start_thread_cpu = getThreadCPUTime();
272 if (RtsFlags.GcFlags.giveStats != NO_GC_STATS)
274 gct->gc_start_faults = getPageFaults();
279 stat_gcWorkerThreadStart (gc_thread *gct)
281 if (RtsFlags.GcFlags.giveStats != NO_GC_STATS)
283 getProcessTimes(&gct->gc_start_cpu, &gct->gc_start_elapsed);
284 gct->gc_start_thread_cpu = getThreadCPUTime();
289 stat_gcWorkerThreadDone (gc_thread *gct)
291 Ticks thread_cpu, elapsed, gc_cpu, gc_elapsed;
293 if (RtsFlags.GcFlags.giveStats != NO_GC_STATS)
295 elapsed = getProcessElapsedTime();
296 thread_cpu = getThreadCPUTime();
298 gc_cpu = thread_cpu - gct->gc_start_thread_cpu;
299 gc_elapsed = elapsed - gct->gc_start_elapsed;
301 taskDoneGC(gct->cap->running_task, gc_cpu, gc_elapsed);
305 /* -----------------------------------------------------------------------------
306 Called at the end of each GC
307 -------------------------------------------------------------------------- */
// stat_endGC: called at the end of each GC to accumulate timing,
// allocation and residency statistics for this collection.
// NOTE(review): this block is truncated by extraction -- braces and a few
// lines are missing from the visible text; comments below describe only
// what the visible lines establish.
310 stat_endGC (gc_thread *gct,
311 lnat alloc, lnat live, lnat copied, nat gen,
312 lnat max_copied, lnat avg_copied, lnat slop)
// Stats are gathered when requested, or when heap profiling needs GC_tot_time.
314 if (RtsFlags.GcFlags.giveStats != NO_GC_STATS ||
315 RtsFlags.ProfFlags.doHeapProfile)
316 // heap profiling needs GC_tot_time
318 Ticks cpu, elapsed, thread_gc_cpu, gc_cpu, gc_elapsed;
320 getProcessTimes(&cpu, &elapsed);
// Deltas relative to the baselines recorded in stat_startGC().
321 gc_elapsed = elapsed - gct->gc_start_elapsed;
323 thread_gc_cpu = getThreadCPUTime() - gct->gc_start_thread_cpu;
325 gc_cpu = cpu - gct->gc_start_cpu;
// Credit this GC's time to the task that performed it.
327 taskDoneGC(gct->cap->running_task, thread_gc_cpu, gc_elapsed);
// Verbose (-S) mode: print one report line per GC.
329 if (RtsFlags.GcFlags.giveStats == VERBOSE_GC_STATS) {
330 nat faults = getPageFaults();
332 statsPrintf("%9ld %9ld %9ld",
333 alloc*sizeof(W_), copied*sizeof(W_),
335 statsPrintf(" %5.2f %5.2f %7.2f %7.2f %4ld %4ld (Gen: %2d)\n",
337 TICK_TO_DBL(gc_elapsed),
339 TICK_TO_DBL(elapsed - start_init_elapsed),
// Faults during this GC vs. faults in the mutator since the previous GC.
340 faults - gct->gc_start_faults,
341 gct->gc_start_faults - GC_end_faults,
344 GC_end_faults = faults;
// Accumulate per-generation totals and track the worst pause.
348 GC_coll_cpu[gen] += gc_cpu;
349 GC_coll_elapsed[gen] += gc_elapsed;
350 if (GC_coll_max_pause[gen] < gc_elapsed) {
351 GC_coll_max_pause[gen] = gc_elapsed;
// Program-wide totals used by stat_exit().
354 GC_tot_copied += (StgWord64) copied;
355 GC_tot_alloc += (StgWord64) alloc;
356 GC_par_max_copied += (StgWord64) max_copied;
357 GC_par_avg_copied += (StgWord64) avg_copied;
358 GC_tot_cpu += gc_cpu;
// Residency is only sampled when the oldest generation was collected
// (a major GC), per the comment on the original line.
360 if (gen == RtsFlags.GcFlags.generations-1) { /* major GC? */
361 if (live > max_residency) {
362 max_residency = live;
365 avg_residency += live;
368 if (slop > max_slop) max_slop = slop;
// Rub out the " GC " marker printed by stat_startGC (ringBell > 1 mode).
372 debugBelch("\b\b\b \b\b\b");
// PAPI: switch event counting back from GC to mutator events;
// presumably guarded by USE_PAPI in the full source -- TODO confirm.
377 if(papi_is_reporting) {
378 /* Switch to counting mutator events */
380 papi_stop_gc0_count();
382 papi_stop_gc1_count();
384 papi_start_mutator_count();
389 /* -----------------------------------------------------------------------------
390 Called at the beginning of each Retainer Profiliing
391 -------------------------------------------------------------------------- */
#ifdef PROFILING
/* Record the start time of a retainer-profiling pass.
 * (Fix: signature, braces and the PROFILING guard were lost in extraction.) */
void
stat_startRP(void)
{
    Ticks user, elapsed;

    getProcessTimes( &user, &elapsed );

    RP_start_time = user;
    RPe_start_time = elapsed;
}
#endif /* PROFILING */
404 /* -----------------------------------------------------------------------------
405 Called at the end of each Retainer Profiliing
406 -------------------------------------------------------------------------- */
#ifdef PROFILING
/* Finish a retainer-profiling pass: accumulate its user/elapsed time and
 * write a summary to the profiling output file.
 * (Fix: maxStackSize is an int but was printed with %u -- use %d.) */
void
stat_endRP(
  nat retainerGeneration,
#ifdef DEBUG_RETAINER
  nat maxCStackSize,
  int maxStackSize,
#endif
  double averageNumVisit)
{
    Ticks user, elapsed;

    getProcessTimes( &user, &elapsed );

    /* Time spent in this retainer-profiling pass. */
    RP_tot_time += user - RP_start_time;
    RPe_tot_time += elapsed - RPe_start_time;

    fprintf(prof_file, "Retainer Profiling: %d, at %f seconds\n",
      retainerGeneration, mut_user_time_during_RP());
#ifdef DEBUG_RETAINER
    fprintf(prof_file, "\tMax C stack size = %u\n", maxCStackSize);
    fprintf(prof_file, "\tMax auxiliary stack size = %d\n", maxStackSize);
#endif
    fprintf(prof_file, "\tAverage number of visits per object = %f\n", averageNumVisit);
}
#endif /* PROFILING */
434 /* -----------------------------------------------------------------------------
435 Called at the beginning of each heap census
436 -------------------------------------------------------------------------- */
#ifdef PROFILING
/* Record the start time of a heap census.
 * (Fix: braces and the PROFILING guard were lost in extraction.) */
void
stat_startHeapCensus(void)
{
    Ticks user, elapsed;

    getProcessTimes( &user, &elapsed );

    HC_start_time = user;
    HCe_start_time = elapsed;
}
#endif /* PROFILING */
449 /* -----------------------------------------------------------------------------
450 Called at the end of each heap census
451 -------------------------------------------------------------------------- */
#ifdef PROFILING
/* Finish a heap census: accumulate its user/elapsed time.
 * (Fix: braces and the PROFILING guard were lost in extraction.) */
void
stat_endHeapCensus(void)
{
    Ticks user, elapsed;

    getProcessTimes( &user, &elapsed );

    HC_tot_time += user - HC_start_time;
    HCe_tot_time += elapsed - HCe_start_time;
}
#endif /* PROFILING */
464 /* -----------------------------------------------------------------------------
465 Called at the end of execution
467 NOTE: number of allocations is not entirely accurate: it doesn't
468 take into account the few bytes at the end of the heap that
469 were left unused when the heap-check failed.
470 -------------------------------------------------------------------------- */
// Ticky-style call counters. TICK_VAR_INI declares the counters for a
// given call arity; SLOW_CALLS/RIGHT_ARITY start at 1 so the percentage
// reports below never divide by zero.
473 #define TICK_VAR_INI(arity) \
474 StgInt SLOW_CALLS_##arity = 1; \
475 StgInt RIGHT_ARITY_##arity = 1; \
476 StgInt TAGGED_PTR_##arity = 0;
// Grand total of calls recorded (also starts at 1 to avoid div-by-zero).
481 StgInt TOTAL_CALLS=1;
// NOTE(review): the REPORT macro body below is truncated by extraction
// (it ends mid-continuation); treat it as incomplete.
484 /* Report the value of a counter */
485 #define REPORT(counter) \
487 showStgWord64(counter,temp,rtsTrue/*commas*/); \
488 statsPrintf(" (" #counter ") : %s\n",temp); \
491 /* Report the value of a counter as a percentage of another counter */
492 #define REPORT_PCT(counter,countertot) \
493 statsPrintf(" (" #counter ") %% of (" #countertot ") : %.1f%%\n", \
494 counter*100.0/countertot)
// Print every counter for one arity, with each as a percentage of the
// broader counter it is a subset of.
496 #define TICK_PRINT(arity) \
497 REPORT(SLOW_CALLS_##arity); \
498 REPORT_PCT(RIGHT_ARITY_##arity,SLOW_CALLS_##arity); \
499 REPORT_PCT(TAGGED_PTR_##arity,RIGHT_ARITY_##arity); \
500 REPORT(RIGHT_ARITY_##arity); \
501 REPORT(TAGGED_PTR_##arity)
// Slow calls of one arity as a percentage of all calls.
503 #define TICK_PRINT_TOT(arity) \
504 statsPrintf(" (SLOW_CALLS_" #arity ") %% of (TOTAL_CALLS) : %.1f%%\n", \
505 SLOW_CALLS_##arity * 100.0/TOTAL_CALLS)
// stat_exit: produce the end-of-run statistics report (-s / -S / -t and
// the machine-readable one-line formats), then free the per-generation
// timing arrays.
// NOTE(review): this block is heavily truncated by extraction -- the
// function signature, many braces and several statements are missing;
// comments describe only what the visible lines establish.
512 Ticks gc_elapsed = 0;
514 Ticks init_elapsed = 0;
516 Ticks mut_elapsed = 0;
518 Ticks exit_elapsed = 0;
520 if (RtsFlags.GcFlags.giveStats != NO_GC_STATS) {
522 char temp[BIG_STRING_LEN];
525 nat i, g, total_collections = 0;
// Total run time, measured from the start of RTS initialisation.
527 getProcessTimes( &tot_cpu, &tot_elapsed );
528 tot_elapsed -= start_init_elapsed;
// `alloc` (a parameter; declaration not visible here) is folded into the
// program-wide allocation total.
530 GC_tot_alloc += alloc;
532 /* Count total garbage collections */
533 for (g = 0; g < RtsFlags.GcFlags.generations; g++)
534 total_collections += generations[g].collections;
536 /* avoid divide by zero if tot_cpu is measured as 0.00 seconds -- SDM */
537 if (tot_cpu == 0.0) tot_cpu = 1;
538 if (tot_elapsed == 0.0) tot_elapsed = 1;
// Final line of the verbose (-S) per-GC table.
540 if (RtsFlags.GcFlags.giveStats >= VERBOSE_GC_STATS) {
541 statsPrintf("%9ld %9.9s %9.9s", (lnat)alloc*sizeof(W_), "", "");
542 statsPrintf(" %5.2f %5.2f\n\n", 0.0, 0.0);
// Sum per-generation GC times into overall GC cpu/elapsed totals.
545 for (i = 0; i < RtsFlags.GcFlags.generations; i++) {
546 gc_cpu += GC_coll_cpu[i];
547 gc_elapsed += GC_coll_elapsed[i];
// Summary (-s) report.
550 if (RtsFlags.GcFlags.giveStats >= SUMMARY_GC_STATS) {
551 showStgWord64(GC_tot_alloc*sizeof(W_),
552 temp, rtsTrue/*commas*/);
553 statsPrintf("%16s bytes allocated in the heap\n", temp);
555 showStgWord64(GC_tot_copied*sizeof(W_),
556 temp, rtsTrue/*commas*/);
557 statsPrintf("%16s bytes copied during GC\n", temp);
// Residency figures are only meaningful if at least one sample was taken.
559 if ( residency_samples > 0 ) {
560 showStgWord64(max_residency*sizeof(W_),
561 temp, rtsTrue/*commas*/);
562 statsPrintf("%16s bytes maximum residency (%ld sample(s))\n",
563 temp, residency_samples);
566 showStgWord64(max_slop*sizeof(W_), temp, rtsTrue/*commas*/);
567 statsPrintf("%16s bytes maximum slop\n", temp);
// Peak memory use, with fragmentation computed as the gap between
// mblock-level and block-level high-water marks.
569 statsPrintf("%16ld MB total memory in use (%ld MB lost due to fragmentation)\n\n",
570 peak_mblocks_allocated * MBLOCK_SIZE_W / (1024 * 1024 / sizeof(W_)),
571 (peak_mblocks_allocated * BLOCKS_PER_MBLOCK * BLOCK_SIZE_W - hw_alloc_blocks * BLOCK_SIZE_W) / (1024 * 1024 / sizeof(W_)));
573 /* Print garbage collections in each gen */
574 statsPrintf(" Tot time (elapsed) Avg pause Max pause\n");
575 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
576 gen = &generations[g];
577 statsPrintf(" Gen %2d %5d colls, %5d par %5.2fs %5.2fs %3.4fs %3.4fs\n",
580 gen->par_collections,
581 TICK_TO_DBL(GC_coll_cpu[g]),
582 TICK_TO_DBL(GC_coll_elapsed[g]),
// Guard against division by zero for generations never collected.
583 gen->collections == 0 ? 0 : TICK_TO_DBL(GC_coll_elapsed[g] / gen->collections),
584 TICK_TO_DBL(GC_coll_max_pause[g]));
// Parallel-GC work balance: avg copied per thread / max copied by any
// one thread (1.0 would be perfectly balanced).
587 #if defined(THREADED_RTS)
588 if (RtsFlags.ParFlags.parGcEnabled) {
589 statsPrintf("\n Parallel GC work balance: %.2f (%ld / %ld, ideal %d)\n",
590 (double)GC_par_avg_copied / (double)GC_par_max_copied,
591 (lnat)GC_par_avg_copied, (lnat)GC_par_max_copied,
592 RtsFlags.ParFlags.nNodes
// Per-task MUT/GC time breakdown.
598 #if defined(THREADED_RTS)
602 statsPrintf(" MUT time (elapsed) GC time (elapsed)\n");
603 for (i = 0, task = all_tasks;
605 i++, task = task->all_link) {
606 statsPrintf(" Task %2d %-8s : %6.2fs (%6.2fs) %6.2fs (%6.2fs)\n",
608 (task->worker) ? "(worker)" : "(bound)",
609 TICK_TO_DBL(task->mut_time),
610 TICK_TO_DBL(task->mut_etime),
611 TICK_TO_DBL(task->gc_time),
612 TICK_TO_DBL(task->gc_etime));
// Spark statistics, summed across all capabilities.
620 lnat sparks_created = 0;
622 lnat sparks_converted = 0;
624 lnat sparks_fizzled = 0;
625 for (i = 0; i < n_capabilities; i++) {
626 sparks_created += capabilities[i].sparks_created;
627 sparks_dud += capabilities[i].sparks_dud;
628 sparks_converted += capabilities[i].sparks_converted;
629 sparks_gcd += capabilities[i].sparks_gcd;
630 sparks_fizzled += capabilities[i].sparks_fizzled;
633 statsPrintf(" SPARKS: %ld (%ld converted, %ld dud, %ld GC'd, %ld fizzled)\n\n",
634 sparks_created + sparks_dud, sparks_converted, sparks_dud, sparks_gcd, sparks_fizzled);
// Phase times: INIT, MUT, GC, (profiling) RP/PROF, EXIT, Total.
638 init_cpu = end_init_cpu - start_init_cpu;
639 init_elapsed = end_init_elapsed - start_init_elapsed;
641 exit_cpu = end_exit_cpu - start_exit_cpu;
642 exit_elapsed = end_exit_elapsed - start_exit_elapsed;
644 statsPrintf(" INIT time %6.2fs (%6.2fs elapsed)\n",
645 TICK_TO_DBL(init_cpu), TICK_TO_DBL(init_elapsed));
// MUT time is what remains after subtracting GC (and profiling) time.
647 mut_elapsed = start_exit_elapsed - end_init_elapsed - gc_elapsed;
649 mut_cpu = start_exit_cpu - end_init_cpu - gc_cpu
650 - PROF_VAL(RP_tot_time + HC_tot_time);
// Clamp: rounding can make the subtraction go slightly negative.
651 if (mut_cpu < 0) { mut_cpu = 0; }
653 statsPrintf(" MUT time %6.2fs (%6.2fs elapsed)\n",
654 TICK_TO_DBL(mut_cpu), TICK_TO_DBL(mut_elapsed));
655 statsPrintf(" GC time %6.2fs (%6.2fs elapsed)\n",
656 TICK_TO_DBL(gc_cpu), TICK_TO_DBL(gc_elapsed));
659 statsPrintf(" RP time %6.2fs (%6.2fs elapsed)\n",
660 TICK_TO_DBL(RP_tot_time), TICK_TO_DBL(RPe_tot_time));
661 statsPrintf(" PROF time %6.2fs (%6.2fs elapsed)\n",
662 TICK_TO_DBL(HC_tot_time), TICK_TO_DBL(HCe_tot_time));
664 statsPrintf(" EXIT time %6.2fs (%6.2fs elapsed)\n",
665 TICK_TO_DBL(exit_cpu), TICK_TO_DBL(exit_elapsed));
666 statsPrintf(" Total time %6.2fs (%6.2fs elapsed)\n\n",
667 TICK_TO_DBL(tot_cpu), TICK_TO_DBL(tot_elapsed));
669 statsPrintf(" %%GC time %5.1f%% (%.1f%% elapsed)\n\n",
670 TICK_TO_DBL(gc_cpu)*100/TICK_TO_DBL(tot_cpu),
671 TICK_TO_DBL(gc_elapsed)*100/TICK_TO_DBL(tot_elapsed));
// Allocation rate per second of MUT time; the zero check avoids
// dividing by zero mutator time.
674 if (tot_cpu - GC_tot_cpu - PROF_VAL(RP_tot_time + HC_tot_time) == 0)
675 showStgWord64(0, temp, rtsTrue/*commas*/);
678 (StgWord64)((GC_tot_alloc*sizeof(W_))/
679 TICK_TO_DBL(tot_cpu - GC_tot_cpu -
680 PROF_VAL(RP_tot_time + HC_tot_time))),
681 temp, rtsTrue/*commas*/);
683 statsPrintf(" Alloc rate %s bytes per MUT second\n\n", temp);
// Productivity: mutator time as a fraction of total (user and elapsed).
685 statsPrintf(" Productivity %5.1f%% of total user, %.1f%% of total elapsed\n\n",
686 TICK_TO_DBL(tot_cpu - GC_tot_cpu -
687 PROF_VAL(RP_tot_time + HC_tot_time) - init_cpu) * 100
688 / TICK_TO_DBL(tot_cpu),
689 TICK_TO_DBL(tot_cpu - GC_tot_cpu -
690 PROF_VAL(RP_tot_time + HC_tot_time) - init_cpu) * 100
691 / TICK_TO_DBL(tot_elapsed));
// Spinlock contention counts (only in THREADED_RTS + PROF_SPIN builds).
704 #if defined(THREADED_RTS) && defined(PROF_SPIN)
708 statsPrintf("gc_alloc_block_sync: %"FMT_Word64"\n", gc_alloc_block_sync.spin);
709 statsPrintf("whitehole_spin: %"FMT_Word64"\n", whitehole_spin);
710 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
711 statsPrintf("gen[%d].sync: %"FMT_Word64"\n", g, generations[g].sync.spin);
// One-line (-t) summary: machine-readable or classic "<<ghc: ...>>" form.
717 if (RtsFlags.GcFlags.giveStats == ONELINE_GC_STATS) {
719 if (RtsFlags.MiscFlags.machineReadable) {
720 fmt1 = " [(\"bytes allocated\", \"%llu\")\n";
721 fmt2 = " ,(\"num_GCs\", \"%d\")\n"
722 " ,(\"average_bytes_used\", \"%ld\")\n"
723 " ,(\"max_bytes_used\", \"%ld\")\n"
724 " ,(\"num_byte_usage_samples\", \"%ld\")\n"
725 " ,(\"peak_megabytes_allocated\", \"%lu\")\n"
726 " ,(\"init_cpu_seconds\", \"%.2f\")\n"
727 " ,(\"init_wall_seconds\", \"%.2f\")\n"
728 " ,(\"mutator_cpu_seconds\", \"%.2f\")\n"
729 " ,(\"mutator_wall_seconds\", \"%.2f\")\n"
730 " ,(\"GC_cpu_seconds\", \"%.2f\")\n"
731 " ,(\"GC_wall_seconds\", \"%.2f\")\n"
735 fmt1 = "<<ghc: %llu bytes, ";
736 fmt2 = "%d GCs, %ld/%ld avg/max bytes residency (%ld samples), %luM in use, %.2f INIT (%.2f elapsed), %.2f MUT (%.2f elapsed), %.2f GC (%.2f elapsed) :ghc>>\n";
738 /* print the long long separately to avoid bugginess on mingwin (2001-07-02, mingw-0.5) */
739 statsPrintf(fmt1, GC_tot_alloc*(StgWord64)sizeof(W_));
// Average residency guards against zero samples.
742 residency_samples == 0 ? 0 :
743 avg_residency*sizeof(W_)/residency_samples,
744 max_residency*sizeof(W_),
746 (unsigned long)(peak_mblocks_allocated * MBLOCK_SIZE / (1024L * 1024L)),
747 TICK_TO_DBL(init_cpu), TICK_TO_DBL(init_elapsed),
748 TICK_TO_DBL(mut_cpu), TICK_TO_DBL(mut_elapsed),
749 TICK_TO_DBL(gc_cpu), TICK_TO_DBL(gc_elapsed));
// Release the per-generation timing arrays allocated in initStats1().
757 stgFree(GC_coll_cpu);
760 if (GC_coll_elapsed) {
761 stgFree(GC_coll_elapsed);
762 GC_coll_elapsed = NULL;
764 if (GC_coll_max_pause) {
765 stgFree(GC_coll_max_pause);
766 GC_coll_max_pause = NULL;
770 /* -----------------------------------------------------------------------------
773 Produce some detailed info on the state of the generational GC.
774 -------------------------------------------------------------------------- */
// statDescribeGens: debug report of the generational GC's current state,
// one row per generation (blocks, mut-list bytes, large objects, live
// words, slop), written via debugBelch.
// NOTE(review): truncated by extraction -- several declarations and
// braces are missing from the visible text.
776 statDescribeGens(void)
780 lnat tot_live, tot_slop;
781 lnat gen_live, gen_blocks;
786 "----------------------------------------------------------\n"
787 " Gen Max Mut-list Blocks Large Live Slop\n"
788 " Blocks Bytes Objects \n"
789 "----------------------------------------------------------\n");
794 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
795 gen = &generations[g];
// Count the generation's large objects.
797 for (bd = gen->large_objects, lge = 0; bd; bd = bd->link) {
801 gen_live = genLiveWords(gen);
802 gen_blocks = genLiveBlocks(gen);
// Add per-capability contributions: mut-list occupancy, the pinned
// object block, and each GC thread's live words/blocks.
805 for (i = 0; i < n_capabilities; i++) {
806 mut += countOccupied(capabilities[i].mut_lists[g]);
808 // Add the pinned object block.
809 bd = capabilities[i].pinned_object_block;
811 gen_live += bd->free - bd->start;
812 gen_blocks += bd->blocks;
// NOTE(review): gcThreadLiveWords(i,g) is added twice on the next two
// lines -- this looks like a double-count bug, unless one of the
// (not-visible) surrounding lines puts them under different conditions.
// Verify against the full source and drop one if duplicated.
815 gen_live += gcThreadLiveWords(i,g);
816 gen_live += gcThreadLiveWords(i,g);
817 gen_blocks += gcThreadLiveBlocks(i,g);
820 debugBelch("%5d %7d %9d", g, gen->max_blocks, mut);
// Slop = space held in blocks but not occupied by live data.
822 gen_slop = gen_blocks * BLOCK_SIZE_W - gen_live;
824 debugBelch("%8ld %8d %8ld %8ld\n", gen_blocks, lge,
825 gen_live*sizeof(W_), gen_slop*sizeof(W_));
826 tot_live += gen_live;
827 tot_slop += gen_slop;
829 debugBelch("----------------------------------------------------------\n");
830 debugBelch("%41s%8ld %8ld\n","",tot_live*sizeof(W_),tot_slop*sizeof(W_));
831 debugBelch("----------------------------------------------------------\n");
835 /* -----------------------------------------------------------------------------
836 Stats available via a programmatic interface, so eg. GHCi can time
837 each compilation and expression evaluation.
838 -------------------------------------------------------------------------- */
840 extern HsInt64 getAllocations( void )
841 { return (HsInt64)GC_tot_alloc * sizeof(W_); }
843 /* -----------------------------------------------------------------------------
844 Dumping stuff in the stats file, or via the debug message interface
845 -------------------------------------------------------------------------- */
// statsPrintf: varargs formatter for the stats report; reads the stream
// chosen by -S<file> from RtsFlags.GcFlags.statsFile.
// NOTE(review): the bodies of statsPrintf, statsFlush and statsClose are
// truncated by extraction -- only their statsFile lookups are visible.
848 statsPrintf( char *s, ... )
850 FILE *sf = RtsFlags.GcFlags.statsFile;
// statsFlush: flush the stats stream, if any.
865 FILE *sf = RtsFlags.GcFlags.statsFile;
// statsClose: close the stats file, if one was opened.
874 FILE *sf = RtsFlags.GcFlags.statsFile;