1 /* -----------------------------------------------------------------------------
2 * $Id: GCCompact.c,v 1.1 2001/07/23 17:23:19 simonmar Exp $
4 * (c) The GHC Team 2001
6 * Compacting garbage collector
8 * ---------------------------------------------------------------------------*/
14 #include "BlockAlloc.h"
16 #include "GCCompact.h"
18 #include "StablePriv.h"
// NOTE(review): these lines appear to be the body of thread() — the function
// header is not visible in this extract. It seems to read the word at p and,
// for heap-allocated targets, thread p onto the target's pointer chain —
// TODO confirm against the full source.
23 StgPtr q = (StgPtr)*p;
24 ASSERT(!LOOKS_LIKE_GHC_INFO(q));
25 if (HEAP_ALLOCED(q)) {
// Walk the threaded chain hanging off *p, presumably writing 'free' (the
// object's final address) back into each slot on the chain until a real
// info pointer is reached — TODO confirm; the loop body and the function's
// return type line are missing from this extract.
32 unthread( StgPtr p, StgPtr free )
34 StgPtr q = (StgPtr)*p, r;
36 while (!LOOKS_LIKE_GHC_INFO(q)) {
// Follow the chain threaded through this closure's info-pointer word until
// something that looks like a genuine info pointer is found, and return it
// as a struct pointer. Needed because while an object is threaded its header
// word holds a chain link, not the info pointer.
// NOTE(review): loop body lines are elided in this extract.
44 static inline StgInfoTable *
45 get_threaded_info( StgPtr p )
47 StgPtr q = (P_)GET_INFO((StgClosure *)p);
49 while (!LOOKS_LIKE_GHC_INFO(q)) {
52 return INFO_PTR_TO_STRUCT((StgInfoTable *)q);
// Word-by-word copy of 'size' words from 'from' to 'to'.
// NOTE(review): the copy statement inside the loop and the function's
// return-type line are elided in this extract.
55 // A word-aligned memmove will be faster for small objects than libc's or gcc's.
56 // Remember, the two regions *might* overlap, but: to <= from.
58 move(StgPtr to, StgPtr from, nat size)
60 for(; size > 0; --size) {
// Return the size in words of closure p, given its (already unthreaded)
// info table. Dispatches on info->type; the default falls back to the
// size recorded in the info table itself.
// NOTE(review): most of the case labels preceding these return statements
// are elided in this extract — the pairing of labels to returns cannot be
// verified from here.
66 obj_sizeW( StgClosure *p, StgInfoTable *info )
73 return sizeofW(StgHeader) + 1;
85 return sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
87 return THUNK_SELECTOR_sizeW();
90 return pap_sizeW((StgPAP *)p);
92 return arr_words_sizeW((StgArrWords *)p);
94 case MUT_ARR_PTRS_FROZEN:
95 return mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
97 return tso_sizeW((StgTSO *)p);
99 return sizeW_fromITBL(info);
// Thread the pointer fields of every object on the static-object linked
// list starting at p, advancing via STATIC_LINK until END_OF_STATIC_LIST.
// Unknown closure types abort via barf().
// NOTE(review): several case labels and statements are elided in this
// extract.
104 thread_static( StgClosure* p )
106 const StgInfoTable *info;
108 // keep going until we've threaded all the objects on the linked
110 while (p != END_OF_STATIC_LIST) {
113 switch (info->type) {
116 thread((StgPtr)&((StgInd *)p)->indirectee);
125 barf("thread_static: strange closure %d", (int)(info->type));
128 p = STATIC_LINK(info,p);
// Thread every pointer found on a stack between p and stack_end: skip
// argument tags, thread pending-argument closure pointers, then decode
// each activation record's liveness bitmap (dynamic, small, or large)
// and thread the live slots. Structure mirrors scavenge_stack.
// NOTE(review): this extract is elided — many statements, case labels and
// closing braces between the lines shown are missing.
133 thread_stack(StgPtr p, StgPtr stack_end)
136 const StgInfoTable* info;
139 // highly similar to scavenge_stack, but we do pointer threading here.
141 while (p < stack_end) {
144 // If we've got a tag, skip over that many words on the stack
145 if ( IS_ARG_TAG((W_)q) ) {
150 // Is q a pointer to a closure?
151 if ( !LOOKS_LIKE_GHC_INFO(q) ) {
157 // Otherwise, q must be the info pointer of an activation
158 // record. All activation records have 'bitmap' style layout
161 info = get_itbl((StgClosure *)p);
163 switch (info->type) {
165 // Dynamic bitmap: the mask is stored on the stack
167 bitmap = ((StgRetDyn *)p)->liveness;
168 p = (P_)&((StgRetDyn *)p)->payload[0];
171 // probably a slow-entry point return address:
177 // small bitmap (< 32 entries, or 64 on a 64-bit machine)
185 bitmap = info->layout.bitmap;
187 // this assumes that the payload starts immediately after the info-ptr
// A clear bitmap bit means the slot holds a live pointer — thread it;
// a set bit means the slot is a non-pointer (see scavenge_stack).
// TODO(review): polarity inferred from the (bitmap & 1) == 0 test only.
189 while (bitmap != 0) {
190 if ((bitmap & 1) == 0) {
194 bitmap = bitmap >> 1;
198 // large bitmap (> 32 entries)
203 StgLargeBitmap *large_bitmap;
206 large_bitmap = info->layout.large_bitmap;
209 for (i=0; i<large_bitmap->size; i++) {
210 bitmap = large_bitmap->bitmap[i];
// q marks the end of the stack chunk covered by this bitmap word
// (one bit per word).
211 q = p + sizeof(W_) * 8;
212 while (bitmap != 0) {
213 if ((bitmap & 1) == 0) {
217 bitmap = bitmap >> 1;
219 if (i+1 < large_bitmap->size) {
230 barf("thread_stack: weird activation record found on stack: %d",
// Thread the pointer fields of each large object on the block-descriptor
// chain bd (one object per block group). Handles mutable pointer arrays,
// TSO stacks, and PAP-like objects; anything else barfs.
// NOTE(review): case labels and surrounding statements are elided in this
// extract.
237 update_fwd_large( bdescr *bd )
240 const StgInfoTable* info;
242 for (; bd != NULL; bd = bd->link) {
246 info = get_itbl((StgClosure *)p);
248 switch (info->type) {
255 case MUT_ARR_PTRS_FROZEN:
260 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
261 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
269 StgTSO *tso = (StgTSO *)p;
270 thread_stack(tso->sp, &(tso->stack[tso->stack_size]));
277 StgPAP* pap = (StgPAP *)p;
278 thread((StgPtr)&pap->fun);
279 thread_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
284 barf("update_fwd_large: unknown/strange object %d", (int)(info->type));
// First compaction pass over a (non-compacted) block chain: linearly scan
// every object in every block and thread its pointer fields, advancing p
// by each object's size. Closure types with layouts the generic info-table
// description can't express (WEAK, MVAR, IND_OLDGEN, selectors, PAPs, TSOs)
// are special-cased below.
// NOTE(review): this extract is elided — many case labels, braces and
// statements between the lines shown are missing; the pairing of labels to
// bodies cannot be verified from here.
290 update_fwd( bdescr *blocks )
299 barf("update_fwd: ToDo");
302 // cycle through all the blocks in the step
303 for (; bd != NULL; bd = bd->link) {
306 // linearly scan the objects in this block
307 while (p < bd->free) {
309 /* unthread the info ptr */
311 info = get_itbl((StgClosure *)p);
313 ASSERT(p && (LOOKS_LIKE_GHC_INFO(info)
314 || IS_HUGS_CONSTR_INFO(info)));
316 switch (info->type) {
// Fixed-layout small closures: thread any pointer payload words and
// step over the header + payload.
319 p += sizeofW(StgHeader) + 1;
324 thread((StgPtr)&((StgClosure *)p)->payload[0]);
325 p += sizeofW(StgHeader) + 1;
329 thread((StgPtr)&((StgClosure *)p)->payload[0]);
330 p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
333 case THUNK_0_1: // MIN_UPD_SIZE
337 p += sizeofW(StgHeader) + 2;
343 thread((StgPtr)&((StgClosure *)p)->payload[0]);
344 p += sizeofW(StgHeader) + 2;
350 thread((StgPtr)&((StgClosure *)p)->payload[0]);
351 thread((StgPtr)&((StgClosure *)p)->payload[1]);
352 p += sizeofW(StgHeader) + 2;
365 case SE_CAF_BLACKHOLE:
// Generic case: thread the ptrs described by the info table's layout,
// then skip the non-pointer words.
372 end = (P_)((StgClosure *)p)->payload +
373 info->layout.payload.ptrs;
374 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
377 p += info->layout.payload.nptrs;
381 // the info table for a weak ptr lies about the number of ptrs
382 // (because we have special GC routines for them, but we
383 // want to use the standard evacuate code). So we have to
384 // special case here.
387 StgWeak *w = (StgWeak *)p;
388 thread((StgPtr)&w->key);
389 thread((StgPtr)&w->value);
390 thread((StgPtr)&w->finalizer);
391 if (w->link != NULL) {
392 thread((StgPtr)&w->link);
394 p += sizeofW(StgWeak);
398 // again, the info table for MVar isn't suitable here (it includes
399 // the mut_link field as a pointer, and we don't want to
403 StgMVar *mvar = (StgMVar *)p;
404 thread((StgPtr)&mvar->head);
405 thread((StgPtr)&mvar->tail);
406 thread((StgPtr)&mvar->value);
407 p += sizeofW(StgMVar);
411 // specialise this case, because we want to update the
412 // mut_link field too.
414 case IND_OLDGEN_PERM:
416 StgIndOldGen *ind = (StgIndOldGen *)p;
417 thread((StgPtr)&ind->indirectee);
418 if (ind->mut_link != NULL) {
419 thread((StgPtr)&ind->mut_link);
426 StgSelector *s = (StgSelector *)p;
427 thread((StgPtr)&s->selectee);
428 p += THUNK_SELECTOR_sizeW();
432 case AP_UPD: // same as PAPs
435 StgPAP* pap = (StgPAP *)p;
437 thread((P_)&pap->fun);
// A PAP's payload is in stack form, so reuse the stack threader.
438 thread_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
444 p += arr_words_sizeW((StgArrWords *)p);
448 case MUT_ARR_PTRS_FROZEN:
453 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
454 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
462 StgTSO *tso = (StgTSO *)p;
463 thread_stack(tso->sp, &(tso->stack[tso->stack_size]));
464 thread((StgPtr)&tso->link);
465 thread((StgPtr)&tso->global_link);
471 barf("update_fwd: unknown/strange object %d", (int)(info->type));
// Forward pass over the oldest generation's to-be-compacted blocks: walk
// the mark bitmap to find live objects, thread each live object's pointer
// fields, and track a 'free' destination pointer so each object's final
// address can be computed (spilling to the next block when it won't fit —
// see the end of the loop).
// NOTE(review): this extract is elided — the bitmap-advance logic, many
// case labels and statements between the lines shown are missing.
478 update_fwd_compact( bdescr *blocks )
482 bdescr *bd, *free_bd;
488 free = free_bd->start;
491 barf("update_fwd: ToDo");
494 // cycle through all the blocks in the step
495 for (; bd != NULL; bd = bd->link) {
// Skip runs of unmarked (dead) words, then reload the bitmap word
// covering p and shift it so bit 0 corresponds to p.
498 while (p < bd->free ) {
500 while ( p < bd->free && !is_marked(p,bd) ) {
509 m = * ((StgPtr)bd->u.bitmap + ((p - bd->start) / (BITS_IN(StgWord))));
510 m >>= ((p - bd->start) & (BITS_IN(StgWord) - 1));
512 while ( p < bd->free ) {
517 if (((StgWord)p & (sizeof(W_) * BITS_IN(StgWord))) == 0) {
525 // Problem: we need to know the destination for this cell
526 // in order to unthread its info pointer. But we can't
527 // know the destination without the size, because we may
528 // spill into the next block. So we have to run down the
529 // threaded list and get the info ptr first.
530 info = get_threaded_info(p);
533 ASSERT(p && (LOOKS_LIKE_GHC_INFO(info)
534 || IS_HUGS_CONSTR_INFO(info)));
// Per-closure-type threading; same shape as update_fwd above.
536 switch (info->type) {
539 p += sizeofW(StgHeader) + 1;
544 thread((StgPtr)&((StgClosure *)p)->payload[0]);
545 p += sizeofW(StgHeader) + 1;
549 thread((StgPtr)&((StgClosure *)p)->payload[0]);
550 p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
553 case THUNK_0_1: // MIN_UPD_SIZE
557 p += sizeofW(StgHeader) + 2;
563 thread((StgPtr)&((StgClosure *)p)->payload[0]);
564 p += sizeofW(StgHeader) + 2;
570 thread((StgPtr)&((StgClosure *)p)->payload[0]);
571 thread((StgPtr)&((StgClosure *)p)->payload[1]);
572 p += sizeofW(StgHeader) + 2;
585 case SE_CAF_BLACKHOLE:
592 end = (P_)((StgClosure *)p)->payload +
593 info->layout.payload.ptrs;
594 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
597 p += info->layout.payload.nptrs;
603 StgWeak *w = (StgWeak *)p;
604 thread((StgPtr)&w->key);
605 thread((StgPtr)&w->value);
606 thread((StgPtr)&w->finalizer);
607 if (w->link != NULL) {
608 thread((StgPtr)&w->link);
610 p += sizeofW(StgWeak);
616 StgMVar *mvar = (StgMVar *)p;
617 thread((StgPtr)&mvar->head);
618 thread((StgPtr)&mvar->tail);
619 thread((StgPtr)&mvar->value);
620 p += sizeofW(StgMVar);
625 case IND_OLDGEN_PERM:
626 // specialise this case, because we want to update the
627 // mut_link field too.
629 StgIndOldGen *ind = (StgIndOldGen *)p;
630 thread((StgPtr)&ind->indirectee);
631 if (ind->mut_link != NULL) {
632 thread((StgPtr)&ind->mut_link);
634 p += sizeofW(StgIndOldGen);
640 StgSelector *s = (StgSelector *)p;
641 thread((StgPtr)&s->selectee);
642 p += THUNK_SELECTOR_sizeW();
646 case AP_UPD: // same as PAPs
649 StgPAP* pap = (StgPAP *)p;
651 thread((P_)&pap->fun);
652 thread_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
658 p += arr_words_sizeW((StgArrWords *)p);
662 case MUT_ARR_PTRS_FROZEN:
667 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
668 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
676 StgTSO *tso = (StgTSO *)p;
677 thread_stack(tso->sp, &(tso->stack[tso->stack_size]));
678 thread((StgPtr)&tso->link);
679 thread((StgPtr)&tso->global_link);
685 barf("update_fwd: unknown/strange object %d", (int)(info->type));
// Destination would overflow the current free block: advance to the
// next block so 'free' matches what update_bkwd_compact will compute.
689 if (free + size > free_bd->start + BLOCK_SIZE_W) {
690 free_bd = free_bd->link;
691 free = free_bd->start;
// Backward pass over a (non-compacted) block chain: linearly scan each
// block, unthread each object's pointer chain (the unthread call itself is
// elided from this extract), then advance by the object's size. Objects are
// not moved here.
704 update_bkwd( bdescr *blocks )
713 barf("update_bkwd: ToDo");
716 // cycle through all the blocks in the step
717 for (; bd != NULL; bd = bd->link) {
720 // linearly scan the objects in this block
721 while (p < bd->free) {
723 // must unthread before we look at the info ptr...
726 info = get_itbl((StgClosure *)p);
727 ASSERT(p && (LOOKS_LIKE_GHC_INFO(info)
728 || IS_HUGS_CONSTR_INFO(info)));
730 p += obj_sizeW((StgClosure *)p,info);
736 update_bkwd_compact( step *stp )
740 bdescr *bd, *free_bd;
742 nat size, free_blocks;
744 bd = free_bd = stp->blocks;
745 free = free_bd->start;
749 barf("update_bkwd: ToDo");
752 // cycle through all the blocks in the step
753 for (; bd != NULL; bd = bd->link) {
756 while (p < bd->free ) {
758 while ( p < bd->free && !is_marked(p,bd) ) {
767 m = * ((StgPtr)bd->u.bitmap + ((p - bd->start) / (BITS_IN(StgWord))));
768 m >>= ((p - bd->start) & (BITS_IN(StgWord) - 1));
770 while ( p < bd->free ) {
775 if (((StgWord)p & (sizeof(W_) * BITS_IN(StgWord))) == 0) {
783 // must unthread before we look at the info ptr...
784 info = get_threaded_info(p);
786 ASSERT(p && (LOOKS_LIKE_GHC_INFO(info)
787 || IS_HUGS_CONSTR_INFO(info)));
789 size = obj_sizeW((StgClosure *)p,info);
791 if (free + size > free_bd->start + BLOCK_SIZE_W) {
792 // don't forget to update the free ptr in the block desc.
793 free_bd->free = free;
794 free_bd = free_bd->link;
795 free = free_bd->start;
802 // Rebuild the mutable list for the old generation.
803 // (the mut_once list is updated using threading, with
804 // special cases for IND_OLDGEN and MUT_CONS above).
805 if (ip_MUTABLE(info)) {
806 recordMutable((StgMutClosure *)free);
810 if (info->type == TSO) {
811 move_TSO((StgTSO *)p, (StgTSO *)free);
822 // free the remaining blocks and count what's left.
823 free_bd->free = free;
824 if (free_bd->link != NULL) {
825 freeChain(free_bd->link);
826 free_bd->link = NULL;
828 stp->n_blocks = free_blocks;
// Unthread every large object on the chain 'blocks'. Large objects never
// move, so each one's "destination" is its own current address (hence
// unthread(bd->start, bd->start)).
834 update_bkwd_large( bdescr *blocks )
838 for (bd = blocks; bd != NULL; bd = bd->link ) {
839 unthread(bd->start, bd->start);
// Entry point for the compacting collector. Three phases:
//   1. thread every root (GC roots, weak-pointer lists, mutable lists,
//      the global thread list, static objects, the stable-pointer table);
//   2. forward pass: thread intra-heap pointers and compute destinations
//      (update_fwd / update_fwd_large / update_fwd_compact — the compact
//      variant only for the oldest generation's block list);
//   3. backward pass: unthread and slide objects into place, updating
//      the step's block count from update_bkwd_compact's return value.
// NOTE(review): this extract is elided and the function's closing lines run
// past the visible end of the file; declarations of g/s/stp/blocks are not
// in view.
845 compact( void (*get_roots)(evac_fn) )
849 extern StgWeak *old_weak_ptr_list; // tmp
851 // 1. thread the roots
852 get_roots((evac_fn)thread);
854 // the weak pointer lists...
855 if (weak_ptr_list != NULL) {
856 thread((StgPtr)&weak_ptr_list);
858 if (old_weak_ptr_list != NULL) {
859 thread((StgPtr)&old_weak_ptr_list); // tmp
862 // mutable lists (ToDo: all gens)
863 thread((StgPtr)&oldest_gen->mut_list);
864 thread((StgPtr)&oldest_gen->mut_once_list);
866 // the global thread list
867 thread((StgPtr)&all_threads);
869 // the static objects
870 thread_static(scavenged_static_objects);
872 // the stable pointer table
873 threadStablePtrTable((evac_fn)thread);
875 // 2. update forward ptrs
876 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
877 for (s = 0; s < generations[g].n_steps; s++) {
878 stp = &generations[g].steps[s];
879 IF_DEBUG(gc, fprintf(stderr,"update_fwd: %d.%d\n", stp->gen->no, stp->no););
881 update_fwd(stp->to_blocks);
882 update_fwd_large(stp->scavenged_large_objects);
883 if (g == RtsFlags.GcFlags.generations-1 && stp->blocks != NULL) {
884 IF_DEBUG(gc, fprintf(stderr,"update_fwd: %d.%d (compact)\n", stp->gen->no, stp->no););
885 update_fwd_compact(stp->blocks);
890 // 3. update backward ptrs
891 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
892 for (s = 0; s < generations[g].n_steps; s++) {
893 stp = &generations[g].steps[s];
894 IF_DEBUG(gc, fprintf(stderr,"update_bkwd: %d.%d\n", stp->gen->no, stp->no););
895 update_bkwd(stp->to_blocks);
896 update_bkwd_large(stp->scavenged_large_objects);
897 if (g == RtsFlags.GcFlags.generations-1 && stp->blocks != NULL) {
898 IF_DEBUG(gc, fprintf(stderr,"update_bkwd: %d.%d (compact)\n", stp->gen->no, stp->no););
899 blocks = update_bkwd_compact(stp);
900 IF_DEBUG(gc, fprintf(stderr,"update_bkwd: %d.%d (compact, old: %d blocks, now %d blocks)\n",
901 stp->gen->no, stp->no,
902 stp->n_blocks, blocks););
903 stp->n_blocks = blocks;