1 \section[SM-compacting]{Compacting Collector Subroutines}
3 This is a collection of C functions used in implementing the compacting
garbage collector.
6 The motivation for making this a separate file/section is twofold:
8 1) It lets us focus on one thing.
10 2) If we don't do this, there will be a huge amount of repetition
11 between the various GC schemes --- a maintenance nightmare.
13 The second is the major motivation.
15 ToDo ADR: trash contents of other semispace after GC in debugging version
18 #if defined(GC1s) || defined(GCdu) || defined(GCap) || defined(GCgn)
24 #include "SMinternal.h"
27 #else /* GCdu, GCap, GCgn */
30 #include "SMinternal.h"
35 #include "SMcompacting.h"
40 LinkRoots(roots, rootno)
/* Link every GC root location to the closure it currently points at,
   so the compaction phase can later update each root when its closure
   moves.  NOTE(review): the parameter declarations, the `root` loop
   variable declaration, and the closing braces fall outside this
   excerpt; only the linking loop itself is visible here. */
46 DEBUG_STRING("Linking Roots:");
47 for (root = 0; root < rootno; root++) {
/* Link the address of each root slot (not the closure it holds). */
48 LINK_LOCATION_TO_CLOSURE(&(roots[root]));
/* Walk the GranSim global event queue and link every closure-valued
   field of every event (TSOs, spark nodes, fetched nodes) so the
   compacting GC can relocate them.  Dispatches on EVENT_TYPE.
   NOTE(review): the surrounding loop construct and several closing
   braces are outside this excerpt. */
57 LinkEvents(STG_NO_ARGS)
59 eventq event = EventHd;
61 # if defined(GRAN) && defined(GRAN_CHECK)
/* Optional GC-statistics trace, enabled by debug flag bit 0x40. */
62 if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
63 fprintf(RTSflags.GcFlags.statsFile,"Linking Events ...\n");
66 DEBUG_STRING("Linking Events:");
/* Thread-scheduling events carry only a TSO pointer. */
69 if(EVENT_TYPE(event) == RESUMETHREAD ||
70 EVENT_TYPE(event) == MOVETHREAD ||
71 EVENT_TYPE(event) == CONTINUETHREAD ||
72 EVENT_TYPE(event) == STARTTHREAD )
74 { LINK_LOCATION_TO_CLOSURE( &(EVENT_TSO(event)) ); }
76 else if(EVENT_TYPE(event) == MOVESPARK)
78 { LINK_LOCATION_TO_CLOSURE( &(SPARK_NODE(EVENT_SPARK(event))) ); }
/* Fetch events carry a TSO plus either a packet buffer or a node. */
80 else if (EVENT_TYPE(event) == FETCHNODE ||
81 EVENT_TYPE(event) == FETCHREPLY )
83 LINK_LOCATION_TO_CLOSURE( &(EVENT_TSO(event)) );
85 /* In the case of packet fetching, EVENT_NODE(event) points to */
86 /* the packet (currently, malloced). The packet is just a list of */
87 /* closure addresses, with the length of the list at index 1 (the */
88 /* structure of the packet is defined in Pack.lc). */
89 if ( RTSflags.GranFlags.DoGUMMFetching &&
90 (EVENT_TYPE(event)==FETCHREPLY)) {
91 P_ buffer = (P_) EVENT_NODE(event);
92 int size = (int) buffer[PACK_SIZE_LOCN], i;
/* Link every closure address stored in the packet body. */
94 for (i = PACK_HDR_SIZE; i <= size-1; i++) {
95 LINK_LOCATION_TO_CLOSURE( (buffer+i) );
/* Non-packet case: link the single fetched node directly. */
98 { LINK_LOCATION_TO_CLOSURE( &(EVENT_NODE(event)) ); }
/* GLOBALBLOCK events carry both a TSO and a node. */
100 else if (EVENT_TYPE(event) == GLOBALBLOCK)
102 LINK_LOCATION_TO_CLOSURE( &(EVENT_TSO(event)) );
103 LINK_LOCATION_TO_CLOSURE( &(EVENT_NODE(event)) );
105 else if (EVENT_TYPE(event) == UNBLOCKTHREAD)
107 LINK_LOCATION_TO_CLOSURE( &(EVENT_TSO(event)) );
/* Advance to the next queued event. */
109 event = EVENT_NEXT(event);
117 #if defined(CONCURRENT)
/* GranSim variant: link the node closure of every pending spark in
   every spark pool of every simulated processor, so the compacting GC
   can relocate them.  NOTE(review): the `proc`/`spark` declarations,
   the loop termination condition of the innermost for, and intervening
   lines are outside this excerpt. */
120 LinkSparks(STG_NO_ARGS)
124 I_ pool, total_sparks=0;
126 # if defined(GRAN) && defined(GRAN_CHECK)
/* Optional GC-statistics trace, enabled by debug flag bit 0x40. */
127 if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
128 fprintf(RTSflags.GcFlags.statsFile,"Linking Sparks ...\n");
131 DEBUG_STRING("Linking Sparks:");
/* Triple loop: every processor, every pool, every spark in the list. */
132 for(proc = 0; proc < RTSflags.GranFlags.proc; ++proc) {
133 for(pool = 0; pool < SPARK_POOLS; ++pool) {
134 for(spark = PendingSparksHd[proc][pool];
136 spark = SPARK_NEXT(spark))
138 LINK_LOCATION_TO_CLOSURE( &(SPARK_NODE(spark)));
139 } /* forall spark ... */
140 } /* forall pool ... */
141 } /*forall proc .. */
/* Non-GranSim variant: the spark pools are flat arrays delimited by
   Hd/Tl pointers; link each spark slot in place.  NOTE(review): the
   `pool`/`sparkptr` declarations and closing braces are outside this
   excerpt. */
147 LinkSparks(STG_NO_ARGS)
152 DEBUG_STRING("Linking Sparks:");
153 for (pool = 0; pool < SPARK_POOLS; pool++) {
/* Walk the occupied region [Hd, Tl) of this pool's spark array. */
154 for (sparkptr = PendingSparksHd[pool];
155 sparkptr < PendingSparksTl[pool]; sparkptr++) {
156 LINK_LOCATION_TO_CLOSURE(sparkptr);
161 #endif /* CONCURRENT */
/* Link the local-address fields of all live global addresses (GAs) so
   the compacting GC can relocate them, and recycle GALA entries that
   are no longer needed.  Two passes:
     1. liveIndirections (GAs we own): still-shared entries are linked;
        entries holding the full weight are retired onto the
        freeIndirections list.
     2. liveRemoteGAs (GAs owned elsewhere): entries whose local copy is
        dead (mark bit clear in `bits`, indexed from `base`) are freed
        and a FREE message is queued; live ones are linked.
   NOTE(review): the `gala`/`prev`/`next` declarations, the
   `next = gala->next;` advances, and the list-relinking lines are
   outside this excerpt. */
170 LinkLiveGAs(P_ base, BitWord *bits)
175 long _hp_word, bit_index, bit;
177 DEBUG_STRING("Linking Live GAs:");
179 for (gala = liveIndirections, prev = NULL; gala != NULL; gala = next) {
/* Pass 1 handles only GAs this task owns. */
181 ASSERT(gala->ga.loc.gc.gtid == mytid);
182 if (gala->ga.weight != MAX_GA_WEIGHT) {
/* Some weight is still held remotely: the closure stays live. */
183 LINK_LOCATION_TO_CLOSURE(&gala->la);
187 /* Since we have all of the weight, this GA is no longer needed */
188 W_ pga = PackGA(thisPE, gala->ga.loc.gc.slot);
191 fprintf(stderr, "Freeing slot %d\n", gala->ga.loc.gc.slot);
/* Push the retired GALA onto the freeIndirections list.
   BUG FIX: was `freeIndirections->next = gala;`, which never updated
   the list head, linked gala and the old head into a 2-cycle, and
   dereferenced NULL on an empty free list.  A list push must update
   the head itself. */
193 gala->next = freeIndirections;
194 freeIndirections = gala;
195 (void) removeHashTable(pGAtoGALAtable, pga, (void *) gala);
/* Poison the freed entry so stale uses are easy to spot in a core. */
197 gala->ga.weight = 0x0d0d0d0d;
198 gala->la = (P_) 0xbadbad;
/* prev has accumulated the surviving entries (built in hidden lines). */
202 liveIndirections = prev;
204 prepareFreeMsgBuffers();
206 for (gala = liveRemoteGAs, prev = NULL; gala != NULL; gala = next) {
/* Pass 2 handles only GAs owned by other tasks. */
208 ASSERT(gala->ga.loc.gc.gtid != mytid);
/* Compute this closure's mark bit from its heap offset. */
210 _hp_word = gala->la - base;
211 bit_index = _hp_word / BITS_IN(BitWord);
212 bit = 1L << (_hp_word & (BITS_IN(BitWord) - 1));
213 if (!(bits[bit_index] & bit)) {
/* Local copy is dead: drop the GALA and tell the owner to take the
   weight back. */
214 int pe = taskIDtoPE(gala->ga.loc.gc.gtid);
215 W_ pga = PackGA(pe, gala->ga.loc.gc.slot);
217 (void) removeHashTable(pGAtoGALAtable, pga, (void *) gala);
218 freeRemoteGA(pe, &(gala->ga));
219 gala->next = freeGALAList;
/* Local copy is live: link it for relocation. */
222 LINK_LOCATION_TO_CLOSURE(&gala->la);
227 liveRemoteGAs = prev;
229 /* If we have any remaining FREE messages to send off, do so now */
237 Note: no \tr{Link[AB]Stack} for ``parallel'' systems, because they
238 don't have a single main stack.
241 #if !defined(PAR) /* && !defined(GRAN) */ /* HWL */
/* Link every pointer slot on the A (pointer) stack, from the top
   `stackA` down to the bottom `botA`, so the compacting GC can update
   them when closures move.  NOTE(review): the parameter declarations,
   the `stackptr` declaration, and the closing braces are outside this
   excerpt. */
244 LinkAStack(stackA, botA)
250 DEBUG_STRING("Linking A Stack:");
/* SUBTRACT_A_STK/AREL abstract over the A stack's growth direction. */
251 for (stackptr = stackA;
252 SUBTRACT_A_STK(stackptr, botA) >= 0;
253 stackptr = stackptr + AREL(1)) {
254 LINK_LOCATION_TO_CLOSURE(stackptr);
260 ToDo (Patrick?): Don't explicitly mark & compact unmarked Bstack frames
263 #if !defined(PAR) /* && !defined(CONCURRENT) */ /* HWL */
/* Walk the chain of update frames on the B stack (topmost frame first,
   following the saved SuB links) and link each frame's updatee slot so
   the compacting GC can relocate the closures to be updated.
   NOTE(review): the first parameter's declaration, the
   `updateFramePtr` declaration, and closing braces are outside this
   excerpt. */
266 LinkBStack(stackB, botB)
268 P_ botB; /* stackB points to topmost update frame */
272 DEBUG_STRING("Linking B Stack:");
/* GRAB_SuB fetches the pointer to the next (enclosing) update frame. */
273 for (updateFramePtr = stackB;
274 SUBTRACT_B_STK(updateFramePtr, botB) > 0;
275 updateFramePtr = GRAB_SuB(updateFramePtr)) {
/* Address of this frame's updatee field (BREL handles direction). */
277 P_ updateClosurePtr = updateFramePtr + BREL(UF_UPDATEE);
279 LINK_LOCATION_TO_CLOSURE(updateClosurePtr);
/* Count the CAFs on the list threaded through IND_CLOSURE_LINK.
   NOTE(review): the `caf_no` declaration/increment and the return are
   outside this excerpt. */
287 CountCAFs(P_ CAFlist)
291 for (caf_no = 0; CAFlist != NULL; CAFlist = (P_) IND_CLOSURE_LINK(CAFlist))
/* Link the indirection-pointer field of every CAF on the list, so the
   compacting GC can update each CAF to its relocated value closure.
   NOTE(review): the enclosing function header (presumably LinkCAFs)
   and closing brace are outside this excerpt. */
302 DEBUG_STRING("Linking CAF Ptr Locations:");
303 while(CAFlist != NULL) {
304 DEBUG_LINK_CAF(CAFlist);
305 LINK_LOCATION_TO_CLOSURE(&IND_CLOSURE_PTR(CAFlist));
/* CAFs are chained through their IND_CLOSURE_LINK field. */
306 CAFlist = (P_) IND_CLOSURE_LINK(CAFlist);
310 #endif /* defined(_INFO_COMPACTING) */