1 %************************************************************************
3 \section[SMinterface.lh]{Main storage manager interface}
5 %************************************************************************
7 %% I have changed most of the text here, in an attempt to understand
8 %% what's going on. Please let me know about any mistakes, so that
9 %% I can correct them! KH@15/10/92 (UK)
11 %% I have also split the original monster into SMinterface.lh,
12 %% SMClosures.lh and SMInfoTables.lh. The latter two are
15 This describes the interface used between the STG-machine
16 reducer and the storage manager. The overriding goal is to isolate
17 the implementation details of each from the other.
19 Multi-slurp protection:
26 {}\input{epsf} % Uses encapsulated PostScript diagrams
29 %************************************************************************
31 \subsection[SM-calling-interface]{Calling interface}
33 %************************************************************************
35 The @smInfo@ structure is used to pass all information back and forth
36 between the storage manager and the STG world.
38 WARNING: If you modify this structure, you {\em must} modify the
39 native-code generator as well, because the offsets for various fields
40 are hard-coded into the NCG. (In nativeGen/StixMacro.lhs).
44 P_ hp; /* last successfully allocated word */
45 P_ hplim; /* last allocatable word */
47 I_ rootno; /* No of heap roots stored in roots */
48 P_ *roots; /* Array of heap roots -- must be allocated (not static) */
49 P_ CAFlist; /* List of updated CAF's */
51 #if defined(GCap) || defined(GCgn)
52 P_ OldMutables; /* List of old generation mutable closures */
53 P_ OldLim; /* Ptr to end of the old generation */
57 P_ ForeignObjList; /* List of all Foreign objects (in new generation) */
59 #if defined(GCap) || defined(GCgn)
60 P_ OldForeignObjList; /* List of all Foreign objects in old generation */
63 P_ StablePointerTable;
64 /* Heap allocated table used to store stable pointers in */
67 I_ hardHpOverflowSize; /* Some slop at the top of the heap which
68 (hopefully) provides enough space to let
69 us recover from heap overflow exceptions */
72 extern smInfo StorageMgrInfo;
76 Maximum number of roots storable in the heap roots array.
77 Question: Where are the stable pointer roots? (JSM)
78 Answer: They're on the heap in a "Stable Pointer Table". (ADR)
81 # define SM_MAXROOTS 8 /* 8 Vanilla Regs */
85 # define SM_MAXROOTS (10 + (MAX_PROC*2) + 2 )
86 /* unthreaded + hd/tl thread queues + Current/Main TSOs */
88 # define SM_MAXROOTS 5 /* See c-as-asm/HpOverflow.lc */
91 # define SM_MAXROOTS 6 /* See c-as-asm/HpOverflow.lc */
96 The storage manager is accessed exclusively through these routines:
98 IF_RTS(void initSM (STG_NO_ARGS);)
99 IF_RTS(rtsBool exitSM PROTO((smInfo *sm));)
100 IF_RTS(rtsBool initStacks PROTO((smInfo *sm));)
101 IF_RTS(rtsBool initHeap PROTO((smInfo *sm));)
103 IF_RTS(rtsBool initThreadPools (STG_NO_ARGS);)
106 IF_RTS(void init_gr_profiling PROTO((int, char **, int, char **));)
109 I_ collectHeap PROTO((W_ reqsize, smInfo *sm, rtsBool do_full_collection));
111 IF_RTS(void unmapMiddleStackPage PROTO((char *, int));) /* char * == caddr_t ? */
113 #if defined(PROFILING) || defined(PAR)
114 IF_RTS(void handle_tick_serial(STG_NO_ARGS);)
115 IF_RTS(void handle_tick_noserial(STG_NO_ARGS);)
118 /* EXTFUN(_startMarkWorld); */
120 StgDouble usertime(STG_NO_ARGS);
121 StgDouble elapsedtime(STG_NO_ARGS);
122 void start_time(STG_NO_ARGS);
123 void end_init(STG_NO_ARGS);
126 void EvacuateLocalGAs PROTO((rtsBool full));
127 void RebuildGAtables PROTO((rtsBool full));
131 @initSM@ finalizes any runtime parameters of the storage manager.
133 @exitSM@ does any cleaning up required by the storage manager before
134 the program is terminated. Its main purpose is to print any summary
137 @initStacks@ allocates the A and B stacks (sequential only). It
138 initialises the @spa@, @spb@, @sua@, and @sub@ fields of @sm@
139 appropriately for empty stacks. Successive calls to @initStacks@
140 re-initialise the stacks.
142 @initHeap@ allocates the heap. It initialises the @hp@ and @hplim@
143 fields of @sm@ to represent an empty heap for the compiled-in garbage
144 collector. It also allocates the @roots@ array for later use within
145 @collectHeap@, and initialises @CAFlist@ to be the empty list. The
146 @roots@ array must be large enough to hold at least @SM_MAXROOTS@
147 roots. If we are using Appel's collector it also initialises the
150 In the sequential system, it also initialises the stable pointer table
151 and the @ForeignObjList@ (and @OldForeignObjList@) fields.
153 @collectHeap@ invokes the garbage collector that was requested at
154 compile time. @reqsize@ is the size of the request (in words) that
155 resulted in the overflow. If the garbage collection succeeds, then at
156 least @reqsize@ words will be available. @collectHeap@ requires all
157 the fields of @sm@ to be initialised appropriately (from the
158 STG-machine registers). The following are identified as
161 \item The @roots@ array.
162 \item The updated CAFs recorded in @CAFlist@.
164 \item Update frames on the B Stack. These may be ``squeezed'' out
165 if they are the only reference to a closure --- thus avoiding the
167 \item The stable pointer table. (In sequential system.)
170 There are three possible results from a garbage collection:
172 \item[\tr{GC_HARD_LIMIT_EXCEEDED} (\tr{reqsize > hplim - hp})]
173 The heap size exceeds the hard heap limit: we report an error and
176 \item[\tr{GC_SOFT_LIMIT_EXCEEDED} (\tr{reqsize + hardHpOverflowSize > hplim - hp})]
177 The heap size exceeds the soft heap limit: set \tr{hardHpOverflowSize}
178 to \tr{0} so that we can use the overflow space, unwind the stack and
179 call an appropriate piece of Haskell to handle the error.
181 \item[\tr{GC_SUCCESS} (\tr{reqsize + hardHpOverflowSize <= hplim - hp})]
182 The heap size is less than the soft heap limit.
185 \item @hp@ and @hplim@ will indicate the new space available for
186 allocation. But we'll subtract \tr{hardHpOverflowSize} from
187 \tr{hplim} so that we'll GC when we hit the soft limit.
189 \item The elements of the @roots@ array will point to the new
190 locations of the closures.
192 \item @spb@ and @sub@ will be updated to reflect the new state of the
193 B stack arising from any update frame ``squeezing'' [sequential only].
195 \item The elements of @CAFlist@ and the stable pointers will be
196 updated to point to the new locations of the closures they reference.
198 \item Any members of @ForeignObjList@ which became garbage should have
199 been reported (by calling their finalising routines); and the @(Old)ForeignObjList@
200 updated to contain only those Foreign objects which are still live.
206 #define GC_HARD_LIMIT_EXCEEDED 0
207 #define GC_SOFT_LIMIT_EXCEEDED 1
211 %************************************************************************
213 \subsection[SM-what-really-happens]{``What really happens in a garbage collection?''}
215 %************************************************************************
217 This is a brief tutorial on ``what really happens'' going to/from the
218 storage manager in a garbage collection.
221 %------------------------------------------------------------------------
222 \item[The heap check:]
226 If you gaze into the C output of GHC, you see many macro calls like:
228 HEAP_CHK_2PtrsLive((_FHS+2));
231 This expands into the C (roughly speaking...):
233 Hp = Hp + (_FHS+2); /* optimistically move heap pointer forward */
235 GC_WHILE_OR_IF (HEAP_OVERFLOW_OP(Hp, HpLim) OR_INTERVAL_EXPIRED) {
236 STGCALL2_GC(PerformGC, <liveness-bits>, (_FHS+2));
237 /* Heap full. Call "PerformGC" with 2 arguments, "<liveness>",
238 (info about what ptrs are live) and "_FHS+2" (words
239 requested), via the magical routine "callWrapper_GC",
240 which indicates ``I am calling a routine in which GC
241 may happen'' (a safe bet for `PerformGC').
246 In the parallel world, where we will need to re-try the heap check,
247 @GC_WHILE_OR_IF@ will be a ``while''; in the sequential world, it will
250 The ``heap lookahead'' checks, which are similar and used for
251 multi-precision @Integer@ ops, have some further complications. See
252 the commentary there (\tr{StgMacros.lh}).
254 %------------------------------------------------------------------------
255 \item[Into @callWrapper_GC@...:]
257 When we failed the heap check (above), we were inside the
258 GCC-registerised ``threaded world.'' @callWrapper_GC@ is all about
259 getting in and out of the threaded world. On SPARCs, with register
260 windows, the name of the game is not shifting windows until we have
261 what we want out of the old one. In tricky cases like this, it's best
262 written in assembly language.
264 Though the principle of ``save everything away'' is the same in both
265 the sequential and parallel worlds, the details are different.
267 For the sequential world:
270 @callWrapper_GC@ saves the return address.
272 It saves the arguments passed to it (so they don't get lost).
274 Save the machine registers used in the STG threaded world in their
275 \tr{*_SAVE} global-variable backup locations. E.g., register \tr{Hp}
276 is saved into \tr{Hp_SAVE}.
278 Call the routine it was asked to call; in this example, call
279 @PerformGC@ with arguments \tr{<liveness>}, and @_FHS+2@ (some constant)...
282 For the parallel world, a GC means giving up the thread of control.
283 So we must fill in the thread-state-object (TSO) [and its associated
284 stk object] with enough information for later resumption:
287 Save the return address in the TSO's PC field.
289 Save the machine registers used in the STG threaded world in their
290 corresponding TSO fields. We also save the pointer-liveness
291 information in the TSO.
293 The registers that are not thread-specific, notably \tr{Hp} and
294 \tr{HpLim}, are saved in the @StorageMgrInfo@ structure.
296 Call the routine it was asked to call; in this example, call
297 @PerformGC@ with arguments \tr{<liveness>} and @_FHS+2@ (some constant)...
299 (In the parallel world, we don't expect it to return...)
302 %------------------------------------------------------------------------
303 \item[Into the heap overflow wrapper, @PerformGC@ [sequential]:]
305 The first argument (\tr{<liveness>}, in our example) says what registers
306 are live, i.e., are ``roots'' the storage manager needs to know.
308 StorageMgrInfo.rootno = 2;
309 StorageMgrInfo.roots[0] = (P_) Ret1_SAVE;
310 StorageMgrInfo.roots[1] = (P_) Ret2_SAVE;
313 We further: (a)~move the heap-pointer back [we had optimistically
314 advanced it, in the initial heap check], (b)~load up the @smInfo@ data
315 from the STG registers' \tr{*_SAVE} locations, and (c)~FINALLY: call
318 IT IS AT THIS POINT THAT THE WORLD IS COMPLETELY TIDY.
320 %------------------------------------------------------------------------
321 \item[Into the heap overflow wrapper, @PerformGC@ [parallel]:]
323 Parallel execution is only slightly different. Most information has
324 already been saved in the TSO.
328 We still need to set up the storage manager's @roots@ array.
330 We mark on the scheduler's big ``blackboard'' that a GC is
333 We reschedule, i.e., this thread gives up control. (The scheduler
334 will presumably initiate a garbage-collection, but it may have to do
335 any number of other things---flushing, for example---before ``normal
336 execution'' resumes; and it most certainly may not be this thread that
337 resumes at that point!)
340 %------------------------------------------------------------------------
341 \item[Into/out of @collectHeap@ [sequential only]:]
343 @collectHeap@ does the business and reports back whether it freed up
346 %------------------------------------------------------------------------
347 \item[Out of the heap overflow wrapper, @PerformGC@ [sequential only]:]
349 We begin our return back to doing useful work by: (a)~reloading the
350 appropriate STG-register \tr{*_SAVE} locations from (presumably
351 changed) @smInfo@; (b) re-advance the heap-pointer---which we've been
352 trying to do for a week or two---now that there is enough space.
354 We must further restore appropriate @Ret?@ registers from the storage
355 manager's roots array; in this example:
358 Ret1_SAVE = (W_) StorageMgrInfo.roots[0];
359 Ret2_SAVE = (W_) StorageMgrInfo.roots[1];
362 %------------------------------------------------------------------------
363 \item[Out of @callWrapper_GC@ [sequential]:]
365 We pop out of heap-overflow code and are ready to resume STG
366 ``threaded world'' stuff.
368 The main thing is to re-load up the GCC-ised machine registers from
369 the relevant \tr{*_SAVE} locations; e.g., \tr{SpA} from \tr{SpA_SAVE}.
371 To conclude, @callWrapper_GC@ merely {\em jumps} back to the return
372 address which it was given originally.
374 WE'RE BACK IN (SEQUENTIAL) BUSINESS.
376 %------------------------------------------------------------------------
377 \item[Out of @callWrapper_GC@ [parallel]:]
379 When this thread is finally resumed after GC (and who knows what
380 else), it will restart by the normal enter-TSO/enter-stack-object
381 sequence, which has the effect of re-loading the registers, etc.,
382 (i.e., restoring the state).
384 Because the address we saved in the TSO's PC field was that at the end
385 of the heap check, and because the check is a while-loop in the
386 parallel system, we will now loop back around, and make sure there is
387 enough space before continuing.
390 %************************************************************************
392 \subsection[SM-stack-info]{Stacks}
394 %************************************************************************
396 There are two stacks, as in the STG paper \cite{new-stg-paper}.
399 The A stack contains only closure pointers.
401 The B stack contains basic values, return addresses, and update
404 The A stack and B stack grow towards each other, so they overflow when
405 they collide. Currently the A stack grows downward (towards lower
406 addresses); the B stack grows upward. (We localise the stuff which
407 uses this information within macros defined in @StgDirections.h@)
409 During reduction, SpA and SpB point to the topmost allocated word of
410 the corresponding stack (though they may not be up to date in the
411 middle of a basic block).
413 Each stack also has a {\em stack update pointer}, SuA and SuB, which
414 point to the topmost word of the most recent update frame in the
415 corresponding stack. (Colloquially, SuA and SuB point to the first
416 items on their respective stacks ``that you cannot have.'')
418 A standard update frame (on the B stack) looks like this
419 (stack grows downward in this picture):
421 \mbox{\epsffile{update-frame.ps}}
423 The SuB therefore points to the Update return vector component of
424 the topmost update frame.
427 A {\em constructor} update frame, which is pushed only by closures
428 which know they will evaluate to a data object, looks just the
429 same, but without the saved SuA pointer.
431 We store the following information concerning the stacks in a global
432 structure (sequential only).
434 #if 1 /* ndef CONCURRENT * /? HWL */
437 PP_ botA; /* Points to bottom-most word of A stack */
438 P_ botB; /* Points to bottom-most word of B stack */
441 extern stackData stackInfo;
443 #endif /* !CONCURRENT */
446 %************************************************************************
448 \subsection[SM-choose-flavour]{Deciding which GC flavour is in force...}
450 %************************************************************************
452 Each garbage collector requires different garbage collection entries
457 #define _INFO_COPYING
461 #define _INFO_COMPACTING
462 #define _INFO_MARKING
465 #if defined(GCdu) || defined (GCap) || defined (GCgn)
466 #define _INFO_COPYING
467 #define _INFO_COMPACTING
468 #define _INFO_MARKING
471 /* NO_INFO_SPECIFIED (ToDo: an #error ?) */
477 %************************************************************************
479 %\subsection[Info.lh]{Info Pointer Definitions}
481 %************************************************************************
487 %************************************************************************
489 %\subsection[Parallel.lh]{Parallel Machine Definitions}
491 %************************************************************************
498 %************************************************************************
500 %\subsection[CostCentre.lh]{Profiling Definitions}
502 %************************************************************************
505 \input{CostCentre.lh}
509 %************************************************************************
511 %\subsection[SM-closures]{Closure-Related Definitions}
513 %************************************************************************
516 \input{SMClosures.lh}
521 %************************************************************************
523 %\subsection[SM-info-tables]{Info-table Related Definitions}
525 %************************************************************************
528 \input{SMInfoTables.lh}
532 End multi-slurp protection:
534 #endif /* SMinterface_H */