/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2006
 *
 * Tidying up a thread when it stops running
 *
 * ---------------------------------------------------------------------------*/
// #include "PosixSource.h"
#include "Rts.h"

#include "ThreadPaused.h"
#include "sm/Storage.h"
#include "Updates.h"
#include "RaiseAsync.h"
#include "Trace.h"
#include "Threads.h"

#include <string.h> // for memmove()
/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits. We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */
struct stack_gap { StgWord gap_size; struct stack_gap *next_gap; };

static void
stackSqueeze(Capability *cap, StgTSO *tso, StgPtr bottom)
{
    StgPtr frame;
    rtsBool prev_was_update_frame;
    StgClosure *updatee = NULL;
    StgRetInfoTable *info;
    StgWord current_gap_size;
    struct stack_gap *gap;
    // Traverse the stack upwards, replacing adjacent update frames
    // with a single update frame and a "stack gap". A stack gap
    // contains two values: the size of the gap, and the distance
    // to the next gap (or the stack top).
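    //
    // For example (illustrative only): if three update frames sit next to
    // each other on the stack, the one nearest the top keeps its role as a
    // real update frame; the other two have their updatees redirected to the
    // surviving frame's updatee (via updateThunk below, unless they already
    // point at the same closure) and their slots are overwritten with a
    // stack_gap record describing how many words the gap covers and where
    // the next gap (or the stack top) lies.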
    frame = tso->stackobj->sp;

    ASSERT(frame < bottom);

    prev_was_update_frame = rtsFalse;
    current_gap_size = 0;
    gap = (struct stack_gap *) (frame - sizeofW(StgUpdateFrame));
    while (frame <= bottom) {

        info = get_ret_itbl((StgClosure *)frame);

        switch (info->i.type) {

        case UPDATE_FRAME:
        {
            StgUpdateFrame *upd = (StgUpdateFrame *)frame;

            if (prev_was_update_frame) {
                /* wasn't there something about update squeezing and ticky to be
                 * sorted out? oh yes: we aren't counting each enter properly
                 * in this case. See the log somewhere. KSW 1999-04-21
                 *
                 * Check two things: that the two update frames don't point to
                 * the same object, and that the updatee_bypass isn't already an
                 * indirection. Both of these cases only happen when we're in a
                 * black hole-style loop (and there are multiple update frames
                 * on the stack pointing to the same closure), but they can both
                 * screw us up if we don't check.
                 */
                if (upd->updatee != updatee && !closure_IND(upd->updatee)) {
                    updateThunk(cap, tso, upd->updatee, updatee);
                }
                // now mark this update frame as a stack gap. The gap
                // marker resides in the bottom-most update frame of
                // the series of adjacent frames, and covers all the
                // frames in this series.
                current_gap_size += sizeofW(StgUpdateFrame);
                ((struct stack_gap *)frame)->gap_size = current_gap_size;
                ((struct stack_gap *)frame)->next_gap = gap;

                frame += sizeofW(StgUpdateFrame);
                continue;
            }
            // single update frame, or the topmost update frame in a series
            prev_was_update_frame = rtsTrue;
            updatee = upd->updatee;
            frame += sizeofW(StgUpdateFrame);
            continue;
        }
        default:
            prev_was_update_frame = rtsFalse;

            // we're not in a gap... check whether this is the end of a gap
            // (an update frame can't be the end of a gap).
            if (current_gap_size != 0) {
                gap = (struct stack_gap *) (frame - sizeofW(StgUpdateFrame));
            }
            current_gap_size = 0;

            frame += stack_frame_sizeW((StgClosure *)frame);
            continue;
        }
    }
    if (current_gap_size != 0) {
        gap = (struct stack_gap *) (frame - sizeofW(StgUpdateFrame));
    }
    // Now we have a stack with gaps in it, and we have to walk down
    // shoving the stack up to fill in the gaps. A diagram might
    // help:
    //
    //     | ********* |
    //     | ********* | <- sp
    //     |           |
    //     |           | <- gap_start
    //     | ......... |                ^
    //     | stack_gap | <- gap         | chunk_size
    //     | ......... |                |
    //     | ......... | <- gap_end     v
    //     | ********* |
    //     | ********* | <- frame
    //     |           |
    //     |           | <- bottom
    //
    //    'sp' points to the current top-of-stack
    //    'gap' points to the stack_gap structure inside the gap
    //    ***** indicates real stack data
    //    ..... indicates gap
    //    <empty> indicates unused
    //
    {
        StgWord8 *sp;
        StgWord8 *gap_start, *next_gap_start, *gap_end;
        nat chunk_size;

        next_gap_start = (StgWord8*)gap + sizeof(StgUpdateFrame);
        sp = next_gap_start;
        while ((StgPtr)gap > tso->stackobj->sp) {

            // we're working in *bytes* now...
            gap_start = next_gap_start;
            gap_end = gap_start - gap->gap_size * sizeof(W_);

            gap = gap->next_gap;
            next_gap_start = (StgWord8*)gap + sizeof(StgUpdateFrame);

            chunk_size = gap_end - next_gap_start;
            sp -= chunk_size;
            memmove(sp, next_gap_start, chunk_size);
        }

        tso->stackobj->sp = (StgPtr)sp;
    }
}
/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here. We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */
void
threadPaused(Capability *cap, StgTSO *tso)
{
    StgClosure *frame;
    StgRetInfoTable *info;
    const StgInfoTable *bh_info;
    const StgInfoTable *cur_bh_info USED_IF_THREADS;
    StgClosure *bh;
    StgPtr stack_end;
    nat words_to_squeeze = 0;
    nat weight           = 0;
    nat weight_pending   = 0;
    rtsBool prev_was_update_frame = rtsFalse;
    // Check to see whether we have threads waiting to raise
    // exceptions, and we're not blocking exceptions, or are blocked
    // interruptibly. This is important; if a thread is running with
    // TSO_BLOCKEX and becomes blocked interruptibly, this is the only
    // place we ensure that the blocked_exceptions get a chance.
    maybePerformBlockedException (cap, tso);
    if (tso->what_next == ThreadKilled) { return; }
    // NB. Blackholing is *compulsory*, we must either do lazy
    // blackholing, or eager blackholing consistently. See Note
    // [upd-black-hole] in sm/Scav.c.
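    //
    // The loop below walks the stack from sp towards the bottom: each
    // update frame that has not been visited before is marked, and its
    // updatee is overwritten with a BLACKHOLE whose payload points at this
    // TSO. Along the way we count how many words a stack squeeze would
    // save (words_to_squeeze) versus how many it would have to move
    // (weight), to decide at the end whether squeezing is worthwhile.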
    stack_end = tso->stackobj->stack + tso->stackobj->stack_size;

    frame = (StgClosure *)tso->stackobj->sp;

    while ((P_)frame < stack_end) {
        info = get_ret_itbl(frame);

        switch (info->i.type) {

        case UPDATE_FRAME:

            // If we've already marked this frame, then stop here.
            if (frame->header.info == (StgInfoTable *)&stg_marked_upd_frame_info) {
                if (prev_was_update_frame) {
                    words_to_squeeze += sizeofW(StgUpdateFrame);
                    weight += weight_pending;
                    weight_pending = 0;
                }
                goto end;
            }
            SET_INFO(frame, (StgInfoTable *)&stg_marked_upd_frame_info);

            bh = ((StgUpdateFrame *)frame)->updatee;
            bh_info = bh->header.info;

#ifdef THREADED_RTS
        retry:
#endif
            if (bh_info == &stg_BLACKHOLE_info ||
                bh_info == &stg_WHITEHOLE_info)
            {
                debugTrace(DEBUG_squeeze,
                           "suspending duplicate work: %ld words of stack",
                           (long)((StgPtr)frame - tso->stackobj->sp));
                // If this closure is already an indirection, then
                // suspend the computation up to this point.
                // NB. check raiseAsync() to see what happens when
                // we're in a loop (#2783).
                suspendComputation(cap,tso,(StgUpdateFrame*)frame);

                // Now drop the update frame, and arrange to return
                // the value to the frame underneath:
                tso->stackobj->sp = (StgPtr)frame + sizeofW(StgUpdateFrame) - 2;
                tso->stackobj->sp[1] = (StgWord)bh;
                ASSERT(bh->header.info != &stg_TSO_info);
                tso->stackobj->sp[0] = (W_)&stg_enter_info;
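                // The stack now looks like this (top of stack first):
                //     sp[0] : stg_enter_info   -- return by entering ...
                //     sp[1] : bh               -- ... the blackholed updatee
                //     ...   : the frame that was underneath the update frame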
                // And continue with threadPaused; there might be
                // yet more computation to suspend.
                frame = (StgClosure *)(tso->stackobj->sp + 2);
                prev_was_update_frame = rtsFalse;
                continue;
            }
            // zero out the slop so that the sanity checker can tell
            // where the next closure is.
            OVERWRITING_CLOSURE(bh);
            // an EAGER_BLACKHOLE or CAF_BLACKHOLE gets turned into a
            // BLACKHOLE here.
#ifdef THREADED_RTS
            // first we turn it into a WHITEHOLE to claim it, and if
            // successful we write our TSO and then the BLACKHOLE info pointer.
            cur_bh_info = (const StgInfoTable *)
                cas((StgVolatilePtr)&bh->header.info,
                    (StgWord)bh_info,
                    (StgWord)&stg_WHITEHOLE_info);

            if (cur_bh_info != bh_info) {
                bh_info = cur_bh_info;
                goto retry;
            }
#endif
            // The payload of the BLACKHOLE points to the TSO
            ((StgInd *)bh)->indirectee = (StgClosure *)tso;
            write_barrier();
            SET_INFO(bh,&stg_BLACKHOLE_info);
            // .. and we need a write barrier, since we just mutated the closure:
            recordClosureMutated(cap,bh);

            // We pretend that bh has just been created.
            LDV_RECORD_CREATE(bh);

            frame = (StgClosure *) ((StgUpdateFrame *)frame + 1);
            if (prev_was_update_frame) {
                words_to_squeeze += sizeofW(StgUpdateFrame);
                weight += weight_pending;
                weight_pending = 0;
            }
            prev_was_update_frame = rtsTrue;
            break;
        case UNDERFLOW_FRAME:
        case STOP_FRAME:
            goto end;

        // normal stack frames; do nothing except advance the pointer
        default:
        {
            nat frame_size = stack_frame_sizeW(frame);
            weight_pending += frame_size;
            frame = (StgClosure *)((StgPtr)frame + frame_size);
            prev_was_update_frame = rtsFalse;
        }
        }
    }

end:
    debugTrace(DEBUG_squeeze,
               "words_to_squeeze: %d, weight: %d, squeeze: %s",
               words_to_squeeze, weight,
               weight < words_to_squeeze ? "YES" : "NO");
    // Should we squeeze or not? Arbitrary heuristic: we squeeze if
    // the number of words we have to shift down is less than the
    // number of stack words we squeeze away by doing so.
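    //
    // For example (illustrative numbers only): a run of three adjacent
    // update frames contributes 2 * sizeofW(StgUpdateFrame) to
    // words_to_squeeze, while weight counts the words of ordinary frames
    // that would have to be shifted down to close the gaps; we squeeze
    // when that cost is small (<= 8 words) or smaller than the saving.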
    if (RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue &&
        ((weight <= 8 && words_to_squeeze > 0) || weight < words_to_squeeze)) {
        // threshold above bumped from 5 to 8 as a result of #2797
        stackSqueeze(cap, tso, (StgPtr)frame);
        tso->flags |= TSO_SQUEEZED;
        // This flag tells threadStackOverflow() that the stack was
        // squeezed, because it may not need to be expanded.
    } else {
        tso->flags &= ~TSO_SQUEEZED;
    }
}