2 % (c) The University of Glasgow 2006
3 % (c) The GRASP/AQUA Project, Glasgow University, 1992-1998
5 \section[CgHeapery]{Heap management functions}
{-# OPTIONS -w #-}
-- The above warning suppression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and fix
-- any warnings in the module. See
--     http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#Warnings
-- for details
module CgHeapery (
	initHeapUsage, getVirtHp, setVirtHp, setRealHp,
17 getHpRelOffset, hpRel,
19 funEntryChecks, thunkEntryChecks,
20 altHeapCheck, unbxTupleHeapCheck,
21 hpChkGen, hpChkNodePointsAssignSp0,
22 stkChkGen, stkChkNodePoints,
24 layOutDynConstr, layOutStaticConstr,
25 mkVirtHeapOffsets, mkStaticClosureFields, mkStaticClosure,
	allocDynClosure, emitSetDynHdr
  ) where
30 #include "HsVersions.h"
60 %************************************************************************
62 \subsection[CgUsages-heapery]{Monad things for fiddling with heap usage}
64 %************************************************************************
The heap always grows upwards, so @hpRel@ is easy to compute:
69 hpRel :: VirtualHpOffset -- virtual offset of Hp
70 -> VirtualHpOffset -- virtual offset of The Thing
71 -> WordOff -- integer word offset
72 hpRel hp off = off - hp
@initHeapUsage@ applies a function to the amount of heap that it uses.
It initialises the heap usage to zeros, and passes on an unchanged
heap usage.
79 It is usually a prelude to performing a GC check, so everything must
80 be in a tidy and consistent state.
rje: Note the slightly subtle fixed-point behaviour needed here.
initHeapUsage :: (VirtualHpOffset -> Code) -> Code
initHeapUsage fcode
  = do	{ orig_hp_usage <- getHpUsage
	; setHpUsage initHpUsage
	; fixC (\heap_usage2 -> do
		{ fcode (heapHWM heap_usage2)
		; getHpUsage })
	; setHpUsage orig_hp_usage }
setVirtHp :: VirtualHpOffset -> Code
setVirtHp new_virtHp
  = do	{ hp_usage <- getHpUsage
	; setHpUsage (hp_usage {virtHp = new_virtHp}) }
getVirtHp :: FCode VirtualHpOffset
getVirtHp
  = do	{ hp_usage <- getHpUsage
	; return (virtHp hp_usage) }
setRealHp :: VirtualHpOffset -> Code
setRealHp new_realHp
  = do	{ hp_usage <- getHpUsage
	; setHpUsage (hp_usage {realHp = new_realHp}) }
109 getHpRelOffset :: VirtualHpOffset -> FCode CmmExpr
110 getHpRelOffset virtual_offset
111 = do { hp_usg <- getHpUsage
112 ; return (cmmRegOffW hpReg (hpRel (realHp hp_usg) virtual_offset)) }
116 %************************************************************************
118 Layout of heap objects
120 %************************************************************************
layOutDynConstr, layOutStaticConstr
	:: DataCon -> [(CgRep,a)]
	-> (ClosureInfo,
	    [(a,VirtualHpOffset)])
129 layOutDynConstr = layOutConstr False
130 layOutStaticConstr = layOutConstr True
layOutConstr is_static data_con args
   = (mkConInfo is_static data_con tot_wds ptr_wds,
      things_w_offsets)
  where
    (tot_wds,		-- #ptr_wds + #nonptr_wds
     ptr_wds,		-- #ptr_wds
     things_w_offsets) = mkVirtHeapOffsets False{-not a thunk-} args
@mkVirtHeapOffsets@ always returns boxed things with smaller offsets
than the unboxed things, and furthermore, the offsets in the result
list increase monotonically.
mkVirtHeapOffsets
	:: Bool		-- True <=> is a thunk
148 -> [(CgRep,a)] -- Things to make offsets for
149 -> (WordOff, -- _Total_ number of words allocated
150 WordOff, -- Number of words allocated for *pointers*
151 [(a, VirtualHpOffset)])
152 -- Things with their offsets from start of
153 -- object in order of increasing offset
155 -- First in list gets lowest offset, which is initial offset + 1.
mkVirtHeapOffsets is_thunk things
  = let	non_void_things		      = filterOut (isVoidArg . fst) things
	(ptrs, non_ptrs)	      = separateByPtrFollowness non_void_things
	(wds_of_ptrs, ptrs_w_offsets) = mapAccumL computeOffset 0 ptrs
	(tot_wds, non_ptrs_w_offsets) = mapAccumL computeOffset wds_of_ptrs non_ptrs
    in
    (tot_wds, wds_of_ptrs, ptrs_w_offsets ++ non_ptrs_w_offsets)
  where
    hdr_size	| is_thunk  = thunkHdrSize
		| otherwise = fixedHdrSize

    computeOffset wds_so_far (rep, thing)
      = (wds_so_far + cgRepSizeW rep, (thing, hdr_size + wds_so_far))
173 %************************************************************************
175 Lay out a static closure
177 %************************************************************************
179 Make a static closure, adding on any extra padding needed for CAFs,
180 and adding a static link field if necessary.
mkStaticClosureFields
	:: ClosureInfo
	-> CostCentreStack
186 -> Bool -- Has CAF refs
187 -> [CmmLit] -- Payload
188 -> [CmmLit] -- The full closure
mkStaticClosureFields cl_info ccs caf_refs payload
  = mkStaticClosure info_lbl ccs payload padding_wds
	static_link_field saved_info_field
  where
    info_lbl = infoTableLabelFromCI cl_info
    -- CAFs must have consistent layout, regardless of whether they
    -- are actually updatable or not.  The layout of a CAF is:
    --
    --        3 saved_info
    --        2 static_link
    --        1 indirectee
    --        0 info ptr
    --
    -- the static_link and saved_info fields must always be in the same
    -- place.  So we use closureNeedsUpdSpace rather than
    -- closureUpdReqd here:

    is_caf = closureNeedsUpdSpace cl_info
    padding_wds
	| not is_caf = []
	| otherwise  = ASSERT(null payload) [mkIntCLit 0]

    static_link_field
	| is_caf || staticClosureNeedsLink cl_info = [static_link_value]
	| otherwise				   = []

    saved_info_field
	| is_caf    = [mkIntCLit 0]
	| otherwise = []

	-- For a static constructor which has NoCafRefs, we set the
	-- static link field to a non-zero value so the garbage
	-- collector will ignore it.
    static_link_value
	| caf_refs  = mkIntCLit 0
	| otherwise = mkIntCLit 1
229 mkStaticClosure :: CLabel -> CostCentreStack -> [CmmLit]
230 -> [CmmLit] -> [CmmLit] -> [CmmLit] -> [CmmLit]
231 mkStaticClosure info_lbl ccs payload padding_wds static_link_field saved_info_field
  =  [CmmLabel info_lbl]
  ++ variable_header_words
  ++ payload
  ++ padding_wds
  ++ static_link_field
  ++ saved_info_field
  where
    variable_header_words
	=  staticGranHdr
	++ staticParHdr
	++ staticProfHdr ccs
	++ staticTickyHdr
246 %************************************************************************
248 \subsection[CgHeapery-heap-overflow]{Heap overflow checking}
250 %************************************************************************
The new code for heapChecks.  For GrAnSim the code for doing a heap check
and doing a context switch has been separated.  In particular, the HEAP_CHK
macro only performs a heap check; THREAD_CONTEXT_SWITCH should be used for
doing a context switch.  GRAN_FETCH_AND_RESCHEDULE must be put at the
beginning of every slow entry code in order to simulate the fetching of
closures.  If fetching is necessary (i.e. the current closure is not local),
an automatic context switch is done.
260 --------------------------------------------------------------
261 A heap/stack check at a function or thunk entry point.
264 funEntryChecks :: ClosureInfo -> CmmStmts -> Code -> Code
265 funEntryChecks cl_info reg_save_code code
266 = hpStkCheck cl_info True reg_save_code code
268 thunkEntryChecks :: ClosureInfo -> Code -> Code
269 thunkEntryChecks cl_info code
270 = hpStkCheck cl_info False noStmts code
272 hpStkCheck :: ClosureInfo -- Function closure
273 -> Bool -- Is a function? (not a thunk)
	   -> CmmStmts		-- Register saves
	   -> Code
	   -> Code
278 hpStkCheck cl_info is_fun reg_save_code code
  = getFinalStackHW $ \ spHw -> do
	{ sp <- getRealSp
	; let stk_words = spHw - sp
	; initHeapUsage $ \ hpHw -> do
	    {	-- Emit heap checks, but be sure to do it lazily so
		-- that the conditionals on hpHw don't cause a black hole
	      codeOnly $ do
		{ do_checks stk_words hpHw full_save_code rts_label
		; tickyAllocHeap hpHw }
	    ; setRealHp hpHw
	    ; code }
	}
  where
    node_asst
	| nodeMustPointToIt (closureLFInfo cl_info)
	= noStmts
	| otherwise
	= oneStmt (CmmAssign nodeReg (CmmLit (CmmLabel closure_lbl)))
297 -- Strictly speaking, we should tag node here. But if
298 -- node doesn't point to the closure, the code for the closure
299 -- cannot depend on the value of R1 anyway, so we're safe.
300 closure_lbl = closureLabelFromCI cl_info
302 full_save_code = node_asst `plusStmts` reg_save_code
304 rts_label | is_fun = CmmReg (CmmGlobal GCFun)
305 -- Function entry point
306 | otherwise = CmmReg (CmmGlobal GCEnter1)
307 -- Thunk or case return
308 -- In the thunk/case-return case, R1 points to a closure
309 -- which should be (re)-entered after GC
312 Heap checks in a case alternative are nice and easy, provided this is
313 a bog-standard algebraic case. We have in our hand:
315 * one return address, on the stack,
316 * one return value, in Node.
The canned code for this heap check failure just pushes Node on the
stack, saying 'EnterGHC' to return.  The scheduler will return by
320 entering the top value on the stack, which in turn will return through
321 the return address, getting us back to where we were. This is
322 therefore only valid if the return value is *lifted* (just being
323 boxed isn't good enough).
325 For primitive returns, we have an unlifted value in some register
326 (either R1 or FloatReg1 or DblReg1). This means using specialised
327 heap-check code for these cases.
altHeapCheck
    :: AltType	-- PolyAlt, PrimAlt, AlgAlt, but *not* UbxTupAlt
		-- (Unboxed tuples are dealt with by ubxTupleHeapCheck)
    -> Code	-- Continuation
    -> Code
altHeapCheck alt_type code
  = initHeapUsage $ \ hpHw -> do
	{ codeOnly $ do
	     { do_checks 0 {- no stack chk -} hpHw
			 noStmts {- nothing to save -}
			 (rts_label alt_type)
	     ; tickyAllocHeap hpHw }
	; setRealHp hpHw
	; code }
  where
    rts_label PolyAlt = CmmLit (CmmLabel (mkRtsCodeLabel SLIT("stg_gc_unpt_r1")))
	-- Do *not* enter R1 after a heap check in
	-- a polymorphic case.  It might be a function,
	-- and the entry code for a function (currently)
	-- applies it.
	--
	-- However, R1 is guaranteed to be a pointer.
353 rts_label (AlgAlt tc) = stg_gc_enter1
354 -- Enter R1 after the heap check; it's a pointer
    rts_label (PrimAlt tc)
      = CmmLit $ CmmLabel $
	case primRepToCgRep (tyConPrimRep tc) of
	  VoidArg   -> mkRtsCodeLabel SLIT("stg_gc_noregs")
	  FloatArg  -> mkRtsCodeLabel SLIT("stg_gc_f1")
	  DoubleArg -> mkRtsCodeLabel SLIT("stg_gc_d1")
	  LongArg   -> mkRtsCodeLabel SLIT("stg_gc_l1")
			-- R1 is boxed but unlifted:
	  PtrArg    -> mkRtsCodeLabel SLIT("stg_gc_unpt_r1")
			-- R1 is unboxed:
	  NonPtrArg -> mkRtsCodeLabel SLIT("stg_gc_unbx_r1")
368 rts_label (UbxTupAlt _) = panic "altHeapCheck"
Unboxed tuple alternatives and let-no-escapes are the two most
annoying constructs to generate code for.  For unboxed tuple returns,
there can be an arbitrary number of possibly-unboxed return values,
some of which will be in registers, with the rest on the stack.  We
always organise the stack-resident fields into pointers &
non-pointers, and pass the number of each to the heap check code.
unbxTupleHeapCheck
	:: [(Id, GlobalReg)]	-- Live registers
	-> WordOff		-- no. of stack slots containing ptrs
	-> WordOff		-- no. of stack slots containing nonptrs
	-> CmmStmts		-- code to insert in the failure path
	-> Code
	-> Code
388 unbxTupleHeapCheck regs ptrs nptrs fail_code code
389 -- We can't manage more than 255 pointers/non-pointers
390 -- in a generic heap check.
  | ptrs > 255 || nptrs > 255 = panic "unbxTupleHeapCheck"
  | otherwise
  = initHeapUsage $ \ hpHw -> do
	{ codeOnly $ do { do_checks 0 {- no stack check -} hpHw
				    full_fail_code rts_label
			; tickyAllocHeap hpHw }
	; setRealHp hpHw
	; code }
  where
400 full_fail_code = fail_code `plusStmts` oneStmt assign_liveness
401 assign_liveness = CmmAssign (CmmGlobal (VanillaReg 9)) -- Ho ho ho!
402 (CmmLit (mkWordCLit liveness))
403 liveness = mkRegLiveness regs ptrs nptrs
404 rts_label = CmmLit (CmmLabel (mkRtsCodeLabel SLIT("stg_gc_ut")))
409 %************************************************************************
413 %************************************************************************
415 When failing a check, we save a return address on the stack and
416 jump to a pre-compiled code fragment that saves the live registers
417 and returns to the scheduler.
419 The return address in most cases will be the beginning of the basic
420 block in which the check resides, since we need to perform the check
again on re-entry because someone else might have stolen the resource
in the meantime.
425 do_checks :: WordOff -- Stack headroom
426 -> WordOff -- Heap headroom
427 -> CmmStmts -- Assignments to perform on failure
	  -> CmmExpr	-- Rts address to jump to on failure
	  -> Code
430 do_checks 0 0 _ _ = nopC
431 do_checks stk hp reg_save_code rts_lbl
432 = do_checks' (CmmLit (mkIntCLit (stk*wORD_SIZE)))
433 (CmmLit (mkIntCLit (hp*wORD_SIZE)))
434 (stk /= 0) (hp /= 0) reg_save_code rts_lbl
436 -- The offsets are now in *bytes*
437 do_checks' stk_expr hp_expr stk_nonzero hp_nonzero reg_save_code rts_lbl
438 = do { doGranAllocate hp_expr
440 -- Emit a block for the heap-check-failure code
	; blk_id <- forkLabelledCode $ do
			{ whenC hp_nonzero $
			     stmtC (CmmAssign (CmmGlobal HpAlloc) hp_expr)
			; emitStmts reg_save_code
			; stmtC (CmmJump rts_lbl []) }
	-- Check for stack overflow *FIRST*; otherwise
	-- we might bump Hp and then fail the stack check
	; whenC stk_nonzero
		(stmtC (CmmCondBranch stk_oflo blk_id))

	; whenC hp_nonzero
		(stmtsC [CmmAssign hpReg
			    (cmmOffsetExprB (CmmReg hpReg) hp_expr),
			 CmmCondBranch hp_oflo blk_id])
456 -- Bump heap pointer, and test for heap exhaustion
457 -- Note that we don't move the heap pointer unless the
458 -- stack check succeeds. Otherwise we might end up
459 -- with slop at the end of the current block, which can
460 -- confuse the LDV profiler.
  where
	-- Stk overflow if (Sp - stk_bytes < SpLim)
464 stk_oflo = CmmMachOp mo_wordULt
465 [CmmMachOp mo_wordSub [CmmReg spReg, stk_expr],
466 CmmReg (CmmGlobal SpLim)]
468 -- Hp overflow if (Hp > HpLim)
469 -- (Hp has been incremented by now)
470 -- HpLim points to the LAST WORD of valid allocation space.
471 hp_oflo = CmmMachOp mo_wordUGt
472 [CmmReg hpReg, CmmReg (CmmGlobal HpLim)]
475 %************************************************************************
477 Generic Heap/Stack Checks - used in the RTS
479 %************************************************************************
482 hpChkGen :: CmmExpr -> CmmExpr -> CmmExpr -> Code
483 hpChkGen bytes liveness reentry
484 = do_checks' (CmmLit (mkIntCLit 0)) bytes False True assigns stg_gc_gen
  where
    assigns = mkStmts [
		CmmAssign (CmmGlobal (VanillaReg 9))  liveness,
		CmmAssign (CmmGlobal (VanillaReg 10)) reentry
		]
-- A heap check where R1 points to the closure to enter on return, and
-- we want to assign to Sp[0] on failure (used in AutoApply.cmm:BUILD_PAP).
493 hpChkNodePointsAssignSp0 :: CmmExpr -> CmmExpr -> Code
494 hpChkNodePointsAssignSp0 bytes sp0
495 = do_checks' (CmmLit (mkIntCLit 0)) bytes False True assign stg_gc_enter1
496 where assign = oneStmt (CmmStore (CmmReg spReg) sp0)
498 stkChkGen :: CmmExpr -> CmmExpr -> CmmExpr -> Code
499 stkChkGen bytes liveness reentry
500 = do_checks' bytes (CmmLit (mkIntCLit 0)) True False assigns stg_gc_gen
  where
    assigns = mkStmts [
		CmmAssign (CmmGlobal (VanillaReg 9))  liveness,
		CmmAssign (CmmGlobal (VanillaReg 10)) reentry
		]
507 stkChkNodePoints :: CmmExpr -> Code
508 stkChkNodePoints bytes
509 = do_checks' bytes (CmmLit (mkIntCLit 0)) True False noStmts stg_gc_enter1
511 stg_gc_gen = CmmLit (CmmLabel (mkRtsCodeLabel SLIT("stg_gc_gen")))
512 stg_gc_enter1 = CmmReg (CmmGlobal GCEnter1)
515 %************************************************************************
517 \subsection[initClosure]{Initialise a dynamic closure}
519 %************************************************************************
@allocDynClosure@ puts the thing in the heap, and modifies the virtual Hp
to account for this.
allocDynClosure
	:: ClosureInfo
	-> CmmExpr		-- Cost Centre to stick in the object
528 -> CmmExpr -- Cost Centre to blame for this alloc
529 -- (usually the same; sometimes "OVERHEAD")
531 -> [(CmmExpr, VirtualHpOffset)] -- Offsets from start of the object
532 -- ie Info ptr has offset zero.
533 -> FCode VirtualHpOffset -- Returns virt offset of object
535 allocDynClosure cl_info use_cc blame_cc amodes_with_offsets
536 = do { virt_hp <- getVirtHp
538 -- FIND THE OFFSET OF THE INFO-PTR WORD
539 ; let info_offset = virt_hp + 1
540 -- info_offset is the VirtualHpOffset of the first
541 -- word of the new object
542 -- Remember, virtHp points to last allocated word,
543 -- ie 1 *before* the info-ptr word of new object.
545 info_ptr = CmmLit (CmmLabel (infoTableLabelFromCI cl_info))
546 hdr_w_offsets = initDynHdr info_ptr use_cc `zip` [0..]
548 -- SAY WHAT WE ARE ABOUT TO DO
549 ; profDynAlloc cl_info use_cc
550 -- ToDo: This is almost certainly wrong
551 -- We're ignoring blame_cc. But until we've
552 -- fixed the boxing hack in chooseDynCostCentres etc,
553 -- we're worried about making things worse by "fixing"
554 -- this part to use blame_cc!
556 ; tickyDynAlloc cl_info
558 -- ALLOCATE THE OBJECT
559 ; base <- getHpRelOffset info_offset
560 ; hpStore base (hdr_w_offsets ++ amodes_with_offsets)
562 -- BUMP THE VIRTUAL HEAP POINTER
563 ; setVirtHp (virt_hp + closureSize cl_info)
565 -- RETURN PTR TO START OF OBJECT
566 ; returnFC info_offset }
initDynHdr :: CmmExpr
	   -> CmmExpr		-- Cost centre to put in object
	   -> [CmmExpr]
initDynHdr info_ptr cc
  =  [info_ptr]
	-- ToDo: Gransim stuff
	-- ToDo: Parallel stuff
  ++ dynProfHdr cc
	-- No ticky header
579 hpStore :: CmmExpr -> [(CmmExpr, VirtualHpOffset)] -> Code
-- Store the item (expr,off) in base[off]
hpStore base es
  = stmtsC [ CmmStore (cmmOffsetW base off) val
	   | (val, off) <- es ]
585 emitSetDynHdr :: CmmExpr -> CmmExpr -> CmmExpr -> Code
586 emitSetDynHdr base info_ptr ccs
587 = hpStore base (zip (initDynHdr info_ptr ccs) [0..])