% (c) The University of Glasgow 2006
% (c) The GRASP/AQUA Project, Glasgow University, 1992-1998

\section[CgHeapery]{Heap management functions}

module CgHeapery (
        initHeapUsage, getVirtHp, setVirtHp, setRealHp,
        getHpRelOffset, hpRel,

        funEntryChecks, thunkEntryChecks,
        altHeapCheck, unbxTupleHeapCheck,
        hpChkGen, hpChkNodePointsAssignSp0,
        stkChkGen, stkChkNodePoints,

        layOutDynConstr, layOutStaticConstr,
        mkVirtHeapOffsets, mkStaticClosureFields, mkStaticClosure,

        allocDynClosure, emitSetDynHdr
  ) where

#include "HsVersions.h"
%************************************************************************
%*                                                                      *
\subsection[CgUsages-heapery]{Monad things for fiddling with heap usage}
%*                                                                      *
%************************************************************************
The heap always grows upwards, so hpRel is easy.

hpRel :: VirtualHpOffset        -- virtual offset of Hp
      -> VirtualHpOffset        -- virtual offset of The Thing
      -> WordOff                -- integer word offset
hpRel hp off = off - hp
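
For example, if the real Hp corresponds to virtual offset 3, a thing at
virtual offset 5 lies two words above the Hp register:

        -- hpRel 3 5  ==  5 - 3  ==  2
        -- so getHpRelOffset (below) would address it as Hp[2]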
@initHeapUsage@ applies a function to the amount of heap that it uses.
It initialises the heap usage to zeros, and passes on an unchanged
heap usage.

It is usually a prelude to performing a GC check, so everything must
be in a tidy and consistent state.

rje: Note the slightly subtle fixed point behaviour needed here
initHeapUsage :: (VirtualHpOffset -> Code) -> Code
initHeapUsage fcode
  = do  { orig_hp_usage <- getHpUsage
        ; setHpUsage initHpUsage
        ; fixC (\heap_usage2 -> do
                { fcode (heapHWM heap_usage2)
                ; getHpUsage })
        ; setHpUsage orig_hp_usage }
setVirtHp :: VirtualHpOffset -> Code
setVirtHp new_virtHp
  = do  { hp_usage <- getHpUsage
        ; setHpUsage (hp_usage {virtHp = new_virtHp}) }
getVirtHp :: FCode VirtualHpOffset
getVirtHp
  = do  { hp_usage <- getHpUsage
        ; return (virtHp hp_usage) }
setRealHp :: VirtualHpOffset -> Code
setRealHp new_realHp
  = do  { hp_usage <- getHpUsage
        ; setHpUsage (hp_usage {realHp = new_realHp}) }
getHpRelOffset :: VirtualHpOffset -> FCode CmmExpr
getHpRelOffset virtual_offset
  = do  { hp_usg <- getHpUsage
        ; return (cmmRegOffW hpReg (hpRel (realHp hp_usg) virtual_offset)) }
%************************************************************************
%*                                                                      *
                Layout of heap objects
%*                                                                      *
%************************************************************************
layOutDynConstr, layOutStaticConstr
        :: DataCon
        -> [(CgRep,a)]
        -> (ClosureInfo,
            [(a,VirtualHpOffset)])

layOutDynConstr    = layOutConstr False
layOutStaticConstr = layOutConstr True
layOutConstr is_static data_con args
   = (mkConInfo is_static data_con tot_wds ptr_wds,
      things_w_offsets)
  where
    (tot_wds,            --  #ptr_wds + #nonptr_wds
     ptr_wds,            --  #ptr_wds
     things_w_offsets) = mkVirtHeapOffsets False{-not a thunk-} args
@mkVirtHeapOffsets@ always returns boxed things with smaller offsets
than the unboxed things, and furthermore, the offsets in the result
list are in increasing order.
mkVirtHeapOffsets
          :: Bool               -- True <=> is a thunk
          -> [(CgRep,a)]        -- Things to make offsets for
          -> (WordOff,          -- _Total_ number of words allocated
              WordOff,          -- Number of words allocated for *pointers*
              [(a, VirtualHpOffset)])
                                -- Things with their offsets from start of
                                -- object in order of increasing offset

-- First in list gets lowest offset, which is initial offset + 1.
mkVirtHeapOffsets is_thunk things
  = let non_void_things               = filterOut (isVoidArg . fst) things
        (ptrs, non_ptrs)              = separateByPtrFollowness non_void_things
        (wds_of_ptrs, ptrs_w_offsets) = mapAccumL computeOffset 0 ptrs
        (tot_wds, non_ptrs_w_offsets) = mapAccumL computeOffset wds_of_ptrs non_ptrs
    in
    (tot_wds, wds_of_ptrs, ptrs_w_offsets ++ non_ptrs_w_offsets)
  where
    hdr_size | is_thunk   = thunkHdrSize
             | otherwise  = fixedHdrSize

    computeOffset wds_so_far (rep, thing)
      = (wds_so_far + cgRepSizeW rep, (thing, hdr_size + wds_so_far))
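
A worked example may help (a sketch only, assuming an unprofiled build
where @fixedHdrSize == 1@, with @cgRepSizeW PtrArg == 1@ and
@cgRepSizeW DoubleArg == 2@):

        -- mkVirtHeapOffsets False [(DoubleArg,"d"), (PtrArg,"p")]
        --   == (3, 1, [("p",1), ("d",2)])
        -- The pointer "p" is laid out first, just after the one-word
        -- header; the double "d" follows at offset 2, giving 3 payload
        -- words in total, 1 of which is a pointer.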
%************************************************************************
%*                                                                      *
                Lay out a static closure
%*                                                                      *
%************************************************************************
Make a static closure, adding on any extra padding needed for CAFs,
and adding a static link field if necessary.
mkStaticClosureFields
        :: ClosureInfo
        -> CostCentreStack
        -> Bool                 -- Has CAF refs
        -> [CmmLit]             -- Payload
        -> [CmmLit]             -- The full closure
mkStaticClosureFields cl_info ccs caf_refs payload
  = mkStaticClosure info_lbl ccs payload padding_wds
        static_link_field saved_info_field
  where
    info_lbl = infoTableLabelFromCI cl_info

    -- CAFs must have consistent layout, regardless of whether they
    -- are actually updatable or not.  The layout of a CAF is:
    --
    --        3 saved_info
    --        2 static_link
    --        1 indirectee
    --        0 info ptr
    --
    -- the static_link and saved_info fields must always be in the same
    -- place.  So we use closureNeedsUpdSpace rather than
    -- closureUpdReqd here:

    is_caf = closureNeedsUpdSpace cl_info

    padding_wds
        | not is_caf = []
        | otherwise  = ASSERT(null payload) [mkIntCLit 0]

    static_link_field
        | is_caf || staticClosureNeedsLink cl_info = [static_link_value]
        | otherwise                                = []

    saved_info_field
        | is_caf    = [mkIntCLit 0]
        | otherwise = []

        -- For a static constructor which has NoCafRefs, we set the
        -- static link field to a non-zero value so the garbage
        -- collector will ignore it.
    static_link_value
        | caf_refs  = mkIntCLit 0
        | otherwise = mkIntCLit 1
mkStaticClosure :: CLabel -> CostCentreStack -> [CmmLit]
  -> [CmmLit] -> [CmmLit] -> [CmmLit] -> [CmmLit]
mkStaticClosure info_lbl ccs payload padding_wds static_link_field saved_info_field
  =  [CmmLabel info_lbl]
  ++ variable_header_words
  ++ payload
  ++ padding_wds
  ++ static_link_field
  ++ saved_info_field
  where
    variable_header_words
        =  staticGranHdr
        ++ staticParHdr
        ++ staticProfHdr ccs
        ++ staticTickyHdr
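
For instance (an illustrative sketch, assuming an unprofiled,
non-parallel build so that variable_header_words is empty, and a static
constructor closure for which staticClosureNeedsLink holds):

        -- mkStaticClosureFields con_info ccs True [payload_wd]
        --   ==> [ CmmLabel con_info_lbl  -- info pointer
        --       , payload_wd             -- one word of payload
        --       , mkIntCLit 0 ]          -- static link: zero because the
        --                                -- closure has CAF refs, so the
        --                                -- GC must traverse it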
%************************************************************************
%*                                                                      *
\subsection[CgHeapery-heap-overflow]{Heap overflow checking}
%*                                                                      *
%************************************************************************
The new code for heapChecks. For GrAnSim the code for doing a heap check
and doing a context switch has been separated. In particular, the HEAP_CHK
macro only performs a heap check. THREAD_CONTEXT_SWITCH should be used for
doing a context switch. GRAN_FETCH_AND_RESCHEDULE must be put at the
beginning of every slow entry code in order to simulate the fetching of
closures. If fetching is necessary (i.e. the current closure is not local)
then an automatic context switch is done.
--------------------------------------------------------------
A heap/stack check at a function or thunk entry point.
funEntryChecks :: ClosureInfo -> CmmStmts -> Code -> Code
funEntryChecks cl_info reg_save_code code
  = hpStkCheck cl_info True reg_save_code code

thunkEntryChecks :: ClosureInfo -> Code -> Code
thunkEntryChecks cl_info code
  = hpStkCheck cl_info False noStmts code
hpStkCheck :: ClosureInfo       -- Function closure
           -> Bool              -- Is a function? (not a thunk)
           -> CmmStmts          -- Register saves
           -> Code
           -> Code

hpStkCheck cl_info is_fun reg_save_code code
  = getFinalStackHW $ \ spHw -> do
        { sp <- getRealSp
        ; let stk_words = spHw - sp
        ; initHeapUsage $ \ hpHw -> do
            {   -- Emit heap checks, but be sure to do it lazily so
                -- that the conditionals on hpHw don't cause a black hole
              codeOnly $ do
                { do_checks stk_words hpHw full_save_code rts_label
                ; tickyAllocHeap hpHw }
            ; setRealHp hpHw
            ; code }
        }
  where
    node_asst
        | nodeMustPointToIt (closureLFInfo cl_info)
        = noStmts
        | otherwise
        = oneStmt (CmmAssign nodeReg (CmmLit (CmmLabel closure_lbl)))
        -- Strictly speaking, we should tag node here.  But if
        -- node doesn't point to the closure, the code for the closure
        -- cannot depend on the value of R1 anyway, so we're safe.
    closure_lbl = closureLabelFromCI cl_info

    full_save_code = node_asst `plusStmts` reg_save_code

    rts_label | is_fun    = CmmReg (CmmGlobal GCFun)
                                -- Function entry point
              | otherwise = CmmReg (CmmGlobal GCEnter1)
                                -- Thunk or case return
        -- In the thunk/case-return case, R1 points to a closure
        -- which should be (re)-entered after GC
Heap checks in a case alternative are nice and easy, provided this is
a bog-standard algebraic case. We have in our hand:

       * one return address, on the stack,
       * one return value, in Node.

The canned code for this heap check failure just pushes Node on the
stack, saying 'EnterGHC' to return. The scheduler will return by
entering the top value on the stack, which in turn will return through
the return address, getting us back to where we were. This is
therefore only valid if the return value is *lifted* (just being
boxed isn't good enough).
For primitive returns, we have an unlifted value in some register
(either R1 or FloatReg1 or DblReg1). This means using specialised
heap-check code for these cases.
altHeapCheck
    :: AltType  -- PolyAlt, PrimAlt, AlgAlt, but *not* UbxTupAlt
                -- (Unboxed tuples are dealt with by ubxTupleHeapCheck)
    -> Code     -- Continuation
    -> Code
altHeapCheck alt_type code
  = initHeapUsage $ \ hpHw -> do
        { codeOnly $ do
             { do_checks 0 {- no stack chk -} hpHw
                         noStmts {- nothing to save -}
                         (rts_label alt_type)
             ; tickyAllocHeap hpHw }
        ; setRealHp hpHw
        ; code }
  where
    rts_label PolyAlt = CmmLit (CmmLabel (mkRtsCodeLabel SLIT("stg_gc_unpt_r1")))
        -- Do *not* enter R1 after a heap check in
        -- a polymorphic case.  It might be a function
        -- and the entry code for a function (currently)
        -- applies it
        --
        -- However R1 is guaranteed to be a pointer

    rts_label (AlgAlt tc) = stg_gc_enter1
        -- Enter R1 after the heap check; it's a pointer

    rts_label (PrimAlt tc)
      = CmmLit $ CmmLabel $
        case primRepToCgRep (tyConPrimRep tc) of
          VoidArg   -> mkRtsCodeLabel SLIT("stg_gc_noregs")
          FloatArg  -> mkRtsCodeLabel SLIT("stg_gc_f1")
          DoubleArg -> mkRtsCodeLabel SLIT("stg_gc_d1")
          LongArg   -> mkRtsCodeLabel SLIT("stg_gc_l1")
                                -- R1 is boxed but unlifted:
          PtrArg    -> mkRtsCodeLabel SLIT("stg_gc_unpt_r1")
                                -- R1 is unboxed:
          NonPtrArg -> mkRtsCodeLabel SLIT("stg_gc_unbx_r1")

    rts_label (UbxTupAlt _) = panic "altHeapCheck"
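
A sketch of a call site (the wrapper @gen_alg_alt@ is hypothetical, for
illustration only):

        gen_alg_alt :: TyCon -> Code -> Code
        gen_alg_alt tycon alt_code
          = altHeapCheck (AlgAlt tycon) alt_code
                -- On failure this jumps to stg_gc_enter1: R1 holds the
                -- (boxed, lifted) returned value, which is re-entered
                -- after GC, re-running the heap check on the way back in.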
Unboxed tuple alternatives and let-no-escapes (the two most annoying
constructs to generate code for!) For unboxed tuple returns, there
are an arbitrary number of possibly unboxed return values, some of
which will be in registers, and the others will be on the stack. We
always organise the stack-resident fields into pointers &
non-pointers, and pass the number of each to the heap check code.
unbxTupleHeapCheck
        :: [(Id, GlobalReg)]    -- Live registers
        -> WordOff              -- no. of stack slots containing ptrs
        -> WordOff              -- no. of stack slots containing nonptrs
        -> CmmStmts             -- code to insert in the failure path
        -> Code
        -> Code

unbxTupleHeapCheck regs ptrs nptrs fail_code code
  -- We can't manage more than 255 pointers/non-pointers
  -- in a generic heap check.
  | ptrs > 255 || nptrs > 255 = panic "altHeapCheck"
  | otherwise
  = initHeapUsage $ \ hpHw -> do
        { codeOnly $ do { do_checks 0 {- no stack check -} hpHw
                                    full_fail_code rts_label
                        ; tickyAllocHeap hpHw }
        ; setRealHp hpHw
        ; code }
  where
    full_fail_code  = fail_code `plusStmts` oneStmt assign_liveness
    assign_liveness = CmmAssign (CmmGlobal (VanillaReg 9))      -- Ho ho ho!
                                (CmmLit (mkWordCLit liveness))
    liveness        = mkRegLiveness regs ptrs nptrs
    rts_label       = CmmLit (CmmLabel (mkRtsCodeLabel SLIT("stg_gc_ut")))
%************************************************************************
%*                                                                      *
                Heap/Stack Checks
%*                                                                      *
%************************************************************************
When failing a check, we save a return address on the stack and
jump to a pre-compiled code fragment that saves the live registers
and returns to the scheduler.

The return address in most cases will be the beginning of the basic
block in which the check resides, since we need to perform the check
again on re-entry because someone else might have stolen the resource
in the meantime.
do_checks :: WordOff            -- Stack headroom
          -> WordOff            -- Heap headroom
          -> CmmStmts           -- Assignments to perform on failure
          -> CmmExpr            -- Rts address to jump to on failure
          -> Code
do_checks 0 0 _ _   = nopC
do_checks stk hp reg_save_code rts_lbl
  = do_checks' (CmmLit (mkIntCLit (stk*wORD_SIZE)))
               (CmmLit (mkIntCLit (hp*wORD_SIZE)))
               (stk /= 0) (hp /= 0) reg_save_code rts_lbl
-- The offsets are now in *bytes*
do_checks' stk_expr hp_expr stk_nonzero hp_nonzero reg_save_code rts_lbl
  = do  { doGranAllocate hp_expr

        -- Emit a block for the heap-check-failure code
        ; blk_id <- forkLabelledCode $ do
                        { whenC hp_nonzero $
                                stmtC (CmmAssign (CmmGlobal HpAlloc) hp_expr)
                        ; emitStmts reg_save_code
                        ; stmtC (CmmJump rts_lbl []) }

        -- Check for stack overflow *FIRST*; otherwise
        -- we might bump Hp and then fail the stack overflow check
        ; whenC stk_nonzero
                (stmtC (CmmCondBranch stk_oflo blk_id))

        ; whenC hp_nonzero
                (stmtsC [CmmAssign hpReg
                                (cmmOffsetExprB (CmmReg hpReg) hp_expr),
                         CmmCondBranch hp_oflo blk_id])
                -- Bump heap pointer, and test for heap exhaustion
                -- Note that we don't move the heap pointer unless the
                -- stack check succeeds.  Otherwise we might end up
                -- with slop at the end of the current block, which can
                -- confuse the LDV profiler.
        }
  where
        -- Stk overflow if (Sp - stk_bytes < SpLim)
    stk_oflo = CmmMachOp mo_wordULt
                  [CmmMachOp mo_wordSub [CmmReg spReg, stk_expr],
                   CmmReg (CmmGlobal SpLim)]

        -- Hp overflow if (Hp > HpLim)
        -- (Hp has been incremented by now)
        -- HpLim points to the LAST WORD of valid allocation space.
    hp_oflo = CmmMachOp mo_wordUGt
                  [CmmReg hpReg, CmmReg (CmmGlobal HpLim)]
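
Concretely, for a call with both checks enabled the generated code has
roughly this shape (illustrative C-- pseudocode, not verbatim output):

        --        if (Sp - stk_bytes < SpLim) goto chk_fail;  -- stack first
        --        Hp = Hp + hp_bytes;
        --        if (Hp > HpLim) goto chk_fail;              -- then heap
        --        ...rest of the basic block...
        --   chk_fail:
        --        HpAlloc = hp_bytes;
        --        <reg_save_code>
        --        jump rts_lbl;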
%************************************************************************
%*                                                                      *
                Generic Heap/Stack Checks - used in the RTS
%*                                                                      *
%************************************************************************
hpChkGen :: CmmExpr -> CmmExpr -> CmmExpr -> Code
hpChkGen bytes liveness reentry
  = do_checks' (CmmLit (mkIntCLit 0)) bytes False True assigns stg_gc_gen
  where
    assigns = mkStmts [
                CmmAssign (CmmGlobal (VanillaReg 9))  liveness,
                CmmAssign (CmmGlobal (VanillaReg 10)) reentry
                ]
-- A heap check where R1 points to the closure to enter on return, and
-- we want to assign to Sp[0] on failure (used in AutoApply.cmm:BUILD_PAP).
hpChkNodePointsAssignSp0 :: CmmExpr -> CmmExpr -> Code
hpChkNodePointsAssignSp0 bytes sp0
  = do_checks' (CmmLit (mkIntCLit 0)) bytes False True assign stg_gc_enter1
  where assign = oneStmt (CmmStore (CmmReg spReg) sp0)
stkChkGen :: CmmExpr -> CmmExpr -> CmmExpr -> Code
stkChkGen bytes liveness reentry
  = do_checks' bytes (CmmLit (mkIntCLit 0)) True False assigns stg_gc_gen
  where
    assigns = mkStmts [
                CmmAssign (CmmGlobal (VanillaReg 9))  liveness,
                CmmAssign (CmmGlobal (VanillaReg 10)) reentry
                ]
stkChkNodePoints :: CmmExpr -> Code
stkChkNodePoints bytes
  = do_checks' bytes (CmmLit (mkIntCLit 0)) True False noStmts stg_gc_enter1

stg_gc_gen    = CmmLit (CmmLabel (mkRtsCodeLabel SLIT("stg_gc_gen")))
stg_gc_enter1 = CmmReg (CmmGlobal GCEnter1)
%************************************************************************
%*                                                                      *
\subsection[initClosure]{Initialise a dynamic closure}
%*                                                                      *
%************************************************************************
@allocDynClosure@ puts the thing in the heap, and modifies the virtual Hp
to account for this.

allocDynClosure
        :: ClosureInfo
        -> CmmExpr              -- Cost Centre to stick in the object
        -> CmmExpr              -- Cost Centre to blame for this alloc
                                -- (usually the same; sometimes "OVERHEAD")

        -> [(CmmExpr, VirtualHpOffset)] -- Offsets from start of the object
                                        -- ie Info ptr has offset zero.
        -> FCode VirtualHpOffset        -- Returns virt offset of object
allocDynClosure cl_info use_cc blame_cc amodes_with_offsets
  = do  { virt_hp <- getVirtHp

        -- FIND THE OFFSET OF THE INFO-PTR WORD
        ; let   info_offset = virt_hp + 1
                -- info_offset is the VirtualHpOffset of the first
                -- word of the new object
                -- Remember, virtHp points to last allocated word,
                -- ie 1 *before* the info-ptr word of new object.

                info_ptr = CmmLit (CmmLabel (infoTableLabelFromCI cl_info))
                hdr_w_offsets = initDynHdr info_ptr use_cc `zip` [0..]

        -- SAY WHAT WE ARE ABOUT TO DO
        ; profDynAlloc cl_info use_cc
                -- ToDo: This is almost certainly wrong
                -- We're ignoring blame_cc. But until we've
                -- fixed the boxing hack in chooseDynCostCentres etc,
                -- we're worried about making things worse by "fixing"
                -- this part to use blame_cc!

        ; tickyDynAlloc cl_info

        -- ALLOCATE THE OBJECT
        ; base <- getHpRelOffset info_offset
        ; hpStore base (hdr_w_offsets ++ amodes_with_offsets)

        -- BUMP THE VIRTUAL HEAP POINTER
        ; setVirtHp (virt_hp + closureSize cl_info)

        -- RETURN PTR TO START OF OBJECT
        ; returnFC info_offset }
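
A sketch of how a call site fits together (the wrapper @cgAllocCon@ and
its context are hypothetical, for illustration; the real call sites live
in the constructor-allocation code):

        cgAllocCon :: DataCon -> [(CgRep, CmmExpr)]
                   -> CmmExpr -> CmmExpr -> FCode VirtualHpOffset
        cgAllocCon con amodes use_cc blame_cc
          = do  { let (closure_info, amodes_w_offsets)
                          = layOutDynConstr con amodes
                ; allocDynClosure closure_info use_cc blame_cc
                                  amodes_w_offsets }
                -- Returns the virtual offset of the new object's
                -- info-ptr word, from which getHpRelOffset can make
                -- a CmmExpr pointing at the closure.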
initDynHdr :: CmmExpr
           -> CmmExpr           -- Cost centre to put in object
           -> [CmmExpr]
initDynHdr info_ptr cc
  =  [info_ptr]
        -- ToDo: Gransim stuff
        -- ToDo: Parallel stuff
  ++ dynProfHdr cc
        -- No ticky header
hpStore :: CmmExpr -> [(CmmExpr, VirtualHpOffset)] -> Code
-- Store the item (expr,off) in base[off]
hpStore base es
  = stmtsC [ CmmStore (cmmOffsetW base off) val
           | (val, off) <- es ]
emitSetDynHdr :: CmmExpr -> CmmExpr -> CmmExpr -> Code
emitSetDynHdr base info_ptr ccs
  = hpStore base (zip (initDynHdr info_ptr ccs) [0..])