% (c) The University of Glasgow 2006
% (c) The GRASP/AQUA Project, Glasgow University, 1992-1998

\section[CgHeapery]{Heap management functions}
{-# OPTIONS -w #-}
-- The above warning suppression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and fix
-- any warnings in the module. See
--     http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#Warnings
module CgHeapery (
        initHeapUsage, getVirtHp, setVirtHp, setRealHp,
        getHpRelOffset, hpRel,

        funEntryChecks, thunkEntryChecks,
        altHeapCheck, unbxTupleHeapCheck,
        hpChkGen, hpChkNodePointsAssignSp0,
        stkChkGen, stkChkNodePoints,

        layOutDynConstr, layOutStaticConstr,
        mkVirtHeapOffsets, mkStaticClosureFields, mkStaticClosure,

        allocDynClosure, emitSetDynHdr
    ) where

#include "HsVersions.h"
%************************************************************************
%*                                                                      *
\subsection[CgUsages-heapery]{Monad things for fiddling with heap usage}
%*                                                                      *
%************************************************************************

The heap always grows upwards, so hpRel is easy.
hpRel :: VirtualHpOffset        -- virtual offset of Hp
      -> VirtualHpOffset        -- virtual offset of The Thing
      -> WordOff                -- integer word offset
hpRel hp off = off - hp
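
Since @hpRel@ is pure arithmetic, a quick standalone sketch makes the
intent concrete (the name @hpRelSketch@ and the use of plain Int in
place of VirtualHpOffset/WordOff are ours, for illustration only):

-- A minimal sketch, assuming Int stands in for VirtualHpOffset/WordOff.
hpRelSketch :: Int -> Int -> Int
hpRelSketch hp off = off - hp

-- If the real Hp corresponds to virtual offset 5, a word at virtual
-- offset 8 is 3 words above Hp:
--   hpRelSketch 5 8 == 3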
@initHeapUsage@ applies a function to the amount of heap that it uses.
It initialises the heap usage to zeros, and passes on an unchanged
heap usage.

It is usually a prelude to performing a GC check, so everything must
be in a tidy and consistent state.

rje: Note the slightly subtle fixed-point behaviour needed here.
initHeapUsage :: (VirtualHpOffset -> Code) -> Code
initHeapUsage fcode
  = do  { orig_hp_usage <- getHpUsage
        ; setHpUsage initHpUsage
        ; fixC (\heap_usage2 -> do
                { fcode (heapHWM heap_usage2)
                ; getHpUsage })
        ; setHpUsage orig_hp_usage }
setVirtHp :: VirtualHpOffset -> Code
setVirtHp new_virtHp
  = do  { hp_usage <- getHpUsage
        ; setHpUsage (hp_usage {virtHp = new_virtHp}) }

getVirtHp :: FCode VirtualHpOffset
getVirtHp
  = do  { hp_usage <- getHpUsage
        ; return (virtHp hp_usage) }

setRealHp :: VirtualHpOffset -> Code
setRealHp new_realHp
  = do  { hp_usage <- getHpUsage
        ; setHpUsage (hp_usage {realHp = new_realHp}) }
getHpRelOffset :: VirtualHpOffset -> FCode CmmExpr
getHpRelOffset virtual_offset
  = do  { hp_usg <- getHpUsage
        ; return (cmmRegOffW hpReg (hpRel (realHp hp_usg) virtual_offset)) }
%************************************************************************
%*                                                                      *
                Layout of heap objects
%*                                                                      *
%************************************************************************
layOutDynConstr, layOutStaticConstr
        :: DataCon
        -> [(CgRep,a)]
        -> (ClosureInfo,
            [(a,VirtualHpOffset)])

layOutDynConstr    = layOutConstr False
layOutStaticConstr = layOutConstr True

layOutConstr is_static data_con args
   = (mkConInfo is_static data_con tot_wds ptr_wds,
      things_w_offsets)
  where
    (tot_wds,            --  #ptr_wds + #nonptr_wds
     ptr_wds,            --  #ptr_wds
     things_w_offsets) = mkVirtHeapOffsets False{-not a thunk-} args
@mkVirtHeapOffsets@ always returns boxed things with smaller offsets
than the unboxed things, and furthermore, the offsets in the result
list are in increasing order.
mkVirtHeapOffsets
          :: Bool               -- True <=> is a thunk
          -> [(CgRep,a)]        -- Things to make offsets for
          -> (WordOff,          -- _Total_ number of words allocated
              WordOff,          -- Number of words allocated for *pointers*
              [(a, VirtualHpOffset)])
                                -- Things with their offsets from start of
                                -- object in order of increasing offset

-- First in list gets lowest offset, which is initial offset + 1.
mkVirtHeapOffsets is_thunk things
  = let non_void_things               = filterOut (isVoidArg . fst) things
        (ptrs, non_ptrs)              = separateByPtrFollowness non_void_things
        (wds_of_ptrs, ptrs_w_offsets) = mapAccumL computeOffset 0 ptrs
        (tot_wds, non_ptrs_w_offsets) = mapAccumL computeOffset wds_of_ptrs non_ptrs
    in
    (tot_wds, wds_of_ptrs, ptrs_w_offsets ++ non_ptrs_w_offsets)
  where
    hdr_size | is_thunk  = thunkHdrSize
             | otherwise = fixedHdrSize

    computeOffset wds_so_far (rep, thing)
      = (wds_so_far + cgRepSizeW rep, (thing, hdr_size + wds_so_far))
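
To see the layout this produces, here is a standalone sketch of the same
offset computation over a toy representation type.  @Rep@, @sizeW@,
@mkOffsets@ and the fixed one-word header are our stand-ins for CgRep,
cgRepSizeW, mkVirtHeapOffsets and fixedHdrSize; the sketch also assumes
one-word fields and skips the void-argument filtering:

import Data.List (mapAccumL, partition)

data Rep = Ptr | NonPtr deriving Eq

sizeW :: Rep -> Int
sizeW _ = 1       -- pretend every field is one word

mkOffsets :: [(Rep, a)] -> (Int, Int, [(a, Int)])
mkOffsets things = (tot_wds, wds_of_ptrs, ptrs_w_offs ++ nonptrs_w_offs)
  where
    hdr_size = 1  -- stand-in for fixedHdrSize
    (ptrs, nonptrs)            = partition ((== Ptr) . fst) things
    (wds_of_ptrs, ptrs_w_offs) = mapAccumL off 0 ptrs
    (tot_wds, nonptrs_w_offs)  = mapAccumL off wds_of_ptrs nonptrs
    off wds (rep, x) = (wds + sizeW rep, (x, hdr_size + wds))

-- mkOffsets [(NonPtr,'a'), (Ptr,'b')]
--   == (2, 1, [('b',1), ('a',2)])
-- Pointers come first, and every offset starts just past the header.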
%************************************************************************
%*                                                                      *
                Lay out a static closure
%*                                                                      *
%************************************************************************

Make a static closure, adding on any extra padding needed for CAFs,
and adding a static link field if necessary.
mkStaticClosureFields
        :: ClosureInfo
        -> CostCentreStack
        -> Bool                 -- Has CAF refs
        -> [CmmLit]             -- Payload
        -> [CmmLit]             -- The full closure
mkStaticClosureFields cl_info ccs caf_refs payload
  = mkStaticClosure info_lbl ccs payload padding_wds
        static_link_field saved_info_field
  where
    info_lbl = infoTableLabelFromCI cl_info
    -- CAFs must have consistent layout, regardless of whether they
    -- are actually updatable or not.  In particular, the static_link
    -- and saved_info fields must always be in the same place.  So we
    -- use closureNeedsUpdSpace rather than closureUpdReqd here:
    is_caf = closureNeedsUpdSpace cl_info

    padding_wds
        | not is_caf = []
        | otherwise  = ASSERT(null payload) [mkIntCLit 0]

    static_link_field
        | is_caf || staticClosureNeedsLink cl_info = [static_link_value]
        | otherwise                                = []

    saved_info_field
        | is_caf    = [mkIntCLit 0]
        | otherwise = []

        -- For a static constructor which has NoCafRefs, we set the
        -- static link field to a non-zero value so the garbage
        -- collector will ignore it.
    static_link_value
        | caf_refs  = mkIntCLit 0
        | otherwise = mkIntCLit 1
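
The static-link encoding can be read straight off the two guards above;
a standalone sketch (with Bool standing in for the caf_refs flag and Int
for the emitted literal, and @staticLinkValue@ a name of our own):

-- Minimal sketch of the static-link encoding used above.
staticLinkValue :: Bool -> Int
staticLinkValue caf_refs
  | caf_refs  = 0   -- has CAF refs: the GC treats the closure normally
  | otherwise = 1   -- NoCafRefs: non-zero, so the GC ignores it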
mkStaticClosure :: CLabel -> CostCentreStack -> [CmmLit]
  -> [CmmLit] -> [CmmLit] -> [CmmLit] -> [CmmLit]
mkStaticClosure info_lbl ccs payload padding_wds static_link_field saved_info_field
  =  [CmmLabel info_lbl]
  ++ variable_header_words
  ++ concatMap padLitToWord payload
  ++ padding_wds
  ++ static_link_field
  ++ saved_info_field
  where
    variable_header_words
        =  staticGranHdr
        ++ staticParHdr
        ++ staticProfHdr ccs
        ++ staticTickyHdr
padLitToWord :: CmmLit -> [CmmLit]
padLitToWord lit = lit : padding pad_length
  where rep = cmmLitRep lit
        pad_length = wORD_SIZE - machRepByteWidth rep :: Int

        padding n | n <= 0         = []
                  | n `rem` 2 /= 0 = CmmInt 0 I8  : padding (n-1)
                  | n `rem` 4 /= 0 = CmmInt 0 I16 : padding (n-2)
                  | n `rem` 8 /= 0 = CmmInt 0 I32 : padding (n-4)
                  | otherwise      = CmmInt 0 I64 : padding (n-8)
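
The recursion in @padding@ is easiest to see on a concrete width.  The
following standalone sketch (our own @paddingWidths@, assuming a 64-bit
word so wORD_SIZE = 8) returns the byte widths of the zero literals
instead of building CmmInts:

-- Minimal sketch: byte widths of the zero literals emitted by padding.
paddingWidths :: Int -> [Int]
paddingWidths n | n <= 0         = []
                | n `rem` 2 /= 0 = 1 : paddingWidths (n-1)
                | n `rem` 4 /= 0 = 2 : paddingWidths (n-2)
                | n `rem` 8 /= 0 = 4 : paddingWidths (n-4)
                | otherwise      = 8 : paddingWidths (n-8)

-- A 1-byte literal leaves 7 bytes to fill:
--   paddingWidths 7 == [1,2,4]
-- i.e. an I8, an I16 and an I32 zero restore word alignment.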
%************************************************************************
%*                                                                      *
\subsection[CgHeapery-heap-overflow]{Heap overflow checking}
%*                                                                      *
%************************************************************************
The new code for heapChecks.  For GrAnSim the code for doing a heap check
and doing a context switch has been separated.  In particular, the HEAP_CHK
macro only performs a heap check.  THREAD_CONTEXT_SWITCH should be used for
doing a context switch.  GRAN_FETCH_AND_RESCHEDULE must be put at the
beginning of every slow entry code in order to simulate the fetching of
closures.  If fetching is necessary (i.e. the current closure is not
local), then an automatic context switch is done.
--------------------------------------------------------------
A heap/stack check at a function or thunk entry point.
funEntryChecks :: ClosureInfo -> CmmStmts -> Code -> Code
funEntryChecks cl_info reg_save_code code
  = hpStkCheck cl_info True reg_save_code code

thunkEntryChecks :: ClosureInfo -> Code -> Code
thunkEntryChecks cl_info code
  = hpStkCheck cl_info False noStmts code

hpStkCheck :: ClosureInfo       -- Function closure
           -> Bool              -- Is a function? (not a thunk)
           -> CmmStmts          -- Register saves
           -> Code
           -> Code

hpStkCheck cl_info is_fun reg_save_code code
  = getFinalStackHW $ \ spHw -> do
        { sp <- getRealSp
        ; let stk_words = spHw - sp
        ; initHeapUsage $ \ hpHw -> do
            { -- Emit heap checks, but be sure to do it lazily so
              -- that the conditionals on hpHw don't cause a black hole
              codeOnly $ do
                { do_checks stk_words hpHw full_save_code rts_label
                ; tickyAllocHeap hpHw }
            ; setRealHp hpHw
            ; code }
        }
  where
    node_asst
        | nodeMustPointToIt (closureLFInfo cl_info)
        = noStmts
        | otherwise
        = oneStmt (CmmAssign nodeReg (CmmLit (CmmLabel closure_lbl)))
        -- Strictly speaking, we should tag node here.  But if
        -- node doesn't point to the closure, the code for the closure
        -- cannot depend on the value of R1 anyway, so we're safe.
    closure_lbl = closureLabelFromCI cl_info

    full_save_code = node_asst `plusStmts` reg_save_code

    rts_label | is_fun    = CmmReg (CmmGlobal GCFun)
                                -- Function entry point
              | otherwise = CmmReg (CmmGlobal GCEnter1)
                                -- Thunk or case return
        -- In the thunk/case-return case, R1 points to a closure
        -- which should be (re)-entered after GC
Heap checks in a case alternative are nice and easy, provided this is
a bog-standard algebraic case.  We have in our hand:

       * one return address, on the stack,
       * one return value, in Node.

The canned code for this heap check failure just pushes Node on the
stack, saying 'EnterGHC' to return.  The scheduler will return by
entering the top value on the stack, which in turn will return through
the return address, getting us back to where we were.  This is
therefore only valid if the return value is *lifted* (just being
boxed isn't good enough).

For primitive returns, we have an unlifted value in some register
(either R1 or FloatReg1 or DblReg1).  This means using specialised
heap-check code for these cases.
altHeapCheck
    :: AltType  -- PolyAlt, PrimAlt, AlgAlt, but *not* UbxTupAlt
                --      (Unboxed tuples are dealt with by ubxTupleHeapCheck)
    -> Code     -- Continuation
    -> Code
altHeapCheck alt_type code
  = initHeapUsage $ \ hpHw -> do
        { codeOnly $ do
             { do_checks 0 {- no stack chk -} hpHw
                         noStmts {- nothing to save -}
                         (rts_label alt_type)
             ; tickyAllocHeap hpHw }
        ; setRealHp hpHw
        ; code }
  where
    rts_label PolyAlt = CmmLit (CmmLabel (mkRtsCodeLabel SLIT("stg_gc_unpt_r1")))
        -- Do *not* enter R1 after a heap check in
        -- a polymorphic case.  It might be a function
        -- and the entry code for a function (currently)
        -- applies it
        --
        -- However R1 is guaranteed to be a pointer

    rts_label (AlgAlt tc) = stg_gc_enter1
        -- Enter R1 after the heap check; it's a pointer

    rts_label (PrimAlt tc)
      = CmmLit $ CmmLabel $
        case primRepToCgRep (tyConPrimRep tc) of
          VoidArg   -> mkRtsCodeLabel SLIT("stg_gc_noregs")
          FloatArg  -> mkRtsCodeLabel SLIT("stg_gc_f1")
          DoubleArg -> mkRtsCodeLabel SLIT("stg_gc_d1")
          LongArg   -> mkRtsCodeLabel SLIT("stg_gc_l1")
                        -- R1 is boxed but unlifted:
          PtrArg    -> mkRtsCodeLabel SLIT("stg_gc_unpt_r1")
                        -- R1 is unboxed:
          NonPtrArg -> mkRtsCodeLabel SLIT("stg_gc_unbx_r1")

    rts_label (UbxTupAlt _) = panic "altHeapCheck"
Unboxed tuple alternatives and let-no-escapes (the two most annoying
constructs to generate code for!).  For unboxed tuple returns, there
is an arbitrary number of possibly unboxed return values, some of
which will be in registers, and the others will be on the stack.  We
always organise the stack-resident fields into pointers &
non-pointers, and pass the number of each to the heap check code.
unbxTupleHeapCheck
        :: [(Id, GlobalReg)]    -- Live registers
        -> WordOff              -- no. of stack slots containing ptrs
        -> WordOff              -- no. of stack slots containing nonptrs
        -> CmmStmts             -- code to insert in the failure path
        -> Code
        -> Code

unbxTupleHeapCheck regs ptrs nptrs fail_code code
  -- We can't manage more than 255 pointers/non-pointers
  -- in a generic heap check.
  | ptrs > 255 || nptrs > 255 = panic "altHeapCheck"
  | otherwise
  = initHeapUsage $ \ hpHw -> do
        { codeOnly $ do { do_checks 0 {- no stack check -} hpHw
                                    full_fail_code rts_label
                        ; tickyAllocHeap hpHw }
        ; setRealHp hpHw
        ; code }
  where
    full_fail_code  = fail_code `plusStmts` oneStmt assign_liveness
    assign_liveness = CmmAssign (CmmGlobal (VanillaReg 9))      -- Ho ho ho!
                                (CmmLit (mkWordCLit liveness))
    liveness        = mkRegLiveness regs ptrs nptrs
    rts_label       = CmmLit (CmmLabel (mkRtsCodeLabel SLIT("stg_gc_ut")))
%************************************************************************
%*                                                                      *
                Heap/Stack Checks
%*                                                                      *
%************************************************************************
When failing a check, we save a return address on the stack and
jump to a pre-compiled code fragment that saves the live registers
and returns to the scheduler.

The return address in most cases will be the beginning of the basic
block in which the check resides, since we need to perform the check
again on re-entry because someone else might have stolen the resource
in the meantime.
do_checks :: WordOff    -- Stack headroom
          -> WordOff    -- Heap  headroom
          -> CmmStmts   -- Assignments to perform on failure
          -> CmmExpr    -- Rts address to jump to on failure
          -> Code
do_checks 0 0 _ _ = nopC
do_checks stk hp reg_save_code rts_lbl
  = do_checks' (CmmLit (mkIntCLit (stk*wORD_SIZE)))
               (CmmLit (mkIntCLit (hp*wORD_SIZE)))
               (stk /= 0) (hp /= 0) reg_save_code rts_lbl
-- The offsets are now in *bytes*
do_checks' stk_expr hp_expr stk_nonzero hp_nonzero reg_save_code rts_lbl
  = do  { doGranAllocate hp_expr

        -- Emit a block for the heap-check-failure code
        ; blk_id <- forkLabelledCode $ do
                        { whenC hp_nonzero $
                                stmtC (CmmAssign (CmmGlobal HpAlloc) hp_expr)
                        ; emitStmts reg_save_code
                        ; stmtC (CmmJump rts_lbl []) }

        -- Check for stack overflow *FIRST*; otherwise
        -- we might bump Hp and then fail the stack check
        ; whenC stk_nonzero
                (stmtC (CmmCondBranch stk_oflo blk_id))

        ; whenC hp_nonzero
                (stmtsC [CmmAssign hpReg
                                (cmmOffsetExprB (CmmReg hpReg) hp_expr),
                         CmmCondBranch hp_oflo blk_id])
                -- Bump heap pointer, and test for heap exhaustion
                -- Note that we don't move the heap pointer unless the
                -- stack check succeeds.  Otherwise we might end up
                -- with slop at the end of the current block, which can
                -- confuse the LDV profiler.
        }
  where
        -- Stk overflow if (Sp - stk_bytes < SpLim)
    stk_oflo = CmmMachOp mo_wordULt
                  [CmmMachOp mo_wordSub [CmmReg spReg, stk_expr],
                   CmmReg (CmmGlobal SpLim)]

        -- Hp overflow if (Hp > HpLim)
        -- (Hp has been incremented by now)
        -- HpLim points to the LAST WORD of valid allocation space.
    hp_oflo = CmmMachOp mo_wordUGt
                  [CmmReg hpReg, CmmReg (CmmGlobal HpLim)]
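
The two conditions are just unsigned machine-word comparisons.  A
standalone sketch (our own names, with Word64 assumed as the machine
word so subtraction wraps like mo_wordSub and comparison is unsigned
like mo_wordULt/mo_wordUGt):

import Data.Word (Word64)

-- Sp grows downwards, so overflow is (Sp - stk_bytes) < SpLim.
stkOverflow :: Word64 -> Word64 -> Word64 -> Bool
stkOverflow sp stk_bytes spLim = sp - stk_bytes < spLim

-- Hp grows upwards and has already been bumped past the new object,
-- so overflow is Hp > HpLim (HpLim is the last valid word).
hpOverflow :: Word64 -> Word64 -> Bool
hpOverflow hp hpLim = hp > hpLim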
%************************************************************************
%*                                                                      *
                Generic Heap/Stack Checks - used in the RTS
%*                                                                      *
%************************************************************************
hpChkGen :: CmmExpr -> CmmExpr -> CmmExpr -> Code
hpChkGen bytes liveness reentry
  = do_checks' (CmmLit (mkIntCLit 0)) bytes False True assigns stg_gc_gen
  where
    assigns = mkStmts [
                CmmAssign (CmmGlobal (VanillaReg 9))  liveness,
                CmmAssign (CmmGlobal (VanillaReg 10)) reentry
              ]

-- A heap check where R1 points to the closure to enter on return, and
-- we want to assign to Sp[0] on failure (used in AutoApply.cmm:BUILD_PAP).
hpChkNodePointsAssignSp0 :: CmmExpr -> CmmExpr -> Code
hpChkNodePointsAssignSp0 bytes sp0
  = do_checks' (CmmLit (mkIntCLit 0)) bytes False True assign stg_gc_enter1
  where assign = oneStmt (CmmStore (CmmReg spReg) sp0)

stkChkGen :: CmmExpr -> CmmExpr -> CmmExpr -> Code
stkChkGen bytes liveness reentry
  = do_checks' bytes (CmmLit (mkIntCLit 0)) True False assigns stg_gc_gen
  where
    assigns = mkStmts [
                CmmAssign (CmmGlobal (VanillaReg 9))  liveness,
                CmmAssign (CmmGlobal (VanillaReg 10)) reentry
              ]

stkChkNodePoints :: CmmExpr -> Code
stkChkNodePoints bytes
  = do_checks' bytes (CmmLit (mkIntCLit 0)) True False noStmts stg_gc_enter1

stg_gc_gen    = CmmLit (CmmLabel (mkRtsCodeLabel SLIT("stg_gc_gen")))
stg_gc_enter1 = CmmReg (CmmGlobal GCEnter1)
%************************************************************************
%*                                                                      *
\subsection[initClosure]{Initialise a dynamic closure}
%*                                                                      *
%************************************************************************
@allocDynClosure@ puts the thing in the heap, and modifies the virtual Hp
to account for this.

allocDynClosure
        :: ClosureInfo
        -> CmmExpr              -- Cost Centre to stick in the object
        -> CmmExpr              -- Cost Centre to blame for this alloc
                                -- (usually the same; sometimes "OVERHEAD")

        -> [(CmmExpr, VirtualHpOffset)] -- Offsets from start of the object
                                        -- ie Info ptr has offset zero.
        -> FCode VirtualHpOffset        -- Returns virt offset of object
allocDynClosure cl_info use_cc blame_cc amodes_with_offsets
  = do  { virt_hp <- getVirtHp

        -- FIND THE OFFSET OF THE INFO-PTR WORD
        ; let   info_offset = virt_hp + 1
                -- info_offset is the VirtualHpOffset of the first
                -- word of the new object
                -- Remember, virtHp points to last allocated word,
                -- ie 1 *before* the info-ptr word of new object.

                info_ptr = CmmLit (CmmLabel (infoTableLabelFromCI cl_info))
                hdr_w_offsets = initDynHdr info_ptr use_cc `zip` [0..]

        -- SAY WHAT WE ARE ABOUT TO DO
        ; profDynAlloc cl_info use_cc
                -- ToDo: This is almost certainly wrong
                -- We're ignoring blame_cc. But until we've
                -- fixed the boxing hack in chooseDynCostCentres etc,
                -- we're worried about making things worse by "fixing"
                -- this part to use blame_cc!

        ; tickyDynAlloc cl_info

        -- ALLOCATE THE OBJECT
        ; base <- getHpRelOffset info_offset
        ; hpStore base (hdr_w_offsets ++ amodes_with_offsets)

        -- BUMP THE VIRTUAL HEAP POINTER
        ; setVirtHp (virt_hp + closureSize cl_info)

        -- RETURN PTR TO START OF OBJECT
        ; returnFC info_offset }
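
The virtual-Hp bookkeeping above boils down to simple arithmetic; a
standalone sketch (our own @allocStep@, with Int standing in for
VirtualHpOffset) mirrors the getVirtHp/setVirtHp pair:

-- Minimal sketch: given the current virtHp (the last allocated word)
-- and a closure size in words, return the info-ptr offset and the
-- new virtHp.
allocStep :: Int -> Int -> (Int, Int)
allocStep virt_hp size = (virt_hp + 1, virt_hp + size)

-- Two successive 3-word closures, starting from virtHp = 0:
--   allocStep 0 3 == (1, 3)   -- info ptr at virtual offset 1
--   allocStep 3 3 == (4, 6)   -- next object starts one word later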
initDynHdr :: CmmExpr
           -> CmmExpr           -- Cost centre to put in object
           -> [CmmExpr]
initDynHdr info_ptr cc
  =  [info_ptr]
        -- ToDo: Gransim stuff
        -- ToDo: Parallel stuff
  ++ dynProfHdr cc
        -- No ticky header
hpStore :: CmmExpr -> [(CmmExpr, VirtualHpOffset)] -> Code
-- Store the item (expr,off) in base[off]
hpStore base es
  = stmtsC [ CmmStore (cmmOffsetW base off) val
           | (val, off) <- es ]

emitSetDynHdr :: CmmExpr -> CmmExpr -> CmmExpr -> Code
emitSetDynHdr base info_ptr ccs
  = hpStore base (zip (initDynHdr info_ptr ccs) [0..])