% (c) The GRASP/AQUA Project, Glasgow University, 1992-1998
% $Id: CgHeapery.lhs,v 1.40 2004/08/13 13:06:00 simonmar Exp $

\section[CgHeapery]{Heap management functions}
module CgHeapery (
        initHeapUsage, getVirtHp, setVirtHp, setRealHp,
        getHpRelOffset, hpRel,

        funEntryChecks, thunkEntryChecks,
        altHeapCheck, unbxTupleHeapCheck,
        hpChkGen, hpChkNodePointsAssignSp0,
        stkChkGen, stkChkNodePoints,

        layOutDynConstr, layOutStaticConstr,
        mkVirtHeapOffsets, mkStaticClosureFields, mkStaticClosure,

        allocDynClosure, emitSetDynHdr
    ) where
#include "HsVersions.h"

import Constants        ( mIN_UPD_SIZE )
import StgSyn           ( AltType(..) )
import CLabel           ( CLabel, mkRtsCodeLabel )
import CgUtils          ( mkWordCLit, cmmRegOffW, cmmOffsetW,
import CgProf           ( staticProfHdr, profDynAlloc, dynProfHdr )
import CgTicky          ( staticTickyHdr, tickyDynAlloc, tickyAllocHeap )
import CgParallel       ( staticGranHdr, staticParHdr, doGranAllocate )
import CgStackery       ( getFinalStackHW, getRealSp )
import CgCallConv       ( mkRegLiveness )
import ClosureInfo      ( closureSize, closureUpdReqd,
                          staticClosureNeedsLink,
                          infoTableLabelFromCI, closureLabelFromCI,
                          nodeMustPointToIt, closureLFInfo,
import SMRep            ( CgRep(..), cgRepSizeW, separateByPtrFollowness,
                          WordOff, fixedHdrSize, isVoidArg, primRepToCgRep )
import Cmm              ( CmmLit(..), CmmStmt(..), CmmExpr(..), GlobalReg(..),
                          CmmReg(..), hpReg, nodeReg, spReg )
import MachOp           ( mo_wordULt, mo_wordUGt, mo_wordSub )
import CmmUtils         ( mkIntCLit, CmmStmts, noStmts, oneStmt, plusStmts,
import DataCon          ( DataCon )
import TyCon            ( tyConPrimRep )
import CostCentre       ( CostCentreStack )
import Util             ( mapAccumL, filterOut )
import Constants        ( wORD_SIZE )
%************************************************************************
\subsection[CgUsages-heapery]{Monad things for fiddling with heap usage}
%************************************************************************
The heap always grows upwards, so hpRel is easy

hpRel :: VirtualHpOffset        -- virtual offset of Hp
      -> VirtualHpOffset        -- virtual offset of The Thing
      -> WordOff                -- integer word offset
hpRel hp off = off - hp
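-- A small worked example (illustrative only): if the real Hp currently
-- corresponds to virtual offset 5 and an object lives at virtual offset 8,
-- then hpRel 5 8 == 3, i.e. the object is 3 words above Hp.  This is
-- exactly the calculation getHpRelOffset below relies on.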
@initHeapUsage@ applies a function to the amount of heap that it uses.
It initialises the heap usage to zeros, and passes on an unchanged
heap usage.

It is usually a prelude to performing a GC check, so everything must
be in a tidy and consistent state.

rje: Note the slightly subtle fixed point behaviour needed here
initHeapUsage :: (VirtualHpOffset -> Code) -> Code
initHeapUsage fcode
  = do  { orig_hp_usage <- getHpUsage
        ; setHpUsage initHpUsage
        ; fixC (\heap_usage2 -> do
                { fcode (heapHWM heap_usage2)
                ; getHpUsage })
        ; setHpUsage orig_hp_usage }
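-- Schematically, the callers below (hpStkCheck, altHeapCheck) use it like
-- this (a sketch, not part of the real API):
--
--      initHeapUsage $ \ hpHw -> do
--          { codeOnly (emit a heap check for hpHw words)
--          ; setRealHp hpHw
--          ; code_that_allocates }
--
-- fixC feeds the heap high-water mark produced by running the body back in
-- as hpHw, so the check is generated for exactly what the body allocates.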
setVirtHp :: VirtualHpOffset -> Code
setVirtHp new_virtHp
  = do  { hp_usage <- getHpUsage
        ; setHpUsage (hp_usage {virtHp = new_virtHp}) }

getVirtHp :: FCode VirtualHpOffset
getVirtHp
  = do  { hp_usage <- getHpUsage
        ; return (virtHp hp_usage) }

setRealHp :: VirtualHpOffset -> Code
setRealHp new_realHp
  = do  { hp_usage <- getHpUsage
        ; setHpUsage (hp_usage {realHp = new_realHp}) }

getHpRelOffset :: VirtualHpOffset -> FCode CmmExpr
getHpRelOffset virtual_offset
  = do  { hp_usg <- getHpUsage
        ; return (cmmRegOffW hpReg (hpRel (realHp hp_usg) virtual_offset)) }
%************************************************************************
                Layout of heap objects
%************************************************************************
layOutDynConstr, layOutStaticConstr
        :: DataCon
        -> [(CgRep,a)]
        -> (ClosureInfo,
            [(a,VirtualHpOffset)])

layOutDynConstr    = layOutConstr False
layOutStaticConstr = layOutConstr True

layOutConstr is_static data_con args
   = (mkConInfo is_static data_con tot_wds ptr_wds,
      things_w_offsets)
  where
    (tot_wds,            -- #ptr_wds + #nonptr_wds
     ptr_wds,            -- #ptr_wds
     things_w_offsets) = mkVirtHeapOffsets args
@mkVirtHeapOffsets@ always returns boxed things with smaller offsets
than the unboxed things, and furthermore, the offsets in the result
list increase monotonically.
mkVirtHeapOffsets
          :: [(CgRep,a)]        -- Things to make offsets for
          -> (WordOff,          -- *Total* number of words allocated
              WordOff,          -- Number of words allocated for *pointers*
              [(a, VirtualHpOffset)])
                                -- Things with their offsets from start of
                                -- object in order of increasing offset

-- First in list gets lowest offset, which is initial offset + 1.

mkVirtHeapOffsets things
  = let non_void_things               = filterOut (isVoidArg . fst) things
        (ptrs, non_ptrs)              = separateByPtrFollowness non_void_things
        (wds_of_ptrs, ptrs_w_offsets) = mapAccumL computeOffset 0 ptrs
        (tot_wds, non_ptrs_w_offsets) = mapAccumL computeOffset wds_of_ptrs non_ptrs
    in
    (tot_wds, wds_of_ptrs, ptrs_w_offsets ++ non_ptrs_w_offsets)
  where
    computeOffset wds_so_far (rep, thing)
      = (wds_so_far + cgRepSizeW rep, (thing, fixedHdrSize + wds_so_far))
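-- A worked example (illustrative only, assuming fixedHdrSize == 1 and
-- one-word CgReps):
--
--      mkVirtHeapOffsets [(PtrArg,"p1"), (NonPtrArg,"n1"), (PtrArg,"p2")]
--        == (3, 2, [("p1",1), ("p2",2), ("n1",3)])
--
-- The pointers come first, at offsets 1 and 2 (just after the fixed
-- header); the non-pointer follows at offset 3; tot_wds is 3 and
-- ptr_wds is 2.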
%************************************************************************
                Lay out a static closure
%************************************************************************

Make a static closure, adding on any extra padding needed for CAFs,
and adding a static link field if necessary.
mkStaticClosureFields
        :: ClosureInfo
        -> CostCentreStack
        -> Bool                 -- Has CAF refs
        -> [CmmLit]             -- Payload
        -> [CmmLit]             -- The full closure
mkStaticClosureFields cl_info ccs caf_refs payload
  = mkStaticClosure info_lbl ccs payload padding_wds static_link_field
  where
    info_lbl = infoTableLabelFromCI cl_info

    upd_reqd = closureUpdReqd cl_info
    -- for the purposes of laying out the static closure, we consider all
    -- thunks to be "updatable", so that the static link field is always
    -- in the same place.
    padding_wds
        | not upd_reqd = []
        | otherwise    = replicate n (mkIntCLit 0) -- a bunch of 0s
        where n = max 0 (mIN_UPD_SIZE - length payload)
    -- We always have a static link field for a thunk; it's used to
    -- save the closure's info pointer when we're reverting CAFs
    -- (see comment in Storage.c)
    static_link_field
        | upd_reqd || staticClosureNeedsLink cl_info = [static_link_value]
        | otherwise                                  = []
    -- for a static constructor which has NoCafRefs, we set the
    -- static link field to a non-zero value so the garbage
    -- collector will ignore it.
    static_link_value
        | caf_refs  = mkIntCLit 0
        | otherwise = mkIntCLit 1
mkStaticClosure :: CLabel -> CostCentreStack -> [CmmLit]
  -> [CmmLit] -> [CmmLit] -> [CmmLit]
mkStaticClosure info_lbl ccs payload padding_wds static_link_field
  =  [CmmLabel info_lbl]
  ++ variable_header_words
  ++ payload
  ++ padding_wds
  ++ static_link_field
  where
    variable_header_words
        = staticGranHdr ++ staticParHdr ++ staticProfHdr ccs ++ staticTickyHdr
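-- An illustrative layout (a sketch, not from the original source), ignoring
-- the profiling/ticky/parallel header words:
--
--   * a non-updatable static constructor with two payload words and CAF
--     refs comes out as [ info_lbl, payload_1, payload_2, 0 ]:
--     no padding, and a static link field of 0 so the GC will follow it;
--
--   * an updatable static thunk with an empty payload and no CAF refs
--     gets mIN_UPD_SIZE words of zero padding followed by a static link
--     field of 1, which the GC ignores.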
%************************************************************************
\subsection[CgHeapery-heap-overflow]{Heap overflow checking}
%************************************************************************
The new code for heapChecks.  For GrAnSim the code for doing a heap check
and doing a context switch has been separated.  In particular, the HEAP_CHK
macro only performs a heap check.  THREAD_CONTEXT_SWITCH should be used for
doing a context switch.  GRAN_FETCH_AND_RESCHEDULE must be put at the
beginning of every slow entry code in order to simulate the fetching of
closures.  If fetching is necessary (i.e. the current closure is not local),
an automatic context switch is done.
--------------------------------------------------------------
A heap/stack check at a function or thunk entry point.

funEntryChecks :: ClosureInfo -> CmmStmts -> Code -> Code
funEntryChecks cl_info reg_save_code code
  = hpStkCheck cl_info True reg_save_code code

thunkEntryChecks :: ClosureInfo -> Code -> Code
thunkEntryChecks cl_info code
  = hpStkCheck cl_info False noStmts code
hpStkCheck :: ClosureInfo       -- Function closure
           -> Bool              -- Is a function? (not a thunk)
           -> CmmStmts          -- Register saves
           -> Code
           -> Code

hpStkCheck cl_info is_fun reg_save_code code
  = getFinalStackHW $ \ spHw -> do
        { sp <- getRealSp
        ; let stk_words = spHw - sp
        ; initHeapUsage $ \ hpHw -> do
            { -- Emit heap checks, but be sure to do it lazily so
              -- that the conditionals on hpHw don't cause a black hole
              codeOnly $ do
                { do_checks stk_words hpHw full_save_code rts_label
                ; tickyAllocHeap hpHw }
            ; setRealHp hpHw
            ; code }
        }
  where
    node_asst
        | nodeMustPointToIt (closureLFInfo cl_info)
        = noStmts
        | otherwise
        = oneStmt (CmmAssign nodeReg (CmmLit (CmmLabel closure_lbl)))
    closure_lbl = closureLabelFromCI cl_info

    full_save_code = node_asst `plusStmts` reg_save_code
    rts_label | is_fun    = CmmReg (CmmGlobal GCFun)
                                -- Function entry point
              | otherwise = CmmReg (CmmGlobal GCEnter1)
                                -- Thunk or case return
        -- In the thunk/case-return case, R1 points to a closure
        -- which should be (re)-entered after GC
Heap checks in a case alternative are nice and easy, provided this is
a bog-standard algebraic case.  We have in our hand:

       * one return address, on the stack,
       * one return value, in Node.

The canned code for this heap check failure just pushes Node on the
stack, saying 'EnterGHC' to return.  The scheduler will return by
entering the top value on the stack, which in turn will return through
the return address, getting us back to where we were.  This is
therefore only valid if the return value is *lifted* (just being
boxed isn't good enough).

For primitive returns, we have an unlifted value in some register
(either R1 or FloatReg1 or DblReg1).  This means using specialised
heap-check code for these cases.
altHeapCheck
    :: AltType  -- PolyAlt, PrimAlt, AlgAlt, but *not* UbxTupAlt
                -- (Unboxed tuples are dealt with by unbxTupleHeapCheck)
    -> Code     -- Continuation
    -> Code
altHeapCheck alt_type code
  = initHeapUsage $ \ hpHw -> do
        { codeOnly $ do
             { do_checks 0 {- no stack check -} hpHw
                         noStmts {- nothing to save -}
                         (rts_label alt_type)
             ; tickyAllocHeap hpHw }
        ; setRealHp hpHw
        ; code }
  where
    rts_label PolyAlt = CmmLit (CmmLabel (mkRtsCodeLabel SLIT("stg_gc_unpt_r1")))
          -- Do *not* enter R1 after a heap check in
          -- a polymorphic case.  It might be a function
          -- and the entry code for a function (currently)
          -- applies it
          --
          -- However R1 is guaranteed to be a pointer

    rts_label (AlgAlt tc) = stg_gc_enter1
          -- Enter R1 after the heap check; it's a pointer

    rts_label (PrimAlt tc)
      = CmmLit $ CmmLabel $
        case primRepToCgRep (tyConPrimRep tc) of
          VoidArg   -> mkRtsCodeLabel SLIT("stg_gc_noregs")
          FloatArg  -> mkRtsCodeLabel SLIT("stg_gc_f1")
          DoubleArg -> mkRtsCodeLabel SLIT("stg_gc_d1")
          LongArg   -> mkRtsCodeLabel SLIT("stg_gc_l1")
                        -- R1 is boxed but unlifted:
          PtrArg    -> mkRtsCodeLabel SLIT("stg_gc_unpt_r1")
                        -- R1 is unboxed:
          NonPtrArg -> mkRtsCodeLabel SLIT("stg_gc_unbx_r1")

    rts_label (UbxTupAlt _) = panic "altHeapCheck"
Unboxed tuple alternatives and let-no-escapes (the two most annoying
constructs to generate code for!)  For unboxed tuple returns, there
are an arbitrary number of possibly unboxed return values, some of
which will be in registers, and the others will be on the stack.  We
always organise the stack-resident fields into pointers &
non-pointers, and pass the number of each to the heap check code.
unbxTupleHeapCheck
        :: [(Id, GlobalReg)]    -- Live registers
        -> WordOff              -- no. of stack slots containing ptrs
        -> WordOff              -- no. of stack slots containing nonptrs
        -> CmmStmts             -- code to insert in the failure path
        -> Code
        -> Code

unbxTupleHeapCheck regs ptrs nptrs fail_code code
  -- We can't manage more than 255 pointers/non-pointers
  -- in a generic heap check.
  | ptrs > 255 || nptrs > 255 = panic "altHeapCheck"
  | otherwise
  = initHeapUsage $ \ hpHw -> do
        { codeOnly $ do { do_checks 0 {- no stack check -} hpHw
                                    full_fail_code rts_label
                        ; tickyAllocHeap hpHw }
        ; setRealHp hpHw
        ; code }
  where
    full_fail_code  = fail_code `plusStmts` oneStmt assign_liveness
    assign_liveness = CmmAssign (CmmGlobal (VanillaReg 9))      -- Ho ho ho!
                                (CmmLit (mkWordCLit liveness))
    liveness        = mkRegLiveness regs ptrs nptrs
    rts_label       = CmmLit (CmmLabel (mkRtsCodeLabel SLIT("stg_gc_ut")))
%************************************************************************
%************************************************************************
When failing a check, we save a return address on the stack and
jump to a pre-compiled code fragment that saves the live registers
and returns to the scheduler.

The return address in most cases will be the beginning of the basic
block in which the check resides, since we need to perform the check
again on re-entry because someone else might have stolen the resource
in the meantime.
do_checks :: WordOff            -- Stack headroom
          -> WordOff            -- Heap headroom
          -> CmmStmts           -- Assignments to perform on failure
          -> CmmExpr            -- RTS address to jump to on failure
          -> Code
do_checks 0 0 _ _ = nopC
do_checks stk hp reg_save_code rts_lbl
  = do_checks' (CmmLit (mkIntCLit (stk*wORD_SIZE)))
               (CmmLit (mkIntCLit (hp*wORD_SIZE)))
               (stk /= 0) (hp /= 0) reg_save_code rts_lbl
-- The offsets are now in *bytes*
do_checks' stk_expr hp_expr stk_nonzero hp_nonzero reg_save_code rts_lbl
  = do  { doGranAllocate hp_expr

        -- Emit a block for the heap-check-failure code
        ; blk_id <- forkLabelledCode $ do
                        { whenC hp_nonzero $
                             stmtC (CmmAssign (CmmGlobal HpAlloc) hp_expr)
                        ; emitStmts reg_save_code
                        ; stmtC (CmmJump rts_lbl []) }
        -- Check for stack overflow *FIRST*; otherwise
        -- we might bump Hp and then fail the stack overflow check
        ; whenC stk_nonzero
                (stmtC (CmmCondBranch stk_oflo blk_id))

        ; whenC hp_nonzero
                (stmtsC [CmmAssign hpReg
                                (cmmOffsetExprB (CmmReg hpReg) hp_expr),
                         CmmCondBranch hp_oflo blk_id])
                -- Bump heap pointer, and test for heap exhaustion
                -- Note that we don't move the heap pointer unless the
                -- stack check succeeds.  Otherwise we might end up
                -- with slop at the end of the current block, which can
                -- confuse the LDV profiler.
        }
  where
        -- Stk overflow if (Sp - stk_bytes < SpLim)
    stk_oflo = CmmMachOp mo_wordULt
                  [CmmMachOp mo_wordSub [CmmReg spReg, stk_expr],
                   CmmReg (CmmGlobal SpLim)]
        -- Hp overflow if (Hp > HpLim)
        -- (Hp has been incremented by now)
        -- HpLim points to the LAST WORD of valid allocation space.
    hp_oflo = CmmMachOp mo_wordUGt
                  [CmmReg hpReg, CmmReg (CmmGlobal HpLim)]
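-- An illustrative sketch (not from the original source) of the Cmm this
-- produces for do_checks 2 4 ... on a 32-bit target (2 stack words and
-- 4 heap words, i.e. 8 and 16 bytes):
--
--          if (Sp - 8 < SpLim) goto blk_id;
--          Hp = Hp + 16;
--          if (Hp > HpLim) goto blk_id;
--          ...
--      blk_id:
--          HpAlloc = 16;
--          <reg_save_code>
--          jump rts_lbl;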
%************************************************************************
                Generic Heap/Stack Checks - used in the RTS
%************************************************************************
hpChkGen :: CmmExpr -> CmmExpr -> CmmExpr -> Code
hpChkGen bytes liveness reentry
  = do_checks' (CmmLit (mkIntCLit 0)) bytes False True assigns stg_gc_gen
  where
    assigns = mkStmts [
                CmmAssign (CmmGlobal (VanillaReg 9))  liveness,
                CmmAssign (CmmGlobal (VanillaReg 10)) reentry
                ]
-- a heap check where R1 points to the closure to enter on return, and
-- we want to assign to Sp[0] on failure (used in AutoApply.cmm:BUILD_PAP).
hpChkNodePointsAssignSp0 :: CmmExpr -> CmmExpr -> Code
hpChkNodePointsAssignSp0 bytes sp0
  = do_checks' (CmmLit (mkIntCLit 0)) bytes False True assign stg_gc_enter1
  where assign = oneStmt (CmmStore (CmmReg spReg) sp0)
stkChkGen :: CmmExpr -> CmmExpr -> CmmExpr -> Code
stkChkGen bytes liveness reentry
  = do_checks' bytes (CmmLit (mkIntCLit 0)) True False assigns stg_gc_gen
  where
    assigns = mkStmts [
                CmmAssign (CmmGlobal (VanillaReg 9))  liveness,
                CmmAssign (CmmGlobal (VanillaReg 10)) reentry
                ]
stkChkNodePoints :: CmmExpr -> Code
stkChkNodePoints bytes
  = do_checks' bytes (CmmLit (mkIntCLit 0)) True False noStmts stg_gc_enter1

stg_gc_gen    = CmmLit (CmmLabel (mkRtsCodeLabel SLIT("stg_gc_gen")))
stg_gc_enter1 = CmmReg (CmmGlobal GCEnter1)
%************************************************************************
\subsection[initClosure]{Initialise a dynamic closure}
%************************************************************************
@allocDynClosure@ puts the thing in the heap, and modifies the virtual Hp
to account for the allocation.
allocDynClosure
        :: ClosureInfo
        -> CmmExpr              -- Cost Centre to stick in the object
        -> CmmExpr              -- Cost Centre to blame for this alloc
                                -- (usually the same; sometimes "OVERHEAD")

        -> [(CmmExpr, VirtualHpOffset)] -- Offsets from start of the object
                                        -- ie Info ptr has offset zero.
        -> FCode VirtualHpOffset        -- Returns virt offset of object
allocDynClosure cl_info use_cc blame_cc amodes_with_offsets
  = do  { virt_hp <- getVirtHp

        -- FIND THE OFFSET OF THE INFO-PTR WORD
        ; let   info_offset = virt_hp + 1
                -- info_offset is the VirtualHpOffset of the first
                -- word of the new object
                -- Remember, virtHp points to last allocated word,
                -- ie 1 *before* the info-ptr word of new object.

                info_ptr = CmmLit (CmmLabel (infoTableLabelFromCI cl_info))
                hdr_w_offsets = initDynHdr info_ptr use_cc `zip` [0..]

        -- SAY WHAT WE ARE ABOUT TO DO
        ; profDynAlloc cl_info use_cc
                -- ToDo: This is almost certainly wrong
                -- We're ignoring blame_cc. But until we've
                -- fixed the boxing hack in chooseDynCostCentres etc,
                -- we're worried about making things worse by "fixing"
                -- this part to use blame_cc!

        ; tickyDynAlloc cl_info

        -- ALLOCATE THE OBJECT
        ; base <- getHpRelOffset info_offset
        ; hpStore base (hdr_w_offsets ++ amodes_with_offsets)

        -- BUMP THE VIRTUAL HEAP POINTER
        ; setVirtHp (virt_hp + closureSize cl_info)

        -- RETURN PTR TO START OF OBJECT
        ; returnFC info_offset }
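-- A worked example of the bookkeeping (illustrative only): if virtHp is 3
-- on entry and closureSize cl_info is 4 (one header word plus three payload
-- words, no profiling header), then info_offset is 4, the object occupies
-- virtual words 4..7, setVirtHp leaves virtHp at 7 (the last allocated
-- word), and the returned offset 4 is what getHpRelOffset later turns into
-- an Hp-relative address for the new closure.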
initDynHdr :: CmmExpr
           -> CmmExpr           -- Cost centre to put in object
           -> [CmmExpr]
initDynHdr info_ptr cc
  =  [info_ptr]
        -- ToDo: Gransim stuff
        -- ToDo: Parallel stuff
  ++ dynProfHdr cc
hpStore :: CmmExpr -> [(CmmExpr, VirtualHpOffset)] -> Code
-- Store the item (expr,off) in base[off]
hpStore base es
  = stmtsC [ CmmStore (cmmOffsetW base off) val
           | (val, off) <- es ]
emitSetDynHdr :: CmmExpr -> CmmExpr -> CmmExpr -> Code
emitSetDynHdr base info_ptr ccs
  = hpStore base (zip (initDynHdr info_ptr ccs) [0..])