-----------------------------------------------------------------------------
--
-- Stg to C--: heap management functions
--
-- (c) The University of Glasgow 2004-2006
--
-----------------------------------------------------------------------------
module StgCmmHeap (
        getVirtHp, setVirtHp, setRealHp,
        getHpRelOffset, hpRel,

        entryHeapCheck, altHeapCheck,

        layOutDynConstr, layOutStaticConstr,
        mkVirtHeapOffsets, mkStaticClosureFields, mkStaticClosure,

        allocDynClosure, emitSetDynHdr
    ) where

#include "HsVersions.h"

import StgSyn
import CLabel
import StgCmmLayout
import StgCmmUtils
import StgCmmMonad
import StgCmmProf
import StgCmmTicky
import StgCmmGran
import StgCmmClosure
import StgCmmEnv

import MkZipCfgCmm

import SMRep
import CmmExpr
import CmmUtils
import Id
import DataCon
import CostCentre
import Outputable
import FastString( LitString, mkFastString, sLit )
import Constants

-----------------------------------------------------------
-- Layout of heap objects
-----------------------------------------------------------

layOutDynConstr, layOutStaticConstr
        :: DataCon -> [(PrimRep, a)]
        -> (ClosureInfo, [(NonVoid a, VirtualHpOffset)])
-- No Void arguments in result

layOutDynConstr    = layOutConstr False
layOutStaticConstr = layOutConstr True

layOutConstr :: Bool -> DataCon -> [(PrimRep, a)]
             -> (ClosureInfo, [(NonVoid a, VirtualHpOffset)])
layOutConstr is_static data_con args
   = (mkConInfo is_static data_con tot_wds ptr_wds,
      things_w_offsets)
  where
    (tot_wds,            --  #ptr_wds + #nonptr_wds
     ptr_wds,            --  #ptr_wds
     things_w_offsets) = mkVirtHeapOffsets False{-not a thunk-} args
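
-- An illustrative sketch (the values here are hypothetical, not from
-- the original source): for a constructor with one Int# field and one
-- pointer field, mkVirtHeapOffsets lays out all pointers before all
-- non-pointers, so assuming a one-word header (no profiling):
--
--      layOutDynConstr con [(IntRep, i), (PtrRep, p)]
--        ==> (con_info, [(NonVoid p, 1), (NonVoid i, 2)])
--
-- Offsets are VirtualHpOffsets in words; the info pointer itself sits
-- at offset 0.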

-----------------------------------------------------------
-- Initialise dynamic heap objects
-----------------------------------------------------------

allocDynClosure
        :: ClosureInfo
        -> CmmExpr      -- Cost Centre to stick in the object
        -> CmmExpr      -- Cost Centre to blame for this alloc
                        -- (usually the same; sometimes "OVERHEAD")

        -> [(NonVoid StgArg, VirtualHpOffset)]  -- Offsets from start of the object
                                                -- ie Info ptr has offset zero.
                                                -- No void args in here
        -> FCode (LocalReg, CmmAGraph)

-- allocDynClosure allocates the thing in the heap,
-- and modifies the virtual Hp to account for this.
-- The second return value is the graph that sets the value of the
-- returned LocalReg, which should point to the closure after executing
-- the graph.

-- Note [Return a LocalReg]
-- ~~~~~~~~~~~~~~~~~~~~~~~~
-- allocDynClosure returns a LocalReg, not a (Hp+8) CmmExpr.
-- Reason:
--      ...allocate object...
--      obj = Hp + 8
--      y = f(z)
--      ...here obj is still valid,
--      but Hp+8 means something quite different...

allocDynClosure cl_info use_cc _blame_cc args_w_offsets
  = do  { virt_hp <- getVirtHp

        -- SAY WHAT WE ARE ABOUT TO DO
        ; tickyDynAlloc cl_info
        ; profDynAlloc cl_info use_cc
                -- ToDo: This is almost certainly wrong
                -- We're ignoring blame_cc. But until we've
                -- fixed the boxing hack in chooseDynCostCentres etc,
                -- we're worried about making things worse by "fixing"
                -- this part to use blame_cc!

        -- FIND THE OFFSET OF THE INFO-PTR WORD
        ; let   info_offset = virt_hp + 1
                -- info_offset is the VirtualHpOffset of the first
                -- word of the new object
                -- Remember, virtHp points to last allocated word,
                -- ie 1 *before* the info-ptr word of new object.

                info_ptr = CmmLit (CmmLabel (infoTableLabelFromCI cl_info))

        -- ALLOCATE THE OBJECT
        ; base <- getHpRelOffset info_offset
        ; emit (mkComment $ mkFastString "allocDynClosure")
        ; emitSetDynHdr base info_ptr use_cc
        ; let (args, offsets) = unzip args_w_offsets
        ; cmm_args <- mapM getArgAmode args     -- No void args
        ; hpStore base cmm_args offsets

        -- BUMP THE VIRTUAL HEAP POINTER
        ; setVirtHp (virt_hp + closureSize cl_info)

        -- Assign to a temporary and return
        -- Note [Return a LocalReg]
        ; hp_rel <- getHpRelOffset info_offset
        ; getCodeR $ assignTemp hp_rel }
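
-- A minimal usage sketch (illustrative; the caller shown here is
-- hypothetical, and emitting the returned graph is the caller's job):
--
--      ; (reg, graph) <- allocDynClosure cl_info curCCS curCCS args_w_offsets
--      ; emit graph
--      ; ... (CmmReg (CmmLocal reg)) now points to the new closure ...
--
-- The two cost-centre arguments usually coincide, as they do here.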

emitSetDynHdr :: CmmExpr -> CmmExpr -> CmmExpr -> FCode ()
emitSetDynHdr base info_ptr ccs
  = hpStore base header [0..]
  where
    header = [info_ptr] ++ dynProfHdr ccs
        -- ToDo: Gransim stuff
        -- ToDo: Parallel stuff

hpStore :: CmmExpr -> [CmmExpr] -> [VirtualHpOffset] -> FCode ()
-- Store each val at the corresponding word offset: base[off] := val
hpStore base vals offs
  = emit (catAGraphs (zipWith mk_store vals offs))
  where
    mk_store val off = mkStore (cmmOffsetW base off) val
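
-- For example (sketch): 'hpStore base [x,y] [1,2]' emits the stores
--      base[1] := x; base[2] := y
-- with the offsets measured in words.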

-----------------------------------------------------------
-- Layout of static closures
-----------------------------------------------------------

-- Make a static closure, adding on any extra padding needed for CAFs,
-- and adding a static link field if necessary.

mkStaticClosureFields
        :: ClosureInfo
        -> CostCentreStack
        -> Bool                 -- Has CAF refs
        -> [CmmLit]             -- Payload
        -> [CmmLit]             -- The full closure
mkStaticClosureFields cl_info ccs caf_refs payload
  = mkStaticClosure info_lbl ccs payload padding_wds
        static_link_field saved_info_field
  where
    info_lbl = infoTableLabelFromCI cl_info

    -- CAFs must have consistent layout, regardless of whether they
    -- are actually updatable or not. The layout of a CAF is:
    --
    --        3 saved_info
    --        2 static_link
    --        1 indirectee
    --        0 info ptr
    --
    -- the static_link and saved_info fields must always be in the same
    -- place. So we use closureNeedsUpdSpace rather than
    -- closureUpdReqd here:

    is_caf = closureNeedsUpdSpace cl_info

    padding_wds
        | not is_caf = []
        | otherwise  = ASSERT(null payload) [mkIntCLit 0]

    static_link_field
        | is_caf || staticClosureNeedsLink cl_info = [static_link_value]
        | otherwise                                = []

    saved_info_field
        | is_caf    = [mkIntCLit 0]
        | otherwise = []

        -- For a static constructor which has NoCafRefs, we set the
        -- static link field to a non-zero value so the garbage
        -- collector will ignore it.
    static_link_value
        | caf_refs  = mkIntCLit 0
        | otherwise = mkIntCLit 1

mkStaticClosure :: CLabel -> CostCentreStack -> [CmmLit]
  -> [CmmLit] -> [CmmLit] -> [CmmLit] -> [CmmLit]
mkStaticClosure info_lbl ccs payload padding_wds static_link_field saved_info_field
  =  [CmmLabel info_lbl]
  ++ variable_header_words
  ++ concatMap padLitToWord payload
  ++ padding_wds
  ++ static_link_field
  ++ saved_info_field
  where
    variable_header_words
        =  staticGranHdr
        ++ staticParHdr
        ++ staticProfHdr ccs
        ++ staticTickyHdr

-- JD: Simon had elided this padding, but without it the C back end
-- fails an assertion. Maybe it's a bad assertion, and this padding is
-- indeed unnecessary?
padLitToWord :: CmmLit -> [CmmLit]
padLitToWord lit = lit : padding pad_length
  where width = typeWidth (cmmLitType lit)
        pad_length = wORD_SIZE - widthInBytes width :: Int

        padding n | n <= 0         = []
                  | n `rem` 2 /= 0 = CmmInt 0 W8  : padding (n-1)
                  | n `rem` 4 /= 0 = CmmInt 0 W16 : padding (n-2)
                  | n `rem` 8 /= 0 = CmmInt 0 W32 : padding (n-4)
                  | otherwise      = CmmInt 0 W64 : padding (n-8)
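
-- Worked example (sketch, assuming wORD_SIZE == 8): a W32 literal has
-- widthInBytes 4, so pad_length = 4 and
--      padLitToWord (CmmInt 0 W32) == [CmmInt 0 W32, CmmInt 0 W32]
-- i.e. a single W32 zero of padding brings the field up to a full word.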

-----------------------------------------------------------
-- Heap overflow checking
-----------------------------------------------------------

{- Note [Heap checks]
   ~~~~~~~~~~~~~~~~~~
Heap checks come in various forms. We provide the following entry
points to the runtime system, all of which use the native C-- entry
convention.

  * gc() performs garbage collection and returns
    nothing to its caller

  * A series of canned entry points like
        r = gc_1p( r )
    where r is a pointer. This performs gc, and
    then returns its argument r to its caller.

  * A series of canned entry points like
        gcfun_2p( f, x, y )
    where f is a function closure of arity 2
    This performs garbage collection, keeping alive the
    three argument ptrs, and then tail-calls f(x,y)

These are used in the following circumstances

* entryHeapCheck: Function entry
    (a) With a canned GC entry sequence
        f( f_clo, x:ptr, y:ptr ) {
             Hp = Hp+8
             if Hp > HpLim goto L
             ...
          L: HpAlloc = 8
             jump gcfun_2p( f_clo, x, y ) }
     Note the tail call to the garbage collector;
     it should do no register shuffling

    (b) No canned sequence
        f( f_clo, x:ptr, y:ptr, ...etc... ) {
          T: Hp = Hp+8
             if Hp > HpLim goto L
             ...
          L: HpAlloc = 8
             call gc()  -- Needs an info table
             goto T }

* altHeapCheck: Immediately following an eval
  Started as
        case f x y of r { (p,q) -> rhs }
  (a) With a canned sequence for the results of f
      (which is the very common case since
      all boxed cases return just one pointer
           ...
           r = f( x, y )
        K:      -- K needs an info table
           Hp = Hp+8
           if Hp > HpLim goto L
           ...code for rhs...

        L: r = gc_1p( r )
           goto K }

        Here, the info table needed by the call
        to gc_1p should be the *same* as the
        one for the call to f; the C-- optimiser
        spots this sharing opportunity)

   (b) No canned sequence for results of f
       Note second info table
           ...
           (r1,r2,r3) = call f( x, y )
        K:
           Hp = Hp+8
           if Hp > HpLim goto L
           ...code for rhs...

        L: call gc()    -- Extra info table here
           goto K

* generalHeapCheck: Anywhere else
  e.g. entry to thunk,
       case branch *not* following eval,
       or let-no-escape
  Exactly the same as the previous case:

        K:      -- K needs an info table
           Hp = Hp+8
           if Hp > HpLim goto L
           ...

        L: call gc()
           goto K
-}

--------------------------------------------------------------
-- A heap/stack check at a function or thunk entry point.

entryHeapCheck :: Maybe LocalReg -- Function (closure environment)
               -> Int            -- Arity -- not the same as length args, because of voids
               -> [LocalReg]     -- Non-void args (empty for thunk)
               -> FCode ()
               -> FCode ()

entryHeapCheck fun arity args code
  = do updfr_sz <- getUpdFrameOff
       heapCheck True (gc_call updfr_sz) code   -- The 'fun' keeps relevant CAFs alive
  where
    args' = case fun of Just f  -> f : args
                        Nothing -> args
    arg_exprs = map (CmmReg . CmmLocal) args'

    gc_call updfr_sz
        | arity == 0 = mkJumpGC (CmmReg (CmmGlobal GCEnter1)) arg_exprs updfr_sz
        | otherwise  = case gc_lbl args' of
                         Just lbl -> mkJumpGC (CmmLit (CmmLabel (mkRtsCodeLabel lbl)))
                                              arg_exprs updfr_sz
                         Nothing  -> mkCall generic_gc (GC, GC) [] [] updfr_sz

    gc_lbl :: [LocalReg] -> Maybe LitString
    gc_lbl [reg]
        | isGcPtrType ty = Just (sLit "stg_gc_unpt_r1") -- "stg_gc_fun_1p"
        | isFloatType ty = case width of
                             W32    -> Just (sLit "stg_gc_f1") -- "stg_gc_fun_f1"
                             W64    -> Just (sLit "stg_gc_d1") -- "stg_gc_fun_d1"
                             _other -> Nothing
        | otherwise      = case width of
                             W32    -> Just (sLit "stg_gc_unbx_r1") -- "stg_gc_fun_unbx_r1"
                             W64    -> Just (sLit "stg_gc_l1") -- "stg_gc_fun_unbx_l1"
                             _other -> Nothing -- Narrow cases
        where
          ty = localRegType reg
          width = typeWidth ty

    gc_lbl regs = gc_lbl_ptrs (map (isGcPtrType . localRegType) regs)

    gc_lbl_ptrs :: [Bool] -> Maybe LitString
    -- JD: TEMPORARY -- UNTIL THOSE FUNCTIONS EXIST...
    --gc_lbl_ptrs [True,True]      = Just (sLit "stg_gc_fun_2p")
    --gc_lbl_ptrs [True,True,True] = Just (sLit "stg_gc_fun_3p")
    gc_lbl_ptrs _ = Nothing
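
-- Usage sketch (illustrative, with hypothetical registers): for a
-- function of arity 2 with non-void pointer args x and y, the entry
-- code would be wrapped as
--
--      entryHeapCheck (Just f_reg) 2 [x_reg, y_reg] $ <code for the body>
--
-- Since gc_lbl_ptrs currently returns Nothing for two or more live
-- registers (the canned multi-pointer entry points are still missing),
-- this example falls through to generic_gc.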

altHeapCheck :: [LocalReg] -> FCode a -> FCode a
altHeapCheck regs code
  = do updfr_sz <- getUpdFrameOff
       heapCheck False (gc_call updfr_sz) code
  where
    gc_call updfr_sz
        | null regs = mkCall generic_gc (GC, GC) [] [] updfr_sz

        | Just gc_lbl <- rts_label regs -- Canned call
        = mkCall (CmmLit (CmmLabel (mkRtsCodeLabel gc_lbl))) (GC, GC)
                 regs (map (CmmReg . CmmLocal) regs) updfr_sz

        | otherwise -- No canned call, and non-empty live vars
        = mkCall generic_gc (GC, GC) [] [] updfr_sz

    rts_label [reg]
        | isGcPtrType ty = Just (sLit "stg_gc_unpt_r1")
        | isFloatType ty = case width of
                             W32    -> Just (sLit "stg_gc_f1")
                             W64    -> Just (sLit "stg_gc_d1")
                             _other -> Nothing
        | otherwise      = case width of
                             W32    -> Just (sLit "stg_gc_unbx_r1")
                             W64    -> Just (sLit "stg_gc_l1") -- "stg_gc_fun_unbx_l1"
                             _other -> Nothing -- Narrow cases
        where
          ty = localRegType reg
          width = typeWidth ty

    rts_label _ = Nothing
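
-- Usage sketch (illustrative): after evaluating a scrutinee whose result
-- lands in a single pointer register r, the alternative's code would be
-- wrapped as
--
--      altHeapCheck [r] $ <code for the alternative>
--
-- which picks the canned "stg_gc_unpt_r1" entry point via rts_label.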

generic_gc :: CmmExpr -- The generic GC procedure; no params, no results
generic_gc = CmmLit (CmmLabel (mkRtsCodeLabel (sLit "stg_gc_noregs")))
-- JD: TEMPORARY -- UNTIL THOSE FUNCTIONS EXIST...
-- generic_gc = CmmLit (CmmLabel (mkRtsCodeLabel (sLit "stg_gc_fun")))

-------------------------------
heapCheck :: Bool -> CmmAGraph -> FCode a -> FCode a
heapCheck checkStack do_gc code
  = getHeapUsage $ \ hpHw ->
    do  { emit $ do_checks checkStack hpHw do_gc
                -- Emit heap checks, but be sure to do it lazily so
                -- that the conditionals on hpHw don't cause a black hole
        ; tickyAllocHeap hpHw
        ; doGranAllocate hpHw
        ; setRealHp hpHw
        ; code }

do_checks :: Bool       -- Should we check the stack?
          -> WordOff    -- Heap headroom
          -> CmmAGraph  -- What to do on failure
          -> CmmAGraph
do_checks checkStack alloc do_gc
  = withFreshLabel "gc" $ \ loop_id ->
    withFreshLabel "gc" $ \ gc_id   ->
      mkLabel loop_id
      <*> (let hpCheck = if alloc == 0 then mkNop
                         else mkAssign hpReg bump_hp <*>
                              mkCmmIfThen hp_oflo (save_alloc <*> mkBranch gc_id)
           in if checkStack then
                mkCmmIfThenElse sp_oflo (mkBranch gc_id) hpCheck
              else hpCheck)
      <*> mkComment (mkFastString "outOfLine should follow:")
      <*> outOfLine (mkLabel gc_id
                     <*> mkComment (mkFastString "outOfLine here")
                     <*> do_gc
                     <*> mkBranch loop_id)
        -- Test for stack pointer exhaustion, then
        -- bump heap pointer, and test for heap exhaustion
        -- Note that we don't move the heap pointer unless the
        -- stack check succeeds. Otherwise we might end up
        -- with slop at the end of the current block, which can
        -- confuse the LDV profiler.
  where
    alloc_lit = CmmLit (mkIntCLit (alloc*wORD_SIZE)) -- Bytes
    bump_hp   = cmmOffsetExprB (CmmReg hpReg) alloc_lit

    -- Sp overflow if (Sp - CmmHighStack < SpLim)
    sp_oflo = CmmMachOp mo_wordULt
                  [CmmMachOp (MO_Sub (typeWidth (cmmRegType spReg)))
                             [CmmReg spReg, CmmLit CmmHighStackMark],
                   CmmReg spLimReg]

    -- Hp overflow if (Hp > HpLim)
    -- (Hp has been incremented by now)
    -- HpLim points to the LAST WORD of valid allocation space.
    hp_oflo = CmmMachOp mo_wordUGt
                  [CmmReg hpReg, CmmReg (CmmGlobal HpLim)]

    save_alloc = mkAssign (CmmGlobal HpAlloc) alloc_lit
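
-- As a sketch (illustrative, assuming an 8-byte word and alloc == 2),
-- the graph built above corresponds roughly to this C--:
--
--      loop:
--        if (Sp - <max stack usage> < SpLim) goto gc;  -- only if checkStack
--        Hp = Hp + 16;
--        if (Hp > HpLim) { HpAlloc = 16; goto gc; }
--        <rest of the block>
--      gc:
--        <do_gc>
--        goto loop;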

{- Unboxed tuple alternatives and let-no-escapes (the two most annoying
constructs to generate code for!)  For unboxed tuple returns, there
are an arbitrary number of possibly unboxed return values, some of
which will be in registers, and the others will be on the stack. We
always organise the stack-resident fields into pointers &
non-pointers, and pass the number of each to the heap check code. -}

unbxTupleHeapCheck
        :: [(Id, GlobalReg)]    -- Live registers
        -> WordOff              -- no. of stack slots containing ptrs
        -> WordOff              -- no. of stack slots containing nonptrs
        -> CmmAGraph            -- code to insert in the failure path
        -> FCode ()
        -> FCode ()

unbxTupleHeapCheck regs ptrs nptrs fail_code code
  -- We can't manage more than 255 pointers/non-pointers
  -- in a generic heap check.
  | ptrs > 255 || nptrs > 255 = panic "unbxTupleHeapCheck"
  | otherwise
  = initHeapUsage $ \ hpHw -> do
        { codeOnly $ do { do_checks 0 {- no stack check -} hpHw
                                    full_fail_code rts_label
                        ; tickyAllocHeap hpHw }
        ; setRealHp hpHw
        ; code }
  where
    full_fail_code  = fail_code `plusStmts` oneStmt assign_liveness
    assign_liveness = CmmAssign (CmmGlobal (VanillaReg 9)) -- Ho ho ho!
                                (CmmLit (mkWordCLit liveness))
    liveness        = mkRegLiveness regs ptrs nptrs
    rts_label       = CmmLit (CmmLabel (mkRtsCodeLabel (sLit "stg_gc_ut")))

{- Old Gransim comment -- I have no idea whether it still makes sense (SLPJ Sep07)
For GrAnSim the code for doing a heap check and doing a context switch
has been separated. Especially, the HEAP_CHK macro only performs a
heap check. THREAD_CONTEXT_SWITCH should be used for doing a context
switch. GRAN_FETCH_AND_RESCHEDULE must be put at the beginning of
every slow entry code in order to simulate the fetching of
closures. If fetching is necessary (i.e. the current closure is not
local), then an automatic context switch is done. -}

{- When failing a check, we save a return address on the stack and
jump to a pre-compiled code fragment that saves the live registers
and returns to the scheduler.

The return address in most cases will be the beginning of the basic
block in which the check resides, since we need to perform the check
again on re-entry because someone else might have stolen the resource
in the meantime. -}

-----------------------------------------------------------
-- Generic Heap/Stack Checks - used in the RTS
-----------------------------------------------------------

hpChkGen :: CmmExpr -> CmmExpr -> CmmExpr -> FCode ()
hpChkGen bytes liveness reentry
  = do_checks' bytes True assigns stg_gc_gen
  where
    assigns = mkStmts [
                CmmAssign (CmmGlobal (VanillaReg 9))  liveness,
                CmmAssign (CmmGlobal (VanillaReg 10)) reentry
              ]

-- A heap check where R1 points to the closure to enter on return, and
-- we want to assign to Sp[0] on failure (used in AutoApply.cmm:BUILD_PAP).
hpChkNodePointsAssignSp0 :: CmmExpr -> CmmExpr -> FCode ()
hpChkNodePointsAssignSp0 bytes sp0
  = do_checks' bytes True assign stg_gc_enter1
  where assign = oneStmt (CmmStore (CmmReg spReg) sp0)

stg_gc_gen :: CmmExpr
stg_gc_gen = CmmLit (CmmLabel (mkRtsCodeLabel (sLit "stg_gc_gen")))

stg_gc_enter1 :: CmmExpr
stg_gc_enter1 = CmmReg (CmmGlobal GCEnter1)