{-# LANGUAGE GADTs, NoMonoLocalBinds, FlexibleContexts, ViewPatterns #-}
-- Norman likes local bindings
-- If this module lives on I'd like to get rid of this flag in due course

{-# OPTIONS_GHC -fno-warn-warnings-deprecations #-}
{-# OPTIONS_GHC -fno-warn-incomplete-patterns #-}
#if __GLASGOW_HASKELL__ >= 701
-- GHC 7.0.1 improved incomplete pattern warnings with GADTs
{-# OPTIONS_GHC -fwarn-incomplete-patterns #-}
#endif

module CmmSpillReload
  ( DualLive(..)
  , dualLiveLattice, dualLiveTransfers, dualLiveness
  --, insertSpillsAndReloads --- XXX todo check live-in at entry against formals
  , dualLivenessWithInsertion

  , rewriteAssignments
  , removeDeadAssignmentsAndReloads
  )
where
import BlockId
import Cmm
import CmmExpr
import CmmLive
import OptimizationFuel

import Control.Monad
import Outputable hiding (empty)
import qualified Outputable as PP
import UniqSet
import UniqFM
import Unique

import Compiler.Hoopl hiding (Unique)
import Data.Maybe
import Prelude hiding (succ, zip)
{- Note [Overview of spill/reload]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The point of this module is to insert spills and reloads to establish
the invariant that at a call (or at any proc point with an established
protocol) all live variables not expected in registers are sitting on
the stack.  We use a backward analysis to insert spills and reloads.
It should be followed by a forward transformation to sink reloads as
deeply as possible, so as to reduce register pressure.

A variable can be expected to be live in a register, live on the
stack, or both.  This analysis ensures that spills and reloads are
inserted as needed to make sure that every live variable needed
after a call is available on the stack.  Spills are pushed back to
their reaching definitions, but reloads are dropped wherever needed
and will have to be sunk by a later forward transformation.
-}
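
-- For illustration only: a hypothetical before/after sketch in C--
-- style pseudocode (names and syntax are made up, not GHC output).
-- If x is live across a call, the pass spills x at its reaching
-- definition and reloads it after the call:
--
--         x = R1;                       x = R1;
--                                       I32[RegSlot(x)] = x;   // spill
--         call f(...)            ==>    call f(...)
--            returns to L;                 returns to L;
--      L: ... use of x ...            L: x = I32[RegSlot(x)];  // reload
--                                        ... use of x ...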
data DualLive = DualLive { on_stack :: RegSet, in_regs :: RegSet }

dualUnion :: DualLive -> DualLive -> DualLive
dualUnion (DualLive s r) (DualLive s' r') =
    DualLive (s `unionUniqSets` s') (r `unionUniqSets` r')

dualUnionList :: [DualLive] -> DualLive
dualUnionList ls = DualLive ss rs
    where ss = unionManyUniqSets $ map on_stack ls
          rs = unionManyUniqSets $ map in_regs  ls

changeStack, changeRegs :: (RegSet -> RegSet) -> DualLive -> DualLive
changeStack f live = live { on_stack = f (on_stack live) }
changeRegs  f live = live { in_regs  = f (in_regs  live) }
dualLiveLattice :: DataflowLattice DualLive
dualLiveLattice = DataflowLattice "variables live in registers and on stack" empty add
    where empty = DualLive emptyRegSet emptyRegSet
          add _ (OldFact old) (NewFact new) = (changeIf $ change1 || change2, DualLive stack regs)
            where (change1, stack) = add1 (on_stack old) (on_stack new)
                  (change2, regs)  = add1 (in_regs  old) (in_regs  new)
          add1 old new = if sizeUniqSet join > sizeUniqSet old then (True, join) else (False, old)
            where join = unionUniqSets old new
dualLivenessWithInsertion :: BlockSet -> CmmGraph -> FuelUniqSM CmmGraph
dualLivenessWithInsertion procPoints g =
  liftM fst $ dataflowPassBwd g [] $ analRewBwd dualLiveLattice
                                                (dualLiveTransfers (g_entry g) procPoints)
                                                (insertSpillAndReloadRewrites g procPoints)

dualLiveness :: BlockSet -> CmmGraph -> FuelUniqSM (BlockEnv DualLive)
dualLiveness procPoints g =
  liftM snd $ dataflowPassBwd g [] $ analBwd dualLiveLattice $ dualLiveTransfers (g_entry g) procPoints
dualLiveTransfers :: BlockId -> BlockSet -> (BwdTransfer CmmNode DualLive)
dualLiveTransfers entry procPoints = mkBTransfer3 first middle last
    where first :: CmmNode C O -> DualLive -> DualLive
          first (CmmEntry id) live = check live id $ -- live at procPoint => spill
            if id /= entry && setMember id procPoints
               then DualLive { on_stack = on_stack live `plusRegSet` in_regs live
                             , in_regs  = emptyRegSet }
               else live
            where check live id x = if id == entry then noLiveOnEntry id (in_regs live) x else x
          middle :: CmmNode O O -> DualLive -> DualLive
          middle m = changeStack updSlots
                   . changeRegs  updRegs
            where -- Reuse middle of liveness analysis from CmmLive
                  updRegs = case getBTransfer3 xferLive of (_, middle, _) -> middle m

                  updSlots live = foldSlotsUsed reload (foldSlotsDefd spill live m) m
                  spill  live s@(RegSlot r, _, _) = check s $ deleteFromRegSet live r
                  spill  live _ = live
                  reload live s@(RegSlot r, _, _) = check s $ extendRegSet live r
                  reload live _ = live
                  check (RegSlot (LocalReg _ ty), o, w) x
                     | o == w && w == widthInBytes (typeWidth ty) = x
                  check _ _ = panic "middleDualLiveness unsupported: slices"
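          -- A sketch of the intended backward facts (assumed, matching
          -- the spill/reload nodes built later in this module): a store
          -- to RegSlot(r) defines the slot, so going backwards r leaves
          -- on_stack; a load from RegSlot(r) uses the slot, so r enters
          -- on_stack.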
          last :: CmmNode O C -> FactBase DualLive -> DualLive
          last l fb = case l of
                        CmmBranch id                   -> lkp id
                        l@(CmmCall {cml_cont=Nothing}) -> changeRegs (gen l . kill l) empty
                        l@(CmmCall {cml_cont=Just k})  -> call l k
                        l@(CmmForeignCall {succ=k})    -> call l k
                        l@(CmmCondBranch _ t f)        -> changeRegs (gen l . kill l) $ dualUnion (lkp t) (lkp f)
                        l@(CmmSwitch _ tbl)            -> changeRegs (gen l . kill l) $ dualUnionList $ map lkp (catMaybes tbl)
            where empty  = fact_bot dualLiveLattice
                  lkp id = empty `fromMaybe` lookupFact id fb
                  call l k = DualLive (on_stack (lkp k)) (gen l emptyRegSet)

gen  :: UserOfLocalRegs    a => a -> RegSet -> RegSet
gen  a live = foldRegsUsed extendRegSet     live a
kill :: DefinerOfLocalRegs a => a -> RegSet -> RegSet
kill a live = foldRegsDefd deleteFromRegSet live a
insertSpillAndReloadRewrites :: CmmGraph -> BlockSet -> CmmBwdRewrite DualLive
insertSpillAndReloadRewrites graph procPoints = deepBwdRw3 first middle nothing
  -- Beware: deepBwdRw with one polymorphic function seems more reasonable here,
  -- but GHC miscompiles it, see bug #4044.
    where first :: CmmNode C O -> Fact O DualLive -> CmmReplGraph C O
          first e@(CmmEntry id) live = return $
            if id /= (g_entry graph) && setMember id procPoints then
              case map reload (uniqSetToList spill_regs) of
                [] -> Nothing
                is -> Just $ mkFirst e <*> mkMiddles is
            else Nothing
              where
                -- If we are splitting procedures, we need the LastForeignCall
                -- to spill its results to the stack because they will only
                -- be used by a separate procedure (so they can't stay in LocalRegs).
                splitting = True
                spill_regs = if splitting then in_regs live
                             else in_regs live `minusRegSet` defs
                defs = case mapLookup id firstDefs of
                         Just defs' -> defs'
                         Nothing    -> emptyRegSet
          -- A LastForeignCall may contain some definitions, which take place
          -- on return from the function call. Therefore, we build a map (firstDefs)
          -- from BlockId to the set of variables defined on return to the BlockId.
          firstDefs = mapFold addLive emptyBlockMap (toBlockMap graph)
          addLive :: CmmBlock -> BlockEnv RegSet -> BlockEnv RegSet
          addLive b env = case lastNode b of
                            CmmForeignCall {succ=k, res=defs} -> add k (mkRegSet defs) env
                            _                                 -> env
          add bid defs env = mapInsert bid defs'' env
            where defs'' = case mapLookup bid env of
                             Just defs' -> timesRegSet defs defs'
                             Nothing    -> defs
          middle :: CmmNode O O -> Fact O DualLive -> CmmReplGraph O O
          middle (CmmAssign (CmmLocal reg) (CmmLoad (CmmStackSlot (RegSlot reg') _) _)) _ | reg == reg' = return Nothing
          middle m@(CmmAssign (CmmLocal reg) _) live = return $
              if reg `elemRegSet` on_stack live then -- must spill
                   my_trace "Spilling" (f4sep [text "spill" <+> ppr reg,
                                               text "after"{-, ppr m-}]) $
                   Just $ mkMiddles $ [m, spill reg]
              else Nothing
          middle _ _ = return Nothing

          nothing _ _ = return Nothing
regSlot :: LocalReg -> CmmExpr
regSlot r = CmmStackSlot (RegSlot r) (widthInBytes $ typeWidth $ localRegType r)

spill, reload :: LocalReg -> CmmNode O O
spill  r = CmmStore  (regSlot r) (CmmReg $ CmmLocal r)
reload r = CmmAssign (CmmLocal r) (CmmLoad (regSlot r) $ localRegType r)
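
-- For a 32-bit register r, the nodes above print roughly as the
-- following C-- (illustrative concrete syntax only):
--
--      spill  r   ~   I32[RegSlot(r)] = r;
--      reload r   ~   r = I32[RegSlot(r)];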
removeDeadAssignmentsAndReloads :: BlockSet -> CmmGraph -> FuelUniqSM CmmGraph
removeDeadAssignmentsAndReloads procPoints g =
   liftM fst $ dataflowPassBwd g [] $ analRewBwd dualLiveLattice
                                                 (dualLiveTransfers (g_entry g) procPoints)
                                                 rewrites
   where rewrites = deepBwdRw3 nothing middle nothing
         -- Beware: deepBwdRw with one polymorphic function seems more reasonable here,
         -- but GHC panics while compiling, see bug #4045.
         middle :: CmmNode O O -> Fact O DualLive -> CmmReplGraph O O
         middle (CmmAssign (CmmLocal reg') _) live | not (reg' `elemRegSet` in_regs live) = return $ Just emptyGraph
         -- XXX maybe this should be somewhere else...
         middle (CmmAssign lhs (CmmReg rhs))    _ | lhs == rhs = return $ Just emptyGraph
         middle (CmmStore  lhs (CmmLoad rhs _)) _ | lhs == rhs = return $ Just emptyGraph
         middle _ _ = return Nothing

         nothing _ _ = return Nothing
----------------------------------------------------------------
--- Usage information

-- We decorate all register assignments with usage information,
-- that is, the maximum number of times the register is referenced
-- while it is live along all outgoing control paths. There are a few
-- subtleties here:
--
--  - If a register goes dead, and then becomes live again, the usages
--    of the disjoint live range don't count towards the original range.
--
--          a = 1; // used once
--          b = a;
--          a = 2; // used once
--          c = a;
--
--  - A register may be used multiple times, but these all reside in
--    different control paths, such that any given execution only uses
--    it once. In that case, the usage count may still be 1.
--
--          a = 1; // used once
--          case (b) of
--              1 -> ... a ...
--              2 -> ... a ...
--
--    This policy corresponds to an inlining strategy that does not
--    duplicate computation but may increase binary size.
--
--  - If we naively implement a usage count, we have a counting to
--    infinity problem across joins. Furthermore, knowing that
--    something is used 2 or more times in one runtime execution isn't
--    particularly useful for optimizations (inlining may be beneficial,
--    but there's no way of knowing that without register pressure
--    information.)
--
--          while (...) {
--              // first iteration, b used once
--              // second iteration, b used twice
--              // third iteration ...
--              a = b;
--          }
--          // b used zero times
--
-- There is an orthogonal question, which is that for every runtime
-- execution, the register may be used only once, but if we inline it
-- in every conditional path, the binary size might increase a lot.
-- But tracking this information would be tricky, because it violates
-- the finite lattice restriction Hoopl requires for termination;
-- we'd thus need to supply an alternate proof, which is probably
-- something we should defer until we actually have an optimization
-- that would take advantage of this. (This might also interact
-- strangely with liveness information.)
--
--          a = ...;
--          case (b) of
--              1 -> ... a ...
--              2 -> ... a ...
--              3 -> ... a ...
--          // a is used one time, but in X different paths
--
-- This analysis is very similar to liveness analysis; we just keep a
-- little extra info. (Maybe we should move it to CmmLive, and subsume
-- the old liveness analysis.)
data RegUsage = SingleUse | ManyUse
    deriving (Ord, Eq, Show)
-- Absence in map = ZeroUse
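
-- A small worked example of the intended annotations (illustrative
-- pseudocode, not GHC output): the count is the maximum over all
-- outgoing paths, so one use in each of two branches still gives
-- SingleUse, while two uses on one path give ManyUse:
--
--      a = e;                  // SingleUse
--      case (b) of
--          1 -> ... a ...
--          2 -> ... a ...
--
--      c = e;                  // ManyUse
--      ... c ... c ...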
-- minBound is bottom, maxBound is top, least-upper-bound is max
-- ToDo: Put this in Hoopl. Note that this isn't as useful as I
-- originally hoped, because you usually want to leave out the bottom
-- element when you have things like this put in maps. Maybe f is
-- useful on its own as a combining function.
boundedOrdLattice :: (Bounded a, Ord a) => String -> DataflowLattice a
boundedOrdLattice n = DataflowLattice n minBound f
    where f _ (OldFact x) (NewFact y)
            | x >= y    = (NoChange,   x)
            | otherwise = (SomeChange, y)
-- Custom node type we'll rewrite to. CmmAssign nodes to local
-- registers are replaced with AssignLocal nodes.
data WithRegUsage n e x where
    -- Plain will not contain CmmAssign nodes immediately after
    -- transformation, but as we rewrite assignments, we may have
    -- assignments here: these are assignments that should not be
    -- optimized further.
    Plain       :: n e x -> WithRegUsage n e x
    AssignLocal :: LocalReg -> CmmExpr -> RegUsage -> WithRegUsage n O O
instance UserOfLocalRegs (n e x) => UserOfLocalRegs (WithRegUsage n e x) where
    foldRegsUsed f z (Plain n)           = foldRegsUsed f z n
    foldRegsUsed f z (AssignLocal _ e _) = foldRegsUsed f z e

instance DefinerOfLocalRegs (n e x) => DefinerOfLocalRegs (WithRegUsage n e x) where
    foldRegsDefd f z (Plain n)           = foldRegsDefd f z n
    foldRegsDefd f z (AssignLocal r _ _) = foldRegsDefd f z r

instance NonLocal n => NonLocal (WithRegUsage n) where
    entryLabel (Plain n) = entryLabel n
    successors (Plain n) = successors n
liftRegUsage :: Graph n e x -> Graph (WithRegUsage n) e x
liftRegUsage = mapGraph Plain

eraseRegUsage :: Graph (WithRegUsage CmmNode) e x -> Graph CmmNode e x
eraseRegUsage = mapGraph f
    where f :: WithRegUsage CmmNode e x -> CmmNode e x
          f (AssignLocal l e _) = CmmAssign (CmmLocal l) e
          f (Plain n) = n
type UsageMap = UniqFM RegUsage

usageLattice :: DataflowLattice UsageMap
usageLattice = DataflowLattice "usage counts for registers" emptyUFM (joinUFM f)
    where f _ (OldFact x) (NewFact y)
            | x >= y    = (NoChange,   x)
            | otherwise = (SomeChange, y)
-- We reuse the names 'gen' and 'kill', although we're doing something
-- slightly different from the Dragon Book
usageTransfer :: BwdTransfer (WithRegUsage CmmNode) UsageMap
usageTransfer = mkBTransfer3 first middle last
    where first _ f = f
          middle :: WithRegUsage CmmNode O O -> UsageMap -> UsageMap
          middle n f = gen_kill n f
          last :: WithRegUsage CmmNode O C -> FactBase UsageMap -> UsageMap
          -- Checking for CmmCall/CmmForeignCall is unnecessary, because
          -- spills/reloads have already occurred by the time we do this
          -- analysis.
          -- XXX Deprecated warning is puzzling: what label are we
          -- supposed to use?
          -- ToDo: With a bit more cleverness here, we can avoid
          -- disappointment and heartbreak associated with the inability
          -- to inline into CmmCall and CmmForeignCall by
          -- over-estimating the usage to be ManyUse.
          last n f = gen_kill n (joinOutFacts usageLattice n f)
          gen_kill a = gen a . kill a
          gen  a f = foldRegsUsed increaseUsage f a
          kill a f = foldRegsDefd delFromUFM    f a
          increaseUsage f r = addToUFM_C combine f r SingleUse
            where combine _ _ = ManyUse
usageRewrite :: BwdRewrite FuelUniqSM (WithRegUsage CmmNode) UsageMap
usageRewrite = mkBRewrite3 first middle last
    where first  _ _ = return Nothing
          middle :: Monad m => WithRegUsage CmmNode O O -> UsageMap -> m (Maybe (Graph (WithRegUsage CmmNode) O O))
          middle (Plain (CmmAssign (CmmLocal l) e)) f
                     = return . Just
                     $ case lookupUFM f l of
                            Nothing    -> emptyGraph
                            Just usage -> mkMiddle (AssignLocal l e usage)
          middle _ _ = return Nothing
          last   _ _ = return Nothing
type CmmGraphWithRegUsage = GenCmmGraph (WithRegUsage CmmNode)
annotateUsage :: CmmGraph -> FuelUniqSM CmmGraphWithRegUsage
annotateUsage vanilla_g =
    let g = modifyGraph liftRegUsage vanilla_g
    in liftM fst $ dataflowPassBwd g [(g_entry g, fact_bot usageLattice)] $
                   analRewBwd usageLattice usageTransfer usageRewrite
----------------------------------------------------------------
--- Assignment tracking

-- The idea is to maintain a map of local registers to expressions,
-- such that the value of that register is the same as the value of that
-- expression at any given time. We can then do several things,
-- as described by Assignment.
-- Assignment describes the various optimizations that are valid
-- at a given point in the program.
data Assignment =
-- This assignment can always be inlined. It is cheap or single-use.
                  AlwaysInline CmmExpr
-- This assignment should be sunk down to its first use. (This will
-- increase code size if the register is used in multiple control flow
-- paths, but won't increase execution time, and the reduction of
-- register pressure is worth it.)
                | AlwaysSink CmmExpr
-- We cannot safely optimize occurrences of this local register. (This
-- corresponds to top in the lattice structure.)
                | NeverOptimize
-- Extract the expression that is being assigned to
xassign :: Assignment -> Maybe CmmExpr
xassign (AlwaysInline e) = Just e
xassign (AlwaysSink e)   = Just e
xassign NeverOptimize    = Nothing

-- Extract both expressions, but only if they were built with the same
-- constructor
xassign2 :: (Assignment, Assignment) -> Maybe (CmmExpr, CmmExpr)
xassign2 (AlwaysInline e, AlwaysInline e') = Just (e, e')
xassign2 (AlwaysSink e,   AlwaysSink e')   = Just (e, e')
xassign2 _ = Nothing
-- Note: We'd like to make decisions about "not optimizing" as soon as
-- possible, because this will make running the transfer function more
-- efficient.
type AssignmentMap = UniqFM Assignment

assignmentLattice :: DataflowLattice AssignmentMap
assignmentLattice = DataflowLattice "assignments for registers" emptyUFM (joinUFM add)
    where add _ (OldFact old) (NewFact new)
            = case (old, new) of
                (NeverOptimize, _) -> (NoChange,   NeverOptimize)
                (_, NeverOptimize) -> (SomeChange, NeverOptimize)
                (xassign2 -> Just (e, e'))
                    | e == e'   -> (NoChange, old)
                    | otherwise -> (SomeChange, NeverOptimize)
                _ -> (SomeChange, NeverOptimize)
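
-- Sketch of the join behaviour (illustrative expressions): facts for
-- the same register are compatible only when they agree on both the
-- policy and the expression; anything else collapses to top:
--
--      AlwaysInline (x+1) `join` AlwaysInline (x+1)  ==>  AlwaysInline (x+1)
--      AlwaysInline (x+1) `join` AlwaysInline (y)    ==>  NeverOptimize
--      AlwaysInline (x+1) `join` AlwaysSink   (x+1)  ==>  NeverOptimize
--      anything           `join` NeverOptimize      ==>  NeverOptimize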
-- Deletes sinks from the assignment map, because /this/ node is the
-- place where they will be sunk to.
deleteSinks :: UserOfLocalRegs n => n -> AssignmentMap -> AssignmentMap
deleteSinks n m = foldRegsUsed (adjustUFM f) m n
    where f (AlwaysSink _) = NeverOptimize
          f old = old
-- Invalidates any assignments whose expressions use a register.
invalidateUsersOf :: CmmReg -> AssignmentMap -> AssignmentMap
-- foldUFM_Directly :: (Unique -> elt -> a -> a) -> a -> UniqFM elt -> a
invalidateUsersOf reg m = foldUFM_Directly f m m -- [foldUFM performance]
    where f u (xassign -> Just e) m | reg `regUsedIn` e = addToUFM_Directly m u NeverOptimize
          f _ _ m = m

{- This requires the entire spine of the map to be continually rebuilt,
 - which causes crazy memory usage!
invalidateUsersOf reg = mapUFM (invalidateUsers' reg)
  where invalidateUsers' reg (xassign -> Just e) | reg `regUsedIn` e = NeverOptimize
        invalidateUsers' _ old = old
-}
-- Note [foldUFM performance]
-- These calls to fold UFM no longer leak memory, but they do cause
-- pretty killer amounts of allocation. So they'll be something to
-- optimize; we need an algorithmic change to prevent us from having to
-- traverse the /entire/ map continually.
middleAssignment :: WithRegUsage CmmNode O O -> AssignmentMap -> AssignmentMap

-- Algorithm for annotated assignments:
--  1. Delete any sinking assignments that were used by this instruction
--  2. Add the assignment to our list of valid local assignments with
--     the correct optimization policy.
--  3. Look for all assignments that reference that register and
--     invalidate them.
middleAssignment n@(AssignLocal r e usage) assign
    = invalidateUsersOf (CmmLocal r) . add . deleteSinks n $ assign
      where add m = addToUFM m r
                  $ case usage of
                        SingleUse -> AlwaysInline e
                        ManyUse   -> decide e
            decide CmmLit{}       = AlwaysInline e
            decide CmmReg{}       = AlwaysInline e
            decide CmmLoad{}      = AlwaysSink e
            decide CmmStackSlot{} = AlwaysSink e
            decide CmmMachOp{}    = AlwaysSink e
            -- We'll always inline simple operations on the global
            -- registers, to reduce register pressure: Sp - 4 or Hp - 8
            -- EZY: Justify this optimization more carefully.
            decide CmmRegOff{}    = AlwaysInline e
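            -- Policy sketch (illustrative right-hand sides): with a
            -- ManyUse annotation,
            --      r = 42          (CmmLit)    => AlwaysInline
            --      r = I32[Sp+4]   (CmmLoad)   => AlwaysSink
            --      r = Sp - 4      (CmmRegOff) => AlwaysInline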
-- Algorithm for unannotated assignments of global registers:
--  1. Delete any sinking assignments that were used by this instruction
--  2. Look for all assignments that reference this register and
--     invalidate them.
middleAssignment (Plain n@(CmmAssign reg@(CmmGlobal _) _)) assign
    = invalidateUsersOf reg . deleteSinks n $ assign

-- Algorithm for unannotated assignments of *local* registers: do
-- nothing (it's a reload, so no state should have changed)
middleAssignment (Plain (CmmAssign (CmmLocal _) _)) assign = assign
-- Algorithm for stores:
--  1. Delete any sinking assignments that were used by this instruction
--  2. Look for all assignments that load from memory locations that
--     were clobbered by this store and invalidate them.
middleAssignment (Plain n@(CmmStore lhs rhs)) assign
    = let m = deleteSinks n assign
      in foldUFM_Directly f m m -- [foldUFM performance]
    where f u (xassign -> Just x) m | (lhs, rhs) `clobbers` (u, x) = addToUFM_Directly m u NeverOptimize
          f _ _ m = m
{- Also leaky:
    = mapUFM_Directly p . deleteSinks n $ assign
      -- ToDo: There's a missed opportunity here: even if a memory
      -- access we're attempting to sink gets clobbered at some
      -- location, it's still /better/ to sink it to right before the
      -- point where it gets clobbered. How might we do this?
      -- Unfortunately, it's too late to change the assignment...
    where p r (xassign -> Just x) | (lhs, rhs) `clobbers` (r, x) = NeverOptimize
          p _ old = old
-}
-- Assumption: Unsafe foreign calls don't clobber memory
-- Since foreign calls clobber caller-saved registers, we need to
-- invalidate any assignments that reference those global registers.
-- This is kind of expensive. (One way to optimize this might be to
-- store extra information about expressions that allow this and other
-- checks to be done cheaply.)
middleAssignment (Plain n@(CmmUnsafeForeignCall{})) assign
    = deleteCallerSaves (foldRegsDefd (\m r -> addToUFM m r NeverOptimize) (deleteSinks n assign) n)
    where deleteCallerSaves m = foldUFM_Directly f m m
          f u (xassign -> Just x) m | wrapRecExpf g x False = addToUFM_Directly m u NeverOptimize
          f _ _ m = m
          g (CmmReg    (CmmGlobal r))   _ | callerSaves r = True
          g (CmmRegOff (CmmGlobal r) _) _ | callerSaves r = True
          g _ b = b

middleAssignment (Plain (CmmComment {})) assign
    = assign
-- Assumptions about memory clobbering:
--  * Writes using Hp do not overlap with any other memory locations
--    (An important invariant being relied on here is that we only ever
--    use Hp to allocate values on the heap, which appears to be the
--    case given hpReg usage, and that our heap writing code doesn't
--    do anything stupid like overlapping writes.)
--  * Stack slots do not overlap with any other memory locations
--  * Stack slots for different areas do not overlap
--  * Stack slots within the same area and different offsets may
--    overlap; we need to do a size check (see 'overlaps').
--  * Register slots only overlap with themselves. (But this shouldn't
--    happen in practice, because we'll fail to inline a reload across
--    the next spill.)
--  * Non stack-slot stores always conflict with each other. (This is
--    not always the case; we could probably do something special for Hp)
clobbers :: (CmmExpr, CmmExpr) -- (lhs, rhs) of clobbering CmmStore
         -> (Unique,  CmmExpr) -- (register, expression) that may be clobbered
         -> Bool
clobbers (CmmRegOff (CmmGlobal Hp) _, _) (_, _) = False
clobbers (CmmReg    (CmmGlobal Hp),   _) (_, _) = False
-- ToDo: Also catch MachOp case
clobbers (ss@CmmStackSlot{}, CmmReg (CmmLocal r)) (u, CmmLoad (ss'@CmmStackSlot{}) _)
    | getUnique r == u, ss == ss' = False -- No-op on the stack slot (XXX: Do we need this special case?)
clobbers (CmmStackSlot (CallArea a) o, rhs) (_, expr) = f expr
    where f (CmmLoad (CmmStackSlot (CallArea a') o') t)
            = (a, o, widthInBytes (cmmExprWidth rhs)) `overlaps` (a', o', widthInBytes (typeWidth t))
          f (CmmLoad e _)    = containsStackSlot e
          f (CmmMachOp _ es) = or (map f es)
          f _                = False
          -- Maybe there's an invariant broken if this actually ever
          -- happens?
          containsStackSlot (CmmLoad{})      = True -- load of a load, all bets off
          containsStackSlot (CmmMachOp _ es) = or (map containsStackSlot es)
          containsStackSlot (CmmStackSlot{}) = True
          containsStackSlot _                = False
clobbers (CmmStackSlot (RegSlot l) _, _) (_, expr) = f expr
    where f (CmmLoad (CmmStackSlot (RegSlot l') _) _) = l == l'
          f _ = False
clobbers _ (_, e) = f e
    where f (CmmLoad (CmmStackSlot _ _) _) = False
          f (CmmLoad{})                    = True -- conservative
          f (CmmMachOp _ es)               = or (map f es)
          f _                              = False
-- Check for memory overlapping. A slot is identified by its area,
-- its high offset o, and its width w, so it occupies the byte range
-- (o - w, o].
type CallSubArea = (AreaId, Int, Int) -- area, offset, width
overlaps :: CallSubArea -> CallSubArea -> Bool
overlaps (a, _, _) (a', _, _) | a /= a' = False
overlaps (_, o, w) (_, o', w') =
    let s  = o  - w
        s' = o' - w'
    in (s' < o) && (s < o') -- Not LTE, because [ I32 ][ I32 ] is OK
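
-- Worked example (hypothetical area a and offsets): a 4-byte slot
-- ending at offset 8 overlaps an 8-byte slot ending at 12, but two
-- adjacent 4-byte slots ending at 8 and 12 do not:
--
--      overlaps (a, 8, 4) (a, 12, 8)  ==  True   -- (4,8] meets (4,12]
--      overlaps (a, 8, 4) (a, 12, 4)  ==  False  -- (4,8] vs (8,12]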
lastAssignment :: WithRegUsage CmmNode O C -> AssignmentMap -> [(Label, AssignmentMap)]
-- Variables are dead across calls, so invalidating all mappings is justified
lastAssignment (Plain (CmmCall _ (Just k) _ _ _)) assign = [(k, mapUFM (const NeverOptimize) assign)]
lastAssignment (Plain (CmmForeignCall {succ=k}))  assign = [(k, mapUFM (const NeverOptimize) assign)]
lastAssignment l assign = map (\id -> (id, deleteSinks l assign)) $ successors l

assignmentTransfer :: FwdTransfer (WithRegUsage CmmNode) AssignmentMap
assignmentTransfer = mkFTransfer3 (flip const) middleAssignment ((mkFactBase assignmentLattice .) . lastAssignment)
assignmentRewrite :: FwdRewrite FuelUniqSM (WithRegUsage CmmNode) AssignmentMap
assignmentRewrite = mkFRewrite3 first middle last
    where
        first _ _ = return Nothing
        middle :: WithRegUsage CmmNode O O -> AssignmentMap -> GenCmmReplGraph (WithRegUsage CmmNode) O O
        middle (Plain m) assign = return $ rewrite assign (precompute assign m) mkMiddle m
        middle (AssignLocal l e u) assign = return $ rewriteLocal assign (precompute assign (CmmAssign (CmmLocal l) e)) mkMiddle l e u
        last (Plain l) assign = return $ rewrite assign (precompute assign l) mkLast l
        -- Tuple is (inline?, reloads)
        precompute assign n = foldRegsUsed f (False, []) n -- duplicates are harmless
            where f (i, l) r = case lookupUFM assign r of
                                Just (AlwaysSink e)   -> (i, (Plain (CmmAssign (CmmLocal r) e)):l)
                                Just (AlwaysInline _) -> (True, l)
                                Just NeverOptimize    -> (i, l)
                                -- This case can show up when we have
                                -- limited optimization fuel.
                                Nothing -> (i, l)
        rewrite _ (False, []) _ _ = Nothing
        -- Note [CmmCall Inline Hack]
        -- Conservative hack: don't do any inlining on what will
        -- be translated into OldCmm CmmCalls, since the code
        -- produced here tends to be unproblematic and I need to write
        -- lint passes to ensure that we don't put anything in the
        -- arguments that could be construed as a global register by
        -- some later translation pass. (For example, slots will turn
        -- into dereferences of Sp). See [Register parameter passing].
        -- ToDo: Fix this up to only bug out if all inlines were for
        -- CmmExprs with global registers (we can't use the
        -- straightforward mapExpDeep call, in this case.) ToDo: We miss
        -- an opportunity here, where all possible inlinings should
        -- be attempted.
        rewrite _ (True, []) _ n | not (inlinable n) = Nothing -- see [CmmCall Inline Hack]
        rewrite assign (i, xs) mk n = Just $ mkMiddles xs <*> mk (Plain (inline i assign n))

        rewriteLocal _ (False, []) _ _ _ _ = Nothing
        rewriteLocal assign (i, xs) mk l e u = Just $ mkMiddles xs <*> mk n'
            where n' = AssignLocal l e' u
                  e' = if i then wrapRecExp (inlineExp assign) e else e
            -- inlinable check omitted, since we can always inline into
            -- assignments.
        inline :: Bool -> AssignmentMap -> CmmNode e x -> CmmNode e x
        inline False _      n = n
        inline True  _      n | not (inlinable n) = n -- see [CmmCall Inline Hack]
        inline True  assign n = mapExpDeep (inlineExp assign) n

        inlineExp assign old@(CmmReg (CmmLocal r))
          = case lookupUFM assign r of
              Just (AlwaysInline x) -> x
              _ -> old
        inlineExp assign old@(CmmRegOff (CmmLocal r) i)
          = case lookupUFM assign r of
              Just (AlwaysInline x) ->
                case x of
                  (CmmRegOff r' i') -> CmmRegOff r' (i + i')
                  _ -> CmmMachOp (MO_Add rep) [x, CmmLit (CmmInt (fromIntegral i) rep)]
                        where rep = typeWidth (localRegType r)
              _ -> old
        inlineExp _ old = old
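        -- Example (illustrative): if the map says r -> AlwaysInline (Sp - 4),
        -- then  CmmRegOff (CmmLocal r) 8  folds directly to  CmmRegOff Sp 4,
        -- avoiding an MO_Add; any other inlined expression falls back to
        -- the explicit MO_Add above.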
        inlinable :: CmmNode e x -> Bool
        inlinable (CmmCall{})              = False
        inlinable (CmmForeignCall{})       = False
        inlinable (CmmUnsafeForeignCall{}) = False
        inlinable _                        = True
rewriteAssignments :: CmmGraph -> FuelUniqSM CmmGraph
rewriteAssignments g = do
  g'  <- annotateUsage g
  g'' <- liftM fst $ dataflowPassFwd g' [(g_entry g, fact_bot assignmentLattice)] $
                     analRewFwd assignmentLattice assignmentTransfer assignmentRewrite
  return (modifyGraph eraseRegUsage g'')
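
-- A sketch of the intended pass ordering (assumed from the overview
-- note above; the driver name 'optimizeSpills' is hypothetical, not
-- part of this module): spill/reload insertion runs first, then this
-- forward rewrite sinks and inlines, then dead code is swept up:
--
--      optimizeSpills :: BlockSet -> CmmGraph -> FuelUniqSM CmmGraph
--      optimizeSpills procPoints g =
--            dualLivenessWithInsertion procPoints g
--        >>= rewriteAssignments
--        >>= removeDeadAssignmentsAndReloads procPoints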
---------------------
-- prettyprinting

ppr_regs :: String -> RegSet -> SDoc
ppr_regs s regs = text s <+> commafy (map ppr $ uniqSetToList regs)
  where commafy xs = hsep $ punctuate comma xs
instance Outputable DualLive where
  ppr (DualLive {in_regs = regs, on_stack = stack}) =
      if isEmptyUniqSet regs && isEmptyUniqSet stack then
          text "<nothing-live>"
      else
          nest 2 $ fsep [if isEmptyUniqSet regs then PP.empty
                         else (ppr_regs "live in regs =" regs),
                         if isEmptyUniqSet stack then PP.empty
                         else (ppr_regs "live on stack =" stack)]
-- ToDo: Outputable instance for UsageMap and AssignmentMap

my_trace :: String -> SDoc -> a -> a
my_trace = if False then pprTrace else \_ _ a -> a

f4sep :: [SDoc] -> SDoc
f4sep [] = fsep []
f4sep (d:ds) = fsep (d : map (nest 4) ds)