{-# LANGUAGE ViewPatterns #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE FlexibleContexts #-}

{-# OPTIONS_GHC -fno-warn-warnings-deprecations #-}
module CmmRewriteAssignments
  ( rewriteAssignments
  ) where
import Cmm
import CmmExpr
import CmmUtils
import CmmOpt
import OptimizationFuel
import StgCmmUtils

import Control.Monad
import UniqFM
import Unique

import Compiler.Hoopl hiding (Unique)
import Prelude hiding (succ, zip)
----------------------------------------------------------------
--- Usage information
-- We decorate all register assignments with usage information,
-- that is, the maximum number of times the register is referenced
-- while it is live along all outgoing control paths. There are a few
-- subtleties here:
--
--  - If a register goes dead, and then becomes live again, the usages
--    of the disjoint live range don't count towards the original range.
--
--      a = 1; // used once
--      b = a;
--      a = 2; // used once
--      c = a;
--
--  - A register may be used multiple times, but these all reside in
--    different control paths, such that any given execution only uses
--    it once. In that case, the usage count may still be 1.
--
--      a = 1; // used once
--      if (b) {
--          c = a + 1;
--      } else {
--          c = a - 1;
--      }
--
--    This policy corresponds to an inlining strategy that does not
--    duplicate computation but may increase binary size.
--  - If we naively implement a usage count, we have a counting to
--    infinity problem across joins. Furthermore, knowing that
--    something is used 2 or more times in one runtime execution isn't
--    particularly useful for optimizations (inlining may be beneficial,
--    but there's no way of knowing that without register pressure
--    information.)
--
--      while (...) {
--          // first iteration, b used once
--          // second iteration, b used twice
--          // third iteration ...
--          a = b;
--      }
--      // b used zero times
--
--  - There is an orthogonal question, which is that for every runtime
--    execution, the register may be used only once, but if we inline it
--    in every conditional path, the binary size might increase a lot.
--    But tracking this information would be tricky, because it violates
--    the finite lattice restriction Hoopl requires for termination;
--    we'd thus need to supply an alternate proof, which is probably
--    something we should defer until we actually have an optimization
--    that would take advantage of this. (This might also interact
--    strangely with liveness information.)
--
--      a = ...;
--      // a is used one time, but in X different paths
--      case (b) of
--          1 -> ... a ...
--          2 -> ... a ...
--          3 -> ... a ...
--          ...
--
-- This analysis is very similar to liveness analysis; we just keep a
-- little extra info. (Maybe we should move it to CmmLive, and subsume
-- the old liveness analysis.)

data RegUsage = SingleUse | ManyUse
    deriving (Ord, Eq, Show)
-- Absence in map = ZeroUse
-- minBound is bottom, maxBound is top, least-upper-bound is max.
-- ToDo: Put this in Hoopl. Note that this isn't as useful as I
-- originally hoped, because you usually want to leave out the bottom
-- element when you have things like this put in maps. Maybe f is
-- useful on its own as a combining function.
boundedOrdLattice :: (Bounded a, Ord a) => String -> DataflowLattice a
boundedOrdLattice n = DataflowLattice n minBound f
    where f _ (OldFact x) (NewFact y)
            | x >= y    = (NoChange,   x)
            | otherwise = (SomeChange, y)
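
-- A usage sketch (illustrative, not from the original module): if
-- RegUsage also derived Bounded, with SingleUse as minBound and ManyUse
-- as maxBound, then
--
--   regUsageLattice :: DataflowLattice RegUsage
--   regUsageLattice = boundedOrdLattice "per-register usage"
--
-- would have bottom = SingleUse and join = max, which is exactly the
-- per-register behaviour of 'usageLattice' below (minus the
-- ZeroUse-by-absence convention that only the map lattice can express).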
-- Custom node type we'll rewrite to. CmmAssign nodes to local
-- registers are replaced with AssignLocal nodes.
data WithRegUsage n e x where
    -- Plain will not contain CmmAssign nodes immediately after
    -- transformation, but as we rewrite assignments, we may have
    -- assignments here: these are assignments that should not be
    -- further optimized.
    Plain       :: n e x -> WithRegUsage n e x
    AssignLocal :: LocalReg -> CmmExpr -> RegUsage -> WithRegUsage n O O
instance UserOfLocalRegs (n e x) => UserOfLocalRegs (WithRegUsage n e x) where
    foldRegsUsed f z (Plain n) = foldRegsUsed f z n
    foldRegsUsed f z (AssignLocal _ e _) = foldRegsUsed f z e

instance DefinerOfLocalRegs (n e x) => DefinerOfLocalRegs (WithRegUsage n e x) where
    foldRegsDefd f z (Plain n) = foldRegsDefd f z n
    foldRegsDefd f z (AssignLocal r _ _) = foldRegsDefd f z r

instance NonLocal n => NonLocal (WithRegUsage n) where
    entryLabel (Plain n) = entryLabel n
    successors (Plain n) = successors n
liftRegUsage :: Graph n e x -> Graph (WithRegUsage n) e x
liftRegUsage = mapGraph Plain

eraseRegUsage :: Graph (WithRegUsage CmmNode) e x -> Graph CmmNode e x
eraseRegUsage = mapGraph f
    where f :: WithRegUsage CmmNode e x -> CmmNode e x
          f (AssignLocal l e _) = CmmAssign (CmmLocal l) e
          f (Plain n) = n
type UsageMap = UniqFM RegUsage

usageLattice :: DataflowLattice UsageMap
usageLattice = DataflowLattice "usage counts for registers" emptyUFM (joinUFM f)
    where f _ (OldFact x) (NewFact y)
            | x >= y    = (NoChange,   x)
            | otherwise = (SomeChange, y)
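
-- For example (illustrative): joining {r -> SingleUse} from one
-- control-flow edge with {r -> ManyUse} from another yields
-- {r -> ManyUse}, since the per-register join is max. A register absent
-- from both maps stays absent, i.e. ZeroUse, the bottom of the lattice
-- (emptyUFM).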
-- We reuse the names 'gen' and 'kill', although we're doing something
-- slightly different from the Dragon Book
usageTransfer :: BwdTransfer (WithRegUsage CmmNode) UsageMap
usageTransfer = mkBTransfer3 first middle last
    where first _ f = f
          middle :: WithRegUsage CmmNode O O -> UsageMap -> UsageMap
          middle n f = gen_kill n f
          last :: WithRegUsage CmmNode O C -> FactBase UsageMap -> UsageMap
          -- Checking for CmmCall/CmmForeignCall is unnecessary, because
          -- spills/reloads have already occurred by the time we do this
          -- analysis.
          -- XXX Deprecated warning is puzzling: what label are we
          -- supposed to use?
          -- ToDo: With a bit more cleverness here, we can avoid
          -- disappointment and heartbreak associated with the inability
          -- to inline into CmmCall and CmmForeignCall by
          -- over-estimating the usage to be ManyUse.
          last n f = gen_kill n (joinOutFacts usageLattice n f)
          gen_kill :: WithRegUsage CmmNode e x -> UsageMap -> UsageMap
          gen_kill a = gen a . kill a
          gen :: WithRegUsage CmmNode e x -> UsageMap -> UsageMap
          gen a f = foldRegsUsed increaseUsage f a
          kill :: WithRegUsage CmmNode e x -> UsageMap -> UsageMap
          kill a f = foldRegsDefd delFromUFM f a
          increaseUsage f r = addToUFM_C combine f r SingleUse
              where combine _ _ = ManyUse
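
-- Worked example (illustrative): transferring backwards across
--
--   x = y + y
--
-- 'kill' first deletes x from the usage map (walking backwards, this is
-- where its live range ends), and 'gen' then visits both uses of y: the
-- first 'increaseUsage' inserts SingleUse, the second goes through
-- addToUFM_C's combining function and upgrades it to ManyUse.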
usageRewrite :: BwdRewrite FuelUniqSM (WithRegUsage CmmNode) UsageMap
usageRewrite = mkBRewrite3 first middle last
    where first _ _ = return Nothing
          middle :: Monad m => WithRegUsage CmmNode O O -> UsageMap -> m (Maybe (Graph (WithRegUsage CmmNode) O O))
          middle (Plain (CmmAssign (CmmLocal l) e)) f
                     = return . Just
                     $ case lookupUFM f l of
                            Nothing    -> emptyGraph
                            Just usage -> mkMiddle (AssignLocal l e usage)
          middle _ _ = return Nothing
          last _ _ = return Nothing
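
-- Note (illustrative): this rewrite doubles as dead-assignment
-- elimination. If 'l' is absent from the usage map (ZeroUse), the
-- assignment is rewritten to 'emptyGraph' and vanishes; otherwise it is
-- re-tagged as an AssignLocal node carrying its usage count, which the
-- forward assignment-rewriting pass below consumes.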
type CmmGraphWithRegUsage = GenCmmGraph (WithRegUsage CmmNode)

annotateUsage :: CmmGraph -> FuelUniqSM CmmGraphWithRegUsage
annotateUsage vanilla_g =
    let g = modifyGraph liftRegUsage vanilla_g
    in liftM fst $ dataflowPassBwd g [(g_entry g, fact_bot usageLattice)] $
                   analRewBwd usageLattice usageTransfer usageRewrite
----------------------------------------------------------------
--- Assignment tracking

-- The idea is to maintain a map of local registers to expressions,
-- such that the value of that register is the same as the value of that
-- expression at any given time. We can then do several things,
-- as described by Assignment.
-- Assignment describes the various optimizations that are valid
-- at a given point in the program.
data Assignment
-- This assignment can always be inlined. It is cheap or single-use.
    = AlwaysInline CmmExpr
-- This assignment should be sunk down to its first use. (This will
-- increase code size if the register is used in multiple control flow
-- paths, but won't increase execution time, and the reduction of
-- register pressure is worth it.)
    | AlwaysSink CmmExpr
-- We cannot safely optimize occurrences of this local register. (This
-- corresponds to top in the lattice structure.)
    | NeverOptimize
-- Extract the expression that is being assigned
xassign :: Assignment -> Maybe CmmExpr
xassign (AlwaysInline e) = Just e
xassign (AlwaysSink e)   = Just e
xassign NeverOptimize    = Nothing

-- Extract the expressions, but only if both assignments use the same
-- constructor
xassign2 :: (Assignment, Assignment) -> Maybe (CmmExpr, CmmExpr)
xassign2 (AlwaysInline e, AlwaysInline e') = Just (e, e')
xassign2 (AlwaysSink e, AlwaysSink e')     = Just (e, e')
xassign2 _ = Nothing
-- Note: We'd like to make decisions about "not optimizing" as soon as
-- possible, because this will make running the transfer function more
-- efficient.
type AssignmentMap = UniqFM Assignment

assignmentLattice :: DataflowLattice AssignmentMap
assignmentLattice = DataflowLattice "assignments for registers" emptyUFM (joinUFM add)
    where add _ (OldFact old) (NewFact new)
            = case (old, new) of
                (NeverOptimize, _) -> (NoChange,   NeverOptimize)
                (_, NeverOptimize) -> (SomeChange, NeverOptimize)
                (xassign2 -> Just (e, e'))
                    | e == e'   -> (NoChange, old)
                    | otherwise -> (SomeChange, NeverOptimize)
                _ -> (SomeChange, NeverOptimize)
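
-- Worked example (illustrative) of the join for one register:
--
--   AlwaysInline e `join` AlwaysInline e   ==> (NoChange,   AlwaysInline e)
--   AlwaysInline e `join` AlwaysInline e'  ==> (SomeChange, NeverOptimize)  -- e /= e'
--   AlwaysInline e `join` AlwaysSink e     ==> (SomeChange, NeverOptimize)  -- mixed constructors
--
-- A register keeps a usable fact only if every incoming path agrees on
-- both the policy and the expression.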
-- Deletes sinks from assignment map, because /this/ is the place
-- where it will be sunk to.
deleteSinks :: UserOfLocalRegs n => n -> AssignmentMap -> AssignmentMap
deleteSinks n m = foldRegsUsed (adjustUFM f) m n
    where f (AlwaysSink _) = NeverOptimize
          f old = old
-- Invalidates any expressions that use a register.
invalidateUsersOf :: CmmReg -> AssignmentMap -> AssignmentMap
-- foldUFM_Directly :: (Unique -> elt -> a -> a) -> a -> UniqFM elt -> a
invalidateUsersOf reg m = foldUFM_Directly f m m -- [foldUFM performance]
    where f u (xassign -> Just e) m | reg `regUsedIn` e = addToUFM_Directly m u NeverOptimize
          f _ _ m = m
{- This requires the entire spine of the map to be continually rebuilt,
 - which causes crazy memory usage!
invalidateUsersOf reg = mapUFM (invalidateUsers' reg)
  where invalidateUsers' reg (xassign -> Just e) | reg `regUsedIn` e = NeverOptimize
        invalidateUsers' _ old = old
-}
-- Note [foldUFM performance]
-- These calls to foldUFM no longer leak memory, but they do cause
-- pretty killer amounts of allocation. So they'll be something to
-- optimize; we need an algorithmic change to prevent us from having to
-- traverse the /entire/ map continually.

middleAssignment :: WithRegUsage CmmNode O O -> AssignmentMap -> AssignmentMap
-- Algorithm for annotated assignments:
--  1. Delete any sinking assignments that were used by this instruction
--  2. Add the assignment to our list of valid local assignments with
--     the correct optimization policy.
--  3. Look for all assignments that reference that register and
--     invalidate them.
middleAssignment n@(AssignLocal r e usage) assign
    = invalidateUsersOf (CmmLocal r) . add . deleteSinks n $ assign
      where add m = addToUFM m r
                  $ case usage of
                        SingleUse -> AlwaysInline e
                        ManyUse   -> decide e
            decide CmmLit{}       = AlwaysInline e
            decide CmmReg{}       = AlwaysInline e
            decide CmmLoad{}      = AlwaysSink e
            decide CmmStackSlot{} = AlwaysSink e
            decide CmmMachOp{}    = AlwaysSink e
            -- We'll always inline simple operations on the global
            -- registers, to reduce register pressure: Sp - 4 or Hp - 8
            -- EZY: Justify this optimization more carefully.
            decide CmmRegOff{}    = AlwaysInline e
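
-- Policy example (illustrative): an annotated assignment
--
--   x = y + 1   -- a CmmMachOp, annotated ManyUse
--
-- gets AlwaysSink: inlining it at every use would duplicate the
-- addition, but sinking it to its first use is always safe. The same
-- assignment annotated SingleUse would get AlwaysInline.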
-- Algorithm for unannotated assignments of global registers:
--  1. Delete any sinking assignments that were used by this instruction
--  2. Look for all assignments that reference this register and
--     invalidate them.
middleAssignment (Plain n@(CmmAssign reg@(CmmGlobal _) _)) assign
    = invalidateUsersOf reg . deleteSinks n $ assign

-- Algorithm for unannotated assignments of *local* registers: do
-- nothing (it's a reload, so no state should have changed)
middleAssignment (Plain (CmmAssign (CmmLocal _) _)) assign = assign
-- Algorithm for stores:
--  1. Delete any sinking assignments that were used by this instruction
--  2. Look for all assignments that load from memory locations that
--     were clobbered by this store and invalidate them.
middleAssignment (Plain n@(CmmStore lhs rhs)) assign
    = let m = deleteSinks n assign
      in foldUFM_Directly f m m -- [foldUFM performance]
    where f u (xassign -> Just x) m | (lhs, rhs) `clobbers` (u, x) = addToUFM_Directly m u NeverOptimize
          f _ _ m = m
{- Also leaky:
    = mapUFM_Directly p . deleteSinks n $ assign
      -- ToDo: There's a missed opportunity here: even if a memory
      -- access we're attempting to sink gets clobbered at some
      -- location, it's still /better/ to sink it to right before the
      -- point where it gets clobbered. How might we do this?
      -- Unfortunately, it's too late to change the assignment...
      where p r (xassign -> Just x) | (lhs, rhs) `clobbers` (r, x) = NeverOptimize
            p _ old = old
-}
-- Assumption: Unsafe foreign calls don't clobber memory
-- Since foreign calls clobber caller-saved registers, we need to
-- invalidate any assignments that reference those global registers.
-- This is kind of expensive. (One way to optimize this might be to
-- store extra information about expressions that allow this and other
-- checks to be done cheaply.)
middleAssignment (Plain n@(CmmUnsafeForeignCall{})) assign
    = deleteCallerSaves (foldRegsDefd (\m r -> addToUFM m r NeverOptimize) (deleteSinks n assign) n)
    where deleteCallerSaves m = foldUFM_Directly f m m
          f u (xassign -> Just x) m | wrapRecExpf g x False = addToUFM_Directly m u NeverOptimize
          f _ _ m = m
          g (CmmReg (CmmGlobal r)) _ | callerSaves r = True
          g (CmmRegOff (CmmGlobal r) _) _ | callerSaves r = True
          g _ b = b
middleAssignment (Plain (CmmComment {})) assign
    = assign
-- Assumptions:
--  * Writes using Hp do not overlap with any other memory locations
--    (An important invariant being relied on here is that we only ever
--    use Hp to allocate values on the heap, which appears to be the
--    case given hpReg usage, and that our heap writing code doesn't
--    do anything stupid like overlapping writes.)
--  * Stack slots do not overlap with any other memory locations
--  * Stack slots for different areas do not overlap
--  * Stack slots within the same area and different offsets may
--    overlap; we need to do a size check (see 'overlaps').
--  * Register slots only overlap with themselves. (But this shouldn't
--    happen in practice, because we'll fail to inline a reload across
--    the next spill.)
--  * Non stack-slot stores always conflict with each other. (This is
--    not always the case; we could probably do something special for Hp)
clobbers :: (CmmExpr, CmmExpr) -- (lhs, rhs) of clobbering CmmStore
         -> (Unique,  CmmExpr) -- (register, expression) that may be clobbered
         -> Bool
clobbers (CmmRegOff (CmmGlobal Hp) _, _) (_, _) = False
clobbers (CmmReg (CmmGlobal Hp), _) (_, _) = False
-- ToDo: Also catch MachOp case
clobbers (ss@CmmStackSlot{}, CmmReg (CmmLocal r)) (u, CmmLoad (ss'@CmmStackSlot{}) _)
    | getUnique r == u, ss == ss' = False -- No-op on the stack slot (XXX: Do we need this special case?)
clobbers (CmmStackSlot (CallArea a) o, rhs) (_, expr) = f expr
    where f (CmmLoad (CmmStackSlot (CallArea a') o') t)
            = (a, o, widthInBytes (cmmExprWidth rhs)) `overlaps` (a', o', widthInBytes (typeWidth t))
          f (CmmLoad e _)    = containsStackSlot e
          f (CmmMachOp _ es) = or (map f es)
          f _                = False
          -- Maybe there's an invariant broken if this actually ever
          -- returns True
          containsStackSlot (CmmLoad{})      = True -- load of a load, all bets off
          containsStackSlot (CmmMachOp _ es) = or (map containsStackSlot es)
          containsStackSlot (CmmStackSlot{}) = True
          containsStackSlot _                = False
clobbers (CmmStackSlot (RegSlot l) _, _) (_, expr) = f expr
    where f (CmmLoad (CmmStackSlot (RegSlot l') _) _) = l == l'
          f _ = False
clobbers _ (_, e) = f e
    where f (CmmLoad (CmmStackSlot _ _) _) = False
          f (CmmLoad{}) = True -- conservative
          f (CmmMachOp _ es) = or (map f es)
          f _ = False
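
-- Illustrative consequences of the assumptions above:
--
--   clobbers (CmmReg (CmmGlobal Hp), rhs) (u, e)
--     == False   -- Hp writes are assumed not to overlap anything
--   clobbers (CmmStackSlot (RegSlot l) o, rhs)
--            (u, CmmLoad (CmmStackSlot (RegSlot l) o') t)
--     == True    -- a register slot clobbers (only) loads from itself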
-- Check for memory overlapping.
-- Diagram (the offset is the *high* end of a slot; subtracting the
-- width gives its start address):
--
--      s -w-  o
--      [ I32  ]
--      [    F64     ]
--      s'   -w'-    o'
type CallSubArea = (AreaId, Int, Int) -- area, offset, width
overlaps :: CallSubArea -> CallSubArea -> Bool
overlaps (a, _, _) (a', _, _) | a /= a' = False
overlaps (_, o, w) (_, o', w') =
    let s  = o  - w
        s' = o' - w'
    in (s' < o) && (s < o') -- Not LTE, because [ I32 ][ I32 ] is OK
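
-- Worked example (illustrative, byte offsets within one area):
--
--   overlaps (a, 8, 4) (a, 12, 8)    -- spans [4,8) and [4,12)
--     = (4 < 8) && (4 < 12) = True   -- they share bytes [4,8)
--   overlaps (a, 8, 4) (a, 12, 4)    -- spans [4,8) and [8,12)
--     = (8 < 8) && ...      = False  -- adjacent slots do not overlap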
lastAssignment :: WithRegUsage CmmNode O C -> AssignmentMap -> [(Label, AssignmentMap)]
-- Variables are dead across calls, so invalidating all mappings is justified
lastAssignment (Plain (CmmCall _ (Just k) _ _ _)) assign = [(k, mapUFM (const NeverOptimize) assign)]
lastAssignment (Plain (CmmForeignCall {succ=k})) assign = [(k, mapUFM (const NeverOptimize) assign)]
lastAssignment l assign = map (\id -> (id, deleteSinks l assign)) $ successors l

assignmentTransfer :: FwdTransfer (WithRegUsage CmmNode) AssignmentMap
assignmentTransfer = mkFTransfer3 (flip const) middleAssignment ((mkFactBase assignmentLattice .) . lastAssignment)
assignmentRewrite :: FwdRewrite FuelUniqSM (WithRegUsage CmmNode) AssignmentMap
assignmentRewrite = mkFRewrite3 first middle last
    where
        first _ _ = return Nothing
        middle :: WithRegUsage CmmNode O O -> AssignmentMap -> GenCmmReplGraph (WithRegUsage CmmNode) O O
        middle (Plain m) assign = return $ rewrite assign (precompute assign m) mkMiddle m
        middle (AssignLocal l e u) assign = return $ rewriteLocal assign (precompute assign (CmmAssign (CmmLocal l) e)) l e u
        last (Plain l) assign = return $ rewrite assign (precompute assign l) mkLast l
        -- Tuple is (inline?, reloads)
        precompute :: AssignmentMap -> CmmNode O x -> (Bool, [WithRegUsage CmmNode O O])
        precompute assign n = foldRegsUsed f (False, []) n -- duplicates are harmless
            where f (i, l) r = case lookupUFM assign r of
                                 Just (AlwaysSink e)   -> (i, (Plain (CmmAssign (CmmLocal r) e)):l)
                                 Just (AlwaysInline _) -> (True, l)
                                 Just NeverOptimize    -> (i, l)
                                 -- This case can show up when we have
                                 -- limited optimization fuel.
                                 Nothing -> (i, l)
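        -- Illustrative example: for a node  z = x + y  where the current
        -- fact maps x to AlwaysInline e1 and y to AlwaysSink e2,
        -- 'precompute' returns (True, [Plain (CmmAssign (CmmLocal y) e2)]):
        -- perform inlining when rewriting the node, and materialize the
        -- sunk assignment to y immediately before it.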
        rewrite :: AssignmentMap
                -> (Bool, [WithRegUsage CmmNode O O])
                -> (WithRegUsage CmmNode O x -> Graph (WithRegUsage CmmNode) O x)
                -> CmmNode O x
                -> Maybe (Graph (WithRegUsage CmmNode) O x)
        rewrite _ (False, []) _ _ = Nothing
        -- Note [CmmCall Inline Hack]
        -- Conservative hack: don't do any inlining on what will
        -- be translated into OldCmm CmmCalls, since the code
        -- produced here tends to be unproblematic and I need to write
        -- lint passes to ensure that we don't put anything in the
        -- arguments that could be construed as a global register by
        -- some later translation pass. (For example, slots will turn
        -- into dereferences of Sp). See [Register parameter passing].
        -- ToDo: Fix this up to only bug out if all inlines were for
        -- CmmExprs with global registers (we can't use the
        -- straightforward mapExpDeep call, in this case.) ToDo: We miss
        -- an opportunity here, where all possible inlinings should
        -- be done.
        rewrite _ (True, []) _ n | not (inlinable n) = Nothing -- see [CmmCall Inline Hack]
        rewrite assign (i, xs) mk n = Just $ mkMiddles xs <*> mk (Plain (inline i assign n))
        rewriteLocal :: AssignmentMap
                     -> (Bool, [WithRegUsage CmmNode O O])
                     -> LocalReg -> CmmExpr -> RegUsage
                     -> Maybe (Graph (WithRegUsage CmmNode) O O)
        rewriteLocal _ (False, []) _ _ _ = Nothing
        rewriteLocal assign (i, xs) l e u = Just $ mkMiddles xs <*> mkMiddle n'
            where n' = AssignLocal l e' u
                  e' = if i then wrapRecExp (inlineExp assign) e else e
        -- inlinable check omitted, since we can always inline into
        -- assignments.
        inline :: Bool -> AssignmentMap -> CmmNode e x -> CmmNode e x
        inline False _ n = n
        inline True _ n | not (inlinable n) = n -- see [CmmCall Inline Hack]
        inline True assign n = mapExpDeep (inlineExp assign) n

        inlineExp assign old@(CmmReg (CmmLocal r))
          = case lookupUFM assign r of
              Just (AlwaysInline x) -> x
              _ -> old
        inlineExp assign old@(CmmRegOff (CmmLocal r) i)
          = case lookupUFM assign r of
              Just (AlwaysInline x) ->
                case x of
                  (CmmRegOff r' i') -> CmmRegOff r' (i + i')
                  _ -> CmmMachOp (MO_Add rep) [x, CmmLit (CmmInt (fromIntegral i) rep)]
                    where rep = typeWidth (localRegType r)
              _ -> old
        inlineExp _ old = old
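
        -- Illustrative example: if r maps to AlwaysInline (CmmRegOff r' 4),
        -- the occurrence  CmmRegOff (CmmLocal r) 8  folds to
        -- CmmRegOff r' 12; if r maps to any other expression x, the
        -- offset is applied as an explicit MO_Add of x and 8 at r's width.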
        inlinable :: CmmNode e x -> Bool
        inlinable (CmmCall{}) = False
        inlinable (CmmForeignCall{}) = False
        inlinable (CmmUnsafeForeignCall{}) = False
        inlinable _ = True
rewriteAssignments :: CmmGraph -> FuelUniqSM CmmGraph
rewriteAssignments g = do
  g'  <- annotateUsage g
  g'' <- liftM fst $ dataflowPassFwd g' [(g_entry g, fact_bot assignmentLattice)] $
                     analRewFwd assignmentLattice assignmentTransfer assignmentRewrite
  return (modifyGraph eraseRegUsage g'')
-- ToDo: Outputable instance for UsageMap and AssignmentMap
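
-- Pipeline recap (illustrative): 'annotateUsage' runs the backward
-- usage analysis, tagging each local-register assignment with
-- SingleUse/ManyUse and dropping dead ones; the forward pass then sinks
-- or inlines assignments according to those tags; finally
-- 'eraseRegUsage' converts the annotated graph back into plain CmmNodes.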