middle :: CmmNode O O -> Fact O DualLive -> CmmReplGraph O O
middle (CmmAssign (CmmLocal reg') _) live | not (reg' `elemRegSet` in_regs live) = return $ Just emptyGraph
-- XXX maybe this should be somewhere else...
+ middle (CmmAssign lhs (CmmReg rhs)) _ | lhs == rhs = return $ Just emptyGraph
middle (CmmStore lhs (CmmLoad rhs _)) _ | lhs == rhs = return $ Just emptyGraph
middle _ _ = return Nothing
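-- For example (illustrative Cmm): each of the equations above deletes
-- a no-op, so  x = <expr>  (when x is not live out),  x = x,  and
-- I32[p] = I32[p]  all rewrite to the empty graph.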
-- Invalidates any assignments whose expression mentions the given register.
invalidateUsersOf :: CmmReg -> AssignmentMap -> AssignmentMap
+-- foldUFM_Directly :: (Unique -> elt -> a -> a) -> a -> UniqFM elt -> a
+invalidateUsersOf reg m = foldUFM_Directly f m m -- [foldUFM performance]
+ where f u (xassign -> Just e) m | reg `regUsedIn` e = addToUFM_Directly m u NeverOptimize
+ f _ _ m = m
+{- This requires the entire spine of the map to be continually rebuilt,
+ - which causes crazy memory usage!
invalidateUsersOf reg = mapUFM (invalidateUsers' reg)
where invalidateUsers' reg (xassign -> Just e) | reg `regUsedIn` e = NeverOptimize
invalidateUsers' _ old = old
+-}
+
+-- Note [foldUFM performance]
+-- These calls to foldUFM no longer leak memory, but they do cause
+-- pretty killer amounts of allocation. So they're something to
+-- optimize; we need an algorithmic change to prevent us from having
+-- to traverse the /entire/ map continually.
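+--
+-- A minimal standalone sketch of the pattern (editorial illustration,
+-- using Data.IntMap as a stand-in for UniqFM; names hypothetical):
+--
+-- > import qualified Data.IntMap as M
+-- >
+-- > -- Re-insert only the entries that must change; untouched entries
+-- > -- keep sharing the original map's spine, but the fold still
+-- > -- visits every entry, which is the allocation cost noted above.
+-- > invalidateWhere :: (a -> Bool) -> a -> M.IntMap a -> M.IntMap a
+-- > invalidateWhere bad dflt m = M.foldrWithKey f m m
+-- >   where f k v acc | bad v     = M.insert k dflt acc
+-- >                   | otherwise = acc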
middleAssignment :: WithRegUsage CmmNode O O -> AssignmentMap -> AssignmentMap
decide CmmLoad{} = AlwaysSink e
decide CmmStackSlot{} = AlwaysSink e
decide CmmMachOp{} = AlwaysSink e
- decide CmmRegOff{} = AlwaysSink e
+ -- We'll always inline simple operations on global registers
+ -- (e.g. Sp - 4 or Hp - 8), to reduce register pressure.
+ -- EZY: Justify this optimization more carefully.
+ decide CmmRegOff{} = AlwaysInline e
-- Algorithm for unannotated assignments of global registers:
-- 1. Delete any sinking assignments that were used by this instruction
-- 2. Look for all assignments that load from memory locations that
-- were clobbered by this store and invalidate them.
middleAssignment (Plain n@(CmmStore lhs rhs)) assign
+ = let m = deleteSinks n assign
+ in foldUFM_Directly f m m -- [foldUFM performance]
+ where f u (xassign -> Just x) m | (lhs, rhs) `clobbers` (u, x) = addToUFM_Directly m u NeverOptimize
+ f _ _ m = m
+{- Also leaky
= mapUFM_Directly p . deleteSinks n $ assign
-- ToDo: There's a missed opportunity here: even if a memory
-- access we're attempting to sink gets clobbered at some
-- point, we could still sink it to just before the clobbering
-- store. Unfortunately, it's too late to change the assignment...
where p r (xassign -> Just x) | (lhs, rhs) `clobbers` (r, x) = NeverOptimize
p _ old = old
+-}
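-- For example (illustrative Cmm): if  x = I32[p]  is a pending sink
-- and we then see the store  I32[p] = y,  step 2 marks x as
-- NeverOptimize, since a reload of x sunk past the store would read
-- y's value instead of the original one.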
-- Assumption: Unsafe foreign calls don't clobber memory
middleAssignment (Plain n@(CmmUnsafeForeignCall{})) assign
= assign
-- Assumptions:
+-- * Writes using Hp do not overlap with any other memory locations
+-- (An important invariant being relied on here is that we only ever
+-- use Hp to allocate values on the heap, which appears to be the
+-- case given hpReg usage, and that our heap writing code doesn't
+-- do anything stupid like overlapping writes.)
-- * Stack slots do not overlap with any other memory locations
--- * Non stack-slot stores always conflict with each other. (This is
--- not always the case; we could probably do something special for Hp)
-- * Stack slots for different areas do not overlap
-- * Stack slots within the same area and different offsets may
-- overlap; we need to do a size check (see 'overlaps').
-clobbers :: (CmmExpr, CmmExpr) -> (Unique, CmmExpr) -> Bool
+-- * Register slots only overlap with themselves. (But this shouldn't
+-- happen in practice, because we'll fail to inline a reload across
+-- the next spill.)
+-- * Non stack-slot stores always conflict with each other. (This is
+-- not always the case; we could probably do something special for Hp)
+clobbers :: (CmmExpr, CmmExpr) -- (lhs, rhs) of clobbering CmmStore
+ -> (Unique, CmmExpr) -- (register, expression) that may be clobbered
+ -> Bool
+clobbers (CmmRegOff (CmmGlobal Hp) _, _) (_, _) = False
+clobbers (CmmReg (CmmGlobal Hp), _) (_, _) = False
+-- ToDo: Also catch MachOp case
clobbers (ss@CmmStackSlot{}, CmmReg (CmmLocal r)) (u, CmmLoad (ss'@CmmStackSlot{}) _)
| getUnique r == u, ss == ss' = False -- No-op on the stack slot (XXX: Do we need this special case?)
clobbers (CmmStackSlot (CallArea a) o, rhs) (_, expr) = f expr
containsStackSlot (CmmMachOp _ es) = or (map containsStackSlot es)
containsStackSlot (CmmStackSlot{}) = True
containsStackSlot _ = False
+clobbers (CmmStackSlot (RegSlot l) _, _) (_, expr) = f expr
+ where f (CmmLoad (CmmStackSlot (RegSlot l') _) _) = l == l'
+ f _ = False
clobbers _ (_, e) = f e
where f (CmmLoad (CmmStackSlot _ _) _) = False
f (CmmLoad{}) = True -- conservative
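-- So, for instance: by the Hp equations a store through Hp clobbers
-- nothing, while the final catch-all says an arbitrary store
-- conservatively clobbers any expression that loads from somewhere
-- other than a stack slot.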
-- Variables are dead across calls, so invalidating all mappings is justified
lastAssignment (Plain (CmmCall _ (Just k) _ _ _)) assign = [(k, mapUFM (const NeverOptimize) assign)]
lastAssignment (Plain (CmmForeignCall {succ=k})) assign = [(k, mapUFM (const NeverOptimize) assign)]
-lastAssignment l assign = map (\id -> (id, assign)) $ successors l
+lastAssignment l assign = map (\id -> (id, deleteSinks l assign)) $ successors l
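+-- (Fall-through edges thus keep the incoming fact minus any sinking
+-- assignments the last node itself uses, e.g. the scrutinee of a
+-- conditional branch; this mirrors step 1 of middleAssignment above.)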
assignmentTransfer :: FwdTransfer (WithRegUsage CmmNode) AssignmentMap
assignmentTransfer = mkFTransfer3 (flip const) middleAssignment ((mkFactBase assignmentLattice .) . lastAssignment)
assignmentRewrite = mkFRewrite3 first middle last
where
first _ _ = return Nothing
+ middle :: WithRegUsage CmmNode O O -> AssignmentMap -> GenCmmReplGraph (WithRegUsage CmmNode) O O
middle (Plain m) assign = return $ rewrite assign (precompute assign m) mkMiddle m
- middle _ _ = return Nothing
+ middle (AssignLocal l e u) assign = return $ rewriteLocal assign (precompute assign (CmmAssign (CmmLocal l) e)) mkMiddle l e u
last (Plain l) assign = return $ rewrite assign (precompute assign l) mkLast l
-- Tuple is (inline?, reloads)
precompute assign n = foldRegsUsed f (False, []) n -- duplicates are harmless
rewrite _ (True, []) _ n | not (inlinable n) = Nothing -- see [CmmCall Inline Hack]
rewrite assign (i, xs) mk n = Just $ mkMiddles xs <*> mk (Plain (inline i assign n))
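-- For example: if precompute returned (True, [reload]), the node is
-- re-emitted with AlwaysInline facts substituted into it, preceded by
-- the materialized reload. (Editorial illustration; xs carries the
-- reloads for sunk assignments the node uses.)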
+ rewriteLocal _ (False, []) _ _ _ _ = Nothing
+ rewriteLocal assign (i, xs) mk l e u = Just $ mkMiddles xs <*> mk n'
+ where n' = AssignLocal l e' u
+ e' = if i then wrapRecExp (inlineExp assign) e else e
+ -- inlinable check omitted, since we can always inline into
+ -- assignments.
+
inline :: Bool -> AssignmentMap -> CmmNode e x -> CmmNode e x
inline False _ n = n
inline True _ n | not (inlinable n) = n -- see [CmmCall Inline Hack]
- inline True assign n = mapExpDeep inlineExp n
- where inlineExp old@(CmmReg (CmmLocal r))
- = case lookupUFM assign r of
- Just (AlwaysInline x) -> x
- _ -> old
- inlineExp old@(CmmRegOff (CmmLocal r) i)
- = case lookupUFM assign r of
- Just (AlwaysInline x) -> CmmMachOp (MO_Add rep) [x, CmmLit (CmmInt (fromIntegral i) rep)]
- where rep = typeWidth (localRegType r)
- _ -> old
- inlineExp old = old
+ inline True assign n = mapExpDeep (inlineExp assign) n
+
+ inlineExp assign old@(CmmReg (CmmLocal r))
+ = case lookupUFM assign r of
+ Just (AlwaysInline x) -> x
+ _ -> old
+ inlineExp assign old@(CmmRegOff (CmmLocal r) i)
+ = case lookupUFM assign r of
+ Just (AlwaysInline x) ->
+ case x of
+ (CmmRegOff r' i') -> CmmRegOff r' (i + i')
+ _ -> CmmMachOp (MO_Add rep) [x, CmmLit (CmmInt (fromIntegral i) rep)]
+ where rep = typeWidth (localRegType r)
+ _ -> old
+ inlineExp _ old = old
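+
+ -- For example: with x bound to AlwaysInline (CmmRegOff (CmmLocal y) 8)
+ -- in the assignment map, CmmRegOff (CmmLocal x) 4 now folds straight
+ -- to CmmRegOff (CmmLocal y) 12 instead of an explicit MO_Add.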
inlinable :: CmmNode e x -> Bool
inlinable (CmmCall{}) = False