1 -----------------------------------------------------------------------------
5 -- (c) The University of Glasgow 2006
7 -----------------------------------------------------------------------------
15 #include "HsVersions.h"
18 import CmmUtils ( hasNoGlobalRegs )
19 import CLabel ( entryLblToInfoLbl )
21 import SMRep ( tablesNextToCode )
24 import Unique ( Unique )
25 import Panic ( panic )
35 -- -----------------------------------------------------------------------------
39 This pass inlines assignments to temporaries that are used just
40 once. It works as follows:
42 - count uses of each temporary
43 - for each temporary that occurs just once:
44 - attempt to push it forward to the statement that uses it
45 - only push forward past assignments to other temporaries
46 (assumes that temporaries are single-assignment)
47 - if we reach the statement that uses it, inline the rhs
48 and delete the original assignment.
50 Possible generalisations: here is an example from factorial
55 if (_smi != 0) goto cmK;
64 We want to inline _smi and _smn. To inline _smn:
66 - we must be able to push forward past assignments to global regs.
67 We can do this if the rhs of the assignment we are pushing
68 forward doesn't refer to the global reg being assigned to; easy
73 - It is a trivial replacement, reg for reg, but it occurs more than
75 - We can inline trivial assignments even if the temporary occurs
76 more than once, as long as we don't eliminate the original assignment
77 (this doesn't help much on its own).
78 - We need to be able to propagate the assignment forward through jumps;
79 if we did this, we would find that it can be inlined safely in all
84 It catches many useful cases, but could be generalised in
-- | Inline assignments to temporaries that are used exactly once:
-- count uses across all blocks, then rewrite each block with those
-- single-use temporaries pushed forward into their use sites.
88 cmmMiniInline :: [CmmBasicBlock] -> [CmmBasicBlock]
89 cmmMiniInline blocks = map do_inline blocks
-- (the 'where' keyword introducing the bindings below is elided here)
-- Usage counts for one block: sum the per-statement counts.
91 blockUses (BasicBlock _ stmts)
92 = foldr (plusUFM_C (+)) emptyUFM (map getStmtUses stmts)
-- Global usage map over all blocks, keyed by each temporary's Unique.
94 uses = foldr (plusUFM_C (+)) emptyUFM (map blockUses blocks)
-- Rewrite a single block using the global usage counts.
96 do_inline (BasicBlock id stmts)
97 = BasicBlock id (cmmMiniInlineStmts uses stmts)
-- | Walk a statement list, attempting to inline each single-use
-- temporary assignment into the statement that consumes it.
100 cmmMiniInlineStmts :: UniqFM Int -> [CmmStmt] -> [CmmStmt]
101 cmmMiniInlineStmts uses [] = []
102 cmmMiniInlineStmts uses (stmt@(CmmAssign (CmmLocal (LocalReg u _)) expr) : stmts)
-- Only fire when the temporary is used exactly once AND the inliner
-- managed to push the rhs forward to its use site.
103 | Just 1 <- lookupUFM uses u,
104 Just stmts' <- lookForInline u expr stmts
-- Debug trace of the statement being inlined (surrounding lines elided
-- in this view); the original assignment is dropped from the output.
107 trace ("nativeGen: inlining " ++ showSDoc (pprStmt stmt)) $
109 cmmMiniInlineStmts uses stmts'
-- Any other statement is kept and we carry on down the list.
111 cmmMiniInlineStmts uses (stmt:stmts)
112 = stmt : cmmMiniInlineStmts uses stmts
115 -- Try to inline a temporary assignment. We can skip over assignments to
116 -- other temporaries, because we know that expressions aren't side-effecting
117 -- and temporaries are single-assignment.
-- | @lookForInline u expr stmts@: push @expr@ (the rhs of single-use
-- temporary @u@) forward through @stmts@ to its use site. Returns Just
-- the rewritten statement list if the inline succeeded.
118 lookForInline u expr (stmt@(CmmAssign (CmmLocal (LocalReg u' _)) rhs) : rest)
-- An assignment to another temporary: inline here if this rhs is the
-- single use, otherwise keep pushing forward past the assignment.
120 = case lookupUFM (getExprUses rhs) u of
121 Just 1 -> Just (inlineStmt u expr stmt : rest)
122 _other -> case lookForInline u expr rest of
124 Just stmts -> Just (stmt:stmts)
-- Nops are transparent to the search.
126 lookForInline u expr (CmmNop : rest)
127 = lookForInline u expr rest
-- Any other statement: inline only if it is the single use site and the
-- statement kind permits it (remaining guards/clauses elided here).
129 lookForInline u expr (stmt:stmts)
130 = case lookupUFM (getStmtUses stmt) u of
131 Just 1 | ok_to_inline -> Just (inlineStmt u expr stmt : stmts)
134 -- we don't inline into CmmCall if the expression refers to global
135 -- registers. This is a HACK to avoid global registers clashing with
136 -- C argument-passing registers, really the back-end ought to be able
137 -- to handle it properly, but currently neither PprC nor the NCG can
138 -- do it. See also CgForeignCall:load_args_into_temps.
139 ok_to_inline = case stmt of
140 CmmCall{} -> hasNoGlobalRegs expr
143 -- -----------------------------------------------------------------------------
144 -- Boring Cmm traversals for collecting usage info and substitutions.
-- | Count uses of each local temporary in one statement, keyed by the
-- temporary's Unique.
146 getStmtUses :: CmmStmt -> UniqFM Int
147 getStmtUses (CmmAssign _ e) = getExprUses e
148 getStmtUses (CmmStore e1 e2) = plusUFM_C (+) (getExprUses e1) (getExprUses e2)
-- Calls count uses both in the call target and in the arguments.
149 getStmtUses (CmmCall target _ es _)
150 = plusUFM_C (+) (uses target) (getExprsUses (map fst es))
151 where uses (CmmForeignCall e _) = getExprUses e
-- (the CmmPrim case of 'uses' is elided in this view)
153 getStmtUses (CmmCondBranch e _) = getExprUses e
154 getStmtUses (CmmSwitch e _) = getExprUses e
155 getStmtUses (CmmJump e _) = getExprUses e
156 getStmtUses _ = emptyUFM
-- | Count how many times each local temporary is read by an expression.
-- The result maps a temporary's Unique to its occurrence count.
getExprUses :: CmmExpr -> UniqFM Int
getExprUses expr = case expr of
  CmmReg    (CmmLocal (LocalReg u _))   -> unitUFM u 1
  CmmRegOff (CmmLocal (LocalReg u _)) _ -> unitUFM u 1
  CmmLoad addr _                        -> getExprUses addr
  CmmMachOp _ operands                  -> getExprsUses operands
  _                                     -> emptyUFM
-- | Sum the per-expression temporary-usage counts over a list of
-- expressions. (Added the missing top-level type signature; the type
-- is fixed by 'getExprUses' returning @UniqFM Int@.)
getExprsUses :: [CmmExpr] -> UniqFM Int
getExprsUses es = foldr (plusUFM_C (+)) emptyUFM (map getExprUses es)
-- | Substitute expression @a@ for every use of the temporary with
-- Unique @u@ throughout a statement.
inlineStmt :: Unique -> CmmExpr -> CmmStmt -> CmmStmt
inlineStmt u a (CmmAssign r e) = CmmAssign r (inlineExpr u a e)
inlineStmt u a (CmmStore e1 e2) = CmmStore (inlineExpr u a e1) (inlineExpr u a e2)
inlineStmt u a (CmmCall target regs es vols)
  = CmmCall (infn target) regs es' vols
  where
        -- BUG FIX: getStmtUses counts occurrences of the temporary in
        -- the call target, so we must substitute there too. Previously
        -- the target was returned unchanged, so an assignment whose
        -- only use was in the call target would be deleted without the
        -- rhs ever being inlined.
        infn (CmmForeignCall fn cconv) = CmmForeignCall (inlineExpr u a fn) cconv
        infn (CmmPrim p) = CmmPrim p
        -- Substitute into each argument, keeping its hint unchanged.
        es' = [ (inlineExpr u a e, hint) | (e,hint) <- es ]
inlineStmt u a (CmmCondBranch e d) = CmmCondBranch (inlineExpr u a e) d
inlineStmt u a (CmmSwitch e d) = CmmSwitch (inlineExpr u a e) d
inlineStmt u a (CmmJump e d) = CmmJump (inlineExpr u a e) d
inlineStmt u a other_stmt = other_stmt
-- | Substitute expression @a@ for each read of the temporary with
-- Unique @u@ inside an expression.
180 inlineExpr :: Unique -> CmmExpr -> CmmExpr -> CmmExpr
181 inlineExpr u a e@(CmmReg (CmmLocal (LocalReg u' _)))
-- (guards elided here: presumably @| u == u' = a@ plus a fall-through
-- returning @e@ -- TODO confirm against the full source)
184 inlineExpr u a e@(CmmRegOff (CmmLocal (LocalReg u' rep)) off)
-- A RegOff of the inlined temporary becomes an explicit add of the
-- substituted expression and the literal offset.
185 | u == u' = CmmMachOp (MO_Add rep) [a, CmmLit (CmmInt (fromIntegral off) rep)]
187 inlineExpr u a (CmmLoad e rep) = CmmLoad (inlineExpr u a e) rep
188 inlineExpr u a (CmmMachOp op es) = CmmMachOp op (map (inlineExpr u a) es)
189 inlineExpr u a other_expr = other_expr
191 -- -----------------------------------------------------------------------------
192 -- MachOp constant folder
194 -- Now, try to constant-fold the MachOps. The arguments have already
195 -- been optimized and folded.
-- | Constant-fold a MachOp applied to already-optimised arguments.
-- NOTE(review): many lines of this function (including the head of its
-- type signature) are elided in this view; comments below describe only
-- what the visible lines establish.
198 :: MachOp -- The operation from an CmmMachOp
199 -> [CmmExpr] -- The optimized arguments
-- Unary op applied to a single integer literal: fold to a new literal.
202 cmmMachOpFold op arg@[CmmLit (CmmInt x rep)]
204 MO_S_Neg r -> CmmLit (CmmInt (-x) rep)
205 MO_Not r -> CmmLit (CmmInt (complement x) rep)
207 -- these are interesting: we must first narrow to the
208 -- "from" type, in order to truncate to the correct size.
209 -- The final narrow/widen to the destination type
210 -- is implicit in the CmmLit.
212 | isFloatingRep to -> CmmLit (CmmFloat (fromInteger x) to)
213 | otherwise -> CmmLit (CmmInt (narrowS from x) to)
214 MO_U_Conv from to -> CmmLit (CmmInt (narrowU from x) to)
216 _ -> panic "cmmMachOpFold: unknown unary op"
219 -- Eliminate conversion NOPs
220 cmmMachOpFold (MO_S_Conv rep1 rep2) [x] | rep1 == rep2 = x
221 cmmMachOpFold (MO_U_Conv rep1 rep2) [x] | rep1 == rep2 = x
223 -- Eliminate nested conversions where possible
224 cmmMachOpFold conv_outer args@[CmmMachOp conv_inner [x]]
225 | Just (rep1,rep2,signed1) <- isIntConversion conv_inner,
226 Just (_, rep3,signed2) <- isIntConversion conv_outer
228 -- widen then narrow to the same size is a nop
229 _ | rep1 < rep2 && rep1 == rep3 -> x
230 -- Widen then narrow to different size: collapse to single conversion
231 -- but remember to use the signedness from the widening, just in case
232 -- the final conversion is a widen.
233 | rep1 < rep2 && rep2 > rep3 ->
234 cmmMachOpFold (intconv signed1 rep1 rep3) [x]
235 -- Nested widenings: collapse if the signedness is the same
236 | rep1 < rep2 && rep2 < rep3 && signed1 == signed2 ->
237 cmmMachOpFold (intconv signed1 rep1 rep3) [x]
238 -- Nested narrowings: collapse
239 | rep1 > rep2 && rep2 > rep3 ->
240 cmmMachOpFold (MO_U_Conv rep1 rep3) [x]
-- Fall-through: conversion pairs that don't collapse are left as-is.
242 CmmMachOp conv_outer args
-- Classify an op as an int->int conversion: Just (from, to, signed).
244 isIntConversion (MO_U_Conv rep1 rep2)
245 | not (isFloatingRep rep1) && not (isFloatingRep rep2)
246 = Just (rep1,rep2,False)
247 isIntConversion (MO_S_Conv rep1 rep2)
248 | not (isFloatingRep rep1) && not (isFloatingRep rep2)
249 = Just (rep1,rep2,True)
250 isIntConversion _ = Nothing
-- Build a signed/unsigned conversion op from a signedness flag.
252 intconv True = MO_S_Conv
253 intconv False = MO_U_Conv
255 -- ToDo: a narrow of a load can be collapsed into a narrow load, right?
256 -- but what if the architecture only supports word-sized loads, should
257 -- we do the transformation anyway?
-- Binary op on two integer literals: fold to a literal. The bindings
-- of x_u/y_u and x_s/y_s are elided here; presumably the unsigned and
-- signed narrowings of x and y at xrep -- TODO confirm.
259 cmmMachOpFold mop args@[CmmLit (CmmInt x xrep), CmmLit (CmmInt y _)]
261 -- for comparisons: don't forget to narrow the arguments before
262 -- comparing, since they might be out of range.
263 MO_Eq r -> CmmLit (CmmInt (if x_u == y_u then 1 else 0) wordRep)
264 MO_Ne r -> CmmLit (CmmInt (if x_u /= y_u then 1 else 0) wordRep)
266 MO_U_Gt r -> CmmLit (CmmInt (if x_u > y_u then 1 else 0) wordRep)
267 MO_U_Ge r -> CmmLit (CmmInt (if x_u >= y_u then 1 else 0) wordRep)
268 MO_U_Lt r -> CmmLit (CmmInt (if x_u < y_u then 1 else 0) wordRep)
269 MO_U_Le r -> CmmLit (CmmInt (if x_u <= y_u then 1 else 0) wordRep)
271 MO_S_Gt r -> CmmLit (CmmInt (if x_s > y_s then 1 else 0) wordRep)
272 MO_S_Ge r -> CmmLit (CmmInt (if x_s >= y_s then 1 else 0) wordRep)
273 MO_S_Lt r -> CmmLit (CmmInt (if x_s < y_s then 1 else 0) wordRep)
274 MO_S_Le r -> CmmLit (CmmInt (if x_s <= y_s then 1 else 0) wordRep)
-- Arithmetic on the raw Integers; the result rep r handles truncation.
276 MO_Add r -> CmmLit (CmmInt (x + y) r)
277 MO_Sub r -> CmmLit (CmmInt (x - y) r)
278 MO_Mul r -> CmmLit (CmmInt (x * y) r)
-- Guarded against division by zero: fall through rather than crash.
279 MO_S_Quot r | y /= 0 -> CmmLit (CmmInt (x `quot` y) r)
280 MO_S_Rem r | y /= 0 -> CmmLit (CmmInt (x `rem` y) r)
282 MO_And r -> CmmLit (CmmInt (x .&. y) r)
283 MO_Or r -> CmmLit (CmmInt (x .|. y) r)
284 MO_Xor r -> CmmLit (CmmInt (x `xor` y) r)
286 MO_Shl r -> CmmLit (CmmInt (x `shiftL` fromIntegral y) r)
-- Unsigned (logical) shift right uses the unsigned value of x.
287 MO_U_Shr r -> CmmLit (CmmInt (x_u `shiftR` fromIntegral y) r)
288 MO_S_Shr r -> CmmLit (CmmInt (x `shiftR` fromIntegral y) r)
290 other -> CmmMachOp mop args
299 -- When possible, shift the constants to the right-hand side, so that we
300 -- can match for strength reductions. Note that the code generator will
301 -- also assume that constants have been shifted to the right when
-- Canonicalise: a literal operand of a commutable op goes on the right.
304 cmmMachOpFold op [x@(CmmLit _), y]
305 | not (isLit y) && isCommutableMachOp op
306 = cmmMachOpFold op [y, x]
308 -- Turn (a+b)+c into a+(b+c) where possible. Because literals are
309 -- moved to the right, it is more likely that we will find
310 -- opportunities for constant folding when the expression is
313 -- ToDo: this appears to introduce a quadratic behaviour due to the
314 -- nested cmmMachOpFold. Can we fix this?
316 -- Why do we check isLit arg1? If arg1 is a lit, it means that arg2
317 -- is also a lit (otherwise arg1 would be on the right). If we
318 -- put arg1 on the left of the rearranged expression, we'll get into a
319 -- loop: (x1+x2)+x3 => x1+(x2+x3) => (x2+x3)+x1 => x2+(x3+x1) ...
321 cmmMachOpFold mop1 [CmmMachOp mop2 [arg1,arg2], arg3]
322 | mop1 == mop2 && isAssociativeMachOp mop1 && not (isLit arg1)
323 = cmmMachOpFold mop1 [arg1, cmmMachOpFold mop2 [arg2,arg3]]
325 -- Make a RegOff if we can
326 cmmMachOpFold (MO_Add _) [CmmReg reg, CmmLit (CmmInt n rep)]
327 = CmmRegOff reg (fromIntegral (narrowS rep n))
328 cmmMachOpFold (MO_Add _) [CmmRegOff reg off, CmmLit (CmmInt n rep)]
329 = CmmRegOff reg (off + fromIntegral (narrowS rep n))
330 cmmMachOpFold (MO_Sub _) [CmmReg reg, CmmLit (CmmInt n rep)]
331 = CmmRegOff reg (- fromIntegral (narrowS rep n))
332 cmmMachOpFold (MO_Sub _) [CmmRegOff reg off, CmmLit (CmmInt n rep)]
333 = CmmRegOff reg (off - fromIntegral (narrowS rep n))
335 -- Fold label(+/-)offset into a CmmLit where possible
337 cmmMachOpFold (MO_Add _) [CmmLit (CmmLabel lbl), CmmLit (CmmInt i rep)]
338 = CmmLit (CmmLabelOff lbl (fromIntegral (narrowU rep i)))
339 cmmMachOpFold (MO_Add _) [CmmLit (CmmInt i rep), CmmLit (CmmLabel lbl)]
340 = CmmLit (CmmLabelOff lbl (fromIntegral (narrowU rep i)))
341 cmmMachOpFold (MO_Sub _) [CmmLit (CmmLabel lbl), CmmLit (CmmInt i rep)]
342 = CmmLit (CmmLabelOff lbl (fromIntegral (negate (narrowU rep i))))
344 -- We can often do something with constants of 0 and 1 ...
-- Right operand is the literal 0: simplify comparisons against zero
-- when the left operand is itself a (0/1-valued) comparison.
346 cmmMachOpFold mop args@[x, y@(CmmLit (CmmInt 0 _))]
357 MO_Ne r | isComparisonExpr x -> x
358 MO_Eq r | Just x' <- maybeInvertConditionalExpr x -> x'
359 MO_U_Gt r | isComparisonExpr x -> x
360 MO_S_Gt r | isComparisonExpr x -> x
361 MO_U_Lt r | isComparisonExpr x -> CmmLit (CmmInt 0 wordRep)
362 MO_S_Lt r | isComparisonExpr x -> CmmLit (CmmInt 0 wordRep)
363 MO_U_Ge r | isComparisonExpr x -> CmmLit (CmmInt 1 wordRep)
364 MO_S_Ge r | isComparisonExpr x -> CmmLit (CmmInt 1 wordRep)
365 MO_U_Le r | Just x' <- maybeInvertConditionalExpr x -> x'
366 MO_S_Le r | Just x' <- maybeInvertConditionalExpr x -> x'
367 other -> CmmMachOp mop args
-- Right operand is the literal 1: identities and comparison tricks.
369 cmmMachOpFold mop args@[x, y@(CmmLit (CmmInt 1 rep))]
374 MO_S_Rem r -> CmmLit (CmmInt 0 rep)
375 MO_U_Rem r -> CmmLit (CmmInt 0 rep)
376 MO_Ne r | Just x' <- maybeInvertConditionalExpr x -> x'
377 MO_Eq r | isComparisonExpr x -> x
378 MO_U_Lt r | Just x' <- maybeInvertConditionalExpr x -> x'
379 MO_S_Lt r | Just x' <- maybeInvertConditionalExpr x -> x'
380 MO_U_Gt r | isComparisonExpr x -> CmmLit (CmmInt 0 wordRep)
381 MO_S_Gt r | isComparisonExpr x -> CmmLit (CmmInt 0 wordRep)
382 MO_U_Le r | isComparisonExpr x -> CmmLit (CmmInt 1 wordRep)
383 MO_S_Le r | isComparisonExpr x -> CmmLit (CmmInt 1 wordRep)
384 MO_U_Ge r | isComparisonExpr x -> x
385 MO_S_Ge r | isComparisonExpr x -> x
386 other -> CmmMachOp mop args
388 -- Now look for multiplication/division by powers of 2 (integers).
-- Strength-reduce by powers of two via exactLog2; the guards selecting
-- which op each branch applies to are elided in this view.
390 cmmMachOpFold mop args@[x, y@(CmmLit (CmmInt n _))]
393 -> case exactLog2 n of
395 Just p -> CmmMachOp (MO_Shl rep) [x, CmmLit (CmmInt p rep)]
397 -> case exactLog2 n of
399 Just p -> CmmMachOp (MO_S_Shr rep) [x, CmmLit (CmmInt p rep)]
-- Shared fall-back when no strength reduction applies.
403 unchanged = CmmMachOp mop args
405 -- Anything else is just too hard.
407 cmmMachOpFold mop args = CmmMachOp mop args
409 -- -----------------------------------------------------------------------------
412 -- This algorithm for determining the $\log_2$ of exact powers of 2 comes
413 -- from GCC. It requires bit manipulation primitives, and we use GHC
414 -- extensions. Tough.
416 -- Used to be in MachInstrs --SDM.
417 -- ToDo: remove use of unboxery --SDM.
-- | @exactLog2 n@ returns @Just (log2 n)@ when n is an exact power of
-- two in (0, 2^31); otherwise Nothing (the Nothing branches are elided
-- in this view). The algorithm comes from GCC (see header comment) and
-- uses GHC unboxed-primitive bit tricks.
422 exactLog2 :: Integer -> Maybe Integer
-- Reject values outside the supported range.
424 = if (x <= 0 || x >= 2147483648) then
427 case fromInteger x of { I# x# ->
-- x .&. (-x) == x exactly when x has a single set bit.
428 if (w2i ((i2w x#) `and#` (i2w (0# -# x#))) /=# x#) then
431 Just (toInteger (I# (pow2 x#)))
-- Count shifts needed to bring the single set bit down to bit 0.
434 pow2 x# | x# ==# 1# = 0#
435 | otherwise = 1# +# pow2 (w2i (i2w x# `shiftRL#` 1#))
438 -- -----------------------------------------------------------------------------
439 -- widening / narrowing
-- | Truncate an Integer to the unsigned value of the given integer
-- size (panics on any other MachRep, e.g. floating-point reps).
narrowU :: MachRep -> Integer -> Integer
narrowU I8  x = fromIntegral (fromIntegral x :: Word8)
narrowU I16 x = fromIntegral (fromIntegral x :: Word16)
narrowU I32 x = fromIntegral (fromIntegral x :: Word32)
narrowU I64 x = fromIntegral (fromIntegral x :: Word64)
-- BUG FIX: the panic previously said "narrowTo" (as does narrowS's),
-- which names no real function and made the two failures
-- indistinguishable; report the actual function name.
narrowU _ _ = panic "narrowU"
-- | Truncate an Integer to the signed (sign-extended) value of the
-- given integer size (panics on any other MachRep).
narrowS :: MachRep -> Integer -> Integer
narrowS I8  x = fromIntegral (fromIntegral x :: Int8)
narrowS I16 x = fromIntegral (fromIntegral x :: Int16)
narrowS I32 x = fromIntegral (fromIntegral x :: Int32)
narrowS I64 x = fromIntegral (fromIntegral x :: Int64)
-- BUG FIX: the panic previously said "narrowTo" (as does narrowU's),
-- which names no real function and made the two failures
-- indistinguishable; report the actual function name.
narrowS _ _ = panic "narrowS"
455 -- -----------------------------------------------------------------------------
459 This is a simple pass that replaces tail-recursive functions like this:
474 the latter generates better C code, because the C compiler treats it
475 like a loop, and brings full loop optimisation to bear.
477 In my measurements this makes little or no difference to anything
478 except factorial, but what the hell.
-- | For the via-C backend: rewrite a procedure's jumps back to its own
-- entry label so the C compiler sees a loop it can optimise (see the
-- commentary above this function).
481 cmmLoopifyForC :: CmmTop -> CmmTop
482 cmmLoopifyForC p@(CmmProc info entry_lbl [] blocks@(BasicBlock top_id _ : _))
483 | null info = p -- only if there's an info table, ignore case alts
485 -- pprTrace "jump_lbl" (ppr jump_lbl <+> ppr entry_lbl) $
486 CmmProc info entry_lbl [] blocks'
-- Rewrite every statement of every block.
487 where blocks' = [ BasicBlock id (map do_stmt stmts)
488 | BasicBlock id stmts <- blocks ]
-- A jump to our own entry label gets rewritten (its replacement is
-- elided here; presumably a branch to top_id -- TODO confirm).
490 do_stmt (CmmJump (CmmLit (CmmLabel lbl)) _) | lbl == jump_lbl
-- With tables-next-to-code, the label callers jump to is the
-- info-table label, not the entry label itself.
494 jump_lbl | tablesNextToCode = entryLblToInfoLbl entry_lbl
495 | otherwise = entry_lbl
-- Anything that isn't a CmmProc of the right shape is left alone.
497 cmmLoopifyForC top = top
499 -- -----------------------------------------------------------------------------
-- | Is the expression a literal? (the catch-all False clause is elided
-- in this view)
502 isLit (CmmLit _) = True
-- | True iff the expression is an application of a comparison MachOp.
isComparisonExpr :: CmmExpr -> Bool
isComparisonExpr expr =
  case expr of
    CmmMachOp op _ -> isComparisonMachOp op
    _              -> False
-- | Invert a comparison expression, when its comparison operator has a
-- known inverse; Nothing for anything else.
maybeInvertConditionalExpr :: CmmExpr -> Maybe CmmExpr
maybeInvertConditionalExpr (CmmMachOp op args) =
  fmap (\inverted -> CmmMachOp inverted args) (maybeInvertComparison op)
maybeInvertConditionalExpr _ = Nothing