1 -----------------------------------------------------------------------------
5 -- (c) The University of Glasgow 2006
7 -----------------------------------------------------------------------------
10 -- The above warning suppression flag is a temporary kludge.
11 -- While working on this module you are encouraged to remove it and fix
12 -- any warnings in the module. See
13 -- http://hackage.haskell.org/trac/ghc/wiki/WorkingConventions#Warnings
22 #include "HsVersions.h"
40 -- -----------------------------------------------------------------------------
44 This pass inlines assignments to temporaries that are used just
45 once. It works as follows:
47 - count uses of each temporary
48 - for each temporary that occurs just once:
49 - attempt to push it forward to the statement that uses it
50 - only push forward past assignments to other temporaries
51 (assumes that temporaries are single-assignment)
52 - if we reach the statement that uses it, inline the rhs
53 and delete the original assignment.
55 Possible generalisations: here is an example from factorial
60 if (_smi != 0) goto cmK;
69 We want to inline _smi and _smn. To inline _smn:
71 - we must be able to push forward past assignments to global regs.
72 We can do this if the rhs of the assignment we are pushing
73 forward doesn't refer to the global reg being assigned to; easy
78 - It is a trivial replacement, reg for reg, but it occurs more than
80 - We can inline trivial assignments even if the temporary occurs
81 more than once, as long as we don't eliminate the original assignment
82 (this doesn't help much on its own).
83 - We need to be able to propagate the assignment forward through jumps;
84 if we did this, we would find that it can be inlined safely in all
-- | Mini-inlining pass: substitute each single-use temporary assignment
-- into the one statement that consumes it (see the note above for the
-- algorithm).  Use counts are accumulated across ALL blocks first, so a
-- temporary whose uses are spread over several blocks never counts as
-- "used once".
88 cmmMiniInline :: [CmmBasicBlock] -> [CmmBasicBlock]
89 cmmMiniInline blocks = map do_inline blocks
-- Use counts of the temporaries read by one block's statements.
91 blockUses (BasicBlock _ stmts)
92 = foldr (plusUFM_C (+)) emptyUFM (map getStmtUses stmts)
-- Global use counts: per-block counts summed over the whole procedure.
94 uses = foldr (plusUFM_C (+)) emptyUFM (map blockUses blocks)
-- Rewrite a single block against the global use counts.
96 do_inline (BasicBlock id stmts)
97 = BasicBlock id (cmmMiniInlineStmts uses stmts)
-- | Walk a statement list; when an assignment defines a temporary whose
-- global use count is exactly 1 and 'lookForInline' can push it to its
-- use site, drop the assignment and continue with the rewritten tail.
100 cmmMiniInlineStmts :: UniqFM Int -> [CmmStmt] -> [CmmStmt]
101 cmmMiniInlineStmts uses [] = []
-- Candidate: assignment to a local temporary used exactly once.
102 cmmMiniInlineStmts uses (stmt@(CmmAssign (CmmLocal (LocalReg u _ _)) expr) : stmts)
103 | Just 1 <- lookupUFM uses u,
104 Just stmts' <- lookForInline u expr stmts
-- debug output for the native code generator ("nativeGen: inlining ...")
107 trace ("nativeGen: inlining " ++ showSDoc (pprStmt stmt)) $
109 cmmMiniInlineStmts uses stmts'
-- Every other statement is kept unchanged; keep scanning the tail.
111 cmmMiniInlineStmts uses (stmt:stmts)
112 = stmt : cmmMiniInlineStmts uses stmts
115 -- Try to inline a temporary assignment. We can skip over assignments to
116 -- other temporaries, because we know that expressions aren't side-effecting
117 -- and temporaries are single-assignment.
-- Case 1: an assignment to a *different* temporary.  If its rhs is the
-- single use of our temporary, inline into that rhs; otherwise keep the
-- statement and search further down the list.
118 lookForInline u expr (stmt@(CmmAssign (CmmLocal (LocalReg u' _ _)) rhs) : rest)
120 = case lookupUFM (getExprUses rhs) u of
121 Just 1 -> Just (inlineStmt u expr stmt : rest)
122 _other -> case lookForInline u expr rest of
124 Just stmts -> Just (stmt:stmts)
-- Case 2: no-ops are transparent to the search.
126 lookForInline u expr (CmmNop : rest)
127 = lookForInline u expr rest
-- Case 3: any other statement.  Inline here iff this statement is the
-- single use site and the statement kind permits inlining.
129 lookForInline u expr (stmt:stmts)
130 = case lookupUFM (getStmtUses stmt) u of
131 Just 1 | ok_to_inline -> Just (inlineStmt u expr stmt : stmts)
134 -- we don't inline into CmmCall if the expression refers to global
135 -- registers. This is a HACK to avoid global registers clashing with
136 -- C argument-passing registers, really the back-end ought to be able
137 -- to handle it properly, but currently neither PprC nor the NCG can
138 -- do it. See also CgForeignCall:load_args_into_temps.
139 ok_to_inline = case stmt of
140 CmmCall{} -> hasNoGlobalRegs expr
143 -- -----------------------------------------------------------------------------
144 -- Boring Cmm traversals for collecting usage info and substitutions.
-- | Per-unique use counts of the local temporaries read by a single
-- statement.  Note that uses inside a CmmCall's *target* expression are
-- counted too, so 'inlineStmt' must substitute there as well.
146 getStmtUses :: CmmStmt -> UniqFM Int
147 getStmtUses (CmmAssign _ e) = getExprUses e
148 getStmtUses (CmmStore e1 e2) = plusUFM_C (+) (getExprUses e1) (getExprUses e2)
-- A call uses temporaries both in its target and in its arguments.
149 getStmtUses (CmmCall target _ es _ _)
150 = plusUFM_C (+) (uses target) (getExprsUses (map fst es))
151 where uses (CmmCallee e _) = getExprUses e
153 getStmtUses (CmmCondBranch e _) = getExprUses e
154 getStmtUses (CmmSwitch e _) = getExprUses e
155 getStmtUses (CmmJump e _) = getExprUses e
156 getStmtUses _ = emptyUFM
-- | Per-unique use counts of local temporaries inside one expression.
-- Each occurrence of a local register (bare or with an offset)
-- contributes a single use; the traversal recurses through loads and
-- machine-op applications.
getExprUses :: CmmExpr -> UniqFM Int
getExprUses expr = case expr of
  CmmReg    (CmmLocal (LocalReg u _ _))   -> unitUFM u 1
  CmmRegOff (CmmLocal (LocalReg u _ _)) _ -> unitUFM u 1
  CmmLoad addr _                          -> getExprUses addr
  CmmMachOp _ operands                    -> getExprsUses operands
  _                                       -> emptyUFM
-- | Combine the use counts of a list of expressions, summing counts
-- for temporaries that occur in more than one of them.
getExprsUses :: [CmmExpr] -> UniqFM Int
getExprsUses = foldr (\e acc -> plusUFM_C (+) (getExprUses e) acc) emptyUFM
-- | Substitute the expression @a@ for every use of the temporary with
-- unique @u@ inside a statement.  Must cover exactly the positions that
-- 'getStmtUses' counts, otherwise the mini-inliner can delete a
-- temporary's defining assignment while a use survives.
inlineStmt :: Unique -> CmmExpr -> CmmStmt -> CmmStmt
inlineStmt u a (CmmAssign r e) = CmmAssign r (inlineExpr u a e)
inlineStmt u a (CmmStore e1 e2) = CmmStore (inlineExpr u a e1) (inlineExpr u a e2)
inlineStmt u a (CmmCall target regs es srt ret)
   = CmmCall (infn target) regs es' srt ret
   where
         -- BUG FIX: getStmtUses counts uses inside the call target, so
         -- we must substitute into the callee expression too.  The old
         -- code returned the callee unchanged, which could leave a
         -- dangling reference to a deleted temporary.
         infn (CmmCallee fn cconv) = CmmCallee (inlineExpr u a fn) cconv
         infn (CmmPrim p) = CmmPrim p
         -- substitute into each argument, keeping its hint
         es' = [ (inlineExpr u a e, hint) | (e,hint) <- es ]
inlineStmt u a (CmmCondBranch e d) = CmmCondBranch (inlineExpr u a e) d
inlineStmt u a (CmmSwitch e d) = CmmSwitch (inlineExpr u a e) d
inlineStmt u a (CmmJump e d) = CmmJump (inlineExpr u a e) d
inlineStmt u a other_stmt = other_stmt
-- | Substitute expression 'a' for each occurrence of the temporary with
-- unique 'u' inside an expression.
180 inlineExpr :: Unique -> CmmExpr -> CmmExpr -> CmmExpr
181 inlineExpr u a e@(CmmReg (CmmLocal (LocalReg u' _ _)))
-- A RegOff of the inlined register becomes an explicit MO_Add of the
-- offset literal at the register's rep.
184 inlineExpr u a e@(CmmRegOff (CmmLocal (LocalReg u' rep _)) off)
185 | u == u' = CmmMachOp (MO_Add rep) [a, CmmLit (CmmInt (fromIntegral off) rep)]
187 inlineExpr u a (CmmLoad e rep) = CmmLoad (inlineExpr u a e) rep
188 inlineExpr u a (CmmMachOp op es) = CmmMachOp op (map (inlineExpr u a) es)
189 inlineExpr u a other_expr = other_expr
191 -- -----------------------------------------------------------------------------
192 -- MachOp constant folder
194 -- Now, try to constant-fold the MachOps. The arguments have already
195 -- been optimized and folded.
-- Unary operation applied to an integer literal: fold at compile time.
198 :: MachOp -- The operation from an CmmMachOp
199 -> [CmmExpr] -- The optimized arguments
202 cmmMachOpFold op arg@[CmmLit (CmmInt x rep)]
204 MO_S_Neg r -> CmmLit (CmmInt (-x) rep)
205 MO_Not r -> CmmLit (CmmInt (complement x) rep)
207 -- these are interesting: we must first narrow to the
208 -- "from" type, in order to truncate to the correct size.
209 -- The final narrow/widen to the destination type
210 -- is implicit in the CmmLit.
212 | isFloatingRep to -> CmmLit (CmmFloat (fromInteger x) to)
213 | otherwise -> CmmLit (CmmInt (narrowS from x) to)
214 MO_U_Conv from to -> CmmLit (CmmInt (narrowU from x) to)
216 _ -> panic "cmmMachOpFold: unknown unary op"
219 -- Eliminate conversion NOPs
220 cmmMachOpFold (MO_S_Conv rep1 rep2) [x] | rep1 == rep2 = x
221 cmmMachOpFold (MO_U_Conv rep1 rep2) [x] | rep1 == rep2 = x
223 -- Eliminate nested conversions where possible
224 cmmMachOpFold conv_outer args@[CmmMachOp conv_inner [x]]
225 | Just (rep1,rep2,signed1) <- isIntConversion conv_inner,
226 Just (_, rep3,signed2) <- isIntConversion conv_outer
228 -- widen then narrow to the same size is a nop
229 _ | rep1 < rep2 && rep1 == rep3 -> x
230 -- Widen then narrow to different size: collapse to single conversion
231 -- but remember to use the signedness from the widening, just in case
232 -- the final conversion is a widen.
233 | rep1 < rep2 && rep2 > rep3 ->
234 cmmMachOpFold (intconv signed1 rep1 rep3) [x]
235 -- Nested widenings: collapse if the signedness is the same
236 | rep1 < rep2 && rep2 < rep3 && signed1 == signed2 ->
237 cmmMachOpFold (intconv signed1 rep1 rep3) [x]
238 -- Nested narrowings: collapse
239 | rep1 > rep2 && rep2 > rep3 ->
240 cmmMachOpFold (MO_U_Conv rep1 rep3) [x]
242 CmmMachOp conv_outer args
-- Classify an integer<->integer conversion as (from, to, signedness);
-- Nothing for floating-point conversions.
244 isIntConversion (MO_U_Conv rep1 rep2)
245 | not (isFloatingRep rep1) && not (isFloatingRep rep2)
246 = Just (rep1,rep2,False)
247 isIntConversion (MO_S_Conv rep1 rep2)
248 | not (isFloatingRep rep1) && not (isFloatingRep rep2)
249 = Just (rep1,rep2,True)
250 isIntConversion _ = Nothing
-- Rebuild a conversion op with the requested signedness.
252 intconv True = MO_S_Conv
253 intconv False = MO_U_Conv
255 -- ToDo: a narrow of a load can be collapsed into a narrow load, right?
256 -- but what if the architecture only supports word-sized loads, should
257 -- we do the transformation anyway?
-- Both operands are integer literals: fold the binary operation.
259 cmmMachOpFold mop args@[CmmLit (CmmInt x xrep), CmmLit (CmmInt y _)]
261 -- for comparisons: don't forget to narrow the arguments before
262 -- comparing, since they might be out of range.
263 MO_Eq r -> CmmLit (CmmInt (if x_u == y_u then 1 else 0) wordRep)
264 MO_Ne r -> CmmLit (CmmInt (if x_u /= y_u then 1 else 0) wordRep)
266 MO_U_Gt r -> CmmLit (CmmInt (if x_u > y_u then 1 else 0) wordRep)
267 MO_U_Ge r -> CmmLit (CmmInt (if x_u >= y_u then 1 else 0) wordRep)
268 MO_U_Lt r -> CmmLit (CmmInt (if x_u < y_u then 1 else 0) wordRep)
269 MO_U_Le r -> CmmLit (CmmInt (if x_u <= y_u then 1 else 0) wordRep)
271 MO_S_Gt r -> CmmLit (CmmInt (if x_s > y_s then 1 else 0) wordRep)
272 MO_S_Ge r -> CmmLit (CmmInt (if x_s >= y_s then 1 else 0) wordRep)
273 MO_S_Lt r -> CmmLit (CmmInt (if x_s < y_s then 1 else 0) wordRep)
274 MO_S_Le r -> CmmLit (CmmInt (if x_s <= y_s then 1 else 0) wordRep)
276 MO_Add r -> CmmLit (CmmInt (x + y) r)
277 MO_Sub r -> CmmLit (CmmInt (x - y) r)
278 MO_Mul r -> CmmLit (CmmInt (x * y) r)
-- only fold division when the divisor is provably non-zero; otherwise
-- fall through and leave the expression alone
279 MO_S_Quot r | y /= 0 -> CmmLit (CmmInt (x `quot` y) r)
280 MO_S_Rem r | y /= 0 -> CmmLit (CmmInt (x `rem` y) r)
282 MO_And r -> CmmLit (CmmInt (x .&. y) r)
283 MO_Or r -> CmmLit (CmmInt (x .|. y) r)
284 MO_Xor r -> CmmLit (CmmInt (x `xor` y) r)
286 MO_Shl r -> CmmLit (CmmInt (x `shiftL` fromIntegral y) r)
287 MO_U_Shr r -> CmmLit (CmmInt (x_u `shiftR` fromIntegral y) r)
288 MO_S_Shr r -> CmmLit (CmmInt (x `shiftR` fromIntegral y) r)
290 other -> CmmMachOp mop args
299 -- When possible, shift the constants to the right-hand side, so that we
300 -- can match for strength reductions. Note that the code generator will
301 -- also assume that constants have been shifted to the right when
-- A literal on the left of a commutative op: swap the operands and
-- re-fold, so later equations only need to match literals on the right.
304 cmmMachOpFold op [x@(CmmLit _), y]
305 | not (isLit y) && isCommutableMachOp op
306 = cmmMachOpFold op [y, x]
308 -- Turn (a+b)+c into a+(b+c) where possible. Because literals are
309 -- moved to the right, it is more likely that we will find
310 -- opportunities for constant folding when the expression is
313 -- ToDo: this appears to introduce a quadratic behaviour due to the
314 -- nested cmmMachOpFold. Can we fix this?
316 -- Why do we check isLit arg1? If arg1 is a lit, it means that arg2
317 -- is also a lit (otherwise arg1 would be on the right). If we
318 -- put arg1 on the left of the rearranged expression, we'll get into a
319 -- loop: (x1+x2)+x3 => x1+(x2+x3) => (x2+x3)+x1 => x2+(x3+x1) ...
321 -- Also don't do it if arg1 is PicBaseReg, so that we don't separate the
322 -- PicBaseReg from the corresponding label (or label difference).
324 cmmMachOpFold mop1 [CmmMachOp mop2 [arg1,arg2], arg3]
325 | mop1 == mop2 && isAssociativeMachOp mop1
326 && not (isLit arg1) && not (isPicReg arg1)
327 = cmmMachOpFold mop1 [arg1, cmmMachOpFold mop2 [arg2,arg3]]
329 -- Make a RegOff if we can
-- reg +/- literal collapses into CmmRegOff; the literal is sign-
-- narrowed at its rep before being used as an offset.
330 cmmMachOpFold (MO_Add _) [CmmReg reg, CmmLit (CmmInt n rep)]
331 = CmmRegOff reg (fromIntegral (narrowS rep n))
332 cmmMachOpFold (MO_Add _) [CmmRegOff reg off, CmmLit (CmmInt n rep)]
333 = CmmRegOff reg (off + fromIntegral (narrowS rep n))
334 cmmMachOpFold (MO_Sub _) [CmmReg reg, CmmLit (CmmInt n rep)]
335 = CmmRegOff reg (- fromIntegral (narrowS rep n))
336 cmmMachOpFold (MO_Sub _) [CmmRegOff reg off, CmmLit (CmmInt n rep)]
337 = CmmRegOff reg (off - fromIntegral (narrowS rep n))
339 -- Fold label(+/-)offset into a CmmLit where possible
341 cmmMachOpFold (MO_Add _) [CmmLit (CmmLabel lbl), CmmLit (CmmInt i rep)]
342 = CmmLit (CmmLabelOff lbl (fromIntegral (narrowU rep i)))
343 cmmMachOpFold (MO_Add _) [CmmLit (CmmInt i rep), CmmLit (CmmLabel lbl)]
344 = CmmLit (CmmLabelOff lbl (fromIntegral (narrowU rep i)))
345 cmmMachOpFold (MO_Sub _) [CmmLit (CmmLabel lbl), CmmLit (CmmInt i rep)]
346 = CmmLit (CmmLabelOff lbl (fromIntegral (negate (narrowU rep i))))
349 -- Comparison of literal with narrowed/widened operand: perform
350 -- the comparison at a different width, as long as the literal is
353 #if i386_TARGET_ARCH || x86_64_TARGET_ARCH
354 -- powerPC NCG has a TODO for I8/I16 comparisons, so don't try
356 cmmMachOpFold cmp [CmmMachOp conv [x], CmmLit (CmmInt i _)]
357 | Just (rep, narrow) <- maybe_conversion conv,
358 Just narrow_cmp <- maybe_comparison cmp rep,
359 let narrow_i = narrow rep i,
361 = cmmMachOpFold narrow_cmp [x, CmmLit (CmmInt narrow_i rep)]
-- The source rep and narrowing function of an integer conversion.
363 maybe_conversion (MO_U_Conv from _) = Just (from, narrowU)
364 maybe_conversion (MO_S_Conv from _) = Just (from, narrowS)
365 maybe_conversion _ = Nothing
-- The same comparison operator, re-targeted at the narrower rep.
367 maybe_comparison (MO_U_Gt _) rep = Just (MO_U_Gt rep)
368 maybe_comparison (MO_U_Ge _) rep = Just (MO_U_Ge rep)
369 maybe_comparison (MO_U_Lt _) rep = Just (MO_U_Lt rep)
370 maybe_comparison (MO_U_Le _) rep = Just (MO_U_Le rep)
371 maybe_comparison (MO_S_Gt _) rep = Just (MO_S_Gt rep)
372 maybe_comparison (MO_S_Ge _) rep = Just (MO_S_Ge rep)
373 maybe_comparison (MO_S_Lt _) rep = Just (MO_S_Lt rep)
374 maybe_comparison (MO_S_Le _) rep = Just (MO_S_Le rep)
375 maybe_comparison (MO_Eq _) rep = Just (MO_Eq rep)
376 maybe_comparison _ _ = Nothing
380 -- We can often do something with constants of 0 and 1 ...
-- x `op` 0: simplify, exploiting that a comparison sub-expression is
-- known to evaluate to 0 or 1.
382 cmmMachOpFold mop args@[x, y@(CmmLit (CmmInt 0 _))]
393 MO_Ne r | isComparisonExpr x -> x
394 MO_Eq r | Just x' <- maybeInvertConditionalExpr x -> x'
395 MO_U_Gt r | isComparisonExpr x -> x
396 MO_S_Gt r | isComparisonExpr x -> x
397 MO_U_Lt r | isComparisonExpr x -> CmmLit (CmmInt 0 wordRep)
398 MO_S_Lt r | isComparisonExpr x -> CmmLit (CmmInt 0 wordRep)
399 MO_U_Ge r | isComparisonExpr x -> CmmLit (CmmInt 1 wordRep)
400 MO_S_Ge r | isComparisonExpr x -> CmmLit (CmmInt 1 wordRep)
401 MO_U_Le r | Just x' <- maybeInvertConditionalExpr x -> x'
402 MO_S_Le r | Just x' <- maybeInvertConditionalExpr x -> x'
403 other -> CmmMachOp mop args
-- x `op` 1: the dual set of simplifications against the constant one.
405 cmmMachOpFold mop args@[x, y@(CmmLit (CmmInt 1 rep))]
410 MO_S_Rem r -> CmmLit (CmmInt 0 rep)
411 MO_U_Rem r -> CmmLit (CmmInt 0 rep)
412 MO_Ne r | Just x' <- maybeInvertConditionalExpr x -> x'
413 MO_Eq r | isComparisonExpr x -> x
414 MO_U_Lt r | Just x' <- maybeInvertConditionalExpr x -> x'
415 MO_S_Lt r | Just x' <- maybeInvertConditionalExpr x -> x'
416 MO_U_Gt r | isComparisonExpr x -> CmmLit (CmmInt 0 wordRep)
417 MO_S_Gt r | isComparisonExpr x -> CmmLit (CmmInt 0 wordRep)
418 MO_U_Le r | isComparisonExpr x -> CmmLit (CmmInt 1 wordRep)
419 MO_S_Le r | isComparisonExpr x -> CmmLit (CmmInt 1 wordRep)
420 MO_U_Ge r | isComparisonExpr x -> x
421 MO_S_Ge r | isComparisonExpr x -> x
422 other -> CmmMachOp mop args
424 -- Now look for multiplication/division by powers of 2 (integers).
426 cmmMachOpFold mop args@[x, y@(CmmLit (CmmInt n _))]
-- multiply by 2^p  ==>  shift left by p
429 | Just p <- exactLog2 n ->
430 CmmMachOp (MO_Shl rep) [x, CmmLit (CmmInt p rep)]
-- signed divide by 2^p: strength-reduce to the shift sequence below
432 | Just p <- exactLog2 n,
433 CmmReg _ <- x -> -- We duplicate x below, hence require
434 -- it is a reg. FIXME: remove this restriction.
435 -- shift right is not the same as quot, because it rounds
436 -- to minus infinity, whereas quot rounds toward zero.
437 -- To fix this up, we add one less than the divisor to the
438 -- dividend if it is a negative number.
440 -- to avoid a test/jump, we use the following sequence:
441 -- x1 = x >> word_size-1 (all 1s if -ve, all 0s if +ve)
442 -- x2 = y & (divisor-1)
443 -- result = (x+x2) >>= log2(divisor)
444 -- this could be done a bit more simply using conditional moves,
445 -- but we're processor independent here.
447 -- we optimise the divide by 2 case slightly, generating
448 -- x1 = x >> word_size-1 (unsigned)
449 -- return = (x + x1) >>= log2(divisor)
451 bits = fromIntegral (machRepBitWidth rep) - 1
452 shr = if p == 1 then MO_U_Shr rep else MO_S_Shr rep
453 x1 = CmmMachOp shr [x, CmmLit (CmmInt bits rep)]
454 x2 = if p == 1 then x1 else
455 CmmMachOp (MO_And rep) [x1, CmmLit (CmmInt (n-1) rep)]
456 x3 = CmmMachOp (MO_Add rep) [x, x2]
458 CmmMachOp (MO_S_Shr rep) [x3, CmmLit (CmmInt p rep)]
462 unchanged = CmmMachOp mop args
464 -- Anything else is just too hard.
466 cmmMachOpFold mop args = CmmMachOp mop args
468 -- -----------------------------------------------------------------------------
471 -- This algorithm for determining the $\log_2$ of exact powers of 2 comes
472 -- from GCC. It requires bit manipulation primitives, and we use GHC
473 -- extensions. Tough.
475 -- Used to be in MachInstrs --SDM.
476 -- ToDo: remove use of unboxery --SDM.
-- | @exactLog2 n@ is @Just p@ when @n == 2^p@ for some @p@, with @n@
-- restricted to the range (0, 2^31); Nothing otherwise.  The
-- single-set-bit test is the classic @(x .&. negate x) == x@ trick,
-- written here with unboxed word primitives.
481 exactLog2 :: Integer -> Maybe Integer
483 = if (x <= 0 || x >= 2147483648) then
486 case fromInteger x of { I# x# ->
487 if (w2i ((i2w x#) `and#` (i2w (0# -# x#))) /=# x#) then
490 Just (toInteger (I# (pow2 x#)))
-- count how many unsigned right-shifts take x# down to 1
493 pow2 x# | x# ==# 1# = 0#
494 | otherwise = 1# +# pow2 (w2i (i2w x# `shiftRL#` 1#))
497 -- -----------------------------------------------------------------------------
498 -- widening / narrowing
-- | Truncate an 'Integer' to the unsigned range of the given integer
-- 'MachRep' (i.e. keep only the low 8/16/32/64 bits, zero-extended).
-- Panics on floating-point (or otherwise unhandled) reps.
narrowU :: MachRep -> Integer -> Integer
narrowU I8  x = fromIntegral (fromIntegral x :: Word8)
narrowU I16 x = fromIntegral (fromIntegral x :: Word16)
narrowU I32 x = fromIntegral (fromIntegral x :: Word32)
narrowU I64 x = fromIntegral (fromIntegral x :: Word64)
narrowU _ _ = panic "narrowU"  -- was "narrowTo": no function of that name exists
-- | Truncate an 'Integer' to the signed range of the given integer
-- 'MachRep' (i.e. keep the low 8/16/32/64 bits, sign-extended).
-- Panics on floating-point (or otherwise unhandled) reps.
narrowS :: MachRep -> Integer -> Integer
narrowS I8  x = fromIntegral (fromIntegral x :: Int8)
narrowS I16 x = fromIntegral (fromIntegral x :: Int16)
narrowS I32 x = fromIntegral (fromIntegral x :: Int32)
narrowS I64 x = fromIntegral (fromIntegral x :: Int64)
narrowS _ _ = panic "narrowS"  -- was "narrowTo": no function of that name exists
514 -- -----------------------------------------------------------------------------
518 This is a simple pass that replaces tail-recursive functions like this:
533 the latter generates better C code, because the C compiler treats it
534 like a loop, and brings full loop optimisation to bear.
536 In my measurements this makes little or no difference to anything
537 except factorial, but what the hell.
-- | For the via-C back-end: rewrite tail-calls back to this procedure's
-- own entry/info label into local branches to the first basic block, so
-- that the C compiler sees a loop it can optimise (see note above).
540 cmmLoopifyForC :: RawCmmTop -> RawCmmTop
541 cmmLoopifyForC p@(CmmProc info entry_lbl [] blocks@(BasicBlock top_id _ : _))
542 | null info = p -- only if there's an info table, ignore case alts
544 -- pprTrace "jump_lbl" (ppr jump_lbl <+> ppr entry_lbl) $
545 CmmProc info entry_lbl [] blocks'
546 where blocks' = [ BasicBlock id (map do_stmt stmts)
547 | BasicBlock id stmts <- blocks ]
-- A jump to our own label: presumably rewritten to a branch to
-- top_id -- the replacement RHS is elided here, TODO confirm.
549 do_stmt (CmmJump (CmmLit (CmmLabel lbl)) _) | lbl == jump_lbl
-- With tables-next-to-code the code jumps to the info label, not
-- the entry label, so compare against the right one.
553 jump_lbl | tablesNextToCode = entryLblToInfoLbl entry_lbl
554 | otherwise = entry_lbl
556 cmmLoopifyForC top = top
558 -- -----------------------------------------------------------------------------
-- Is this expression a bare literal?
561 isLit (CmmLit _) = True
-- | Is this expression the application of a comparison MachOp?
-- (Such expressions are known to evaluate to 0 or 1.)
isComparisonExpr :: CmmExpr -> Bool
isComparisonExpr expr
  | CmmMachOp op _ <- expr = isComparisonMachOp op
  | otherwise              = False
-- | Logically invert a comparison expression: if the expression applies
-- a comparison operator that has an inverse, rebuild it with the
-- inverted operator; otherwise Nothing.
maybeInvertConditionalExpr :: CmmExpr -> Maybe CmmExpr
maybeInvertConditionalExpr expr = case expr of
  CmmMachOp op args
    | Just op' <- maybeInvertComparison op -> Just (CmmMachOp op' args)
  _ -> Nothing
-- Is this expression a read of the PIC base register?  Used to avoid
-- reassociating PicBaseReg away from its label (see above).
573 isPicReg (CmmReg (CmmGlobal PicBaseReg)) = True