-- The above warning suppression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and fix
-- any warnings in the module. See
--     http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#Warnings
-----------------------------------------------------------------------------
-- (c) The University of Glasgow 2006
-----------------------------------------------------------------------------

        cmmEliminateDeadBlocks,

#include "HsVersions.h"

import Compiler.Hoopl hiding (Unique)
-- -----------------------------------------------------------------------------
-- Eliminates dead blocks
{-
We repeatedly expand the set of reachable blocks until we hit a
fixpoint, and then prune any blocks that were not in this set.  This is
actually a required optimization, as dead blocks can cause problems
for invariants in the linear register allocator (and possibly other
places).
-}
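-- A minimal standalone sketch of the same fixpoint computation, written
-- against Data.Map/Data.Set rather than BlockMap/BlockSet (illustrative
-- only; the names below are hypothetical and not part of this module):
--
--     import qualified Data.Map as M
--     import qualified Data.Set as S
--
--     reachable :: Ord a => M.Map a (S.Set a) -> a -> S.Set a
--     reachable succs entry = go (S.singleton entry)
--       where
--         go seen
--           | seen' == seen = seen        -- fixpoint reached
--           | otherwise     = go seen'
--           where seen' = S.unions (seen : [ M.findWithDefault S.empty b succs
--                                          | b <- S.toList seen ])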
-- Deep fold over statements could probably be abstracted out, but it
-- might not be worth the effort since OldCmm is moribund
cmmEliminateDeadBlocks :: [CmmBasicBlock] -> [CmmBasicBlock]
cmmEliminateDeadBlocks [] = []
cmmEliminateDeadBlocks blocks@(BasicBlock base_id _:_) =
    let -- Calculate what's reachable from what block
        -- We have to do a deep fold into CmmExpr because
        -- there may be a BlockId in the CmmBlock literal.
        reachableMap = foldl f emptyBlockMap blocks
            where f m (BasicBlock block_id stmts) = mapInsert block_id (reachableFrom stmts) m
        reachableFrom stmts = foldl stmt emptyBlockSet stmts
            where
              stmt m (CmmNop) = m
              stmt m (CmmComment _) = m
              stmt m (CmmAssign _ e) = expr m e
              stmt m (CmmStore e1 e2) = expr (expr m e1) e2
              stmt m (CmmCall c _ as _ _) = f (actuals m as) c
                  where f m (CmmCallee e _) = expr m e
                        f m (CmmPrim _) = m
              stmt m (CmmBranch b) = setInsert b m
              stmt m (CmmCondBranch e b) = setInsert b (expr m e)
              stmt m (CmmSwitch e bs) = foldl (flip setInsert) (expr m e) (catMaybes bs)
              stmt m (CmmJump e as) = expr (actuals m as) e
              stmt m (CmmReturn as) = actuals m as
              actuals m as = foldl (\m h -> expr m (hintlessCmm h)) m as
              expr m (CmmLit l) = lit m l
              expr m (CmmLoad e _) = expr m e
              expr m (CmmReg _) = m
              expr m (CmmMachOp _ es) = foldl expr m es
              expr m (CmmStackSlot _ _) = m
              expr m (CmmRegOff _ _) = m
              lit m (CmmBlock b) = setInsert b m
              lit m _ = m
        -- Expand the reachable set until we hit a fixpoint
        initReachable = setSingleton base_id :: BlockSet
        expandReachable old_set new_set =
            if setSize new_set > setSize old_set
                then expandReachable new_set $ setFold
                        (\x s -> maybe setEmpty id (mapLookup x reachableMap) `setUnion` s)
                        new_set
                        (setDifference new_set old_set)
                else new_set -- fixpoint achieved
        reachable = expandReachable setEmpty initReachable
    in filter (\(BasicBlock block_id _) -> setMember block_id reachable) blocks
-- -----------------------------------------------------------------------------
-- The mini-inliner

{-
This pass inlines assignments to temporaries that are used just
once.  It works as follows:

 - count uses of each temporary
 - for each temporary that occurs just once:
     - attempt to push it forward to the statement that uses it
     - only push forward past assignments to other temporaries
       (assumes that temporaries are single-assignment)
     - if we reach the statement that uses it, inline the rhs
       and delete the original assignment.
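For example (a hypothetical fragment, not taken from compiler output):

    _c1::I64 = R1 + 8;
    I64[Sp - 16] = _c1::I64;

becomes

    I64[Sp - 16] = R1 + 8;

because _c1 is assigned once, used once, and nothing between the
assignment and the use writes to R1.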
[N.B. In the Quick C-- compiler, this optimization is achieved by a
 combination of two dataflow passes: forward substitution (peephole
 optimization) and dead-assignment elimination.  ---NR]

Possible generalisations: here is an example from factorial

  if (_smi != 0) goto cmK;
  ...
  jump Fac_zdwfac_info;

We want to inline _smi and _smn.  To inline _smn:

 - we must be able to push forward past assignments to global regs.
   We can do this if the rhs of the assignment we are pushing
   forward doesn't refer to the global reg being assigned to; easy
   to test.

To inline _smi:

 - It is a trivial replacement, reg for reg, but it occurs more than
   once.
 - We can inline trivial assignments even if the temporary occurs
   more than once, as long as we don't eliminate the original assignment
   (this doesn't help much on its own).
 - We need to be able to propagate the assignment forward through jumps;
   if we did this, we would find that it can be inlined safely in all
   but one of the jumps, but we need to be careful not to end up with
   the assignment happening twice (at least not in a loop).
-}
countUses :: UserOfLocalRegs a => a -> UniqFM Int
countUses a = foldRegsUsed (\m r -> addToUFM m r (count m r + 1)) emptyUFM a
  where count m r = lookupWithDefaultUFM m (0::Int) r
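-- For example (hypothetical uniques): given the statements
--     _c1 = _a + _a;  I64[Sp] = _c1;
-- countUses maps _a's Unique to 2 and _c1's Unique to 1, so only _c1 is a
-- candidate for inlining below.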
cmmMiniInline :: [CmmBasicBlock] -> [CmmBasicBlock]
cmmMiniInline blocks = map do_inline blocks
  where do_inline (BasicBlock id stmts)
          = BasicBlock id (cmmMiniInlineStmts (countUses blocks) stmts)
cmmMiniInlineStmts :: UniqFM Int -> [CmmStmt] -> [CmmStmt]
cmmMiniInlineStmts uses [] = []
cmmMiniInlineStmts uses (stmt@(CmmAssign (CmmLocal (LocalReg u _)) expr) : stmts)
        -- not used at all: just discard this assignment
  | Nothing <- lookupUFM uses u
  = cmmMiniInlineStmts uses stmts

        -- used once: try to inline at the use site
  | Just 1 <- lookupUFM uses u,
    Just stmts' <- lookForInline u expr stmts
  =
#ifdef NCG_DEBUG
     trace ("nativeGen: inlining " ++ showSDoc (pprStmt stmt)) $
#endif
     cmmMiniInlineStmts uses stmts'

cmmMiniInlineStmts uses (stmt:stmts)
  = stmt : cmmMiniInlineStmts uses stmts
lookForInline u expr stmts = lookForInline' u expr regset stmts
    where regset = foldRegsUsed extendRegSet emptyRegSet expr

lookForInline' u expr regset (stmt : rest)
  | Just 1 <- lookupUFM (countUses stmt) u, ok_to_inline
  = Just (inlineStmt u expr stmt : rest)

  | ok_to_skip
  = case lookForInline' u expr regset rest of
        Nothing    -> Nothing
        Just stmts -> Just (stmt:stmts)

  | otherwise
  = Nothing

  where
        -- we don't inline into CmmCall if the expression refers to global
        -- registers.  This is a HACK to avoid global registers clashing with
        -- C argument-passing registers; really the back-end ought to be able
        -- to handle it properly, but currently neither PprC nor the NCG can
        -- do it.  See also CgForeignCall:load_args_into_temps.
        ok_to_inline = case stmt of
                         CmmCall{} -> hasNoGlobalRegs expr
                         _ -> True

        -- Expressions aren't side-effecting.  Temporaries may or may not
        -- be single-assignment depending on the source (the old code
        -- generator creates single-assignment code, but hand-written Cmm
        -- and Cmm from the new code generator are not single-assignment.)
        -- So we do an extra check to make sure that the register being
        -- changed is not one we were relying on.  I don't know how much of a
        -- performance hit this is (we have to create a regset for every
        -- instruction.) -- EZY
        ok_to_skip = case stmt of
                       CmmAssign (CmmLocal r@(LocalReg u' _)) rhs | u' /= u && not (r `elemRegSet` regset) -> True
                       CmmAssign g@(CmmGlobal _) rhs -> not (g `regUsedIn` expr)
                       _other -> False
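-- For example, when trying to push  _x = R1  forward, we may skip over an
-- assignment to an unrelated temporary, but we must stop at  R1 = 0:
-- inlining _x past that assignment would read the clobbered value of R1.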
inlineStmt :: Unique -> CmmExpr -> CmmStmt -> CmmStmt
inlineStmt u a (CmmAssign r e) = CmmAssign r (inlineExpr u a e)
inlineStmt u a (CmmStore e1 e2) = CmmStore (inlineExpr u a e1) (inlineExpr u a e2)
inlineStmt u a (CmmCall target regs es srt ret)
   = CmmCall (infn target) regs es' srt ret
   where infn (CmmCallee fn cconv) = CmmCallee (inlineExpr u a fn) cconv
         infn (CmmPrim p) = CmmPrim p
         es' = [ (CmmHinted (inlineExpr u a e) hint) | (CmmHinted e hint) <- es ]
inlineStmt u a (CmmCondBranch e d) = CmmCondBranch (inlineExpr u a e) d
inlineStmt u a (CmmSwitch e d) = CmmSwitch (inlineExpr u a e) d
inlineStmt u a (CmmJump e d) = CmmJump (inlineExpr u a e) d
inlineStmt u a other_stmt = other_stmt
inlineExpr :: Unique -> CmmExpr -> CmmExpr -> CmmExpr
inlineExpr u a e@(CmmReg (CmmLocal (LocalReg u' _)))
  | u == u'   = a
  | otherwise = e
inlineExpr u a e@(CmmRegOff (CmmLocal (LocalReg u' rep)) off)
  | u == u'   = CmmMachOp (MO_Add width) [a, CmmLit (CmmInt (fromIntegral off) width)]
  | otherwise = e
  where
    width = typeWidth rep
inlineExpr u a (CmmLoad e rep) = CmmLoad (inlineExpr u a e) rep
inlineExpr u a (CmmMachOp op es) = CmmMachOp op (map (inlineExpr u a) es)
inlineExpr u a other_expr = other_expr
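-- For example (hypothetical): inlining  _x = R1 + 8  into a use site
-- CmmRegOff (CmmLocal _x) 4  produces an MO_Add of the inlined rhs and the
-- offset, i.e. (R1 + 8) + 4 at the register's width, since a RegOff can
-- only wrap a register, not an arbitrary expression.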
-- -----------------------------------------------------------------------------
-- MachOp constant folder

-- Now, try to constant-fold the MachOps.  The arguments have already
-- been optimized and folded.

cmmMachOpFold
    :: MachOp       -- The operation from a CmmMachOp
    -> [CmmExpr]    -- The optimized arguments
    -> CmmExpr
cmmMachOpFold op arg@[CmmLit (CmmInt x rep)]
  = case op of
      MO_S_Neg r -> CmmLit (CmmInt (-x) rep)
      MO_Not r   -> CmmLit (CmmInt (complement x) rep)

        -- these are interesting: we must first narrow to the
        -- "from" type, in order to truncate to the correct size.
        -- The final narrow/widen to the destination type
        -- is implicit in the CmmLit.
      MO_SF_Conv from to -> CmmLit (CmmFloat (fromInteger x) to)
      MO_SS_Conv from to -> CmmLit (CmmInt (narrowS from x) to)
      MO_UU_Conv from to -> CmmLit (CmmInt (narrowU from x) to)

      _ -> panic "cmmMachOpFold: unknown unary op"
-- Eliminate conversion NOPs
cmmMachOpFold (MO_SS_Conv rep1 rep2) [x] | rep1 == rep2 = x
cmmMachOpFold (MO_UU_Conv rep1 rep2) [x] | rep1 == rep2 = x

-- Eliminate nested conversions where possible
cmmMachOpFold conv_outer args@[CmmMachOp conv_inner [x]]
  | Just (rep1,rep2,signed1) <- isIntConversion conv_inner,
    Just (_,   rep3,signed2) <- isIntConversion conv_outer
  = case () of
      -- widen then narrow to the same size is a nop
      _ | rep1 < rep2 && rep1 == rep3 -> x
        -- Widen then narrow to different size: collapse to single conversion
        -- but remember to use the signedness from the widening, just in case
        -- the final conversion is a widen.
        | rep1 < rep2 && rep2 > rep3 ->
            cmmMachOpFold (intconv signed1 rep1 rep3) [x]
        -- Nested widenings: collapse if the signedness is the same
        | rep1 < rep2 && rep2 < rep3 && signed1 == signed2 ->
            cmmMachOpFold (intconv signed1 rep1 rep3) [x]
        -- Nested narrowings: collapse
        | rep1 > rep2 && rep2 > rep3 ->
            cmmMachOpFold (MO_UU_Conv rep1 rep3) [x]
        | otherwise ->
            CmmMachOp conv_outer args
  where
        isIntConversion (MO_UU_Conv rep1 rep2)
            = Just (rep1,rep2,False)
        isIntConversion (MO_SS_Conv rep1 rep2)
            = Just (rep1,rep2,True)
        isIntConversion _ = Nothing

        intconv True  = MO_SS_Conv
        intconv False = MO_UU_Conv
-- ToDo: a narrow of a load can be collapsed into a narrow load, right?
-- but what if the architecture only supports word-sized loads, should
-- we do the transformation anyway?
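-- For example, under the nested-conversion rules above, a widen of a widen
-- such as  MO_SS_Conv W16 W32 (MO_SS_Conv W8 W16 x)  collapses to
-- MO_SS_Conv W8 W32 x, and a widen followed by a narrow back to W8 is
-- dropped altogether.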
cmmMachOpFold mop args@[CmmLit (CmmInt x xrep), CmmLit (CmmInt y _)]
  = case mop of
        -- for comparisons: don't forget to narrow the arguments before
        -- comparing, since they might be out of range.
        MO_Eq r   -> CmmLit (CmmInt (if x_u == y_u then 1 else 0) wordWidth)
        MO_Ne r   -> CmmLit (CmmInt (if x_u /= y_u then 1 else 0) wordWidth)

        MO_U_Gt r -> CmmLit (CmmInt (if x_u >  y_u then 1 else 0) wordWidth)
        MO_U_Ge r -> CmmLit (CmmInt (if x_u >= y_u then 1 else 0) wordWidth)
        MO_U_Lt r -> CmmLit (CmmInt (if x_u <  y_u then 1 else 0) wordWidth)
        MO_U_Le r -> CmmLit (CmmInt (if x_u <= y_u then 1 else 0) wordWidth)

        MO_S_Gt r -> CmmLit (CmmInt (if x_s >  y_s then 1 else 0) wordWidth)
        MO_S_Ge r -> CmmLit (CmmInt (if x_s >= y_s then 1 else 0) wordWidth)
        MO_S_Lt r -> CmmLit (CmmInt (if x_s <  y_s then 1 else 0) wordWidth)
        MO_S_Le r -> CmmLit (CmmInt (if x_s <= y_s then 1 else 0) wordWidth)

        MO_Add r -> CmmLit (CmmInt (x + y) r)
        MO_Sub r -> CmmLit (CmmInt (x - y) r)
        MO_Mul r -> CmmLit (CmmInt (x * y) r)
        MO_U_Quot r | y /= 0 -> CmmLit (CmmInt (x_u `quot` y_u) r)
        MO_U_Rem  r | y /= 0 -> CmmLit (CmmInt (x_u `rem`  y_u) r)
        MO_S_Quot r | y /= 0 -> CmmLit (CmmInt (x `quot` y) r)
        MO_S_Rem  r | y /= 0 -> CmmLit (CmmInt (x `rem` y) r)

        MO_And r -> CmmLit (CmmInt (x .&. y) r)
        MO_Or  r -> CmmLit (CmmInt (x .|. y) r)
        MO_Xor r -> CmmLit (CmmInt (x `xor` y) r)

        MO_Shl   r -> CmmLit (CmmInt (x   `shiftL` fromIntegral y) r)
        MO_U_Shr r -> CmmLit (CmmInt (x_u `shiftR` fromIntegral y) r)
        MO_S_Shr r -> CmmLit (CmmInt (x   `shiftR` fromIntegral y) r)

        other -> CmmMachOp mop args

   where
        x_u = narrowU xrep x
        y_u = narrowU xrep y
        x_s = narrowS xrep x
        y_s = narrowS xrep y
-- When possible, shift the constants to the right-hand side, so that we
-- can match for strength reductions.  Note that the code generator will
-- also assume that constants have been shifted to the right when
-- possible.

cmmMachOpFold op [x@(CmmLit _), y]
   | not (isLit y) && isCommutableMachOp op
   = cmmMachOpFold op [y, x]
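-- For example,  4 + R1  is rewritten to  R1 + 4, so the strength-reduction
-- and RegOff rules below only need to look for a literal in the second
-- operand.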
-- Turn (a+b)+c into a+(b+c) where possible.  Because literals are
-- moved to the right, it is more likely that we will find
-- opportunities for constant folding when the expression is
-- right-associated.
--
-- ToDo: this appears to introduce a quadratic behaviour due to the
-- nested cmmMachOpFold.  Can we fix this?
--
-- Why do we check isLit arg1?  If arg1 is a lit, it means that arg2
-- is also a lit (otherwise arg1 would be on the right).  If we
-- put arg1 on the left of the rearranged expression, we'll get into a
-- loop:  (x1+x2)+x3 => x1+(x2+x3) => (x2+x3)+x1 => x2+(x3+x1) ...
--
-- Also don't do it if arg1 is PicBaseReg, so that we don't separate the
-- PicBaseReg from the corresponding label (or label difference).
cmmMachOpFold mop1 [CmmMachOp mop2 [arg1,arg2], arg3]
   |  mop2 `associates_with` mop1
      && not (isLit arg1) && not (isPicReg arg1)
   = cmmMachOpFold mop2 [arg1, cmmMachOpFold mop1 [arg2,arg3]]
   where
     MO_Add{} `associates_with` MO_Sub{} = True
     mop1 `associates_with` mop2 =
        mop1 == mop2 && isAssociativeMachOp mop1

-- special case: (a - b) + c  ==>  a + (c - b)
cmmMachOpFold mop1@(MO_Add{}) [CmmMachOp mop2@(MO_Sub{}) [arg1,arg2], arg3]
   | not (isLit arg1) && not (isPicReg arg1)
   = cmmMachOpFold mop1 [arg1, cmmMachOpFold mop2 [arg3,arg2]]
-- Make a RegOff if we can
cmmMachOpFold (MO_Add _) [CmmReg reg, CmmLit (CmmInt n rep)]
  = CmmRegOff reg (fromIntegral (narrowS rep n))
cmmMachOpFold (MO_Add _) [CmmRegOff reg off, CmmLit (CmmInt n rep)]
  = CmmRegOff reg (off + fromIntegral (narrowS rep n))
cmmMachOpFold (MO_Sub _) [CmmReg reg, CmmLit (CmmInt n rep)]
  = CmmRegOff reg (- fromIntegral (narrowS rep n))
cmmMachOpFold (MO_Sub _) [CmmRegOff reg off, CmmLit (CmmInt n rep)]
  = CmmRegOff reg (off - fromIntegral (narrowS rep n))
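-- For example,  R1 + 4  becomes  CmmRegOff R1 4, and adding a further
-- literal just adjusts the offset:  (R1 + 4) + 4  ends up as  CmmRegOff R1 8.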
-- Fold label(+/-)offset into a CmmLit where possible

cmmMachOpFold (MO_Add _) [CmmLit (CmmLabel lbl), CmmLit (CmmInt i rep)]
  = CmmLit (CmmLabelOff lbl (fromIntegral (narrowU rep i)))
cmmMachOpFold (MO_Add _) [CmmLit (CmmInt i rep), CmmLit (CmmLabel lbl)]
  = CmmLit (CmmLabelOff lbl (fromIntegral (narrowU rep i)))
cmmMachOpFold (MO_Sub _) [CmmLit (CmmLabel lbl), CmmLit (CmmInt i rep)]
  = CmmLit (CmmLabelOff lbl (fromIntegral (negate (narrowU rep i))))
-- Comparison of literal with widened operand: perform the comparison
-- at the smaller width, as long as the literal is within range.

-- We can't do the reverse trick, when the operand is narrowed:
-- narrowing throws away bits from the operand, there's no way to do
-- the same comparison at the larger size.

#if i386_TARGET_ARCH || x86_64_TARGET_ARCH
-- the PowerPC NCG has a TODO for I8/I16 comparisons, so don't try

cmmMachOpFold cmp [CmmMachOp conv [x], CmmLit (CmmInt i _)]
  |     -- if the operand is widened:
    Just (rep, signed, narrow_fn) <- maybe_conversion conv,
        -- and this is a comparison operation:
    Just narrow_cmp <- maybe_comparison cmp rep signed,
        -- and the literal fits in the smaller size:
    i == narrow_fn rep i
        -- then we can do the comparison at the smaller size
  = cmmMachOpFold narrow_cmp [x, CmmLit (CmmInt i rep)]
 where
    maybe_conversion (MO_UU_Conv from to)
        | to > from
        = Just (from, False, narrowU)
    maybe_conversion (MO_SS_Conv from to)
        | to > from
        = Just (from, True, narrowS)

        -- don't attempt to apply this optimisation when the source
        -- is a float; see #1916
    maybe_conversion _ = Nothing

        -- careful (#2080): if the original comparison was signed, but
        -- we were doing an unsigned widen, then we must do an
        -- unsigned comparison at the smaller size.
    maybe_comparison (MO_U_Gt _) rep _     = Just (MO_U_Gt rep)
    maybe_comparison (MO_U_Ge _) rep _     = Just (MO_U_Ge rep)
    maybe_comparison (MO_U_Lt _) rep _     = Just (MO_U_Lt rep)
    maybe_comparison (MO_U_Le _) rep _     = Just (MO_U_Le rep)
    maybe_comparison (MO_Eq   _) rep _     = Just (MO_Eq   rep)
    maybe_comparison (MO_S_Gt _) rep True  = Just (MO_S_Gt rep)
    maybe_comparison (MO_S_Ge _) rep True  = Just (MO_S_Ge rep)
    maybe_comparison (MO_S_Lt _) rep True  = Just (MO_S_Lt rep)
    maybe_comparison (MO_S_Le _) rep True  = Just (MO_S_Le rep)
    maybe_comparison (MO_S_Gt _) rep False = Just (MO_U_Gt rep)
    maybe_comparison (MO_S_Ge _) rep False = Just (MO_U_Ge rep)
    maybe_comparison (MO_S_Lt _) rep False = Just (MO_U_Lt rep)
    maybe_comparison (MO_S_Le _) rep False = Just (MO_U_Le rep)
    maybe_comparison _ _ _ = Nothing

#endif
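-- For example, on x86 the comparison-narrowing rule above turns
-- (schematically)  MO_S_Gt W32 [MO_UU_Conv W8 W32 x, 100]  into
-- MO_U_Gt W8 [x, 100]:  100 fits in W8, and because the widening was
-- unsigned, the narrowed comparison must be unsigned too (#2080).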
-- We can often do something with constants of 0 and 1 ...

cmmMachOpFold mop args@[x, y@(CmmLit (CmmInt 0 _))]
  = case mop of
        MO_Ne   r | isComparisonExpr x -> x
        MO_Eq   r | Just x' <- maybeInvertCmmExpr x -> x'
        MO_U_Gt r | isComparisonExpr x -> x
        MO_S_Gt r | isComparisonExpr x -> x
        MO_U_Lt r | isComparisonExpr x -> CmmLit (CmmInt 0 wordWidth)
        MO_S_Lt r | isComparisonExpr x -> CmmLit (CmmInt 0 wordWidth)
        MO_U_Ge r | isComparisonExpr x -> CmmLit (CmmInt 1 wordWidth)
        MO_S_Ge r | isComparisonExpr x -> CmmLit (CmmInt 1 wordWidth)
        MO_U_Le r | Just x' <- maybeInvertCmmExpr x -> x'
        MO_S_Le r | Just x' <- maybeInvertCmmExpr x -> x'
        other -> CmmMachOp mop args
cmmMachOpFold mop args@[x, y@(CmmLit (CmmInt 1 rep))]
  = case mop of
        MO_S_Rem r -> CmmLit (CmmInt 0 rep)
        MO_U_Rem r -> CmmLit (CmmInt 0 rep)
        MO_Ne   r | Just x' <- maybeInvertCmmExpr x -> x'
        MO_Eq   r | isComparisonExpr x -> x
        MO_U_Lt r | Just x' <- maybeInvertCmmExpr x -> x'
        MO_S_Lt r | Just x' <- maybeInvertCmmExpr x -> x'
        MO_U_Gt r | isComparisonExpr x -> CmmLit (CmmInt 0 wordWidth)
        MO_S_Gt r | isComparisonExpr x -> CmmLit (CmmInt 0 wordWidth)
        MO_U_Le r | isComparisonExpr x -> CmmLit (CmmInt 1 wordWidth)
        MO_S_Le r | isComparisonExpr x -> CmmLit (CmmInt 1 wordWidth)
        MO_U_Ge r | isComparisonExpr x -> x
        MO_S_Ge r | isComparisonExpr x -> x
        other -> CmmMachOp mop args
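-- For example,  (a < b) != 0  folds to  a < b,  and  x `rem` 1  folds to 0;
-- comparisons against 0 or 1 whose left operand is itself a comparison are
-- either passed through, inverted, or decided outright.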
-- Now look for multiplication/division by powers of 2 (integers).

cmmMachOpFold mop args@[x, y@(CmmLit (CmmInt n _))]
  = case mop of
        MO_Mul rep
           | Just p <- exactLog2 n ->
                 cmmMachOpFold (MO_Shl rep) [x, CmmLit (CmmInt p rep)]
        MO_U_Quot rep
           | Just p <- exactLog2 n ->
                 cmmMachOpFold (MO_U_Shr rep) [x, CmmLit (CmmInt p rep)]
        MO_S_Quot rep
           | Just p <- exactLog2 n,
             CmmReg _ <- x ->   -- We duplicate x below, hence require
                                -- it is a reg.  FIXME: remove this restriction.
                -- shift right is not the same as quot, because it rounds
                -- to minus infinity, whereas quot rounds toward zero.
                -- To fix this up, we add one less than the divisor to the
                -- dividend if it is a negative number.
                --
                -- to avoid a test/jump, we use the following sequence:
                --      x1 = x >> word_size-1  (all 1s if -ve, all 0s if +ve)
                --      x2 = y & (divisor-1)
                --      result = (x+x2) >>= log2(divisor)
                -- this could be done a bit more simply using conditional moves,
                -- but we're processor independent here.
                --
                -- we optimise the divide by 2 case slightly, generating
                --      x1 = x >> word_size-1  (unsigned)
                --      return = (x + x1) >>= log2(divisor)
             let
                bits = fromIntegral (widthInBits rep) - 1
                shr  = if p == 1 then MO_U_Shr rep else MO_S_Shr rep
                x1 = CmmMachOp shr [x, CmmLit (CmmInt bits rep)]
                x2 = if p == 1 then x1 else
                     CmmMachOp (MO_And rep) [x1, CmmLit (CmmInt (n-1) rep)]
                x3 = CmmMachOp (MO_Add rep) [x, x2]
             in
                cmmMachOpFold (MO_S_Shr rep) [x3, CmmLit (CmmInt p rep)]

        other -> unchanged
    where
        unchanged = CmmMachOp mop args

-- Anything else is just too hard.

cmmMachOpFold mop args = CmmMachOp mop args
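-- A worked instance of the signed-division sequence above, assuming a
-- 64-bit word:  x `quot` 8  (with x a register) is folded to
--      (x + ((x >> 63) & 7)) >> 3
-- where both shifts are arithmetic; the correction term is 7 for negative
-- x and 0 otherwise, so the result rounds toward zero as quot requires.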
-- -----------------------------------------------------------------------------

-- This algorithm for determining the $\log_2$ of exact powers of 2 comes
-- from GCC.  It requires bit manipulation primitives, and we use GHC
-- extensions.  Tough.
--
-- Used to be in MachInstrs --SDM.
-- ToDo: remove use of unboxery --SDM.

-- Unboxery removed in favor of FastInt; but is the function supposed to fail
-- on inputs >= 2147483648, or was that just an implementation artifact?
-- And is this speed-critical, or can we just use Integer operations
-- (including Data.Bits)?
exactLog2 :: Integer -> Maybe Integer
exactLog2 x_
  = if (x_ <= 0 || x_ >= 2147483648) then
       Nothing
    else
       case iUnbox (fromInteger x_) of { x ->
       if (x `bitAndFastInt` negateFastInt x) /=# x then
          Nothing
       else
          Just (toInteger (iBox (pow2 x)))
       }
  where
    pow2 x | x ==# _ILIT(1) = _ILIT(0)
           | otherwise = _ILIT(1) +# pow2 (x `shiftR_FastInt` _ILIT(1))
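-- A possible plain-Integer variant using Data.Bits (which this module
-- already imports), sketched in answer to the question above; hypothetical,
-- and it keeps the existing 2^31 cutoff:
--
--     exactLog2' :: Integer -> Maybe Integer
--     exactLog2' x
--       | x > 0, x < 2147483648, x .&. (x - 1) == 0
--                   = Just (toInteger (popCount (x - 1)))
--       | otherwise = Nothing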
-- -----------------------------------------------------------------------------
-- Loopify for C

{-
This is a simple pass that replaces tail-recursive functions like this:

  fac() { ... jump fac(); }

with this:

  fac() { L: ... goto L; }

the latter generates better C code, because the C compiler treats it
like a loop, and brings full loop optimisation to bear.

In my measurements this makes little or no difference to anything
except factorial, but what the hell.
-}
cmmLoopifyForC :: RawCmmTop -> RawCmmTop
cmmLoopifyForC p@(CmmProc info entry_lbl
                 (ListGraph blocks@(BasicBlock top_id _ : _)))
  | null info = p  -- only if there's an info table, ignore case alts
  | otherwise =
--  pprTrace "jump_lbl" (ppr jump_lbl <+> ppr entry_lbl) $
  CmmProc info entry_lbl (ListGraph blocks')
  where blocks' = [ BasicBlock id (map do_stmt stmts)
                  | BasicBlock id stmts <- blocks ]

        do_stmt (CmmJump (CmmLit (CmmLabel lbl)) _) | lbl == jump_lbl
                = CmmBranch top_id
        do_stmt stmt = stmt

        jump_lbl | tablesNextToCode = entryLblToInfoLbl entry_lbl
                 | otherwise        = entry_lbl

cmmLoopifyForC top = top
-- -----------------------------------------------------------------------------
-- Utils

isLit (CmmLit _) = True
isLit _          = False

isComparisonExpr :: CmmExpr -> Bool
isComparisonExpr (CmmMachOp op _) = isComparisonMachOp op
isComparisonExpr _other           = False

isPicReg (CmmReg (CmmGlobal PicBaseReg)) = True
isPicReg _ = False