{-# OPTIONS -w #-}
-- The above warning suppression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and fix
-- any warnings in the module. See
--     http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#Warnings
-- for details
-----------------------------------------------------------------------------
--
-- Cmm optimisation
--
-- (c) The University of Glasgow 2006
--
-----------------------------------------------------------------------------

module CmmOpt (
        cmmEliminateDeadBlocks,
        cmmMiniInline,
        cmmMachOpFold,
        cmmLoopifyForC,
  ) where

#include "HsVersions.h"
import OldCmm
import OldPprCmm ( pprStmt )
import CmmUtils
import CLabel
import StaticFlags

import UniqFM
import Unique
import FastTypes
import Outputable
import BlockId

import Data.Bits
import Data.List
import Data.Maybe

import Compiler.Hoopl hiding (Unique)
-- -----------------------------------------------------------------------------
-- Eliminates dead blocks

{-
We repeatedly expand the set of reachable blocks until we hit a
fixpoint, and then prune any blocks that were not in this set.  This is
actually a required optimization, as dead blocks can cause problems
for invariants in the linear register allocator (and possibly other
places.)
-}
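
{-
For example (illustrative, not from any particular program): in

    c1: ... ; goto c3;
    c2: ... ; goto c3;    -- nothing ever branches to c2
    c3: ...

only c1 and c3 are reachable from the entry block c1, so c2 is pruned,
even though c2 itself mentions the live block c3.
-}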
-- Deep fold over statements could probably be abstracted out, but it
-- might not be worth the effort since OldCmm is moribund
cmmEliminateDeadBlocks :: [CmmBasicBlock] -> [CmmBasicBlock]
cmmEliminateDeadBlocks [] = []
cmmEliminateDeadBlocks blocks@(BasicBlock base_id _:_) =
    let -- Calculate what's reachable from what block
        reachableMap = foldl' f emptyUFM blocks -- lazy in values
            where f m (BasicBlock block_id stmts) = addToUFM m block_id (reachableFrom stmts)
        reachableFrom stmts = foldl stmt [] stmts
            where
                stmt m CmmNop = m
                stmt m (CmmComment _) = m
                stmt m (CmmAssign _ e) = expr m e
                stmt m (CmmStore e1 e2) = expr (expr m e1) e2
                stmt m (CmmCall c _ as _ _) = f (actuals m as) c
                    where f m (CmmCallee e _) = expr m e
                          f m (CmmPrim _) = m
                stmt m (CmmBranch b) = b:m
                stmt m (CmmCondBranch e b) = b:(expr m e)
                stmt m (CmmSwitch e bs) = catMaybes bs ++ expr m e
                stmt m (CmmJump e as) = expr (actuals m as) e
                stmt m (CmmReturn as) = actuals m as
                actuals m as = foldl' (\m h -> expr m (hintlessCmm h)) m as
                -- We have to do a deep fold into CmmExpr because
                -- there may be a BlockId in the CmmBlock literal.
                expr m (CmmLit l) = lit m l
                expr m (CmmLoad e _) = expr m e
                expr m (CmmReg _) = m
                expr m (CmmMachOp _ es) = foldl' expr m es
                expr m (CmmStackSlot _ _) = m
                expr m (CmmRegOff _ _) = m
                lit m (CmmBlock b) = b:m
                lit m _ = m
        -- Expand the set of reachable blocks to a fixpoint
        reachable = go [base_id] (setEmpty :: BlockSet)
            where go []     m = m
                  go (x:xs) m
                    | setMember x m = go xs m
                    | otherwise     = go (add ++ xs) (setInsert x m)
                    where add = fromMaybe (panic "cmmEliminateDeadBlocks: unknown block")
                                          (lookupUFM reachableMap x)
    in filter (\(BasicBlock block_id _) -> setMember block_id reachable) blocks

-- -----------------------------------------------------------------------------
-- The mini-inliner

{-
This pass inlines assignments to temporaries that are used just
once.  It works as follows:

 - count uses of each temporary
 - for each temporary that occurs just once:
   - attempt to push it forward to the statement that uses it
   - only push forward past assignments to other temporaries
     (assumes that temporaries are single-assignment)
   - if we reach the statement that uses it, inline the rhs
     and delete the original assignment.

[N.B. In the Quick C-- compiler, this optimization is achieved by a
combination of two dataflow passes: forward substitution (peephole
optimization) and dead-assignment elimination.  ---NR]
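
For example (an illustrative fragment, not from real compiler output):

    _c1 = R1 + 8;
    I64[_c1] = 0;

_c1 is used exactly once, so its rhs is pushed forward to the use site
and the original assignment is deleted, leaving

    I64[R1 + 8] = 0;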
Possible generalisations: here is an example from factorial

Fac_zdwfac_entry:
    cmC:
        _smi = R2;
        if (_smi != 0) goto cmK;
        R1 = R3;
        jump I64[Sp];
    cmK:
        _smn = _smi * R3;
        R2 = _smi + (-1);
        R3 = _smn;
        jump Fac_zdwfac_info;
We want to inline _smi and _smn.  To inline _smn:

 - we must be able to push forward past assignments to global regs.
   We can do this if the rhs of the assignment we are pushing
   forward doesn't refer to the global reg being assigned to; easy
   to test.

To inline _smi:

 - It is a trivial replacement, reg for reg, but it occurs more than
   once.
 - We can inline trivial assignments even if the temporary occurs
   more than once, as long as we don't eliminate the original assignment
   (this doesn't help much on its own).
 - We need to be able to propagate the assignment forward through jumps;
   if we did this, we would find that it can be inlined safely in all
   cases.
-}
countUses :: UserOfLocalRegs a => a -> UniqFM Int
countUses a = foldRegsUsed (\m r -> addToUFM m r (count m r + 1)) emptyUFM a
  where count m r = lookupWithDefaultUFM m (0::Int) r
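
-- For example (illustrative): given the single statement
--      _c1 = _c2 + _c2
-- countUses maps _c2's unique to 2; _c1 is only defined here, not used,
-- so it does not appear in the map at all.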

cmmMiniInline :: [CmmBasicBlock] -> [CmmBasicBlock]
cmmMiniInline blocks = map do_inline blocks
  where do_inline (BasicBlock id stmts)
          = BasicBlock id (cmmMiniInlineStmts (countUses blocks) stmts)
cmmMiniInlineStmts :: UniqFM Int -> [CmmStmt] -> [CmmStmt]
cmmMiniInlineStmts uses [] = []
cmmMiniInlineStmts uses (stmt@(CmmAssign (CmmLocal (LocalReg u _)) expr) : stmts)
        -- not used at all: just discard this assignment
  | Nothing <- lookupUFM uses u
  = cmmMiniInlineStmts uses stmts

        -- used once: try to inline at the use site
  | Just 1 <- lookupUFM uses u,
    Just stmts' <- lookForInline u expr stmts
  =
#ifdef NCG_DEBUG
     trace ("nativeGen: inlining " ++ showSDoc (pprStmt stmt)) $
#endif
     cmmMiniInlineStmts uses stmts'

cmmMiniInlineStmts uses (stmt:stmts)
  = stmt : cmmMiniInlineStmts uses stmts

lookForInline u expr stmts = lookForInline' u expr regset stmts
    where regset = foldRegsUsed extendRegSet emptyRegSet expr

lookForInline' u expr regset (stmt : rest)
  | Just 1 <- lookupUFM (countUses stmt) u, ok_to_inline
  = Just (inlineStmt u expr stmt : rest)

  | ok_to_skip
  = case lookForInline' u expr regset rest of
        Nothing    -> Nothing
        Just stmts -> Just (stmt:stmts)

  | otherwise
  = Nothing

  where
        -- we don't inline into CmmCall if the expression refers to global
        -- registers.  This is a HACK to avoid global registers clashing with
        -- C argument-passing registers; really the back-end ought to be able
        -- to handle it properly, but currently neither PprC nor the NCG can
        -- do it.  See also CgForeignCall:load_args_into_temps.
    ok_to_inline = case stmt of
                     CmmCall{} -> hasNoGlobalRegs expr
                     _ -> True
        -- Expressions aren't side-effecting.  Temporaries may or may not
        -- be single-assignment depending on the source (the old code
        -- generator creates single-assignment code, but hand-written Cmm
        -- and Cmm from the new code generator is not single-assignment).
        -- So we do an extra check to make sure that the register being
        -- changed is not one we were relying on.  I don't know how much of a
        -- performance hit this is (we have to create a regset for every
        -- instruction). -- EZY
    ok_to_skip = case stmt of
                   CmmNop -> True
                   CmmComment{} -> True
                   CmmAssign (CmmLocal r@(LocalReg u' _)) rhs | u' /= u && not (r `elemRegSet` regset) -> True
                   CmmAssign g@(CmmGlobal _) rhs -> not (g `regUsedIn` expr)
                   _other -> False

inlineStmt :: Unique -> CmmExpr -> CmmStmt -> CmmStmt
inlineStmt u a (CmmAssign r e) = CmmAssign r (inlineExpr u a e)
inlineStmt u a (CmmStore e1 e2) = CmmStore (inlineExpr u a e1) (inlineExpr u a e2)
inlineStmt u a (CmmCall target regs es srt ret)
   = CmmCall (infn target) regs es' srt ret
   where infn (CmmCallee fn cconv) = CmmCallee (inlineExpr u a fn) cconv
         infn (CmmPrim p) = CmmPrim p
         es' = [ (CmmHinted (inlineExpr u a e) hint) | (CmmHinted e hint) <- es ]
inlineStmt u a (CmmCondBranch e d) = CmmCondBranch (inlineExpr u a e) d
inlineStmt u a (CmmSwitch e d) = CmmSwitch (inlineExpr u a e) d
inlineStmt u a (CmmJump e d) = CmmJump (inlineExpr u a e) d
inlineStmt u a other_stmt = other_stmt

inlineExpr :: Unique -> CmmExpr -> CmmExpr -> CmmExpr
inlineExpr u a e@(CmmReg (CmmLocal (LocalReg u' _)))
  | u == u'   = a
  | otherwise = e
inlineExpr u a e@(CmmRegOff (CmmLocal (LocalReg u' rep)) off)
  | u == u'   = CmmMachOp (MO_Add width) [a, CmmLit (CmmInt (fromIntegral off) width)]
  | otherwise = e
  where
    width = typeWidth rep
inlineExpr u a (CmmLoad e rep) = CmmLoad (inlineExpr u a e) rep
inlineExpr u a (CmmMachOp op es) = CmmMachOp op (map (inlineExpr u a) es)
inlineExpr u a other_expr = other_expr

-- -----------------------------------------------------------------------------
-- MachOp constant folder

-- Now, try to constant-fold the MachOps.  The arguments have already
-- been optimized and folded.

cmmMachOpFold
    :: MachOp       -- The operation from a CmmMachOp
    -> [CmmExpr]    -- The optimized arguments
    -> CmmExpr
cmmMachOpFold op arg@[CmmLit (CmmInt x rep)]
  = case op of
      MO_S_Neg r -> CmmLit (CmmInt (-x) rep)
      MO_Not r   -> CmmLit (CmmInt (complement x) rep)

        -- these are interesting: we must first narrow to the
        -- "from" type, in order to truncate to the correct size.
        -- The final narrow/widen to the destination type
        -- is implicit in the CmmLit.
      MO_SF_Conv from to -> CmmLit (CmmFloat (fromInteger x) to)
      MO_SS_Conv from to -> CmmLit (CmmInt (narrowS from x) to)
      MO_UU_Conv from to -> CmmLit (CmmInt (narrowU from x) to)

      _ -> panic "cmmMachOpFold: unknown unary op"
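
-- For example (illustrative): folding (MO_SS_Conv W32 W8) applied to the
-- literal 300 must narrow first: narrowS W8 300 = 44, giving CmmInt 44 W8
-- rather than an out-of-range CmmInt 300 W8.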

-- Eliminate conversion NOPs
cmmMachOpFold (MO_SS_Conv rep1 rep2) [x] | rep1 == rep2 = x
cmmMachOpFold (MO_UU_Conv rep1 rep2) [x] | rep1 == rep2 = x

-- Eliminate nested conversions where possible
cmmMachOpFold conv_outer args@[CmmMachOp conv_inner [x]]
  | Just (rep1,rep2,signed1) <- isIntConversion conv_inner,
    Just (_,   rep3,signed2) <- isIntConversion conv_outer
  = case () of
        -- widen then narrow to the same size is a nop
      _ | rep1 < rep2 && rep1 == rep3 -> x
        -- Widen then narrow to different size: collapse to single conversion
        -- but remember to use the signedness from the widening, just in case
        -- the final conversion is a widen.
        | rep1 < rep2 && rep2 > rep3 ->
            cmmMachOpFold (intconv signed1 rep1 rep3) [x]
        -- Nested widenings: collapse if the signedness is the same
        | rep1 < rep2 && rep2 < rep3 && signed1 == signed2 ->
            cmmMachOpFold (intconv signed1 rep1 rep3) [x]
        -- Nested narrowings: collapse
        | rep1 > rep2 && rep2 > rep3 ->
            cmmMachOpFold (MO_UU_Conv rep1 rep3) [x]
        | otherwise ->
            CmmMachOp conv_outer args
  where
        isIntConversion (MO_UU_Conv rep1 rep2)
          = Just (rep1,rep2,False)
        isIntConversion (MO_SS_Conv rep1 rep2)
          = Just (rep1,rep2,True)
        isIntConversion _ = Nothing

        intconv True  = MO_SS_Conv
        intconv False = MO_UU_Conv
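
-- For example (illustrative): the nested conversion
--      cmmMachOpFold (MO_UU_Conv W32 W16) [CmmMachOp (MO_UU_Conv W8 W32) [x]]
-- hits the widen-then-narrow case above and collapses to
--      cmmMachOpFold (MO_UU_Conv W8 W16) [x]
-- keeping the (unsigned) signedness of the inner widening.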

-- ToDo: a narrow of a load can be collapsed into a narrow load, right?
-- but what if the architecture only supports word-sized loads, should
-- we do the transformation anyway?

cmmMachOpFold mop args@[CmmLit (CmmInt x xrep), CmmLit (CmmInt y _)]
  = case mop of
        -- for comparisons: don't forget to narrow the arguments before
        -- comparing, since they might be out of range.
        MO_Eq r   -> CmmLit (CmmInt (if x_u == y_u then 1 else 0) wordWidth)
        MO_Ne r   -> CmmLit (CmmInt (if x_u /= y_u then 1 else 0) wordWidth)

        MO_U_Gt r -> CmmLit (CmmInt (if x_u >  y_u then 1 else 0) wordWidth)
        MO_U_Ge r -> CmmLit (CmmInt (if x_u >= y_u then 1 else 0) wordWidth)
        MO_U_Lt r -> CmmLit (CmmInt (if x_u <  y_u then 1 else 0) wordWidth)
        MO_U_Le r -> CmmLit (CmmInt (if x_u <= y_u then 1 else 0) wordWidth)

        MO_S_Gt r -> CmmLit (CmmInt (if x_s >  y_s then 1 else 0) wordWidth)
        MO_S_Ge r -> CmmLit (CmmInt (if x_s >= y_s then 1 else 0) wordWidth)
        MO_S_Lt r -> CmmLit (CmmInt (if x_s <  y_s then 1 else 0) wordWidth)
        MO_S_Le r -> CmmLit (CmmInt (if x_s <= y_s then 1 else 0) wordWidth)

        MO_Add r -> CmmLit (CmmInt (x + y) r)
        MO_Sub r -> CmmLit (CmmInt (x - y) r)
        MO_Mul r -> CmmLit (CmmInt (x * y) r)
        MO_U_Quot r | y /= 0 -> CmmLit (CmmInt (x_u `quot` y_u) r)
        MO_U_Rem  r | y /= 0 -> CmmLit (CmmInt (x_u `rem`  y_u) r)
        MO_S_Quot r | y /= 0 -> CmmLit (CmmInt (x `quot` y) r)
        MO_S_Rem  r | y /= 0 -> CmmLit (CmmInt (x `rem`  y) r)

        MO_And r -> CmmLit (CmmInt (x .&. y) r)
        MO_Or  r -> CmmLit (CmmInt (x .|. y) r)
        MO_Xor r -> CmmLit (CmmInt (x `xor` y) r)

        MO_Shl   r -> CmmLit (CmmInt (x   `shiftL` fromIntegral y) r)
        MO_U_Shr r -> CmmLit (CmmInt (x_u `shiftR` fromIntegral y) r)
        MO_S_Shr r -> CmmLit (CmmInt (x   `shiftR` fromIntegral y) r)

        other -> CmmMachOp mop args

   where
        x_u = narrowU xrep x
        y_u = narrowU xrep y
        x_s = narrowS xrep x
        y_s = narrowS xrep y

-- When possible, shift the constants to the right-hand side, so that we
-- can match for strength reductions.  Note that the code generator will
-- also assume that constants have been shifted to the right when
-- possible.

cmmMachOpFold op [x@(CmmLit _), y]
   | not (isLit y) && isCommutableMachOp op
   = cmmMachOpFold op [y, x]
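
-- For example (illustrative): (1 + x) becomes (x + 1), so the
-- reassociation and strength-reduction rules below only need to look
-- for literals on the right.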

-- Turn (a+b)+c into a+(b+c) where possible.  Because literals are
-- moved to the right, it is more likely that we will find
-- opportunities for constant folding when the expression is
-- right-associated.
--
-- ToDo: this appears to introduce a quadratic behaviour due to the
-- nested cmmMachOpFold.  Can we fix this?
--
-- Why do we check isLit arg1?  If arg1 is a lit, it means that arg2
-- is also a lit (otherwise arg1 would be on the right).  If we
-- put arg1 on the left of the rearranged expression, we'll get into a
-- loop:  (x1+x2)+x3 => x1+(x2+x3) => (x2+x3)+x1 => x2+(x3+x1) ...
--
-- Also don't do it if arg1 is PicBaseReg, so that we don't separate the
-- PicBaseReg from the corresponding label (or label difference).

cmmMachOpFold mop1 [CmmMachOp mop2 [arg1,arg2], arg3]
   | mop2 `associates_with` mop1
     && not (isLit arg1) && not (isPicReg arg1)
   = cmmMachOpFold mop2 [arg1, cmmMachOpFold mop1 [arg2,arg3]]
   where
     MO_Add{} `associates_with` MO_Sub{} = True
     mop1 `associates_with` mop2 =
        mop1 == mop2 && isAssociativeMachOp mop1
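
-- For example (illustrative): (x + 1) + 2 is rearranged to x + (1 + 2),
-- and the literal rule above then folds the inner expression, giving x + 3.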

-- special case: (a - b) + c  ==>  a + (c - b)
cmmMachOpFold mop1@(MO_Add{}) [CmmMachOp mop2@(MO_Sub{}) [arg1,arg2], arg3]
   | not (isLit arg1) && not (isPicReg arg1)
   = cmmMachOpFold mop1 [arg1, cmmMachOpFold mop2 [arg3,arg2]]

-- Make a RegOff if we can
cmmMachOpFold (MO_Add _) [CmmReg reg, CmmLit (CmmInt n rep)]
  = CmmRegOff reg (fromIntegral (narrowS rep n))
cmmMachOpFold (MO_Add _) [CmmRegOff reg off, CmmLit (CmmInt n rep)]
  = CmmRegOff reg (off + fromIntegral (narrowS rep n))
cmmMachOpFold (MO_Sub _) [CmmReg reg, CmmLit (CmmInt n rep)]
  = CmmRegOff reg (- fromIntegral (narrowS rep n))
cmmMachOpFold (MO_Sub _) [CmmRegOff reg off, CmmLit (CmmInt n rep)]
  = CmmRegOff reg (off - fromIntegral (narrowS rep n))

-- Fold label(+/-)offset into a CmmLit where possible

cmmMachOpFold (MO_Add _) [CmmLit (CmmLabel lbl), CmmLit (CmmInt i rep)]
  = CmmLit (CmmLabelOff lbl (fromIntegral (narrowU rep i)))
cmmMachOpFold (MO_Add _) [CmmLit (CmmInt i rep), CmmLit (CmmLabel lbl)]
  = CmmLit (CmmLabelOff lbl (fromIntegral (narrowU rep i)))
cmmMachOpFold (MO_Sub _) [CmmLit (CmmLabel lbl), CmmLit (CmmInt i rep)]
  = CmmLit (CmmLabelOff lbl (fromIntegral (negate (narrowU rep i))))

-- Comparison of literal with widened operand: perform the comparison
-- at the smaller width, as long as the literal is within range.
--
-- We can't do the reverse trick, when the operand is narrowed:
-- narrowing throws away bits from the operand, there's no way to do
-- the same comparison at the larger size.
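
-- For example (illustrative), on a 64-bit target:
--      MO_U_Lt W64 [MO_UU_Conv W8 W64 x, 100]
-- can be performed as
--      MO_U_Lt W8 [x, 100]
-- because 100 survives narrowing to W8 unchanged.  Note the #2080
-- subtlety handled below: a signed comparison after an unsigned widen
-- must become an unsigned comparison at the narrow width.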

#if i386_TARGET_ARCH || x86_64_TARGET_ARCH
-- the PowerPC NCG has a TODO for I8/I16 comparisons, so don't try

cmmMachOpFold cmp [CmmMachOp conv [x], CmmLit (CmmInt i _)]
  |     -- if the operand is widened:
    Just (rep, signed, narrow_fn) <- maybe_conversion conv,
        -- and this is a comparison operation:
    Just narrow_cmp <- maybe_comparison cmp rep signed,
        -- and the literal fits in the smaller size:
    i == narrow_fn rep i
        -- then we can do the comparison at the smaller size
  = cmmMachOpFold narrow_cmp [x, CmmLit (CmmInt i rep)]
 where
    maybe_conversion (MO_UU_Conv from to)
        | to > from
        = Just (from, False, narrowU)
    maybe_conversion (MO_SS_Conv from to)
        | to > from
        = Just (from, True, narrowS)

        -- don't attempt to apply this optimisation when the source
        -- is a float; see #1916
    maybe_conversion _ = Nothing
        -- careful (#2080): if the original comparison was signed, but
        -- we were doing an unsigned widen, then we must do an
        -- unsigned comparison at the smaller size.
    maybe_comparison (MO_U_Gt _) rep _     = Just (MO_U_Gt rep)
    maybe_comparison (MO_U_Ge _) rep _     = Just (MO_U_Ge rep)
    maybe_comparison (MO_U_Lt _) rep _     = Just (MO_U_Lt rep)
    maybe_comparison (MO_U_Le _) rep _     = Just (MO_U_Le rep)
    maybe_comparison (MO_Eq   _) rep _     = Just (MO_Eq   rep)
    maybe_comparison (MO_S_Gt _) rep True  = Just (MO_S_Gt rep)
    maybe_comparison (MO_S_Ge _) rep True  = Just (MO_S_Ge rep)
    maybe_comparison (MO_S_Lt _) rep True  = Just (MO_S_Lt rep)
    maybe_comparison (MO_S_Le _) rep True  = Just (MO_S_Le rep)
    maybe_comparison (MO_S_Gt _) rep False = Just (MO_U_Gt rep)
    maybe_comparison (MO_S_Ge _) rep False = Just (MO_U_Ge rep)
    maybe_comparison (MO_S_Lt _) rep False = Just (MO_U_Lt rep)
    maybe_comparison (MO_S_Le _) rep False = Just (MO_U_Le rep)
    maybe_comparison _ _ _ = Nothing
#endif

-- We can often do something with constants of 0 and 1 ...

cmmMachOpFold mop args@[x, y@(CmmLit (CmmInt 0 _))]
  = case mop of
        MO_Add   r -> x
        MO_Sub   r -> x
        MO_Mul   r -> y
        MO_And   r -> y
        MO_Or    r -> x
        MO_Xor   r -> x
        MO_Shl   r -> x
        MO_S_Shr r -> x
        MO_U_Shr r -> x
        MO_Ne    r | isComparisonExpr x -> x
        MO_Eq    r | Just x' <- maybeInvertCmmExpr x -> x'
        MO_U_Gt  r | isComparisonExpr x -> x
        MO_S_Gt  r | isComparisonExpr x -> x
        MO_U_Lt  r | isComparisonExpr x -> CmmLit (CmmInt 0 wordWidth)
        MO_S_Lt  r | isComparisonExpr x -> CmmLit (CmmInt 0 wordWidth)
        MO_U_Ge  r | isComparisonExpr x -> CmmLit (CmmInt 1 wordWidth)
        MO_S_Ge  r | isComparisonExpr x -> CmmLit (CmmInt 1 wordWidth)
        MO_U_Le  r | Just x' <- maybeInvertCmmExpr x -> x'
        MO_S_Le  r | Just x' <- maybeInvertCmmExpr x -> x'
        other -> CmmMachOp mop args
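
-- For example (illustrative): a comparison always yields 0 or 1, so
--      (a < b) != 0   folds to   (a < b)
-- and  (a < b) == 0   folds to   (a >= b)   via maybeInvertCmmExpr.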

cmmMachOpFold mop args@[x, y@(CmmLit (CmmInt 1 rep))]
  = case mop of
        MO_Mul    r -> x
        MO_S_Quot r -> x
        MO_U_Quot r -> x
        MO_S_Rem  r -> CmmLit (CmmInt 0 rep)
        MO_U_Rem  r -> CmmLit (CmmInt 0 rep)
        MO_Ne     r | Just x' <- maybeInvertCmmExpr x -> x'
        MO_Eq     r | isComparisonExpr x -> x
        MO_U_Lt   r | Just x' <- maybeInvertCmmExpr x -> x'
        MO_S_Lt   r | Just x' <- maybeInvertCmmExpr x -> x'
        MO_U_Gt   r | isComparisonExpr x -> CmmLit (CmmInt 0 wordWidth)
        MO_S_Gt   r | isComparisonExpr x -> CmmLit (CmmInt 0 wordWidth)
        MO_U_Le   r | isComparisonExpr x -> CmmLit (CmmInt 1 wordWidth)
        MO_S_Le   r | isComparisonExpr x -> CmmLit (CmmInt 1 wordWidth)
        MO_U_Ge   r | isComparisonExpr x -> x
        MO_S_Ge   r | isComparisonExpr x -> x
        other -> CmmMachOp mop args

-- Now look for multiplication/division by powers of 2 (integers).

cmmMachOpFold mop args@[x, y@(CmmLit (CmmInt n _))]
  = case mop of
        MO_Mul rep
           | Just p <- exactLog2 n ->
                 cmmMachOpFold (MO_Shl rep) [x, CmmLit (CmmInt p rep)]
        MO_U_Quot rep
           | Just p <- exactLog2 n ->
                 cmmMachOpFold (MO_U_Shr rep) [x, CmmLit (CmmInt p rep)]
        MO_S_Quot rep
           | Just p <- exactLog2 n,
             CmmReg _ <- x ->   -- We duplicate x below, hence require
                                -- it is a reg.  FIXME: remove this restriction.
                -- shift right is not the same as quot, because it rounds
                -- to minus infinity, whereas quot rounds toward zero.
                -- To fix this up, we add one less than the divisor to the
                -- dividend if it is a negative number.
                --
                -- to avoid a test/jump, we use the following sequence:
                --      x1 = x >> word_size-1  (all 1s if -ve, all 0s if +ve)
                --      x2 = x1 & (divisor-1)
                --      result = (x+x2) >>= log2(divisor)
                -- this could be done a bit more simply using conditional moves,
                -- but we're processor independent here.
                --
                -- we optimise the divide by 2 case slightly, generating
                --      x1 = x >> word_size-1  (unsigned)
                --      return = (x + x1) >>= log2(divisor)
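                --
                -- A worked example (illustrative): for x = -7, divisor = 4
                -- (so p = 2), plain shifting gives -7 >> 2 = -2 (rounding to
                -- minus infinity), whereas -7 `quot` 4 = -1.  With the fixup:
                --      x1 = -1 (all 1s), x2 = x1 & 3 = 3,
                --      (-7 + 3) >> 2 = -4 >> 2 = -1, as required.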
                let
                    bits = fromIntegral (widthInBits rep) - 1
                    shr = if p == 1 then MO_U_Shr rep else MO_S_Shr rep
                    x1 = CmmMachOp shr [x, CmmLit (CmmInt bits rep)]
                    x2 = if p == 1 then x1 else
                         CmmMachOp (MO_And rep) [x1, CmmLit (CmmInt (n-1) rep)]
                    x3 = CmmMachOp (MO_Add rep) [x, x2]
                in
                cmmMachOpFold (MO_S_Shr rep) [x3, CmmLit (CmmInt p rep)]

        other
           -> unchanged
    where
       unchanged = CmmMachOp mop args

-- Anything else is just too hard.

cmmMachOpFold mop args = CmmMachOp mop args

-- -----------------------------------------------------------------------------
-- exactLog2

-- This algorithm for determining the $\log_2$ of exact powers of 2 comes
-- from GCC.  It requires bit manipulation primitives, and we use GHC
-- extensions.  Tough.
--
-- Used to be in MachInstrs --SDM.
-- ToDo: remove use of unboxery --SDM.

-- Unboxery removed in favor of FastInt; but is the function supposed to fail
-- on inputs >= 2147483648, or was that just an implementation artifact?
-- And is this speed-critical, or can we just use Integer operations
-- (including Data.Bits)?

exactLog2 :: Integer -> Maybe Integer
exactLog2 x_
  = if (x_ <= 0 || x_ >= 2147483648) then
       Nothing
    else
       case iUnbox (fromInteger x_) of { x ->
       if (x `bitAndFastInt` negateFastInt x) /=# x then
          Nothing
       else
          Just (toInteger (iBox (pow2 x)))
       }
  where
    pow2 x | x ==# _ILIT(1) = _ILIT(0)
           | otherwise      = _ILIT(1) +# pow2 (x `shiftR_FastInt` _ILIT(1))
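
-- For example (illustrative):
--      exactLog2 1  == Just 0
--      exactLog2 8  == Just 3
--      exactLog2 12 == Nothing    -- 12 .&. (-12) == 4, not 12
--      exactLog2 0  == Nothing
-- The power-of-2 test relies on x .&. negate x isolating the lowest set
-- bit, which equals x exactly when x is a power of 2.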

-- -----------------------------------------------------------------------------
-- Loopify for C

{-
This is a simple pass that replaces tail-recursive functions like this:

  fac() {
    ...
    jump fac();
  }

with this:

  fac() {
  L:
    ...
    goto L;
  }

the latter generates better C code, because the C compiler treats it
like a loop, and brings full loop optimisation to bear.

In my measurements this makes little or no difference to anything
except factorial, but what the hell.
-}
cmmLoopifyForC :: RawCmmTop -> RawCmmTop
cmmLoopifyForC p@(CmmProc info entry_lbl
                 (ListGraph blocks@(BasicBlock top_id _ : _)))
  | null info = p  -- only if there's an info table, ignore case alts
  | otherwise =
--  pprTrace "jump_lbl" (ppr jump_lbl <+> ppr entry_lbl) $
  CmmProc info entry_lbl (ListGraph blocks')
  where blocks' = [ BasicBlock id (map do_stmt stmts)
                  | BasicBlock id stmts <- blocks ]

        do_stmt (CmmJump (CmmLit (CmmLabel lbl)) _) | lbl == jump_lbl
                = CmmBranch top_id
        do_stmt stmt = stmt

        jump_lbl | tablesNextToCode = entryLblToInfoLbl entry_lbl
                 | otherwise        = entry_lbl

cmmLoopifyForC top = top

-- -----------------------------------------------------------------------------
-- Utils

isLit :: CmmExpr -> Bool
isLit (CmmLit _) = True
isLit _          = False

isComparisonExpr :: CmmExpr -> Bool
isComparisonExpr (CmmMachOp op _) = isComparisonMachOp op
isComparisonExpr _other           = False

isPicReg :: CmmExpr -> Bool
isPicReg (CmmReg (CmmGlobal PicBaseReg)) = True
isPicReg _ = False