2 % (c) The AQUA Project, Glasgow University, 1993-1998
4 \section[SimplUtils]{The simplifier utilities}
9 mkLam, mkCase, prepareAlts,
12 preInlineUnconditionally, postInlineUnconditionally,
13 activeUnfolding, activeUnfInRule, activeRule,
14 simplEnvForGHCi, simplEnvForRules, updModeForInlineRules,
16 -- The continuation type
17 SimplCont(..), DupFlag(..), ArgInfo(..),
18 contIsDupable, contResultType, contIsTrivial, contArgs, dropArgs,
19 pushArgs, countValArgs, countArgs, addArgTo,
20 mkBoringStop, mkRhsStop, mkLazyArgStop, contIsRhsOrArg,
21 interestingCallContext,
23 interestingArg, mkArgInfo,
28 #include "HsVersions.h"
34 import qualified CoreSubst
38 import CoreArity ( etaExpand, exprEtaExpandArity )
42 import Var ( isCoVar )
45 import Type hiding( substTy )
46 import Coercion ( coercionKind )
48 import Unify ( dataConCannotMatch )
60 %************************************************************************
64 %************************************************************************
66 A SimplCont allows the simplifier to traverse the expression in a
67 zipper-like fashion. The SimplCont represents the rest of the expression,
68 "above" the point of interest.
70 You can also think of a SimplCont as an "evaluation context", using
71 that term in the way it is used for operational semantics. This is the
way I usually think of it. For example you'll often see a syntax for
73 evaluation context looking like
74 C ::= [] | C e | case C of alts | C `cast` co
75 That's the kind of thing we are doing here, and I use that syntax in
80 * A SimplCont describes a *strict* context (just like
81 evaluation contexts do). E.g. Just [] is not a SimplCont
83 * A SimplCont describes a context that *does not* bind
84 any variables. E.g. \x. [] is not a SimplCont
  -- Each constructor is one "frame" of the evaluation context,
  -- with the rest of the context nested inside it.
  = Stop                -- An empty context, or hole, []
        CallCtxt        -- True <=> There is something interesting about
                        --          the context, and hence the inliner
                        --          should be a bit keener (see interestingCallContext)
                        -- This is an argument of a function that has RULES
                        -- Inlining the call might allow the rule to fire

  | CoerceIt            -- C `cast` co
        OutCoercion             -- The coercion simplified
        InExpr StaticEnv        -- The argument and its static env

  | Select              -- case C of alts
        InId [InAlt] StaticEnv  -- The case binder, alts, and subst-env

        -- The two strict forms have no DupFlag, because we never duplicate them
  | StrictBind          -- (\x* \xs. e) C
        InId [InBndr]           -- let x* = [] in e
        InExpr StaticEnv        --      is a special case

  | StrictArg           -- f e1 ..en C
        ArgInfo         -- Specifies f, e1..en, Whether f has rules, etc
                        --     plus strictness flags for *further* args
        CallCtxt        -- Whether *this* argument position is interesting
        -- Accumulated information about a call being simplified:
        -- the function, the args seen so far, and hints about the rest.
        ai_fun   :: Id,           -- The function
        ai_args  :: [OutExpr],    -- ...applied to these args (which are in *reverse* order)
        ai_rules :: [CoreRule],   -- Rules for this function

        ai_encl :: Bool,          -- Flag saying whether this function
                                  -- or an enclosing one has rules (recursively)
                                  --      True => be keener to inline in all args

        ai_strs :: [Bool],        -- Strictness of remaining arguments
                                  --   Usually infinite, but if it is finite it guarantees
                                  --   that the function diverges after being given
                                  --   that number of args
        ai_discs :: [Int]         -- Discounts for remaining arguments; non-zero => be keener to inline
-- | Record one more simplified argument in an ArgInfo.
-- Note: ai_args is kept in *reverse* order, so we cons onto the front.
addArgTo :: ArgInfo -> OutExpr -> ArgInfo
addArgTo ai arg = ai { ai_args = new_args }
  where
    new_args = arg : ai_args ai
-- Debug pretty-printer: prints one continuation frame per line,
-- outermost frame first, ending with the Stop.
instance Outputable SimplCont where
  ppr (Stop interesting)             = ptext (sLit "Stop") <> brackets (ppr interesting)
  ppr (ApplyTo dup arg _ cont)       = ((ptext (sLit "ApplyTo") <+> ppr dup <+> pprParendExpr arg)
                                          {-  $$ nest 2 (pprSimplEnv se) -}) $$ ppr cont
  ppr (StrictBind b _ _ _ cont)      = (ptext (sLit "StrictBind") <+> ppr b) $$ ppr cont
  ppr (StrictArg ai _ cont)          = (ptext (sLit "StrictArg") <+> ppr (ai_fun ai)) $$ ppr cont
  ppr (Select dup bndr alts _ cont)  = (ptext (sLit "Select") <+> ppr dup <+> ppr bndr) $$
                                       (nest 4 (ppr alts)) $$ ppr cont
  ppr (CoerceIt co cont)             = (ptext (sLit "CoerceIt") <+> ppr co) $$ ppr cont
-- | Whether a continuation frame may safely be duplicated
-- (e.g. pushed into each alternative of a case).
data DupFlag = OkToDup | NoDup

instance Outputable DupFlag where
  ppr OkToDup = ptext (sLit "ok")
  ppr NoDup   = ptext (sLit "nodup")
-- Smart constructors for empty continuations; they differ only in how
-- "interesting" they claim the surrounding context is.

mkBoringStop :: SimplCont
mkBoringStop = Stop BoringCtxt

mkRhsStop :: SimplCont -- See Note [RHS of lets] in CoreUnfold
mkRhsStop = Stop (ArgCtxt False)

-- | Empty continuation for a lazy argument; the caller supplies the CallCtxt.
mkLazyArgStop :: CallCtxt -> SimplCont
mkLazyArgStop cci = Stop cci
-- | True when the continuation describes the RHS of a (strict) let or
-- an argument position; False for Select/ApplyTo/CoerceIt frames.
contIsRhsOrArg :: SimplCont -> Bool
contIsRhsOrArg cont = case cont of
  Stop {}       -> True
  StrictBind {} -> True
  StrictArg {}  -> True
  _             -> False
-- | Can this continuation be duplicated (e.g. into case alternatives)
-- without risking code blow-up?
contIsDupable :: SimplCont -> Bool
contIsDupable cont = case cont of
  Stop {}                -> True
  ApplyTo OkToDup _ _ _  -> True
  Select OkToDup _ _ _ _ -> True
  CoerceIt _ inner       -> contIsDupable inner  -- Casts are free to copy
  _                      -> False
-- | A continuation is trivial if it does no real work: just type
-- applications and casts stacked on top of a Stop.
contIsTrivial :: SimplCont -> Bool
contIsTrivial cont = case cont of
  Stop {}                    -> True
  ApplyTo _ (Type _) _ inner -> contIsTrivial inner
  CoerceIt _ inner           -> contIsTrivial inner
  _                          -> False
contResultType :: SimplEnv -> OutType -> SimplCont -> OutType
-- Compute the type of the whole expression: start from the type of the
-- thing in the hole and push it outwards through each frame.
contResultType env ty cont
    -- Substitute using the frame's static env, but our in-scope set
    subst_ty se ty = substTy (se `setInScope` env) ty

    go (CoerceIt co cont)             _  = go cont (snd (coercionKind co))
    go (StrictBind _ bs body se cont) _  = go cont (subst_ty se (exprType (mkLams bs body)))
    go (StrictArg ai _ cont)          _  = go cont (funResultTy (argInfoResultTy ai))
    go (Select _ _ alts se cont)      _  = go cont (subst_ty se (coreAltsType alts))
    go (ApplyTo _ arg se cont)        ty = go cont (apply_to_arg ty arg se)

    -- Type arguments instantiate the type; value arguments just take us
    -- to the function's result type
    apply_to_arg ty (Type ty_arg) se = applyTy ty (subst_ty se ty_arg)
    apply_to_arg ty _             _  = funResultTy ty
-- | Type of the function after being applied to the arguments gathered
-- so far.  Remember ai_args holds the arguments in *reverse* order, so
-- we restore the original order and apply left-to-right.
argInfoResultTy :: ArgInfo -> OutType
argInfoResultTy (ArgInfo { ai_fun = fun, ai_args = args })
  = foldl applyTypeToArg (idType fun) (reverse args)
countValArgs :: SimplCont -> Int
-- Number of pending *value* arguments; type arguments are skipped
countValArgs (ApplyTo _ (Type _) _ cont) = countValArgs cont
countValArgs (ApplyTo _ _ _ cont)        = 1 + countValArgs cont
countArgs :: SimplCont -> Int
-- Number of pending arguments, counting both type and value args
countArgs (ApplyTo _ _ _ cont) = 1 + countArgs cont
contArgs :: SimplCont -> ([OutExpr], SimplCont)
-- Uses substitution to turn each arg into an OutExpr
-- Returns the leading arguments (outermost first) together with the
-- continuation that remains beyond them.
contArgs cont = go [] cont
    -- Accumulate in reverse, then flip once at the end
    go args (ApplyTo _ arg se cont) = go (substExpr se arg : args) cont
    go args cont                    = (reverse args, cont)
-- | Wrap the continuation in one ApplyTo frame per argument, first
-- argument outermost; every frame shares the same static environment.
pushArgs :: SimplEnv -> [CoreExpr] -> SimplCont -> SimplCont
pushArgs env args cont = foldr push cont args
  where
    push arg rest = ApplyTo NoDup arg env rest
-- | Discard the first n ApplyTo frames from the continuation.
-- Panics if the continuation has fewer than n argument frames.
dropArgs :: Int -> SimplCont -> SimplCont
dropArgs n cont
  | n == 0 = cont
dropArgs n (ApplyTo _ _ _ cont) = dropArgs (n - 1) cont
dropArgs n other = pprPanic "dropArgs" (ppr n <+> ppr other)
242 Note [Interesting call context]
243 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
244 We want to avoid inlining an expression where there can't possibly be
245 any gain, such as in an argument position. Hence, if the continuation
246 is interesting (eg. a case scrutinee, application etc.) then we
247 inline, otherwise we don't.
249 Previously some_benefit used to return True only if the variable was
250 applied to some value arguments. This didn't work:
252 let x = _coerce_ (T Int) Int (I# 3) in
253 case _coerce_ Int (T Int) x of
256 we want to inline x, but can't see that it's a constructor in a case
257 scrutinee position, and some_benefit is False.
261 dMonadST = _/\_ t -> :Monad (g1 _@_ t, g2 _@_ t, g3 _@_ t)
263 .... case dMonadST _@_ x0 of (a,b,c) -> ....
265 we'd really like to inline dMonadST here, but we *don't* want to
266 inline if the case expression is just
268 case x of y { DEFAULT -> ... }
270 since we can just eliminate this case instead (x is in WHNF). Similar
271 applies when x is bound to a lambda expression. Hence
272 contIsInteresting looks for case expressions with just a single
interestingCallContext :: SimplCont -> CallCtxt
-- See Note [Interesting call context]
-- Classify the continuation so callSiteInline knows how keen to be.
interestingCallContext cont
    -- Case scrutinee: the classic "interesting" context, provided the
    -- case binder is dead (otherwise it behaves like a strict let)
    interesting (Select _ bndr _ _ _)
        | isDeadBinder bndr = CaseCtxt
        | otherwise         = ArgCtxt False     -- If the binder is used, this
                                                -- is like a strict let
                                                -- See Note [RHS of lets] in CoreUnfold

    interesting (ApplyTo _ arg _ cont)
        | isTypeArg arg = interesting cont
        | otherwise     = ValAppCtxt    -- Can happen if we have (f Int |> co) y
                                        -- If f has an INLINE prag we need to give it some
                                        -- motivation to inline. See Note [Cast then apply]

    interesting (StrictArg _ cci _) = cci
    interesting (StrictBind {})     = BoringCtxt
    interesting (Stop cci)          = cci
    interesting (CoerceIt _ cont)   = interesting cont
        -- If this call is the arg of a strict function, the context
        -- is a bit interesting. If we inline here, we may get useful
        -- evaluation information to avoid repeated evals: e.g.
        -- Here the contIsInteresting makes the '*' keener to inline,
        -- which in turn exposes a constructor which makes the '+' inline.
        -- Assuming that +,* aren't small enough to inline regardless.

        -- It's also very important to inline in a strict context for things
        -- Here, the context of (f x) is strict, and if f's unfolding is
        -- a build it's *great* to inline it here.  So we must ensure that
        -- the context for (f x) is not totally uninteresting.
          -> [CoreRule] -- Rules for function
          -> Int        -- Number of value args
          -> SimplCont  -- Context of the call

-- Build the ArgInfo for a call: the function, its rules, and
-- per-argument strictness and inline-discount information.
mkArgInfo fun rules n_val_args call_cont
  | n_val_args < idArity fun            -- Note [Unsaturated functions]
  = ArgInfo { ai_fun = fun, ai_args = [], ai_rules = rules
            , ai_strs = vanilla_stricts
            , ai_discs = vanilla_discounts }
  = ArgInfo { ai_fun = fun, ai_args = [], ai_rules = rules
            , ai_encl = interestingArgContext rules call_cont
            , ai_strs = add_type_str (idType fun) arg_stricts
            , ai_discs = arg_discounts }

    vanilla_discounts, arg_discounts :: [Int]
    vanilla_discounts = repeat 0        -- Infinite list: no discount anywhere
    arg_discounts = case idUnfolding fun of
                        CoreUnfolding {uf_guidance = UnfIfGoodArgs {ug_args = discounts}}
                              -> discounts ++ vanilla_discounts
                        _     -> vanilla_discounts

    vanilla_stricts, arg_stricts :: [Bool]
    vanilla_stricts = repeat False      -- Infinite list: assume every arg lazy

    = case splitStrictSig (idStrictness fun) of
        (demands, result_info)
              | not (demands `lengthExceeds` n_val_args)
              ->      -- Enough args, use the strictness given.
                      -- For bottoming functions we used to pretend that the arg
                      -- is lazy, so that we don't treat the arg as an
                      -- interesting context.  This avoids substituting
                      -- top-level bindings for (say) strings into
                      -- calls to error.  But now we are more careful about
                      -- inlining lone variables, so it's ok (see SimplUtils.analyseCont)
                 if isBotRes result_info then
                      map isStrictDmd demands           -- Finite => result is bottom
                      map isStrictDmd demands ++ vanilla_stricts
              -> WARN( True, text "More demands than arity" <+> ppr fun <+> ppr (idArity fun)
                                <+> ppr n_val_args <+> ppr demands )
                 vanilla_stricts       -- Not enough args, or no strictness

    add_type_str :: Type -> [Bool] -> [Bool]
    -- If the function arg types are strict, record that in the 'strictness bits'
    -- No need to instantiate because unboxed types (which dominate the strict
    -- types) can't instantiate type variables.
    -- add_type_str is done repeatedly (for each call); might be better
    -- once-for-all in the function
    -- But beware primops/datacons with no strictness
    add_type_str _ [] = []
    add_type_str fun_ty strs            -- Look through foralls
        | Just (_, fun_ty') <- splitForAllTy_maybe fun_ty       -- Includes coercions
        = add_type_str fun_ty' strs
    add_type_str fun_ty (str:strs)      -- Add strict-type info
        | Just (arg_ty, fun_ty') <- splitFunTy_maybe fun_ty
        = (str || isStrictType arg_ty) : add_type_str fun_ty' strs
381 {- Note [Unsaturated functions]
382 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
383 Consider (test eyeball/inline4)
386 where f has arity 2. Then we do not want to inline 'x', because
387 it'll just be floated out again. Even if f has lots of discounts
388 on its first argument -- it must be saturated for these to kick in
interestingArgContext :: [CoreRule] -> SimplCont -> Bool
-- If the argument has form (f x y), where x,y are boring,
-- and f is marked INLINE, then we don't want to inline f.
-- But if the context of the argument is
-- where g has rules, then we *do* want to inline f, in case it
-- exposes a rule that might fire.  Similarly, if the context is
-- where h has rules, then we do want to inline f; hence the
-- call_cont argument to interestingArgContext
--
-- The ai-rules flag makes this happen; if it's
-- set, the inliner gets just enough keener to inline f
-- regardless of how boring f's arguments are, if it's marked INLINE
--
-- The alternative would be to *always* inline an INLINE function,
-- regardless of how boring its context is; but that seems overkill
-- For example, it'd mean that wrapper functions were always inlined
interestingArgContext rules call_cont
  = notNull rules || enclosing_fn_has_rules
    enclosing_fn_has_rules = go call_cont

    -- Walk outwards until we hit the nearest enclosing function position
    go (Select {})         = False
    go (ApplyTo {})        = False
    go (StrictArg _ cci _) = interesting cci
    go (StrictBind {})     = False      -- ??
    go (CoerceIt _ c)      = go c
    go (Stop cci)          = interesting cci

    interesting (ArgCtxt rules) = rules
    interesting _               = False
427 %************************************************************************
429 \subsection{Decisions about inlining}
431 %************************************************************************
simplEnvForGHCi :: SimplEnv
-- Environment for simplifying a GHCi expression: all switches off,
-- gentle mode with neither rules nor inlining.
simplEnvForGHCi = mkSimplEnv allOffSwitchChecker ghci_mode
  where
    ghci_mode = SimplGently { sm_rules = False, sm_inline = False }
        -- Do not do any inlining, in case we expose some unboxed
        -- tuple stuff that confuses the bytecode interpreter
simplEnvForRules :: SimplEnv
-- Like simplEnvForGHCi, except that rule rewriting is switched on.
simplEnvForRules = mkSimplEnv allOffSwitchChecker rules_mode
  where
    rules_mode = SimplGently { sm_rules = True, sm_inline = False }
updModeForInlineRules :: SimplifierMode -> SimplifierMode
-- Mode to use when simplifying the RHS stored inside an InlineRule:
-- drop into gentle mode (rules and inlining on) if not already gentle.
updModeForInlineRules mode
    SimplGently {} -> mode      -- Don't modify mode if we already gentle
    SimplPhase {}  -> SimplGently { sm_rules = True, sm_inline = True }
        -- Simplify as much as possible, subject to the usual "gentle" rules
452 Inlining is controlled partly by the SimplifierMode switch. This has two
455 SimplGently (a) Simplifying before specialiser/full laziness
             (b) Simplifying inside InlineRules
457 (c) Simplifying the LHS of a rule
458 (d) Simplifying a GHCi expression or Template
461 SimplPhase n _ Used at all other times
465 Gentle mode has a separate boolean flag to control
466 a) inlining (sm_inline flag)
467 b) rules (sm_rules flag)
468 A key invariant about Gentle mode is that it is treated as the EARLIEST
469 phase. Something is inlined if the sm_inline flag is on AND the thing
470 is inlinable in the earliest phase. This is important. Example
472 {-# INLINE [~1] g #-}
478 If we were to inline g into f's inlining, then an importing module would
480 f e --> g (g e) ---> RULE fires
481 because the InlineRule for f has had g inlined into it.
483 On the other hand, it is bad not to do ANY inlining into an
484 InlineRule, because then recursive knots in instance declarations
485 don't get unravelled.
487 However, *sometimes* SimplGently must do no call-site inlining at all.
488 Before full laziness we must be careful not to inline wrappers,
489 because doing so inhibits floating
490 e.g. ...(case f x of ...)...
491 ==> ...(case (case x of I# x# -> fw x#) of ...)...
492 ==> ...(case x of I# x# -> case fw x# of ...)...
493 and now the redex (f x) isn't floatable any more.
495 The no-inlining thing is also important for Template Haskell. You might be
496 compiling in one-shot mode with -O2; but when TH compiles a splice before
497 running it, we don't want to use -O2. Indeed, we don't want to inline
498 anything, because the byte-code interpreter might get confused about
499 unboxed tuples and suchlike.
501 Note [RULEs enabled in SimplGently]
502 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
503 RULES are enabled when doing "gentle" simplification. Two reasons:
505 * We really want the class-op cancellation to happen:
506 op (df d1 d2) --> $cop3 d1 d2
507 because this breaks the mutual recursion between 'op' and 'df'
511 to work in Template Haskell when simplifying
512 splices, so we get simpler code for literal strings
514 Note [Simplifying gently inside InlineRules]
515 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
516 We don't do much simplification inside InlineRules (which come from
517 INLINE pragmas). It really is important to switch off inlinings
518 inside such expressions. Consider the following example
524 in ...g...g...g...g...g...
526 Now, if that's the ONLY occurrence of f, it will be inlined inside g,
527 and thence copied multiple times when g is inlined.
This function may be inlined in other modules, so we don't want to
530 remove (by inlining) calls to functions that have specialisations, or
531 that may have transformation rules in an importing scope.
533 E.g. {-# INLINE f #-}
536 and suppose that g is strict *and* has specialisations. If we inline
537 g's wrapper, we deny f the chance of getting the specialised version
538 of g when f is inlined at some call site (perhaps in some other
541 It's also important not to inline a worker back into a wrapper.
        wrapper = inline_me (\x -> ...worker... )
544 Normally, the inline_me prevents the worker getting inlined into
545 the wrapper (initially, the worker's only call site!). But,
546 if the wrapper is sure to be called, the strictness analyser will
547 mark it 'demanded', so when the RHS is simplified, it'll get an ArgOf
548 continuation. That's why the keep_inline predicate returns True for
549 ArgOf continuations. It shouldn't do any harm not to dissolve the
550 inline-me note under these circumstances.
552 Although we do very little simplification inside an InlineRule,
553 the RHS is simplified as normal. For example:
555 all xs = foldr (&&) True xs
556 any p = all . map p {-# INLINE any #-}
558 The RHS of 'any' will get optimised and deforested; but the InlineRule
559 will still mention the original RHS.
562 preInlineUnconditionally
563 ~~~~~~~~~~~~~~~~~~~~~~~~
564 @preInlineUnconditionally@ examines a bndr to see if it is used just
565 once in a completely safe way, so that it is safe to discard the
binding and inline its RHS at the (unique) usage site, REGARDLESS of how
567 big the RHS might be. If this is the case we don't simplify the RHS
568 first, but just inline it un-simplified.
570 This is much better than first simplifying a perhaps-huge RHS and then
571 inlining and re-simplifying it. Indeed, it can be at least quadratically
580 We may end up simplifying e1 N times, e2 N-1 times, e3 N-3 times etc.
581 This can happen with cascades of functions too:
588 THE MAIN INVARIANT is this:
590 ---- preInlineUnconditionally invariant -----
591 IF preInlineUnconditionally chooses to inline x = <rhs>
592 THEN doing the inlining should not change the occurrence
593 info for the free vars of <rhs>
594 ----------------------------------------------
596 For example, it's tempting to look at trivial binding like
598 and inline it unconditionally. But suppose x is used many times,
599 but this is the unique occurrence of y. Then inlining x would change
600 y's occurrence info, which breaks the invariant. It matters: y
might have a BIG rhs, which will now be dup'd at every occurrence of x.
604 Even RHSs labelled InlineMe aren't caught here, because there might be
605 no benefit from inlining at the call site.
607 [Sept 01] Don't unconditionally inline a top-level thing, because that
608 can simply make a static thing into something built dynamically. E.g.
612 [Remember that we treat \s as a one-shot lambda.] No point in
613 inlining x unless there is something interesting about the call site.
615 But watch out: if you aren't careful, some useful foldr/build fusion
616 can be lost (most notably in spectral/hartel/parstof) because the
617 foldr didn't see the build. Doing the dynamic allocation isn't a big
618 deal, in fact, but losing the fusion can be. But the right thing here
619 seems to be to do a callSiteInline based on the fact that there is
620 something interesting about the call site (it's strict). Hmm. That
623 Conclusion: inline top level things gaily until Phase 0 (the last
624 phase), at which point don't.
626 Note [pre/postInlineUnconditionally in gentle mode]
627 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
628 Even in gentle mode we want to do preInlineUnconditionally. The
629 reason is that too little clean-up happens if you don't inline
630 use-once things. Also a bit of inlining is *good* for full laziness;
631 it can expose constant sub-expressions. Example in
632 spectral/mandel/Mandel.hs, where the mandelset function gets a useful
633 let-float if you inline windowToViewport
635 However, as usual for Gentle mode, do not inline things that are
inactive in the initial stages.  See Note [Gentle mode].
Note [Top-level bottoming Ids]
639 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
640 Don't inline top-level Ids that are bottoming, even if they are used just
641 once, because FloatOut has gone to some trouble to extract them out.
642 Inlining them won't make the program run faster!
preInlineUnconditionally :: SimplEnv -> TopLevelFlag -> InId -> InExpr -> Bool
-- True <=> discard this binding and inline the (unsimplified) RHS at
-- its unique occurrence.  See the preInlineUnconditionally invariant.
preInlineUnconditionally env top_lvl bndr rhs
  | isTopLevel top_lvl && isBottomingId bndr = False    -- Note [Top-level bottoming Ids]
  | opt_SimplNoPreInlining                   = False
  | otherwise = case idOccInfo bndr of
                  IAmDead                    -> True    -- Happens in ((\x.1) v)
                  OneOcc in_lam True int_cxt -> try_once in_lam int_cxt

    active = case phase of
                   SimplGently {} -> isEarlyActive act
                   -- See Note [pre/postInlineUnconditionally in gentle mode]
                   SimplPhase n _ -> isActive n act
    act = idInlineActivation bndr

    try_once in_lam int_cxt     -- There's one textual occurrence
        | not in_lam = isNotTopLevel top_lvl || early_phase
        | otherwise  = int_cxt && canInlineInLam rhs

-- Be very careful before inlining inside a lambda, because (a) we must not
-- invalidate occurrence information, and (b) we want to avoid pushing a
-- single allocation (here) into multiple allocations (inside lambda).
-- Inlining a *function* with a single *saturated* call would be ok, mind you.
--      || (if is_cheap && not (canInlineInLam rhs) then pprTrace "preinline" (ppr bndr <+> ppr rhs) ok else ok)
--      where
--              is_cheap = exprIsCheap rhs
--              ok = is_cheap && int_cxt

        --      int_cxt         The context isn't totally boring
        -- E.g. let f = \ab.BIG in \y. map f xs
        --      Don't want to substitute for f, because then we allocate
        --      its closure every time the \y is called
        -- But: let f = \ab.BIG in \y. map (f y) xs
        --      Now we do want to substitute for f, even though it's not
        --      saturated, because we're going to allocate a closure for
        --      (f y) every time round the loop anyhow.

        -- canInlineInLam => free vars of rhs are (Once in_lam) or Many,
        -- so substituting rhs inside a lambda doesn't change the occ info.
        -- Sadly, not quite the same as exprIsHNF.
    canInlineInLam (Lit _)    = True
    canInlineInLam (Lam b e)  = isRuntimeVar b || canInlineInLam e
    canInlineInLam (Note _ e) = canInlineInLam e
    canInlineInLam _          = False

    early_phase = case phase of
                        SimplPhase 0 _ -> False

-- If we don't have this early_phase test, consider
--      x = length [1,2,3]
-- The full laziness pass carefully floats all the cons cells to
-- top level, and preInlineUnconditionally floats them all back in.
-- Result is (a) static allocation replaced by dynamic allocation
--           (b) many simplifier iterations because this tickles
--               a related problem; only one inlining per pass
--
-- On the other hand, I have seen cases where top-level fusion is
-- lost if we don't inline top level thing (e.g. string constants)
-- Hence the test for phase zero (which is the phase for all the final
-- simplifications).  Until phase zero we take no special notice of
-- top level things, but then we become more leery about inlining
711 postInlineUnconditionally
712 ~~~~~~~~~~~~~~~~~~~~~~~~~
713 @postInlineUnconditionally@ decides whether to unconditionally inline
714 a thing based on the form of its RHS; in particular if it has a
715 trivial RHS. If so, we can inline and discard the binding altogether.
717 NB: a loop breaker has must_keep_binding = True and non-loop-breakers
718 only have *forward* references Hence, it's safe to discard the binding
720 NOTE: This isn't our last opportunity to inline. We're at the binding
721 site right now, and we'll get another opportunity when we get to the
Note that we do this unconditional inlining only for trivial RHSs.
725 Don't inline even WHNFs inside lambdas; doing so may simply increase
726 allocation when the function is called. This isn't the last chance; see
NB: Even inline pragmas (e.g. IMustBeINLINEd) are ignored here.  Why?
730 Because we don't even want to inline them into the RHS of constructor
731 arguments. See NOTE above
733 NB: At one time even NOINLINE was ignored here: if the rhs is trivial
734 it's best to inline it anyway. We often get a=E; b=a from desugaring,
735 with both a and b marked NOINLINE. But that seems incompatible with
736 our new view that inlining is like a RULE, so I'm sticking to the 'active'
postInlineUnconditionally
    :: SimplEnv -> TopLevelFlag
    -> OutId            -- The binder (an InId would be fine too)
    -> OccInfo          -- From the InId
-- True <=> inline the (already-simplified) RHS at every occurrence
-- and drop the binding; only for small/trivial RHSs.
postInlineUnconditionally env top_lvl bndr occ_info rhs unfolding
  | isLoopBreaker occ_info      = False -- If it's a loop-breaker of any kind, don't inline
                                        -- because it might be referred to "earlier"
  | isExportedId bndr           = False
  | isStableUnfolding unfolding = False -- Note [InlineRule and postInlineUnconditionally]
  | exprIsTrivial rhs           = True
  | isTopLevel top_lvl          = False -- Note [Top level and postInlineUnconditionally]

        -- The point of examining occ_info here is that for *non-values*
        -- that occur outside a lambda, the call-site inliner won't have
        -- a chance (because it doesn't know that the thing
        -- only occurs once).   The pre-inliner won't have gotten
        -- it either, if the thing occurs in more than one branch
        -- So the main target is things like
        --        True  -> case x of ...
        --        False -> case x of ...
        -- This is very important in practice; e.g. wheel-seive1 doubles
        -- in allocation if you miss this out
      OneOcc in_lam _one_br int_cxt     -- OneOcc => no code-duplication issue
        ->     smallEnoughToInline unfolding    -- Small enough to dup
                        -- ToDo: consider discount on smallEnoughToInline if int_cxt is true
                        -- NB: Do NOT inline arbitrarily big things, even if one_br is True
                        -- Reason: doing so risks exponential behaviour.  We simplify a big
                        --         expression, inline it, and simplify it again.  But if the
                        --         very same thing happens in the big expression, we get
                        -- PRINCIPLE: when we've already simplified an expression once,
                        -- make sure that we only inline it if it's reasonably small.

                        -- Outside a lambda, we want to be reasonably aggressive
                        -- about inlining into multiple branches of case
                        -- e.g. let x = <non-value>
                        --      in case y of { C1 -> ..x..; C2 -> ..x..; C3 -> ... }
                        -- Inlining can be a big win if C3 is the hot-spot, even if
                        -- the uses in C1, C2 are not 'interesting'
                        -- An example that gets worse if you add int_cxt here is 'clausify'

                (isCheapUnfolding unfolding && int_cxt))
                        -- isCheap => acceptable work duplication; in_lam may be true
                        -- int_cxt to prevent us inlining inside a lambda without some
                        -- good reason.  See the notes on int_cxt in preInlineUnconditionally

      IAmDead -> True   -- This happens; for example, the case_bndr during case of
                        -- known constructor:  case (a,b) of x { (p,q) -> ... }
                        -- Here x isn't mentioned in the RHS, so we don't want to
                        -- create the (dead) let-binding  let x = (a,b) in ...

-- Here's an example that we don't handle well:
--      let f = if b then Left (\x.BIG) else Right (\y.BIG)
--      in \y. ....case f of {...} ....
-- Here f is used just once, and duplicating the case work is fine (exprIsCheap).
-- * We can't preInlineUnconditionally because that would invalidate
--   the occ info for b.
-- * We can't postInlineUnconditionally because the RHS is big, and
--   that risks exponential behaviour
-- * We can't call-site inline, because the rhs is big
    active = case getMode env of
                   SimplGently {} -> isEarlyActive act
                   -- See Note [pre/postInlineUnconditionally in gentle mode]
                   SimplPhase n _ -> isActive n act
    act = idInlineActivation bndr
activeUnfolding :: SimplEnv -> IdUnfoldingFun
-- Choose the unfolding-lookup function appropriate to the current mode
  = case getMode env of
      SimplGently { sm_inline = False } -> active_unfolding_minimal
      SimplGently { sm_inline = True }  -> active_unfolding_gentle
      SimplPhase n _                    -> active_unfolding n
activeUnfInRule :: SimplEnv -> IdUnfoldingFun
-- When matching in RULE, we want to "look through" an unfolding
-- if *rules* are on, even if *inlinings* are not.  A notable example
-- is DFuns, which really we want to match in rules like (op dfun)
  = case getMode env of
      SimplGently { sm_rules = False } -> active_unfolding_minimal
      SimplGently { sm_rules = True }  -> active_unfolding_gentle
      SimplPhase n _                   -> active_unfolding n
active_unfolding_minimal :: IdUnfoldingFun
-- Compulsory unfoldings only
-- Ignore SimplGently, because we want to inline regardless;
-- the Id has no top-level binding at all
--
-- NB: we used to have a second exception, for data con wrappers.
-- On the grounds that we use gentle mode for rule LHSs, and
-- they match better when data con wrappers are inlined.
-- But that only really applies to the trivial wrappers (like (:)),
-- and they are now constructed as Compulsory unfoldings (in MkId)
-- so they'll happen anyway.
active_unfolding_minimal id
  | isCompulsoryUnfolding unf = unf
  | otherwise                 = NoUnfolding
    unf = realIdUnfolding id -- Never a loop breaker
active_unfolding_gentle :: IdUnfoldingFun
-- Anything that is early-active
-- See Note [Gentle mode]
active_unfolding_gentle id =
    if isEarlyActive (idInlineActivation id)
      then idUnfolding id
           -- idUnfolding checks for loop-breakers
           -- Things with an INLINE pragma may have
           -- an unfolding *and* be a loop breaker
           -- (maybe the knot is not yet untied)
      else NoUnfolding
active_unfolding :: CompilerPhase -> IdUnfoldingFun
-- Use the Id's unfolding only if its activation covers phase n
active_unfolding n id =
    if isActive n (idInlineActivation id)
      then idUnfolding id
      else NoUnfolding
activeRule :: DynFlags -> SimplEnv -> Maybe (Activation -> Bool)
-- Nothing => No rules at all
-- Just p  => rules whose activation satisfies p may fire
activeRule dflags env
  | not (dopt Opt_EnableRewriteRules dflags)
  = Nothing     -- Rewriting is off
  = case getMode env of
      SimplGently { sm_rules = rules_on }
        | rules_on  -> Just isEarlyActive       -- Note [RULEs enabled in SimplGently]
        | otherwise -> Nothing
      SimplPhase n _ -> Just (isActive n)
885 Note [Top level and postInlineUnconditionally]
886 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We don't do postInlineUnconditionally for top-level things (except ones that
889 * There is no point, because the main goal is to get rid of local
890 bindings used in multiple case branches.
891 * Doing so will inline top-level error expressions that have been
892 carefully floated out by FloatOut. More generally, it might
893 replace static allocation with dynamic.
895 Note [InlineRule and postInlineUnconditionally]
896 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
897 Do not do postInlineUnconditionally if the Id has an InlineRule, otherwise
898 we lose the unfolding. Example
900 -- f has InlineRule with rhs (e |> co)
904 Then there's a danger we'll optimise to
909 and now postInlineUnconditionally, losing the InlineRule on f. Now f'
910 won't inline because 'e' is too big.
913 %************************************************************************
917 %************************************************************************
920 mkLam :: SimplEnv -> [OutBndr] -> OutExpr -> SimplM OutExpr
921 -- mkLam tries three things
922 -- a) eta reduction, if that gives a trivial expression
923 -- b) eta expansion [only if there are some value lambdas]
-- Entry point: fetch the dynamic flags and hand off to mkLam'.
-- NOTE(review): the equation head (mkLam env bndrs body) appears to be
-- missing from this listing (lines 924-927 are absent).
928 = do { dflags <- getDOptsSmpl
929 ; mkLam' dflags bndrs body }
931 mkLam' :: DynFlags -> [OutBndr] -> OutExpr -> SimplM OutExpr
-- First equation: float a cast outside the lambdas, turning
-- \bndrs. (body `cast` co) into (\bndrs. body) `cast` (bndrs -> co);
-- see Note [Casts and lambdas]. Disallowed when a binder is a coercion
-- variable free in co ('bad' below), since the result would be ill-kinded.
932 mkLam' dflags bndrs (Cast body co)
933 | not (any bad bndrs)
934 -- Note [Casts and lambdas]
935 = do { lam <- mkLam' dflags bndrs body
936 ; return (mkCoerce (mkPiTypes bndrs co) lam) }
-- NOTE(review): the 'where' keyword appears to have been dropped from
-- this listing before 'co_vars' (line 937 is missing).
938 co_vars = tyVarsOfType co
939 bad bndr = isCoVar bndr && bndr `elemVarSet` co_vars
-- Second equation: try eta reduction, then eta expansion, then give up
-- and just rebuild the lambdas.
941 mkLam' dflags bndrs body
942 | dopt Opt_DoEtaReduction dflags,
943 Just etad_lam <- tryEtaReduce bndrs body
944 = do { tick (EtaReduction (head bndrs))
-- NOTE(review): the '; return etad_lam }' line appears to be missing
-- from this listing (lines 945-946 are absent).
947 | dopt Opt_DoLambdaEtaExpansion dflags,
948 not (inGentleMode env), -- In gentle mode don't eta-expand
949 any isRuntimeVar bndrs -- because it can clutter up the code
950 -- with casts etc that may not be removed
951 = do { let body' = tryEtaExpansion dflags body
952 ; return (mkLams bndrs body') }
955 = return (mkLams bndrs body)
958 Note [Casts and lambdas]
959 ~~~~~~~~~~~~~~~~~~~~~~~~
961 (\x. (\y. e) `cast` g1) `cast` g2
962 There is a danger here that the two lambdas look separated, and the
963 full laziness pass might float an expression to between the two.
965 So this equation in mkLam' floats the g1 out, thus:
966 (\x. e `cast` g1) --> (\x.e) `cast` (tx -> g1)
969 In general, this floats casts outside lambdas, where (I hope) they
970 might meet and cancel with some other cast:
971 \x. e `cast` co ===> (\x. e) `cast` (tx -> co)
972 /\a. e `cast` co ===> (/\a. e) `cast` (/\a. co)
973 /\g. e `cast` co ===> (/\g. e) `cast` (/\g. co)
976 Notice that it works regardless of 'e'. Originally it worked only
977 if 'e' was itself a lambda, but in some cases that resulted in
978 fruitless iteration in the simplifier. A good example was when
979 compiling Text.ParserCombinators.ReadPrec, where we had a definition
980 like (\x. Get `cast` g)
981 where Get is a constructor with nonzero arity. Then mkLam eta-expanded
982 the Get, and the next iteration eta-reduced it, and then eta-expanded
985 Note also the side condition for the case of coercion binders.
986 It does not make sense to transform
987 /\g. e `cast` g ==> (/\g.e) `cast` (/\g.g)
988 because the latter is not well-kinded.
990 -- c) floating lets out through big lambdas
991 -- [only if all tyvar lambdas, and only if this lambda
992 -- is the RHS of a let]
994 {- Sept 01: I'm experimenting with getting the
995 full laziness pass to float out past big lambdas
996 | all isTyVar bndrs, -- Only for big lambdas
997 contIsRhs cont -- Only try the rhs type-lambda floating
998 -- if this is indeed a right-hand side; otherwise
999 -- we end up floating the thing out, only for float-in
1000 -- to float it right back in again!
1001 = do (floats, body') <- tryRhsTyLam env bndrs body
1002 return (floats, mkLams bndrs body')
1006 %************************************************************************
1010 %************************************************************************
1012 Note [Eta reduction conditions]
1013 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1014 We try for eta reduction here, but *only* if we get all the way to a
1015 trivial expression. We don't want to remove extra lambdas unless we
1016 are going to avoid allocating this thing altogether.
1018 There are some particularly delicate points here:
1020 * Eta reduction is not valid in general:
1022 This matters, partly for old-fashioned correctness reasons but,
1023 worse, getting it wrong can yield a seg fault. Consider
1025 h y = case (case y of { True -> f `seq` True; False -> False }) of
1026 True -> ...; False -> ...
1028 If we (unsoundly) eta-reduce f to get f=f, the strictness analyser
1029 says f=bottom, and replaces the (f `seq` True) with just
1030 (f `cast` unsafe-co). BUT, as things stand, 'f' got arity 1, and it
1031 *keeps* arity 1 (perhaps also wrongly). So CorePrep eta-expands
1032 the definition again, so that it does not terminate after all.
1033 Result: seg-fault because the boolean case actually gets a function value.
1036 So it's important to do the right thing.
1038 * Note [Arity care]: we need to be careful if we just look at f's
1039 arity. Currently (Dec07), f's arity is visible in its own RHS (see
1040 Note [Arity robustness] in SimplEnv) so we must *not* trust the
1041 arity when checking that 'f' is a value. Otherwise we will
1046 Which might change a terminating program (think (f `seq` e)) to a
1047 non-terminating one. So we check for being a loop breaker first.
1049 However for GlobalIds we can look at the arity; and for primops we
1050 must, since they have no unfolding.
1052 * Regardless of whether 'f' is a value, we always want to
1053 reduce (/\a -> f a) to f
1054 This came up in a RULE: foldr (build (/\a -> g a))
1055 did not match foldr (build (/\b -> ...something complex...))
1056 The type checker can insert these eta-expanded versions,
1057 with both type and dictionary lambdas; hence the slightly
1060 * Never *reduce* arity. For example
1062 Then if h has arity 1 we don't want to eta-reduce because then
1063 f's arity would decrease, and that is bad
1065 These delicacies are why we don't use exprIsTrivial and exprIsHNF here.
1069 tryEtaReduce :: [OutBndr] -> OutExpr -> Maybe OutExpr
-- Try to eta-reduce (\bndrs -> body) all the way to a trivial function;
-- see Note [Eta reduction conditions]. Returns Nothing if any of the
-- soundness conditions fail.
1070 tryEtaReduce bndrs body
1071 = go (reverse bndrs) body
-- 'go' peels applications off the body, matching each argument against
-- the corresponding (innermost-first) binder via ok_arg.
1073 incoming_arity = count isId bndrs
1075 go (b : bs) (App fun arg) | ok_arg b arg = go bs fun -- Loop round
1076 go [] fun | ok_fun fun = Just fun -- Success!
1077 go _ _ = Nothing -- Failure!
1079 -- Note [Eta reduction conditions]
1080 ok_fun (App fun (Type ty))
1081 | not (any (`elemVarSet` tyVarsOfType ty) bndrs)
-- NOTE(review): some ok_fun equations appear to have been dropped from
-- this listing (lines 1082-1083 are missing) before this right-hand side.
1084 = not (fun_id `elem` bndrs)
1085 && (ok_fun_id fun_id || all ok_lam bndrs)
1088 ok_fun_id fun = fun_arity fun >= incoming_arity
1090 fun_arity fun -- See Note [Arity care]
1091 | isLocalId fun && isLoopBreaker (idOccInfo fun) = 0
1092 | otherwise = idArity fun
1094 ok_lam v = isTyVar v || isDictId v
-- An argument matches its binder only if it is (cheaply) equal to the
-- variable itself.
1096 ok_arg b arg = varToCoreExpr b `cheapEqExpr` arg
1100 %************************************************************************
1104 %************************************************************************
1108 f = \x1..xn -> N ==> f = \x1..xn y1..ym -> N y1..ym
1111 where (in both cases)
1113 * The xi can include type variables
1115 * The yi are all value variables
1117 * N is a NORMAL FORM (i.e. no redexes anywhere)
1118 wanting a suitable number of extra args.
1120 The biggest reason for doing this is for cases like
1126 Here we want to get the lambdas together. A good example is the nofib
1127 program fibheaps, which gets 25% more allocation if you don't do this
1130 We may have to sandwich some coerces between the lambdas
1131 to make the types work. exprEtaExpandArity looks through coerces
1132 when computing arity; and etaExpand adds the coerces as necessary when
1133 actually computing the expansion.
1136 tryEtaExpansion :: DynFlags -> OutExpr -> OutExpr
1137 -- There is at least one runtime binder in the binders
-- Eta-expand 'body' to the arity computed by exprEtaExpandArity.
-- NOTE(review): the 'where' keyword appears to have been dropped from
-- this listing before 'fun_arity' (line 1140 is missing).
1138 tryEtaExpansion dflags body
1139 = etaExpand fun_arity body
1141 fun_arity = exprEtaExpandArity dflags body
1145 %************************************************************************
1147 \subsection{Floating lets out of big lambdas}
1149 %************************************************************************
1151 Note [Floating and type abstraction]
1152 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1155 We'd like to float this to
1158 x = /\a. C (y1 a) (y2 a)
1159 for the usual reasons: we want to inline x rather vigorously.
1161 You may think that this kind of thing is rare. But in some programs it is
1162 common. For example, if you do closure conversion you might get:
1164 data a :-> b = forall e. (e -> a -> b) :$ e
1166 f_cc :: forall a. a :-> a
1167 f_cc = /\a. (\e. id a) :$ ()
1169 Now we really want to inline that f_cc thing so that the
1170 construction of the closure goes away.
1172 So I have elaborated simplLazyBind to understand right-hand sides that look
1176 and treat them specially. The real work is done in SimplUtils.abstractFloats,
1177 but there is quite a bit of plumbing in simplLazyBind as well.
1179 The same transformation is good when there are lets in the body:
1181 /\abc -> let(rec) x = e in b
1183 let(rec) x' = /\abc -> let x = x' a b c in e
1185 /\abc -> let x = x' a b c in b
1187 This is good because it can turn things like:
1189 let f = /\a -> letrec g = ... g ... in g
1191 letrec g' = /\a -> ... g' a ...
1193 let f = /\ a -> g' a
1195 which is better. In effect, it means that big lambdas don't impede
1198 This optimisation is CRUCIAL in eliminating the junk introduced by
1199 desugaring mutually recursive definitions. Don't eliminate it lightly!
1201 [May 1999] If we do this transformation *regardless* then we can
1202 end up with some pretty silly stuff. For example,
1205 st = /\ s -> let { x1=r1 ; x2=r2 } in ...
1210 st = /\s -> ...[y1 s/x1, y2 s/x2]
1213 Unless the "..." is a WHNF there is really no point in doing this.
1214 Indeed it can make things worse. Suppose x1 is used strictly,
1217 x1* = case f y of { (a,b) -> e }
1219 If we abstract this wrt the tyvar we then can't do the case inline
1220 as we would normally do.
1222 That's why the whole transformation is part of the same process that
1223 floats let-bindings and constructor arguments out of RHSs. In particular,
1224 it is guarded by the doFloatFromRhs call in simplLazyBind.
-- Abstract the floated bindings of body_env over the given type
-- variables, returning polymorphic versions of the bindings plus the
-- body with each floated Id replaced by an application of its
-- polymorphic counterpart. See Note [Floating and type abstraction].
1228 abstractFloats :: [OutTyVar] -> SimplEnv -> OutExpr -> SimplM ([OutBind], OutExpr)
1229 abstractFloats main_tvs body_env body
1230 = ASSERT( notNull body_floats )
1231 do { (subst, float_binds) <- mapAccumLM abstract empty_subst body_floats
1232 ; return (float_binds, CoreSubst.substExpr subst body) }
1234 main_tv_set = mkVarSet main_tvs
1235 body_floats = getFloats body_env
1236 empty_subst = CoreSubst.mkEmptySubst (seInScope body_env)
1238 abstract :: CoreSubst.Subst -> OutBind -> SimplM (CoreSubst.Subst, OutBind)
-- Non-recursive binding: make one polymorphic Id, lambda-abstract the
-- (substituted) RHS, and extend the substitution so later bindings and
-- the body see the applied polymorphic Id instead.
1239 abstract subst (NonRec id rhs)
1240 = do { (poly_id, poly_app) <- mk_poly tvs_here id
1241 ; let poly_rhs = mkLams tvs_here rhs'
1242 subst' = CoreSubst.extendIdSubst subst id poly_app
1243 ; return (subst', (NonRec poly_id poly_rhs)) }
1245 rhs' = CoreSubst.substExpr subst rhs
1246 tvs_here | any isCoVar main_tvs = main_tvs -- Note [Abstract over coercions]
-- NOTE(review): an '| otherwise' guard line appears to have been
-- dropped from this listing (line 1247 is missing) before this RHS.
1248 = varSetElems (main_tv_set `intersectVarSet` exprSomeFreeVars isTyVar rhs')
1250 -- Abstract only over the type variables free in the rhs
1251 -- wrt which the new binding is abstracted. But the naive
1252 -- approach of abstract wrt the tyvars free in the Id's type
1254 -- /\ a b -> let t :: (a,b) = (e1, e2)
1257 -- Here, b isn't free in x's type, but we must nevertheless
1258 -- abstract wrt b as well, because t's type mentions b.
1259 -- Since t is floated too, we'd end up with the bogus:
1260 -- poly_t = /\ a b -> (e1, e2)
1261 -- poly_x = /\ a -> fst (poly_t a *b*)
1262 -- So for now we adopt the even more naive approach of
1263 -- abstracting wrt *all* the tyvars. We'll see if that
1264 -- gives rise to problems. SLPJ June 98
-- Recursive group: abstract every Id over the same tyvars, extending the
-- substitution for the whole group up front so the RHSs can refer to the
-- polymorphic versions of each other.
1266 abstract subst (Rec prs)
1267 = do { (poly_ids, poly_apps) <- mapAndUnzipM (mk_poly tvs_here) ids
1268 ; let subst' = CoreSubst.extendSubstList subst (ids `zip` poly_apps)
1269 poly_rhss = [mkLams tvs_here (CoreSubst.substExpr subst' rhs) | rhs <- rhss]
1270 ; return (subst', Rec (poly_ids `zip` poly_rhss)) }
1272 (ids,rhss) = unzip prs
1273 -- For a recursive group, it's a bit of a pain to work out the minimal
1274 -- set of tyvars over which to abstract:
1275 -- /\ a b c. let x = ...a... in
1276 -- letrec { p = ...x...q...
1277 -- q = .....p...b... } in
1279 -- Since 'x' is abstracted over 'a', the {p,q} group must be abstracted
1280 -- over 'a' (because x is replaced by (poly_x a)) as well as 'b'.
1281 -- Since it's a pain, we just use the whole set, which is always safe
1283 -- If you ever want to be more selective, remember this bizarre case too:
1285 -- Here, we must abstract 'x' over 'a'.
-- Make the polymorphic version of 'var': same name (fresh unique), type
-- generalised over tvs_here; returns the new Id and the application of it
-- to those tyvars, which replaces occurrences of the original.
1288 mk_poly tvs_here var
1289 = do { uniq <- getUniqueM
1290 ; let poly_name = setNameUnique (idName var) uniq -- Keep same name
1291 poly_ty = mkForAllTys tvs_here (idType var) -- But new type of course
1292 poly_id = transferPolyIdInfo var tvs_here $ -- Note [transferPolyIdInfo] in Id.lhs
1293 mkLocalId poly_name poly_ty
1294 ; return (poly_id, mkTyApps (Var poly_id) (mkTyVarTys tvs_here)) }
1295 -- In the olden days, it was crucial to copy the occInfo of the original var,
1296 -- because we were looking at occurrence-analysed but as yet unsimplified code!
1297 -- In particular, we mustn't lose the loop breakers. BUT NOW we are looking
1298 -- at already simplified code, so it doesn't matter
1300 -- It's even right to retain single-occurrence or dead-var info:
1301 -- Suppose we started with /\a -> let x = E in B
1302 -- where x occurs once in B. Then we transform to:
1303 -- let x' = /\a -> E in /\a -> let x* = x' a in B
1304 -- where x* has an INLINE prag on it. Now, once x* is inlined,
1305 -- the occurrences of x' will be just the occurrences originally
1309 Note [Abstract over coercions]
1310 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1311 If a coercion variable (g :: a ~ Int) is free in the RHS, then so is the
1312 type variable a. Rather than sort this mess out, we simply bale out and abstract
1313 wrt all the type variables if any of them are coercion variables.
1316 Historical note: if you use let-bindings instead of a substitution, beware of this:
1318 -- Suppose we start with:
1320 -- x = /\ a -> let g = G in E
1322 -- Then we'll float to get
1324 -- x = let poly_g = /\ a -> G
1325 -- in /\ a -> let g = poly_g a in E
1327 -- But now the occurrence analyser will see just one occurrence
1328 -- of poly_g, not inside a lambda, so the simplifier will
1329 -- PreInlineUnconditionally poly_g back into g! Back to square 1!
1330 -- (I used to think that the "don't inline lone occurrences" stuff
1331 -- would stop this happening, but since it's the *only* occurrence,
1332 -- PreInlineUnconditionally kicks in first!)
1334 -- Solution: put an INLINE note on g's RHS, so that poly_g seems
1335 -- to appear many times. (NB: mkInlineMe eliminates
1336 -- such notes on trivial RHSs, so do it manually.)
1338 %************************************************************************
1342 %************************************************************************
1344 prepareAlts tries these things:
1346 1. Eliminate alternatives that cannot match, including the
1347 DEFAULT alternative.
1349 2. If the DEFAULT alternative can match only one possible constructor,
1350 then make that constructor explicit.
1352 case e of x { DEFAULT -> rhs }
1354 case e of x { (a,b) -> rhs }
1355 where the type is a single constructor type. This gives better code
1356 when rhs also scrutinises x or e.
1358 3. Returns a list of the constructors that cannot match in the
1359 DEFAULT alternative (if there is one)
1361 Here "cannot match" includes knowledge from GADTs
1363 It's a good idea to do this stuff before simplifying the alternatives, to
1364 avoid simplifying alternatives we know can't happen, and to come up with
1365 the list of constructors that are handled, to put into the IdInfo of the
1366 case binder, for use when simplifying the alternatives.
1368 Eliminating the default alternative in (1) isn't so obvious, but it can
1371 data Colour = Red | Green | Blue
1380 DEFAULT -> [ case y of ... ]
1382 If we inline h into f, the default case of the inlined h can't happen.
1383 If we don't notice this, we may end up filtering out *all* the cases
1384 of the inner case y, which give us nowhere to go!
-- Prepare case alternatives before simplifying them: drop impossible
-- alternatives, refine the DEFAULT (via prepareDefault), and return the
-- constructors that cannot occur in the DEFAULT branch (for the case
-- binder's IdInfo). See the commentary above this function.
1387 prepareAlts :: OutExpr -> OutId -> [InAlt] -> SimplM ([AltCon], [InAlt])
1388 prepareAlts scrut case_bndr' alts
1389 = do { let (alts_wo_default, maybe_deflt) = findDefault alts
1390 alt_cons = [con | (con,_,_) <- alts_wo_default]
1391 imposs_deflt_cons = nub (imposs_cons ++ alt_cons)
1392 -- "imposs_deflt_cons" are handled
1393 -- EITHER by the context,
1394 -- OR by a non-DEFAULT branch in this case expression.
1396 ; default_alts <- prepareDefault case_bndr' mb_tc_app
1397 imposs_deflt_cons maybe_deflt
1399 ; let trimmed_alts = filterOut impossible_alt alts_wo_default
1400 merged_alts = mergeAlts trimmed_alts default_alts
1401 -- We need the mergeAlts in case the new default_alt
1402 -- has turned into a constructor alternative.
1403 -- The merge keeps the inner DEFAULT at the front, if there is one
1404 -- and interleaves the alternatives in the right order
1406 ; return (imposs_deflt_cons, merged_alts) }
-- Lazy pattern: inst_tys is demanded only when mb_tc_app is a Just.
1408 mb_tc_app = splitTyConApp_maybe (idType case_bndr')
1409 Just (_, inst_tys) = mb_tc_app
-- Constructors the scrutinee is already known not to be, from its
-- unfolding (when it is a variable).
-- NOTE(review): the catch-all arm of this case appears to have been
-- dropped from this listing (lines 1413-1414 are missing).
1411 imposs_cons = case scrut of
1412 Var v -> otherCons (idUnfolding v)
1415 impossible_alt :: CoreAlt -> Bool
1416 impossible_alt (con, _, _) | con `elem` imposs_cons = True
1417 impossible_alt (DataAlt con, _, _) = dataConCannotMatch inst_tys con
1418 impossible_alt _ = False
1421 prepareDefault :: OutId -- Case binder; need just for its type. Note that as an
1422 -- OutId, it has maximum information; this is important.
1423 -- Test simpl013 is an example
1424 -> Maybe (TyCon, [Type]) -- Type of scrutinee, decomposed
1425 -> [AltCon] -- These cons can't happen when matching the default
1426 -> Maybe InExpr -- Rhs
1427 -> SimplM [InAlt] -- Still unsimplified
1428 -- We use a list because it's what mergeAlts expects,
1430 --------- Fill in known constructor -----------
1431 prepareDefault case_bndr (Just (tycon, inst_tys)) imposs_cons (Just deflt_rhs)
1432 | -- This branch handles the case where we are
1433 -- scrutinising an algebraic data type
1434 isAlgTyCon tycon -- It's a data type, tuple, or unboxed tuples.
1435 , not (isNewTyCon tycon) -- We can have a newtype, if we are just doing an eval:
1436 -- case x of { DEFAULT -> e }
1437 -- and we don't want to fill in a default for them!
1438 , Just all_cons <- tyConDataCons_maybe tycon
1439 , not (null all_cons) -- This is a tricky corner case. If the data type has no constructors,
1440 -- which GHC allows, then the case expression will have at most a default
1441 -- alternative. We don't want to eliminate that alternative, because the
1442 -- invariant is that there's always one alternative. It's more convenient
1444 -- case x of { DEFAULT -> e }
1445 -- as it is, rather than transform it to
1446 -- error "case can't match"
1447 -- which would be quite legitimate. But it's a really obscure corner, and
1448 -- not worth wasting code on.
1449 , let imposs_data_cons = [con | DataAlt con <- imposs_cons] -- We now know it's a data type
1450 impossible con = con `elem` imposs_data_cons || dataConCannotMatch inst_tys con
1451 = case filterOut impossible all_cons of
1452 [] -> return [] -- Eliminate the default alternative
1453 -- altogether if it can't match
1455 [con] -> -- It matches exactly one constructor, so fill it in
1456 do { tick (FillInCaseDefault case_bndr)
-- NOTE(review): a line binding 'us' (presumably via getUniquesM)
-- appears to have been dropped from this listing (line 1457 is missing).
1458 ; let (ex_tvs, co_tvs, arg_ids) =
1459 dataConRepInstPat us con inst_tys
1460 ; return [(DataAlt con, ex_tvs ++ co_tvs ++ arg_ids, deflt_rhs)] }
1462 _ -> return [(DEFAULT, [], deflt_rhs)]
-- Debug-only sanity check: a closed algebraic tycon with no data
-- constructors; report it (it can legitimately arise).
1464 | debugIsOn, isAlgTyCon tycon, not (isOpenTyCon tycon), null (tyConDataCons tycon)
1465 -- Check for no data constructors
1466 -- This can legitimately happen for type families, so don't report that
1467 = pprTrace "prepareDefault" (ppr case_bndr <+> ppr tycon)
1468 $ return [(DEFAULT, [], deflt_rhs)]
1470 --------- Catch-all cases -----------
1471 prepareDefault _case_bndr _bndr_ty _imposs_cons (Just deflt_rhs)
1472 = return [(DEFAULT, [], deflt_rhs)]
1474 prepareDefault _case_bndr _bndr_ty _imposs_cons Nothing
1475 = return [] -- No default branch
1480 %************************************************************************
1484 %************************************************************************
1486 mkCase tries these things
1488 1. Merge Nested Cases
1490 case e of b { ==> case e of b {
1491 p1 -> rhs1 p1 -> rhs1
1493 pm -> rhsm pm -> rhsm
1494 _ -> case b of b' { pn -> let b'=b in rhsn
1496 ... po -> let b'=b in rhso
1497 po -> rhso _ -> let b'=b in rhsd
1501 which merges two cases in one case when -- the default alternative of
1502 the outer case scrutinises the same variable as the outer case. This
1503 transformation is called Case Merging. It avoids that the same
1504 variable is scrutinised multiple times.
1506 2. Eliminate Identity Case
1512 and similar friends.
1514 3. Merge identical alternatives.
1515 If several alternatives are identical, merge them into
1516 a single DEFAULT alternative. I've occasionally seen this
1517 making a big difference:
1519 case e of =====> case e of
1520 C _ -> f x D v -> ....v....
1521 D v -> ....v.... DEFAULT -> f x
1524 The point is that we merge common RHSs, at least for the DEFAULT case.
1525 [One could do something more elaborate but I've never seen it needed.]
1526 To avoid an expensive test, we just merge branches equal to the *first*
1527 alternative; this picks up the common cases
1528 a) all branches equal
1529 b) some branches equal to the DEFAULT (which occurs first)
1531 The case where Merge Identical Alternatives transformation showed up
1532 was like this (base/Foreign/C/Err/Error.lhs):
1538 where @is@ was something like
1540 p `is` n = p /= (-1) && p == n
1542 This gave rise to a horrible sequence of cases
1549 and similarly in cascade for all the join points!
-- Shared signature for the mkCase pipeline; mkCase feeds mkCase1 feeds
-- mkCase2. NOTE(review): some lines of this signature appear to have
-- been dropped from this listing (lines 1554-1555, 1557-1558 missing).
1553 mkCase, mkCase1, mkCase2
1556 -> [OutAlt] -- Alternatives in standard (increasing) order
1559 --------------------------------------------------
1560 -- 1. Merge Nested Cases
1561 --------------------------------------------------
-- If the outer DEFAULT immediately re-scrutinises the outer case
-- binder, merge the inner case's alternatives into the outer case.
1563 mkCase dflags scrut outer_bndr ((DEFAULT, _, deflt_rhs) : outer_alts)
1564 | dopt Opt_CaseMerge dflags
1565 , Case (Var inner_scrut_var) inner_bndr _ inner_alts <- deflt_rhs
1566 , inner_scrut_var == outer_bndr
1567 = do { tick (CaseMerge outer_bndr)
1569 ; let wrap_alt (con, args, rhs) = ASSERT( outer_bndr `notElem` args )
1570 (con, args, wrap_rhs rhs)
1571 -- Simplifier's no-shadowing invariant should ensure
1572 -- that outer_bndr is not shadowed by the inner patterns
1573 wrap_rhs rhs = Let (NonRec inner_bndr (Var outer_bndr)) rhs
1574 -- The let is OK even for unboxed binders,
1576 wrapped_alts | isDeadBinder inner_bndr = inner_alts
1577 | otherwise = map wrap_alt inner_alts
1579 merged_alts = mergeAlts outer_alts wrapped_alts
1580 -- NB: mergeAlts gives priority to the left
1583 -- DEFAULT -> case x of
1586 -- When we merge, we must ensure that e1 takes
1587 -- precedence over e2 as the value for A!
1589 ; mkCase1 dflags scrut outer_bndr merged_alts
1591 -- Warning: don't call mkCase recursively!
1592 -- Firstly, there's no point, because inner alts have already had
1593 -- mkCase applied to them, so they won't have a case in their default
1594 -- Secondly, if you do, you get an infinite loop, because the bindCaseBndr
1595 -- in munge_rhs may put a case into the DEFAULT branch!
-- Fall-through: no case-merge opportunity, go on to mkCase1.
1597 mkCase dflags scrut bndr alts = mkCase1 dflags scrut bndr alts
1599 --------------------------------------------------
1600 -- 2. Eliminate Identity Case
1601 --------------------------------------------------
-- Identity case: every alternative just returns the case binder
-- (possibly under a common cast), so the whole case can be replaced by
-- the scrutinee, re-applying the cast if there was one.
1603 mkCase1 _dflags scrut case_bndr alts -- Identity case
1604 | all identity_alt alts
1605 = do { tick (CaseIdentity case_bndr)
1606 ; return (re_cast scrut) }
1608 identity_alt (con, args, rhs) = check_eq con args (de_cast rhs)
1610 check_eq DEFAULT _ (Var v) = v == case_bndr
1611 check_eq (LitAlt lit') _ (Lit lit) = lit == lit'
1612 check_eq (DataAlt con) args rhs = rhs `cheapEqExpr` mkConApp con (arg_tys ++ varsToCoreExprs args)
1613 || rhs `cheapEqExpr` Var case_bndr
1614 check_eq _ _ _ = False
1616 arg_tys = map Type (tyConAppArgs (idType case_bndr))
1619 -- case e of x { _ -> x `cast` c }
1620 -- And we definitely want to eliminate this case, to give
1622 -- So we throw away the cast from the RHS, and reconstruct
1623 -- it at the other end. All the RHS casts must be the same
1624 -- if (all identity_alt alts) holds.
1626 -- Don't worry about nested casts, because the simplifier combines them
-- NOTE(review): the fall-through equations for de_cast and re_cast
-- appear to have been dropped from this listing (lines 1628, 1632).
1627 de_cast (Cast e _) = e
1630 re_cast scrut = case head alts of
1631 (_,_,Cast _ co) -> Cast scrut co
1634 --------------------------------------------------
1635 -- 3. Merge Identical Alternatives
1636 --------------------------------------------------
-- Merge identical alternatives: if later alternatives have dead binders
-- and RHSs equal to the first alternative's, replace them all with a
-- single DEFAULT carrying that RHS.
1637 mkCase1 dflags scrut case_bndr ((_con1,bndrs1,rhs1) : con_alts)
1638 | all isDeadBinder bndrs1 -- Remember the default
1639 , length filtered_alts < length con_alts -- alternative comes first
1640 -- Also Note [Dead binders]
1641 = do { tick (AltMerge case_bndr)
1642 ; mkCase2 dflags scrut case_bndr alts' }
1644 alts' = (DEFAULT, [], rhs1) : filtered_alts
1645 filtered_alts = filter keep con_alts
1646 keep (_con,bndrs,rhs) = not (all isDeadBinder bndrs && rhs `cheapEqExpr` rhs1)
-- Fall-through: nothing to merge, go on to mkCase2.
1648 mkCase1 dflags scrut bndr alts = mkCase2 dflags scrut bndr alts
1650 --------------------------------------------------
1652 --------------------------------------------------
-- Final fall-through: actually build the Case expression, computing its
-- result type from the alternatives.
1653 mkCase2 _dflags scrut bndr alts
1654 = return (Case scrut bndr (coreAltsType alts) alts)
1658 ~~~~~~~~~~~~~~~~~~~~
1659 Note that dead-ness is maintained by the simplifier, so that it is
1660 accurate after simplification as well as before.
1663 Note [Cascading case merge]
1664 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
1665 Case merging should cascade in one sweep, because it
1669 DEFAULT -> case a of b
1670 DEFAULT -> case b of c {
1677 DEFAULT -> case a of b
1678 DEFAULT -> let c = b in e
1679 A -> let c = b in ea
1684 DEFAULT -> let b = a in let c = b in e
1685 A -> let b = a in let c = b in ea
1686 B -> let b = a in eb
1690 However here's a tricky case that we still don't catch, and I don't
1691 see how to catch it in one pass:
1693 case x of c1 { I# a1 ->
1696 DEFAULT -> case x of c3 { I# a2 ->
1699 After occurrence analysis (and its binder-swap) we get this
1701 case x of c1 { I# a1 ->
1702 let x = c1 in -- Binder-swap addition
1705 DEFAULT -> case x of c3 { I# a2 ->
1708 When we simplify the inner case x, we'll see that
1709 x=c1=I# a1. So we'll bind a2 to a1, and get
1711 case x of c1 { I# a1 ->
1714 DEFAULT -> case a1 of ...
1716 This is correct, but we can't do a case merge in this sweep
1717 because c2 /= a1. Reason: the binding c1=I# a1 went inwards
1718 without getting changed to c1=I# c2.
1720 I don't think this is worth fixing, even if I knew how. It'll
1721 all come out in the next pass anyway.