2 % (c) The AQUA Project, Glasgow University, 1993-1998
4 \section[SimplUtils]{The simplifier utilities}
9 mkLam, mkCase, prepareAlts, tryEtaExpand,
12 preInlineUnconditionally, postInlineUnconditionally,
13 activeUnfolding, activeRule,
14 getUnfoldingInRuleMatch,
15 simplEnvForGHCi, updModeForInlineRules,
17 -- The continuation type
18 SimplCont(..), DupFlag(..), ArgInfo(..),
20 contIsDupable, contResultType, contIsTrivial, contArgs, dropArgs,
21 pushSimplifiedArgs, countValArgs, countArgs, addArgTo,
22 mkBoringStop, mkRhsStop, mkLazyArgStop, contIsRhsOrArg,
23 interestingCallContext,
25 interestingArg, mkArgInfo,
30 #include "HsVersions.h"
33 import CoreMonad ( SimplifierMode(..), Tick(..) )
37 import qualified CoreSubst
48 import TcType ( isDictLikeTy )
49 import Type hiding( substTy )
50 import Coercion ( coercionKind )
52 import Unify ( dataConCannotMatch )
64 %************************************************************************
68 %************************************************************************
70 A SimplCont allows the simplifier to traverse the expression in a
71 zipper-like fashion. The SimplCont represents the rest of the expression,
72 "above" the point of interest.
74 You can also think of a SimplCont as an "evaluation context", using
75 that term in the way it is used for operational semantics. This is the
76 way I usually think of it. For example you'll often see a syntax for
77 evaluation context looking like
78 C ::= [] | C e | case C of alts | C `cast` co
79 That's the kind of thing we are doing here, and I use that syntax in
84 * A SimplCont describes a *strict* context (just like
85 evaluation contexts do). E.g. Just [] is not a SimplCont
87 * A SimplCont describes a context that *does not* bind
88 any variables. E.g. \x. [] is not a SimplCont
-- NOTE(review): this excerpt elides the 'data SimplCont' header, the
-- '| ApplyTo' constructor header (the DupFlag/InExpr fields just below
-- the CoerceIt fields belong to it), the trailing SimplCont field that
-- each non-Stop constructor presumably carries, and the
-- 'data ArgInfo = ArgInfo {' header for the record fields further down.
-- Confirm against the full source before editing.
92 = Stop -- An empty context, or hole, []
93 CallCtxt -- True <=> There is something interesting about
94 -- the context, and hence the inliner
95 -- should be a bit keener (see interestingCallContext)
97 -- This is an argument of a function that has RULES
98 -- Inlining the call might allow the rule to fire
100 | CoerceIt -- C `cast` co
101 OutCoercion -- The coercion simplified
105 DupFlag -- See Note [DupFlag invariants]
106 InExpr StaticEnv -- The argument and its static env
109 | Select -- case C of alts
110 DupFlag -- See Note [DupFlag invariants]
111 InId [InAlt] StaticEnv -- The case binder, alts, and subst-env
114 -- The two strict forms have no DupFlag, because we never duplicate them
115 | StrictBind -- (\x* \xs. e) C
116 InId [InBndr] -- let x* = [] in e
117 InExpr StaticEnv -- is a special case
120 | StrictArg -- f e1 ..en C
121 ArgInfo -- Specifies f, e1..en, Whether f has rules, etc
122 -- plus strictness flags for *further* args
123 CallCtxt -- Whether *this* argument position is interesting
128 ai_fun :: Id, -- The function
129 ai_args :: [OutExpr], -- ...applied to these args (which are in *reverse* order)
130 ai_rules :: [CoreRule], -- Rules for this function
132 ai_encl :: Bool, -- Flag saying whether this function
133 -- or an enclosing one has rules (recursively)
134 -- True => be keener to inline in all args
136 ai_strs :: [Bool], -- Strictness of remaining arguments
137 -- Usually infinite, but if it is finite it guarantees
138 -- that the function diverges after being given
139 -- that number of args
140 ai_discs :: [Int] -- Discounts for remaining arguments; non-zero => be keener to inline
-- | Record one more (already-simplified) argument in an 'ArgInfo'.
-- 'ai_args' is kept in *reverse* order, so the newest argument is
-- simply consed onto the front.
addArgTo :: ArgInfo -> OutExpr -> ArgInfo
addArgTo info new_arg = info { ai_args = new_arg : ai_args info }
-- Pretty-print a continuation, one frame per line, innermost frame first
instance Outputable SimplCont where
  ppr (Stop cci)
    = ptext (sLit "Stop") <> brackets (ppr cci)
  ppr (ApplyTo dup_flag arg _se rest)
    = ((ptext (sLit "ApplyTo") <+> ppr dup_flag <+> pprParendExpr arg)
       {- $$ nest 2 (pprSimplEnv se) -}) $$ ppr rest
  ppr (StrictBind bndr _ _ _ rest)
    = (ptext (sLit "StrictBind") <+> ppr bndr) $$ ppr rest
  ppr (StrictArg arg_info _ rest)
    = (ptext (sLit "StrictArg") <+> ppr (ai_fun arg_info)) $$ ppr rest
  ppr (Select dup_flag case_bndr alts se rest)
    = (ptext (sLit "Select") <+> ppr dup_flag <+> ppr case_bndr) $$
      (nest 2 $ vcat [ppr (seTvSubst se), ppr alts]) $$ ppr rest
  ppr (CoerceIt co rest)
    = (ptext (sLit "CoerceIt") <+> ppr co) $$ ppr rest
157 data DupFlag = NoDup -- Unsimplified, might be big
158 | Simplified -- Simplified, but with no claim about its size
159 | OkToDup -- Simplified AND small enough to duplicate freely
-- See Note [DupFlag invariants] for what Simplified/OkToDup guarantee
-- | True for frames whose payload has already been simplified
-- ('Simplified' or 'OkToDup').  For those the invariant is that the
-- associated subst-env is empty (see Note [DupFlag invariants]).
isSimplified :: DupFlag -> Bool
isSimplified df = case df of
  NoDup -> False
  _     -> True
-- Short tags used when pretty-printing continuation frames
instance Outputable DupFlag where
  ppr NoDup      = ptext (sLit "nodup")
  ppr Simplified = ptext (sLit "simpl")
  ppr OkToDup    = ptext (sLit "ok")
171 Note [DupFlag invariants]
172 ~~~~~~~~~~~~~~~~~~~~~~~~~
173 In both (ApplyTo dup _ env k)
174 and (Select dup _ _ env k)
175 the following invariants hold
177 (a) if dup = OkToDup, then continuation k is also ok-to-dup
178 (b) if dup = OkToDup or Simplified, the subst-env is empty
179 (and hence no need to re-simplify)
183 mkBoringStop :: SimplCont
-- An empty continuation in a wholly uninteresting context
184 mkBoringStop = Stop BoringCtxt
186 mkRhsStop :: SimplCont -- See Note [RHS of lets] in CoreUnfold
-- An empty continuation for simplifying the RHS of a let binding
187 mkRhsStop = Stop (ArgCtxt False)
-- | An empty continuation for a lazy argument position; the caller
-- supplies how interesting that position is.
mkLazyArgStop :: CallCtxt -> SimplCont
mkLazyArgStop = Stop
-- | Is the continuation an empty context, or one of the two strict
-- frames?  I.e. does the expression sit in an RHS-like or
-- argument-like position?
contIsRhsOrArg :: SimplCont -> Bool
contIsRhsOrArg cont = case cont of
  Stop {}       -> True
  StrictBind {} -> True
  StrictArg {}  -> True
  _             -> False
-- | May this continuation safely be duplicated (e.g. into case
-- branches)?  Relies on Note [DupFlag invariants]: an OkToDup frame
-- guarantees that everything beneath it is ok-to-dup as well.
contIsDupable :: SimplCont -> Bool
contIsDupable cont = case cont of
  Stop {}                 -> True
  ApplyTo OkToDup _ _ _   -> True
  Select  OkToDup _ _ _ _ -> True
  CoerceIt _ rest         -> contIsDupable rest  -- casts are transparent
  _                       -> False
-- | A trivial continuation does no real work: only type applications
-- and casts stacked on an empty context.
contIsTrivial :: SimplCont -> Bool
contIsTrivial cont = case cont of
  Stop {}                   -> True
  ApplyTo _ (Type _) _ rest -> contIsTrivial rest
  CoerceIt _ rest           -> contIsTrivial rest
  _                         -> False
215 contResultType :: SimplEnv -> OutType -> SimplCont -> OutType
-- Given the type of the expression in the hole, compute the type of
-- the whole expression once the continuation is wrapped around it.
-- NOTE(review): the '= go cont ty' body and 'where' keyword appear to
-- be elided from this excerpt (along with the Stop base case); the
-- bindings below are the where-clause helpers.
216 contResultType env ty cont
-- Substitute via 'se', but in the in-scope set of the ambient env
219 subst_ty se ty = substTy (se `setInScope` env) ty
-- A cast changes the type to the RHS type of the coercion
222 go (CoerceIt co cont) _ = go cont (snd (coercionKind co))
-- A strict let yields the type of the let body
223 go (StrictBind _ bs body se cont) _ = go cont (subst_ty se (exprType (mkLams bs body)))
224 go (StrictArg ai _ cont) _ = go cont (funResultTy (argInfoResultTy ai))
-- A case frame yields the type of its alternatives
225 go (Select _ _ alts se cont) _ = go cont (subst_ty se (coreAltsType alts))
226 go (ApplyTo _ arg se cont) ty = go cont (apply_to_arg ty arg se)
-- Type application instantiates the forall; value application
-- takes the function's result type
228 apply_to_arg ty (Type ty_arg) se = applyTy ty (subst_ty se ty_arg)
229 apply_to_arg ty _ _ = funResultTy ty
-- | Type of the application (ai_fun ai_args), remembering that
-- 'ai_args' is stored in reverse order.
argInfoResultTy :: ArgInfo -> OutType
argInfoResultTy (ArgInfo { ai_fun = fun, ai_args = rev_args })
  = foldr apply (idType fun) rev_args
  where
    apply arg fn_ty = applyTypeToArg fn_ty arg
236 countValArgs :: SimplCont -> Int
-- Count the *value* arguments in the outer ApplyTo frames; type
-- arguments are skipped.
-- NOTE(review): the base case for non-ApplyTo continuations
-- (presumably returning 0) is not visible in this excerpt.
237 countValArgs (ApplyTo _ (Type _) _ cont) = countValArgs cont
238 countValArgs (ApplyTo _ _ _ cont) = 1 + countValArgs cont
241 countArgs :: SimplCont -> Int
-- Count all arguments (type and value) in the outer ApplyTo frames.
-- NOTE(review): the base case for non-ApplyTo continuations
-- (presumably returning 0) is not visible in this excerpt.
242 countArgs (ApplyTo _ _ _ cont) = 1 + countArgs cont
245 contArgs :: SimplCont -> (Bool, [ArgSummary], SimplCont)
246 -- Uses substitution to turn each arg into an OutExpr
-- The Bool component is True exactly when the continuation has no
-- outer ApplyTo frames (second equation below).
-- NOTE(review): the 'where' keyword between the first equation and
-- the 'go' helper is elided from this excerpt.
247 contArgs cont@(ApplyTo {})
248 = case go [] cont of { (args, cont') -> (False, args, cont') }
-- Type arguments are not summarised
250 go args (ApplyTo _ arg se cont)
251 | isTypeArg arg = go args cont
252 | otherwise = go (is_interesting arg se : args) cont
253 go args cont = (reverse args, cont)
255 is_interesting arg se = interestingArg (substExpr (text "contArgs") se arg)
256 -- Do *not* use short-cutting substitution here
257 -- because we want to get as much IdInfo as possible
-- No ApplyTo frames at all => applied to no arguments
259 contArgs cont = (True, [], cont)
-- | Wrap the continuation in one ApplyTo frame per argument, in
-- order.  The arguments are already simplified, so each frame is
-- marked 'Simplified'; the env passed along has an empty SubstEnv.
pushSimplifiedArgs :: SimplEnv -> [CoreExpr] -> SimplCont -> SimplCont
pushSimplifiedArgs env args cont
  = foldr push cont args
  where
    push arg rest = ApplyTo Simplified arg env rest
-- | Drop the outermost n ApplyTo frames; panics if the continuation
-- runs out of application frames first.
dropArgs :: Int -> SimplCont -> SimplCont
dropArgs 0 cont = cont
dropArgs n cont = case cont of
  ApplyTo _ _ _ rest -> dropArgs (n-1) rest
  other              -> pprPanic "dropArgs" (ppr n <+> ppr other)
273 Note [Interesting call context]
274 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
275 We want to avoid inlining an expression where there can't possibly be
276 any gain, such as in an argument position. Hence, if the continuation
277 is interesting (eg. a case scrutinee, application etc.) then we
278 inline, otherwise we don't.
280 Previously some_benefit used to return True only if the variable was
281 applied to some value arguments. This didn't work:
283 let x = _coerce_ (T Int) Int (I# 3) in
284 case _coerce_ Int (T Int) x of
287 we want to inline x, but can't see that it's a constructor in a case
288 scrutinee position, and some_benefit is False.
292 dMonadST = _/\_ t -> :Monad (g1 _@_ t, g2 _@_ t, g3 _@_ t)
294 .... case dMonadST _@_ x0 of (a,b,c) -> ....
296 we'd really like to inline dMonadST here, but we *don't* want to
297 inline if the case expression is just
299 case x of y { DEFAULT -> ... }
301 since we can just eliminate this case instead (x is in WHNF). Similar
302 applies when x is bound to a lambda expression. Hence
303 contIsInteresting looks for case expressions with just a single
308 interestingCallContext :: SimplCont -> CallCtxt
309 -- See Note [Interesting call context]
-- NOTE(review): the '= interesting cont' body and the 'where' keyword
-- appear to be elided from this excerpt; the equations below define
-- the local 'interesting' helper.
310 interestingCallContext cont
313 interesting (Select _ bndr _ _ _)
314 | isDeadBinder bndr = CaseCtxt
315 | otherwise = ArgCtxt False -- If the binder is used, this
316 -- is like a strict let
317 -- See Note [RHS of lets] in CoreUnfold
319 interesting (ApplyTo _ arg _ cont)
320 | isTypeArg arg = interesting cont
321 | otherwise = ValAppCtxt -- Can happen if we have (f Int |> co) y
322 -- If f has an INLINE prag we need to give it some
323 -- motivation to inline. See Note [Cast then apply]
326 interesting (StrictArg _ cci _) = cci
327 interesting (StrictBind {}) = BoringCtxt
328 interesting (Stop cci) = cci
-- Casts are transparent for interestingness purposes
329 interesting (CoerceIt _ cont) = interesting cont
330 -- If this call is the arg of a strict function, the context
331 -- is a bit interesting. If we inline here, we may get useful
332 -- evaluation information to avoid repeated evals: e.g.
334 -- Here the contIsInteresting makes the '*' keener to inline,
335 -- which in turn exposes a constructor which makes the '+' inline.
336 -- Assuming that +,* aren't small enough to inline regardless.
338 -- It's also very important to inline in a strict context for things
341 -- Here, the context of (f x) is strict, and if f's unfolding is
342 -- a build it's *great* to inline it here. So we must ensure that
343 -- the context for (f x) is not totally uninteresting.
-- NOTE(review): this excerpt is elided in several places: the first
-- line of the type signature ('mkArgInfo :: Id ...'), the
-- '| otherwise' guard before the second ArgInfo record, the 'where'
-- keyword, the 'ai_encl'/'arg_stricts =' header lines, the 'else'
-- branch marker, and the fall-through case alternative.  Confirm
-- against the full source before editing.
348 -> [CoreRule] -- Rules for function
349 -> Int -- Number of value args
350 -> SimplCont -- Context of the call
353 mkArgInfo fun rules n_val_args call_cont
354 | n_val_args < idArity fun -- Note [Unsaturated functions]
355 = ArgInfo { ai_fun = fun, ai_args = [], ai_rules = rules
357 , ai_strs = vanilla_stricts
358 , ai_discs = vanilla_discounts }
360 = ArgInfo { ai_fun = fun, ai_args = [], ai_rules = rules
361 , ai_encl = interestingArgContext rules call_cont
362 , ai_strs = add_type_str (idType fun) arg_stricts
363 , ai_discs = arg_discounts }
-- Default discounts: an infinite list of zeros
365 vanilla_discounts, arg_discounts :: [Int]
366 vanilla_discounts = repeat 0
-- Per-argument discounts come from the unfolding guidance, padded
-- out with zeros
367 arg_discounts = case idUnfolding fun of
368 CoreUnfolding {uf_guidance = UnfIfGoodArgs {ug_args = discounts}}
369 -> discounts ++ vanilla_discounts
370 _ -> vanilla_discounts
-- Default strictness: an infinite list of False (all lazy)
372 vanilla_stricts, arg_stricts :: [Bool]
373 vanilla_stricts = repeat False
376 = case splitStrictSig (idStrictness fun) of
377 (demands, result_info)
378 | not (demands `lengthExceeds` n_val_args)
379 -> -- Enough args, use the strictness given.
380 -- For bottoming functions we used to pretend that the arg
381 -- is lazy, so that we don't treat the arg as an
382 -- interesting context. This avoids substituting
383 -- top-level bindings for (say) strings into
384 -- calls to error. But now we are more careful about
385 -- inlining lone variables, so its ok (see SimplUtils.analyseCont)
386 if isBotRes result_info then
387 map isStrictDmd demands -- Finite => result is bottom
389 map isStrictDmd demands ++ vanilla_stricts
391 -> WARN( True, text "More demands than arity" <+> ppr fun <+> ppr (idArity fun)
392 <+> ppr n_val_args <+> ppr demands )
393 vanilla_stricts -- Not enough args, or no strictness
395 add_type_str :: Type -> [Bool] -> [Bool]
396 -- If the function arg types are strict, record that in the 'strictness bits'
397 -- No need to instantiate because unboxed types (which dominate the strict
398 -- types) can't instantiate type variables.
399 -- add_type_str is done repeatedly (for each call); might be better
400 -- once-for-all in the function
401 -- But beware primops/datacons with no strictness
402 add_type_str _ [] = []
403 add_type_str fun_ty strs -- Look through foralls
404 | Just (_, fun_ty') <- splitForAllTy_maybe fun_ty -- Includes coercions
405 = add_type_str fun_ty' strs
406 add_type_str fun_ty (str:strs) -- Add strict-type info
407 | Just (arg_ty, fun_ty') <- splitFunTy_maybe fun_ty
408 = (str || isStrictType arg_ty) : add_type_str fun_ty' strs
412 {- Note [Unsaturated functions]
413 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
414 Consider (test eyeball/inline4)
417 where f has arity 2. Then we do not want to inline 'x', because
418 it'll just be floated out again. Even if f has lots of discounts
419 on its first argument -- it must be saturated for these to kick in
422 interestingArgContext :: [CoreRule] -> SimplCont -> Bool
423 -- If the argument has form (f x y), where x,y are boring,
424 -- and f is marked INLINE, then we don't want to inline f.
425 -- But if the context of the argument is
427 -- where g has rules, then we *do* want to inline f, in case it
428 -- exposes a rule that might fire. Similarly, if the context is
430 -- where h has rules, then we do want to inline f; hence the
431 -- call_cont argument to interestingArgContext
433 -- The ai-rules flag makes this happen; if it's
434 -- set, the inliner gets just enough keener to inline f
435 -- regardless of how boring f's arguments are, if it's marked INLINE
437 -- The alternative would be to *always* inline an INLINE function,
438 -- regardless of how boring its context is; but that seems overkill
439 -- For example, it'd mean that wrapper functions were always inlined
440 interestingArgContext rules call_cont
441 = notNull rules || enclosing_fn_has_rules
-- NOTE(review): the 'where' keyword appears to be elided from this
-- excerpt; the bindings below are where-clause helpers.
443 enclosing_fn_has_rules = go call_cont
445 go (Select {}) = False
446 go (ApplyTo {}) = False
-- A strict-argument frame records the interestingness of this position
447 go (StrictArg _ cci _) = interesting cci
448 go (StrictBind {}) = False -- ??
449 go (CoerceIt _ c) = go c
450 go (Stop cci) = interesting cci
-- Only an ArgCtxt whose flag says "enclosing fn has rules" counts
452 interesting (ArgCtxt rules) = rules
453 interesting _ = False
457 %************************************************************************
461 %************************************************************************
463 The SimplifierMode controls several switches; see its definition in
465 sm_rules :: Bool -- Whether RULES are enabled
466 sm_inline :: Bool -- Whether inlining is enabled
467 sm_case_case :: Bool -- Whether case-of-case is enabled
468 sm_eta_expand :: Bool -- Whether eta-expansion is enabled
-- | The simplifier environment used when preparing code for GHCi.
-- Inlining is switched off, in case we expose some unboxed-tuple
-- stuff that confuses the bytecode interpreter; rules and
-- case-of-case stay on.
simplEnvForGHCi :: SimplEnv
simplEnvForGHCi
  = mkSimplEnv $ SimplMode { sm_names      = ["GHCi"]
                           , sm_phase      = InitialPhase
                           , sm_rules      = True
                           , sm_inline     = False
                           , sm_eta_expand = False
                           , sm_case_case  = True }
480 updModeForInlineRules :: Activation -> SimplifierMode -> SimplifierMode
481 -- See Note [Simplifying inside InlineRules]
-- Set the phase to the one in which the InlineRule first becomes
-- active, and switch off eta-expansion while simplifying its RHS.
-- NOTE(review): record-field lines between the two update lines, and
-- the 'where' keyword before phaseFromActivation, are elided here.
482 updModeForInlineRules inline_rule_act current_mode
483 = current_mode { sm_phase = phaseFromActivation inline_rule_act
486 , sm_eta_expand = False }
488 phaseFromActivation (ActiveAfter n) = Phase n
489 phaseFromActivation _ = InitialPhase
492 Note [Inlining in gentle mode]
493 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
494 Something is inlined if
495 (i) the sm_inline flag is on, AND
496 (ii) the thing has an INLINE pragma, AND
497 (iii) the thing is inlinable in the earliest phase.
499 Example of why (iii) is important:
500 {-# INLINE [~1] g #-}
506 If we were to inline g into f's inlining, then an importing module would
508 f e --> g (g e) ---> RULE fires
509 because the InlineRule for f has had g inlined into it.
511 On the other hand, it is bad not to do ANY inlining into an
512 InlineRule, because then recursive knots in instance declarations
513 don't get unravelled.
515 However, *sometimes* SimplGently must do no call-site inlining at all
516 (hence sm_inline = False). Before full laziness we must be careful
517 not to inline wrappers, because doing so inhibits floating
518 e.g. ...(case f x of ...)...
519 ==> ...(case (case x of I# x# -> fw x#) of ...)...
520 ==> ...(case x of I# x# -> case fw x# of ...)...
521 and now the redex (f x) isn't floatable any more.
523 The no-inlining thing is also important for Template Haskell. You might be
524 compiling in one-shot mode with -O2; but when TH compiles a splice before
525 running it, we don't want to use -O2. Indeed, we don't want to inline
526 anything, because the byte-code interpreter might get confused about
527 unboxed tuples and suchlike.
529 Note [Simplifying inside InlineRules]
530 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
531 We must take care with simplification inside InlineRules (which come from
534 First, consider the following example
539 in ...g...g...g...g...g...
540 Now, if that's the ONLY occurrence of f, it might be inlined inside g,
541 and thence copied multiple times when g is inlined. HENCE we treat
542 any occurrence in an InlineRule as a multiple occurrence, not a single
543 one; see OccurAnal.addRuleUsage.
545 Second, we *do* want to do some modest rules/inlining stuff in InlineRules,
546 partly to eliminate senseless crap, and partly to break the recursive knots
547 generated by instance declarations.
549 However, suppose we have
550 {-# INLINE <act> f #-}
552 meaning "inline f in phases p where activation <act>(p) holds".
553 Then what inlinings/rules can we apply to the copy of <rhs> captured in
554 f's InlineRule? Our model is that literally <rhs> is substituted for
555 f when it is inlined. So our conservative plan (implemented by
556 updModeForInlineRules) is this:
558 -------------------------------------------------------------
559 When simplifying the RHS of an InlineRule, set the phase to the
560 phase in which the InlineRule first becomes active
561 -------------------------------------------------------------
565 a) Rules/inlinings that *cease* being active before p will
566 not apply to the InlineRule rhs, consistent with it being
567 inlined in its *original* form in phase p.
569 b) Rules/inlinings that only become active *after* p will
570 not apply to the InlineRule rhs, again to be consistent with
571 inlining the *original* rhs in phase p.
577 {-# NOINLINE [1] g #-}
580 {-# RULE h g = ... #-}
581 Here we must not inline g into f's RHS, even when we get to phase 0,
582 because when f is later inlined into some other module we want the
590 and suppose that there are auto-generated specialisations and a strictness
591 wrapper for g. The specialisations get activation AlwaysActive, and the
592 strictness wrapper get activation (ActiveAfter 0). So the strictness
593 wrapper fails the test and won't be inlined into f's InlineRule. That
594 means f can inline, expose the specialised call to g, so the specialisation
597 A note about wrappers
598 ~~~~~~~~~~~~~~~~~~~~~
599 It's also important not to inline a worker back into a wrapper.
601 wrapper = inline_me (\x -> ...worker... )
602 Normally, the inline_me prevents the worker getting inlined into
603 the wrapper (initially, the worker's only call site!). But,
604 if the wrapper is sure to be called, the strictness analyser will
605 mark it 'demanded', so when the RHS is simplified, it'll get an ArgOf
609 activeUnfolding :: SimplEnv -> Id -> Bool
-- Which Ids' unfoldings may be used in the current simplifier mode.
-- NOTE(review): the defining equation's left-hand side and the
-- 'where' clause binding 'mode' are elided from this excerpt.
611 | not (sm_inline mode) = active_unfolding_minimal
612 | otherwise = case sm_phase mode of
613 InitialPhase -> active_unfolding_gentle
614 Phase n -> active_unfolding n
618 getUnfoldingInRuleMatch :: SimplEnv -> IdUnfoldingFun
619 -- When matching in RULE, we want to "look through" an unfolding
620 -- (to see a constructor) if *rules* are on, even if *inlinings*
621 -- are not. A notable example is DFuns, which really we want to
622 -- match in rules like (op dfun) in gentle mode. Another example
623 -- is 'otherwise' which we want exprIsConApp_maybe to be able to
-- "see" (the remainder of this sentence is elided in this excerpt)
625 getUnfoldingInRuleMatch env id
626 | unf_is_active = idUnfolding id
627 | otherwise = NoUnfolding
-- NOTE(review): the 'where' clause header binding 'mode' and the
-- left-hand side of 'unf_is_active' are elided; the two guards
-- below define unf_is_active.
631 | not (sm_rules mode) = active_unfolding_minimal id
632 | otherwise = isActive (sm_phase mode) (idInlineActivation id)
-- | Use compulsory unfoldings only.
--
-- We ignore SimplGently here because a compulsory unfolding must be
-- inlined regardless: the Id has no top-level binding at all.
--
-- NB: we used to have a second exception, for data con wrappers, on
-- the grounds that we use gentle mode for rule LHSs and they match
-- better when data con wrappers are inlined.  But that only really
-- applied to the trivial wrappers (like (:)), and those are now
-- constructed as Compulsory unfoldings (in MkId), so they'll happen
-- anyway.
active_unfolding_minimal :: Id -> Bool
active_unfolding_minimal = isCompulsoryUnfolding . realIdUnfolding
-- | In phase n, use an unfolding iff the Id's inline-activation is
-- active in phase n.
active_unfolding :: PhaseNum -> Id -> Bool
active_unfolding n = isActiveIn n . idInlineActivation
650 active_unfolding_gentle :: Id -> Bool
651 -- Anything that is early-active
652 -- See Note [Gentle mode]
653 active_unfolding_gentle id
654 = isInlinePragma prag
655 && isEarlyActive (inlinePragmaActivation prag)
656 -- NB: wrappers are not early-active
-- NOTE(review): the 'where' keyword is elided from this excerpt;
-- 'prag' below is a where-clause binding.
658 prag = idInlinePragma id
660 ----------------------
661 activeRule :: DynFlags -> SimplEnv -> Maybe (Activation -> Bool)
662 -- Nothing => No rules at all
-- Just pred => rules are on; pred says whether a rule with the given
-- activation may fire in the current phase.
-- NOTE(review): the 'where' clause binding 'mode' is elided here.
663 activeRule _dflags env
664 | not (sm_rules mode) = Nothing -- Rewriting is off
665 | otherwise = Just (isActive (sm_phase mode))
672 %************************************************************************
674 preInlineUnconditionally
676 %************************************************************************
678 preInlineUnconditionally
679 ~~~~~~~~~~~~~~~~~~~~~~~~
680 @preInlineUnconditionally@ examines a bndr to see if it is used just
681 once in a completely safe way, so that it is safe to discard the
682 binding inline its RHS at the (unique) usage site, REGARDLESS of how
683 big the RHS might be. If this is the case we don't simplify the RHS
684 first, but just inline it un-simplified.
686 This is much better than first simplifying a perhaps-huge RHS and then
687 inlining and re-simplifying it. Indeed, it can be at least quadratically
696 We may end up simplifying e1 N times, e2 N-1 times, e3 N-3 times etc.
697 This can happen with cascades of functions too:
704 THE MAIN INVARIANT is this:
706 ---- preInlineUnconditionally invariant -----
707 IF preInlineUnconditionally chooses to inline x = <rhs>
708 THEN doing the inlining should not change the occurrence
709 info for the free vars of <rhs>
710 ----------------------------------------------
712 For example, it's tempting to look at trivial binding like
714 and inline it unconditionally. But suppose x is used many times,
715 but this is the unique occurrence of y. Then inlining x would change
716 y's occurrence info, which breaks the invariant. It matters: y
717 might have a BIG rhs, which will now be dup'd at every occurrence of x.
720 Even RHSs labelled InlineMe aren't caught here, because there might be
721 no benefit from inlining at the call site.
723 [Sept 01] Don't unconditionally inline a top-level thing, because that
724 can simply make a static thing into something built dynamically. E.g.
728 [Remember that we treat \s as a one-shot lambda.] No point in
729 inlining x unless there is something interesting about the call site.
731 But watch out: if you aren't careful, some useful foldr/build fusion
732 can be lost (most notably in spectral/hartel/parstof) because the
733 foldr didn't see the build. Doing the dynamic allocation isn't a big
734 deal, in fact, but losing the fusion can be. But the right thing here
735 seems to be to do a callSiteInline based on the fact that there is
736 something interesting about the call site (it's strict). Hmm. That
739 Conclusion: inline top level things gaily until Phase 0 (the last
740 phase), at which point don't.
742 Note [pre/postInlineUnconditionally in gentle mode]
743 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
744 Even in gentle mode we want to do preInlineUnconditionally. The
745 reason is that too little clean-up happens if you don't inline
746 use-once things. Also a bit of inlining is *good* for full laziness;
747 it can expose constant sub-expressions. Example in
748 spectral/mandel/Mandel.hs, where the mandelset function gets a useful
749 let-float if you inline windowToViewport
751 However, as usual for Gentle mode, do not inline things that are
752 inactive in the initial stages. See Note [Gentle mode].
754 Note [InlineRule and preInlineUnconditionally]
755 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
756 Surprisingly, do not pre-inline-unconditionally Ids with INLINE pragmas!
766 ...fInt...fInt...fInt...
768 Here f occurs just once, in the RHS of f1. But if we inline it there
769 we'll lose the opportunity to inline at each of fInt's call sites.
770 The INLINE pragma will only inline when the application is saturated
771 for exactly this reason; and we don't want PreInlineUnconditionally
772 to second-guess it. A live example is Trac #3736.
773 c.f. Note [InlineRule and postInlineUnconditionally]
775 Note [Top-level bottoming Ids]
776 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
777 Don't inline top-level Ids that are bottoming, even if they are used just
778 once, because FloatOut has gone to some trouble to extract them out.
779 Inlining them won't make the program run faster!
782 preInlineUnconditionally :: SimplEnv -> TopLevelFlag -> InId -> InExpr -> Bool
-- True <=> discard the binding and inline the (unsimplified) RHS at
-- the unique occurrence; see the main invariant in the notes above.
-- NOTE(review): several lines are elided from this excerpt: an
-- activity guard, the catch-all of the case, the 'where' header
-- binding 'mode', further canInlineInLam cases, and the alternatives
-- of the early_phase case.  Confirm against the full source.
783 preInlineUnconditionally env top_lvl bndr rhs
785 | isStableUnfolding (idUnfolding bndr) = False -- Note [InlineRule and preInlineUnconditionally]
786 | isTopLevel top_lvl && isBottomingId bndr = False -- Note [Top-level bottoming Ids]
787 | opt_SimplNoPreInlining = False
788 | otherwise = case idOccInfo bndr of
789 IAmDead -> True -- Happens in ((\x.1) v)
790 OneOcc in_lam True int_cxt -> try_once in_lam int_cxt
-- Only inline if the binder's inline-activation is currently active
794 active = isActive (sm_phase mode) act
795 -- See Note [pre/postInlineUnconditionally in gentle mode]
796 act = idInlineActivation bndr
797 try_once in_lam int_cxt -- There's one textual occurrence
798 | not in_lam = isNotTopLevel top_lvl || early_phase
799 | otherwise = int_cxt && canInlineInLam rhs
801 -- Be very careful before inlining inside a lambda, because (a) we must not
802 -- invalidate occurrence information, and (b) we want to avoid pushing a
803 -- single allocation (here) into multiple allocations (inside lambda).
804 -- Inlining a *function* with a single *saturated* call would be ok, mind you.
805 -- || (if is_cheap && not (canInlineInLam rhs) then pprTrace "preinline" (ppr bndr <+> ppr rhs) ok else ok)
807 -- is_cheap = exprIsCheap rhs
808 -- ok = is_cheap && int_cxt
810 -- int_cxt The context isn't totally boring
811 -- E.g. let f = \ab.BIG in \y. map f xs
812 -- Don't want to substitute for f, because then we allocate
813 -- its closure every time the \y is called
814 -- But: let f = \ab.BIG in \y. map (f y) xs
815 -- Now we do want to substitute for f, even though it's not
816 -- saturated, because we're going to allocate a closure for
817 -- (f y) every time round the loop anyhow.
819 -- canInlineInLam => free vars of rhs are (Once in_lam) or Many,
820 -- so substituting rhs inside a lambda doesn't change the occ info.
821 -- Sadly, not quite the same as exprIsHNF.
822 canInlineInLam (Lit _) = True
823 canInlineInLam (Lam b e) = isRuntimeVar b || canInlineInLam e
824 canInlineInLam (Note _ e) = canInlineInLam e
825 canInlineInLam _ = False
827 early_phase = case sm_phase mode of
830 -- If we don't have this early_phase test, consider
831 -- x = length [1,2,3]
832 -- The full laziness pass carefully floats all the cons cells to
833 -- top level, and preInlineUnconditionally floats them all back in.
834 -- Result is (a) static allocation replaced by dynamic allocation
835 -- (b) many simplifier iterations because this tickles
836 -- a related problem; only one inlining per pass
838 -- On the other hand, I have seen cases where top-level fusion is
839 -- lost if we don't inline top level thing (e.g. string constants)
840 -- Hence the test for phase zero (which is the phase for all the final
841 -- simplifications). Until phase zero we take no special notice of
842 -- top level things, but then we become more leery about inlining
847 %************************************************************************
849 postInlineUnconditionally
851 %************************************************************************
853 postInlineUnconditionally
854 ~~~~~~~~~~~~~~~~~~~~~~~~~
855 @postInlineUnconditionally@ decides whether to unconditionally inline
856 a thing based on the form of its RHS; in particular if it has a
857 trivial RHS. If so, we can inline and discard the binding altogether.
859 NB: a loop breaker has must_keep_binding = True and non-loop-breakers
860 only have *forward* references. Hence, it's safe to discard the binding
862 NOTE: This isn't our last opportunity to inline. We're at the binding
863 site right now, and we'll get another opportunity when we get to the
866 Note that we do this unconditional inlining only for trivial RHSs.
867 Don't inline even WHNFs inside lambdas; doing so may simply increase
868 allocation when the function is called. This isn't the last chance; see
871 NB: Even inline pragmas (e.g. IMustBeINLINEd) are ignored here Why?
872 Because we don't even want to inline them into the RHS of constructor
873 arguments. See NOTE above
875 NB: At one time even NOINLINE was ignored here: if the rhs is trivial
876 it's best to inline it anyway. We often get a=E; b=a from desugaring,
877 with both a and b marked NOINLINE. But that seems incompatible with
878 our new view that inlining is like a RULE, so I'm sticking to the 'active'
882 postInlineUnconditionally
-- True <=> discard the binding altogether and inline the RHS at
-- every occurrence; only used when that is manifestly safe.
-- NOTE(review): elided from this excerpt: the remaining signature
-- lines (the InExpr/Unfolding arguments and the Bool result), an
-- activity guard, the '||'-headed start of the OneOcc disjunction,
-- the catch-all of the case, and the 'where' keyword before 'active'.
883 :: SimplEnv -> TopLevelFlag
884 -> OutId -- The binder (an InId would be fine too)
885 -> OccInfo -- From the InId
889 postInlineUnconditionally env top_lvl bndr occ_info rhs unfolding
891 | isLoopBreaker occ_info = False -- If it's a loop-breaker of any kind, don't inline
892 -- because it might be referred to "earlier"
893 | isExportedId bndr = False
894 | isStableUnfolding unfolding = False -- Note [InlineRule and postInlineUnconditionally]
895 | isTopLevel top_lvl = False -- Note [Top level and postInlineUnconditionally]
896 | exprIsTrivial rhs = True
899 -- The point of examining occ_info here is that for *non-values*
900 -- that occur outside a lambda, the call-site inliner won't have
901 -- a chance (because it doesn't know that the thing
902 -- only occurs once). The pre-inliner won't have gotten
903 -- it either, if the thing occurs in more than one branch
904 -- So the main target is things like
907 -- True -> case x of ...
908 -- False -> case x of ...
909 -- This is very important in practice; e.g. wheel-seive1 doubles
910 -- in allocation if you miss this out
911 OneOcc in_lam _one_br int_cxt -- OneOcc => no code-duplication issue
912 -> smallEnoughToInline unfolding -- Small enough to dup
913 -- ToDo: consider discount on smallEnoughToInline if int_cxt is true
915 -- NB: Do NOT inline arbitrarily big things, even if one_br is True
916 -- Reason: doing so risks exponential behaviour. We simplify a big
917 -- expression, inline it, and simplify it again. But if the
918 -- very same thing happens in the big expression, we get
920 -- PRINCIPLE: when we've already simplified an expression once,
921 -- make sure that we only inline it if it's reasonably small.
924 -- Outside a lambda, we want to be reasonably aggressive
925 -- about inlining into multiple branches of case
926 -- e.g. let x = <non-value>
927 -- in case y of { C1 -> ..x..; C2 -> ..x..; C3 -> ... }
928 -- Inlining can be a big win if C3 is the hot-spot, even if
929 -- the uses in C1, C2 are not 'interesting'
930 -- An example that gets worse if you add int_cxt here is 'clausify'
932 (isCheapUnfolding unfolding && int_cxt))
933 -- isCheap => acceptable work duplication; in_lam may be true
934 -- int_cxt to prevent us inlining inside a lambda without some
935 -- good reason. See the notes on int_cxt in preInlineUnconditionally
937 IAmDead -> True -- This happens; for example, the case_bndr during case of
938 -- known constructor: case (a,b) of x { (p,q) -> ... }
939 -- Here x isn't mentioned in the RHS, so we don't want to
940 -- create the (dead) let-binding let x = (a,b) in ...
944 -- Here's an example that we don't handle well:
945 -- let f = if b then Left (\x.BIG) else Right (\y.BIG)
946 -- in \y. ....case f of {...} ....
947 -- Here f is used just once, and duplicating the case work is fine (exprIsCheap).
949 -- - We can't preInlineUnconditionally because that would invalidate
950 -- the occ info for b.
951 -- - We can't postInlineUnconditionally because the RHS is big, and
952 -- that risks exponential behaviour
953 -- - We can't call-site inline, because the rhs is big
957 active = isActive (sm_phase (getMode env)) (idInlineActivation bndr)
958 -- See Note [pre/postInlineUnconditionally in gentle mode]
961 Note [Top level and postInlineUnconditionally]
962 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
963 We don't do postInlineUnconditionally for top-level things (even for
964 ones that are trivial):
966 * Doing so will inline top-level error expressions that have been
967 carefully floated out by FloatOut. More generally, it might
968 replace static allocation with dynamic.
970 * Even for trivial expressions there's a problem. Consider
971 {-# RULE "foo" forall (xs::[T]). reverse xs = ruggle xs #-}
974 In one simplifier pass we might fire the rule, getting
976 but in *that* simplifier pass we must not do postInlineUnconditionally
977 on 'ruggle' because then we'll have an unbound occurrence of 'ruggle'
979 If the rhs is trivial it'll be inlined by callSiteInline, and then
980 the binding will be dead and discarded by the next use of OccurAnal
982 * There is less point, because the main goal is to get rid of local
983 bindings used in multiple case branches.
986 Note [InlineRule and postInlineUnconditionally]
987 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
988 Do not do postInlineUnconditionally if the Id has an InlineRule, otherwise
989 we lose the unfolding. Example
991 -- f has InlineRule with rhs (e |> co)
995 Then there's a danger we'll optimise to
1000 and now postInlineUnconditionally, losing the InlineRule on f. Now f'
1001 won't inline because 'e' is too big.
1003 c.f. Note [InlineRule and preInlineUnconditionally]
1006 %************************************************************************
1010 %************************************************************************
-- | mkLam: smart constructor for lambdas in the simplifier.
-- It floats casts outside lambdas (Note [Casts and lambdas]), flattens
-- nested Lam bodies, and tries eta-reduction when Opt_DoEtaReduction is on.
-- NOTE(review): some interior lines are elided in this excerpt.
1013 mkLam :: SimplEnv -> [OutBndr] -> OutExpr -> SimplM OutExpr
1014 -- mkLam tries three things
1015 -- a) eta reduction, if that gives a trivial expression
1016 -- b) eta expansion [only if there are some value lambdas]
1020 mkLam _env bndrs body
1021 = do { dflags <- getDOptsSmpl
1022 ; mkLam' dflags bndrs body }
1024 mkLam' :: DynFlags -> [OutBndr] -> OutExpr -> SimplM OutExpr
-- Cast case: float the coercion outside the binders, provided no binder
-- is a coercion variable free in the coercion (would be ill-kinded).
1025 mkLam' dflags bndrs (Cast body co)
1026 | not (any bad bndrs)
1027 -- Note [Casts and lambdas]
1028 = do { lam <- mkLam' dflags bndrs body
1029 ; return (mkCoerce (mkPiTypes bndrs co) lam) }
1031 co_vars = tyVarsOfType co
1032 bad bndr = isCoVar bndr && bndr `elemVarSet` co_vars
-- Nested-lambda case: gather the inner binders and recurse once.
1034 mkLam' dflags bndrs body@(Lam {})
1035 = mkLam' dflags (bndrs ++ bndrs1) body1
1037 (bndrs1, body1) = collectBinders body
-- General case: try eta-reduction if enabled, else rebuild the lambdas.
1039 mkLam' dflags bndrs body
1040 | dopt Opt_DoEtaReduction dflags
1041 , Just etad_lam <- tryEtaReduce bndrs body
1042 = do { tick (EtaReduction (head bndrs))
1046 = return (mkLams bndrs body)
1050 Note [Casts and lambdas]
1051 ~~~~~~~~~~~~~~~~~~~~~~~~
1053 (\x. (\y. e) `cast` g1) `cast` g2
1054 There is a danger here that the two lambdas look separated, and the
1055 full laziness pass might float an expression to between the two.
1057 So this equation in mkLam' floats the g1 out, thus:
1058 (\x. e `cast` g1) --> (\x.e) `cast` (tx -> g1)
1061 In general, this floats casts outside lambdas, where (I hope) they
1062 might meet and cancel with some other cast:
1063 \x. e `cast` co ===> (\x. e) `cast` (tx -> co)
1064 /\a. e `cast` co ===> (/\a. e) `cast` (/\a. co)
1065 /\g. e `cast` co ===> (/\g. e) `cast` (/\g. co)
1066 (if not (g `in` co))
1068 Notice that it works regardless of 'e'. Originally it worked only
1069 if 'e' was itself a lambda, but in some cases that resulted in
1070 fruitless iteration in the simplifier. A good example was when
1071 compiling Text.ParserCombinators.ReadPrec, where we had a definition
1072 like (\x. Get `cast` g)
1073 where Get is a constructor with nonzero arity. Then mkLam eta-expanded
1074 the Get, and the next iteration eta-reduced it, and then eta-expanded
1077 Note also the side condition for the case of coercion binders.
1078 It does not make sense to transform
1079 /\g. e `cast` g ==> (/\g.e) `cast` (/\g.g)
1080 because the latter is not well-kinded.
1082 %************************************************************************
1086 %************************************************************************
1088 When we meet a let-binding we try eta-expansion. To find the
1089 arity of the RHS we use a little fixpoint analysis; see Note [Arity analysis]
-- | tryEtaExpand: eta-expand the RHS of a let-binding when the arity
-- analysis (findArity) finds a larger arity than the RHS syntactically has.
-- Returns the (possibly new) arity together with the (possibly expanded) RHS.
-- NOTE(review): the try_expand guard structure is partially elided here.
1092 tryEtaExpand :: SimplEnv -> OutId -> OutExpr -> SimplM (Arity, OutExpr)
1093 -- See Note [Eta-expanding at let bindings]
1094 tryEtaExpand env bndr rhs
1095 = do { dflags <- getDOptsSmpl
1096 ; (new_arity, new_rhs) <- try_expand dflags
-- Debug-build warning if the arity ever decreases; see Note [Arity decrease]
1098 ; WARN( new_arity < old_arity || new_arity < _dmd_arity,
1099 (ptext (sLit "Arity decrease:") <+> (ppr bndr <+> ppr old_arity
1100 <+> ppr new_arity <+> ppr _dmd_arity) $$ ppr new_rhs) )
1101 -- Note [Arity decrease]
1102 return (new_arity, new_rhs) }
1105 | sm_eta_expand (getMode env) -- Provided eta-expansion is on
1106 , not (exprIsTrivial rhs)
1107 , let dicts_cheap = dopt Opt_DictsCheap dflags
1108 new_arity = findArity dicts_cheap bndr rhs old_arity
1109 , new_arity > rhs_arity
1110 = do { tick (EtaExpansion bndr)
1111 ; return (new_arity, etaExpand new_arity rhs) }
1113 = return (rhs_arity, rhs)
1115 rhs_arity = exprArity rhs
1116 old_arity = idArity bndr
1117 _dmd_arity = length $ fst $ splitStrictSig $ idStrictness bndr
-- | findArity: fixpoint loop computing the eta-expansion arity of a
-- (possibly self-recursive) RHS. The first iteration treats saturated
-- self-calls as cheap (init_cheap_app); later iterations use the
-- current arity estimate (cheap_app). See Note [Arity analysis].
-- NOTE(review): some interior lines of 'go' are elided in this excerpt.
1119 findArity :: Bool -> Id -> CoreExpr -> Arity -> Arity
1120 -- This implements the fixpoint loop for arity analysis
1121 -- See Note [Arity analysis]
1122 findArity dicts_cheap bndr rhs old_arity
1123 = go (exprEtaExpandArity (mk_cheap_fn dicts_cheap init_cheap_app) rhs)
1124 -- We always call exprEtaExpandArity once, but usually
1125 -- that produces a result equal to old_arity, and then
1126 -- we stop right away (since arities should not decrease)
1127 -- Result: the common case is that there is just one iteration
1129 go :: Arity -> Arity
1131 | cur_arity <= old_arity = cur_arity
1132 | new_arity == cur_arity = cur_arity
1133 | otherwise = ASSERT( new_arity < cur_arity )
1134 pprTrace "Exciting arity"
1135 (vcat [ ppr bndr <+> ppr cur_arity <+> ppr new_arity
1139 new_arity = exprEtaExpandArity (mk_cheap_fn dicts_cheap cheap_app) rhs
-- A self-call is cheap if it is applied to fewer value args than the
-- current arity estimate; other applications defer to isCheapApp.
1141 cheap_app :: CheapAppFun
1142 cheap_app fn n_val_args
1143 | fn == bndr = n_val_args < cur_arity
1144 | otherwise = isCheapApp fn n_val_args
1146 init_cheap_app :: CheapAppFun
1147 init_cheap_app fn n_val_args
1149 | otherwise = isCheapApp fn n_val_args
-- Build the CheapFun for exprEtaExpandArity; when -fdicts-cheap is on,
-- dictionary-like results are also treated as cheap (see comment below).
1151 mk_cheap_fn :: Bool -> CheapAppFun -> CheapFun
1152 mk_cheap_fn dicts_cheap cheap_app
1154 = \e _ -> exprIsCheap' cheap_app e
1156 = \e mb_ty -> exprIsCheap' cheap_app e
1159 Just ty -> isDictLikeTy ty
1160 -- If the experimental -fdicts-cheap flag is on, we eta-expand through
1161 -- dictionary bindings. This improves arities. Thereby, it also
1162 -- means that full laziness is less prone to floating out the
1163 -- application of a function to its dictionary arguments, which
1164 -- can thereby lose opportunities for fusion. Example:
1165 -- foo :: Ord a => a -> ...
1166 -- foo = /\a \(d:Ord a). let d' = ...d... in \(x:a). ....
1167 -- -- So foo has arity 1
1169 -- f = \x. foo dInt $ bar x
1171 -- The (foo DInt) is floated out, and makes ineffective a RULE
1172 -- foo (bar x) = ...
1174 -- One could go further and make exprIsCheap reply True to any
1175 -- dictionary-typed expression, but that's more work.
1177 -- See Note [Dictionary-like types] in TcType.lhs for why we use
1178 -- isDictLikeTy here rather than isDictTy
1181 Note [Eta-expanding at let bindings]
1182 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1183 We now eta expand at let-bindings, which is where the payoff
1186 One useful consequence is this example:
1187 genMap :: C a => ...
1188 {-# INLINE genMap #-}
1192 {-# INLINE myMap #-}
1195 Notice that 'genMap' should only inline if applied to two arguments.
1196 In the InlineRule for myMap we'll have the unfolding
1197 (\d -> genMap Int (..d..))
1198 We do not want to eta-expand to
1199 (\d f xs -> genMap Int (..d..) f xs)
1200 because then 'genMap' will inline, and it really shouldn't: at least
1201 as far as the programmer is concerned, it's not applied to two
1204 Note [Arity analysis]
1205 ~~~~~~~~~~~~~~~~~~~~~
1206 The motivating example for arity analysis is this:
1208 f = \x. let g = f (x+1)
1211 What arity does f have? Really it should have arity 2, but a naive
1212 look at the RHS won't see that. You need a fixpoint analysis which
1213 says it has arity "infinity" the first time round.
1215 This example happens a lot; it first showed up in Andy Gill's thesis,
1216 fifteen years ago! It also shows up in the code for 'rnf' on lists
1219 The analysis is easy to achieve because exprEtaExpandArity takes an
1221 type CheapFun = CoreExpr -> Maybe Type -> Bool
1222 used to decide if an expression is cheap enough to push inside a
1223 lambda. And exprIsCheap' in turn takes an argument
1224 type CheapAppFun = Id -> Int -> Bool
1225 which tells when an application is cheap. This makes it easy to
1226 write the analysis loop.
1228 The analysis is cheap-and-cheerful because it doesn't deal with
1229 mutual recursion. But the self-recursive case is the important one.
1232 %************************************************************************
1234 \subsection{Floating lets out of big lambdas}
1236 %************************************************************************
1238 Note [Floating and type abstraction]
1239 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1242 We'd like to float this to
1245 x = /\a. C (y1 a) (y2 a)
1246 for the usual reasons: we want to inline x rather vigorously.
1248 You may think that this kind of thing is rare. But in some programs it is
1249 common. For example, if you do closure conversion you might get:
1251 data a :-> b = forall e. (e -> a -> b) :$ e
1253 f_cc :: forall a. a :-> a
1254 f_cc = /\a. (\e. id a) :$ ()
1256 Now we really want to inline that f_cc thing so that the
1257 construction of the closure goes away.
1259 So I have elaborated simplLazyBind to understand right-hand sides that look
1263 and treat them specially. The real work is done in SimplUtils.abstractFloats,
1264 but there is quite a bit of plumbing in simplLazyBind as well.
1266 The same transformation is good when there are lets in the body:
1268 /\abc -> let(rec) x = e in b
1270 let(rec) x' = /\abc -> let x = x' a b c in e
1272 /\abc -> let x = x' a b c in b
1274 This is good because it can turn things like:
1276 let f = /\a -> letrec g = ... g ... in g
1278 letrec g' = /\a -> ... g' a ...
1280 let f = /\ a -> g' a
1282 which is better. In effect, it means that big lambdas don't impede
1285 This optimisation is CRUCIAL in eliminating the junk introduced by
1286 desugaring mutually recursive definitions. Don't eliminate it lightly!
1288 [May 1999] If we do this transformation *regardless* then we can
1289 end up with some pretty silly stuff. For example,
1292 st = /\ s -> let { x1=r1 ; x2=r2 } in ...
1297 st = /\s -> ...[y1 s/x1, y2 s/x2]
1300 Unless the "..." is a WHNF there is really no point in doing this.
1301 Indeed it can make things worse. Suppose x1 is used strictly,
1304 x1* = case f y of { (a,b) -> e }
1306 If we abstract this wrt the tyvar we then can't do the case inline
1307 as we would normally do.
1309 That's why the whole transformation is part of the same process that
1310 floats let-bindings and constructor arguments out of RHSs. In particular,
1311 it is guarded by the doFloatFromRhs call in simplLazyBind.
-- | abstractFloats: abstract the floated bindings of body_env over the
-- given type variables, producing polymorphic versions (poly_id) of each
-- float and a substitution applied to the body. Used when floating lets
-- out of big lambdas; see Note [Floating and type abstraction].
-- NOTE(review): some interior lines are elided in this excerpt.
1315 abstractFloats :: [OutTyVar] -> SimplEnv -> OutExpr -> SimplM ([OutBind], OutExpr)
1316 abstractFloats main_tvs body_env body
1317 = ASSERT( notNull body_floats )
1318 do { (subst, float_binds) <- mapAccumLM abstract empty_subst body_floats
1319 ; return (float_binds, CoreSubst.substExpr (text "abstract_floats1") subst body) }
1321 main_tv_set = mkVarSet main_tvs
1322 body_floats = getFloats body_env
1323 empty_subst = CoreSubst.mkEmptySubst (seInScope body_env)
1325 abstract :: CoreSubst.Subst -> OutBind -> SimplM (CoreSubst.Subst, OutBind)
1326 abstract subst (NonRec id rhs)
1327 = do { (poly_id, poly_app) <- mk_poly tvs_here id
1328 ; let poly_rhs = mkLams tvs_here rhs'
1329 subst' = CoreSubst.extendIdSubst subst id poly_app
1330 ; return (subst', (NonRec poly_id poly_rhs)) }
1332 rhs' = CoreSubst.substExpr (text "abstract_floats2") subst rhs
1333 tvs_here | any isCoVar main_tvs = main_tvs -- Note [Abstract over coercions]
1335 = varSetElems (main_tv_set `intersectVarSet` exprSomeFreeVars isTyCoVar rhs')
1337 -- Abstract only over the type variables free in the rhs
1338 -- wrt which the new binding is abstracted. But the naive
1339 -- approach of abstract wrt the tyvars free in the Id's type
1341 -- /\ a b -> let t :: (a,b) = (e1, e2)
1344 -- Here, b isn't free in x's type, but we must nevertheless
1345 -- abstract wrt b as well, because t's type mentions b.
1346 -- Since t is floated too, we'd end up with the bogus:
1347 -- poly_t = /\ a b -> (e1, e2)
1348 -- poly_x = /\ a -> fst (poly_t a *b*)
1349 -- So for now we adopt the even more naive approach of
1350 -- abstracting wrt *all* the tyvars. We'll see if that
1351 -- gives rise to problems. SLPJ June 98
-- Recursive groups: abstract every binder over the same tyvar set,
-- substituting the polymorphic applications into each RHS.
1353 abstract subst (Rec prs)
1354 = do { (poly_ids, poly_apps) <- mapAndUnzipM (mk_poly tvs_here) ids
1355 ; let subst' = CoreSubst.extendSubstList subst (ids `zip` poly_apps)
1356 poly_rhss = [mkLams tvs_here (CoreSubst.substExpr (text "abstract_floats3") subst' rhs)
1358 ; return (subst', Rec (poly_ids `zip` poly_rhss)) }
1360 (ids,rhss) = unzip prs
1361 -- For a recursive group, it's a bit of a pain to work out the minimal
1362 -- set of tyvars over which to abstract:
1363 -- /\ a b c. let x = ...a... in
1364 -- letrec { p = ...x...q...
1365 -- q = .....p...b... } in
1367 -- Since 'x' is abstracted over 'a', the {p,q} group must be abstracted
1368 -- over 'a' (because x is replaced by (poly_x a)) as well as 'b'.
1369 -- Since it's a pain, we just use the whole set, which is always safe
1371 -- If you ever want to be more selective, remember this bizarre case too:
1373 -- Here, we must abstract 'x' over 'a'.
-- Make the polymorphic Id (same name, forall-quantified type) and the
-- application of it to the abstracted tyvars.
1376 mk_poly tvs_here var
1377 = do { uniq <- getUniqueM
1378 ; let poly_name = setNameUnique (idName var) uniq -- Keep same name
1379 poly_ty = mkForAllTys tvs_here (idType var) -- But new type of course
1380 poly_id = transferPolyIdInfo var tvs_here $ -- Note [transferPolyIdInfo] in Id.lhs
1381 mkLocalId poly_name poly_ty
1382 ; return (poly_id, mkTyApps (Var poly_id) (mkTyVarTys tvs_here)) }
1383 -- In the olden days, it was crucial to copy the occInfo of the original var,
1384 -- because we were looking at occurrence-analysed but as yet unsimplified code!
1385 -- In particular, we mustn't lose the loop breakers. BUT NOW we are looking
1386 -- at already simplified code, so it doesn't matter
1388 -- It's even right to retain single-occurrence or dead-var info:
1389 -- Suppose we started with /\a -> let x = E in B
1390 -- where x occurs once in B. Then we transform to:
1391 -- let x' = /\a -> E in /\a -> let x* = x' a in B
1392 -- where x* has an INLINE prag on it. Now, once x* is inlined,
1393 -- the occurrences of x' will be just the occurrences originally
1397 Note [Abstract over coercions]
1398 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1399 If a coercion variable (g :: a ~ Int) is free in the RHS, then so is the
1400 type variable a. Rather than sort this mess out, we simply bale out and abstract
1401 wrt all the type variables if any of them are coercion variables.
1404 Historical note: if you use let-bindings instead of a substitution, beware of this:
1406 -- Suppose we start with:
1408 -- x = /\ a -> let g = G in E
1410 -- Then we'll float to get
1412 -- x = let poly_g = /\ a -> G
1413 -- in /\ a -> let g = poly_g a in E
1415 -- But now the occurrence analyser will see just one occurrence
1416 -- of poly_g, not inside a lambda, so the simplifier will
1417 -- PreInlineUnconditionally kicks in first! Back to square 1!
1418 -- (I used to think that the "don't inline lone occurrences" stuff
1419 -- would stop this happening, but since it's the *only* occurrence,
1420 -- PreInlineUnconditionally kicks in first!)
1422 -- Solution: put an INLINE note on g's RHS, so that poly_g seems
1423 -- to appear many times. (NB: mkInlineMe eliminates
1424 -- such notes on trivial RHSs, so do it manually.)
1426 %************************************************************************
1430 %************************************************************************
1432 prepareAlts tries these things:
1434 1. Eliminate alternatives that cannot match, including the
1435 DEFAULT alternative.
1437 2. If the DEFAULT alternative can match only one possible constructor,
1438 then make that constructor explicit.
1440 case e of x { DEFAULT -> rhs }
1442 case e of x { (a,b) -> rhs }
1443 where the type is a single constructor type. This gives better code
1444 when rhs also scrutinises x or e.
1446 3. Returns a list of the constructors that cannot match in the
1447 DEFAULT alternative (if there is one)
1449 Here "cannot match" includes knowledge from GADTs
1451 It's a good idea to do this stuff before simplifying the alternatives, to
1452 avoid simplifying alternatives we know can't happen, and to come up with
1453 the list of constructors that are handled, to put into the IdInfo of the
1454 case binder, for use when simplifying the alternatives.
1456 Eliminating the default alternative in (1) isn't so obvious, but it can
1459 data Colour = Red | Green | Blue
1468 DEFAULT -> [ case y of ... ]
1470 If we inline h into f, the default case of the inlined h can't happen.
1471 If we don't notice this, we may end up filtering out *all* the cases
1472 of the inner case y, which give us nowhere to go!
-- | prepareAlts: before simplifying case alternatives, drop alternatives
-- that cannot match, refine the DEFAULT alternative (via prepareDefault),
-- and return the constructors that cannot occur in the DEFAULT branch
-- (for the case binder's IdInfo).
-- NOTE(review): some interior lines are elided in this excerpt.
1475 prepareAlts :: OutExpr -> OutId -> [InAlt] -> SimplM ([AltCon], [InAlt])
1476 prepareAlts scrut case_bndr' alts
1477 = do { let (alts_wo_default, maybe_deflt) = findDefault alts
1478 alt_cons = [con | (con,_,_) <- alts_wo_default]
1479 imposs_deflt_cons = nub (imposs_cons ++ alt_cons)
1480 -- "imposs_deflt_cons" are handled
1481 -- EITHER by the context,
1482 -- OR by a non-DEFAULT branch in this case expression.
1484 ; default_alts <- prepareDefault case_bndr' mb_tc_app
1485 imposs_deflt_cons maybe_deflt
1487 ; let trimmed_alts = filterOut impossible_alt alts_wo_default
1488 merged_alts = mergeAlts trimmed_alts default_alts
1489 -- We need the mergeAlts in case the new default_alt
1490 -- has turned into a constructor alternative.
1491 -- The merge keeps the inner DEFAULT at the front, if there is one
1492 -- and interleaves the alternatives in the right order
1494 ; return (imposs_deflt_cons, merged_alts) }
1496 mb_tc_app = splitTyConApp_maybe (idType case_bndr')
1497 Just (_, inst_tys) = mb_tc_app
-- imposs_cons: constructors the scrutinee's unfolding says it cannot be
1499 imposs_cons = case scrut of
1500 Var v -> otherCons (idUnfolding v)
1503 impossible_alt :: CoreAlt -> Bool
1504 impossible_alt (con, _, _) | con `elem` imposs_cons = True
1505 impossible_alt (DataAlt con, _, _) = dataConCannotMatch inst_tys con
1506 impossible_alt _ = False
-- | prepareDefault: refine (or eliminate) a case expression's DEFAULT
-- alternative, given the constructors that cannot match it. If exactly
-- one constructor remains possible, fill it in explicitly.
-- NOTE(review): some interior lines are elided in this excerpt.
1509 prepareDefault :: OutId -- Case binder; need just for its type. Note that as an
1510 -- OutId, it has maximum information; this is important.
1511 -- Test simpl013 is an example
1512 -> Maybe (TyCon, [Type]) -- Type of scrutinee, decomposed
1513 -> [AltCon] -- These cons can't happen when matching the default
1514 -> Maybe InExpr -- Rhs
1515 -> SimplM [InAlt] -- Still unsimplified
1516 -- We use a list because it's what mergeAlts expects,
1518 --------- Fill in known constructor -----------
1519 prepareDefault case_bndr (Just (tycon, inst_tys)) imposs_cons (Just deflt_rhs)
1520 | -- This branch handles the case where we are
1521 -- scrutinising an algebraic data type
1522 isAlgTyCon tycon -- It's a data type, tuple, or unboxed tuples.
1523 , not (isNewTyCon tycon) -- We can have a newtype, if we are just doing an eval:
1524 -- case x of { DEFAULT -> e }
1525 -- and we don't want to fill in a default for them!
1526 , Just all_cons <- tyConDataCons_maybe tycon
1527 , not (null all_cons)
1528 -- This is a tricky corner case. If the data type has no constructors,
1529 -- which GHC allows, then the case expression will have at most a default
1530 -- alternative. We don't want to eliminate that alternative, because the
1531 -- invariant is that there's always one alternative. It's more convenient
1533 -- case x of { DEFAULT -> e }
1534 -- as it is, rather than transform it to
1535 -- error "case cant match"
1536 -- which would be quite legitimate. But it's a really obscure corner, and
1537 -- not worth wasting code on.
1538 , let imposs_data_cons = [con | DataAlt con <- imposs_cons] -- We now know it's a data type
1539 impossible con = con `elem` imposs_data_cons || dataConCannotMatch inst_tys con
1540 = case filterOut impossible all_cons of
1541 [] -> return [] -- Eliminate the default alternative
1542 -- altogether if it can't match
1544 [con] -> -- It matches exactly one constructor, so fill it in
1545 do { tick (FillInCaseDefault case_bndr)
1547 ; let (ex_tvs, co_tvs, arg_ids) =
1548 dataConRepInstPat us con inst_tys
1549 ; return [(DataAlt con, ex_tvs ++ co_tvs ++ arg_ids, deflt_rhs)] }
1551 _ -> return [(DEFAULT, [], deflt_rhs)]
-- Debug-only sanity trace for data types with no constructors at all
1553 | debugIsOn, isAlgTyCon tycon
1554 , null (tyConDataCons tycon)
1555 , not (isFamilyTyCon tycon || isAbstractTyCon tycon)
1556 -- Check for no data constructors
1557 -- This can legitimately happen for abstract types and type families,
1558 -- so don't report that
1559 = pprTrace "prepareDefault" (ppr case_bndr <+> ppr tycon)
1560 $ return [(DEFAULT, [], deflt_rhs)]
1562 --------- Catch-all cases -----------
1563 prepareDefault _case_bndr _bndr_ty _imposs_cons (Just deflt_rhs)
1564 = return [(DEFAULT, [], deflt_rhs)]
1566 prepareDefault _case_bndr _bndr_ty _imposs_cons Nothing
1567 = return [] -- No default branch
1572 %************************************************************************
1576 %************************************************************************
1578 mkCase tries these things
1580 1. Merge Nested Cases
1582 case e of b { ==> case e of b {
1583 p1 -> rhs1 p1 -> rhs1
1585 pm -> rhsm pm -> rhsm
1586 _ -> case b of b' { pn -> let b'=b in rhsn
1588 ... po -> let b'=b in rhso
1589 po -> rhso _ -> let b'=b in rhsd
1593 which merges two cases in one case when -- the default alternative of
1594 the outer case scrutinises the same variable as the outer case. This
1595 transformation is called Case Merging. It avoids that the same
1596 variable is scrutinised multiple times.
1598 2. Eliminate Identity Case
1604 and similar friends.
1606 3. Merge identical alternatives.
1607 If several alternatives are identical, merge them into
1608 a single DEFAULT alternative. I've occasionally seen this
1609 making a big difference:
1611 case e of =====> case e of
1612 C _ -> f x D v -> ....v....
1613 D v -> ....v.... DEFAULT -> f x
1616 The point is that we merge common RHSs, at least for the DEFAULT case.
1617 [One could do something more elaborate but I've never seen it needed.]
1618 To avoid an expensive test, we just merge branches equal to the *first*
1619 alternative; this picks up the common cases
1620 a) all branches equal
1621 b) some branches equal to the DEFAULT (which occurs first)
1623 The case where Merge Identical Alternatives transformation showed up
1624 was like this (base/Foreign/C/Err/Error.lhs):
1630 where @is@ was something like
1632 p `is` n = p /= (-1) && p == n
1634 This gave rise to a horrible sequence of cases
1641 and similarly in cascade for all the join points!
-- | mkCase: smart constructor for case expressions.
-- This first equation performs Case Merging (-fcase-merge): when the
-- outer DEFAULT alternative immediately re-scrutinises the outer case
-- binder, the inner alternatives are merged into the outer case.
-- NOTE(review): some interior lines are elided in this excerpt.
1645 mkCase, mkCase1, mkCase2
1648 -> [OutAlt] -- Alternatives in standard (increasing) order
1651 --------------------------------------------------
1652 -- 1. Merge Nested Cases
1653 --------------------------------------------------
1655 mkCase dflags scrut outer_bndr ((DEFAULT, _, deflt_rhs) : outer_alts)
1656 | dopt Opt_CaseMerge dflags
1657 , Case (Var inner_scrut_var) inner_bndr _ inner_alts <- deflt_rhs
1658 , inner_scrut_var == outer_bndr
1659 = do { tick (CaseMerge outer_bndr)
1661 ; let wrap_alt (con, args, rhs) = ASSERT( outer_bndr `notElem` args )
1662 (con, args, wrap_rhs rhs)
1663 -- Simplifier's no-shadowing invariant should ensure
1664 -- that outer_bndr is not shadowed by the inner patterns
1665 wrap_rhs rhs = Let (NonRec inner_bndr (Var outer_bndr)) rhs
1666 -- The let is OK even for unboxed binders,
1668 wrapped_alts | isDeadBinder inner_bndr = inner_alts
1669 | otherwise = map wrap_alt inner_alts
1671 merged_alts = mergeAlts outer_alts wrapped_alts
1672 -- NB: mergeAlts gives priority to the left
1675 -- DEFAULT -> case x of
1678 -- When we merge, we must ensure that e1 takes
1679 -- precedence over e2 as the value for A!
1681 ; mkCase1 dflags scrut outer_bndr merged_alts
1683 -- Warning: don't call mkCase recursively!
1684 -- Firstly, there's no point, because inner alts have already had
1685 -- mkCase applied to them, so they won't have a case in their default
1686 -- Secondly, if you do, you get an infinite loop, because the bindCaseBndr
1687 -- in munge_rhs may put a case into the DEFAULT branch!
1689 mkCase dflags scrut bndr alts = mkCase1 dflags scrut bndr alts
1691 --------------------------------------------------
1692 -- 2. Eliminate Identity Case
1693 --------------------------------------------------
-- mkCase1, first equation: eliminate an identity case, i.e. one where
-- every alternative just returns the scrutinee (modulo an identical
-- outer cast, which is re-applied by re_cast).
-- NOTE(review): some interior lines are elided in this excerpt.
1695 mkCase1 _dflags scrut case_bndr alts -- Identity case
1696 | all identity_alt alts
1697 = do { tick (CaseIdentity case_bndr)
1698 ; return (re_cast scrut) }
1700 identity_alt (con, args, rhs) = check_eq con args (de_cast rhs)
1702 check_eq DEFAULT _ (Var v) = v == case_bndr
1703 check_eq (LitAlt lit') _ (Lit lit) = lit == lit'
1704 check_eq (DataAlt con) args rhs = rhs `cheapEqExpr` mkConApp con (arg_tys ++ varsToCoreExprs args)
1705 || rhs `cheapEqExpr` Var case_bndr
1706 check_eq _ _ _ = False
1708 arg_tys = map Type (tyConAppArgs (idType case_bndr))
1711 -- case e of x { _ -> x `cast` c }
1712 -- And we definitely want to eliminate this case, to give
1714 -- So we throw away the cast from the RHS, and reconstruct
1715 -- it at the other end. All the RHS casts must be the same
1716 -- if (all identity_alt alts) holds.
1718 -- Don't worry about nested casts, because the simplifier combines them
1719 de_cast (Cast e _) = e
1722 re_cast scrut = case head alts of
1723 (_,_,Cast _ co) -> Cast scrut co
1726 --------------------------------------------------
1727 -- 3. Merge Identical Alternatives
1728 --------------------------------------------------
-- Second equation: collapse alternatives whose RHS equals the first
-- alternative's RHS (with all binders dead) into one DEFAULT alternative.
1729 mkCase1 dflags scrut case_bndr ((_con1,bndrs1,rhs1) : con_alts)
1730 | all isDeadBinder bndrs1 -- Remember the default
1731 , length filtered_alts < length con_alts -- alternative comes first
1732 -- Also Note [Dead binders]
1733 = do { tick (AltMerge case_bndr)
1734 ; mkCase2 dflags scrut case_bndr alts' }
1736 alts' = (DEFAULT, [], rhs1) : filtered_alts
1737 filtered_alts = filter keep con_alts
1738 keep (_con,bndrs,rhs) = not (all isDeadBinder bndrs && rhs `cheapEqExpr` rhs1)
1740 mkCase1 dflags scrut bndr alts = mkCase2 dflags scrut bndr alts
1742 --------------------------------------------------
1744 --------------------------------------------------
-- mkCase2: the base case — just build the Case node, with its type
-- computed from the alternatives.
1745 mkCase2 _dflags scrut bndr alts
1746 = return (Case scrut bndr (coreAltsType alts) alts)
1750 ~~~~~~~~~~~~~~~~~~~~
1751 Note that dead-ness is maintained by the simplifier, so that it is
1752 accurate after simplification as well as before.
1755 Note [Cascading case merge]
1756 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
1757 Case merging should cascade in one sweep, because it
1761 DEFAULT -> case a of b
1762 DEFAULT -> case b of c {
1769 DEFAULT -> case a of b
1770 DEFAULT -> let c = b in e
1771 A -> let c = b in ea
1776 DEFAULT -> let b = a in let c = b in e
1777 A -> let b = a in let c = b in ea
1778 B -> let b = a in eb
1782 However here's a tricky case that we still don't catch, and I don't
1783 see how to catch it in one pass:
1785 case x of c1 { I# a1 ->
1788 DEFAULT -> case x of c3 { I# a2 ->
1791 After occurrence analysis (and its binder-swap) we get this
1793 case x of c1 { I# a1 ->
1794 let x = c1 in -- Binder-swap addition
1797 DEFAULT -> case x of c3 { I# a2 ->
1800 When we simplify the inner case x, we'll see that
1801 x=c1=I# a1. So we'll bind a2 to a1, and get
1803 case x of c1 { I# a1 ->
1806 DEFAULT -> case a1 of ...
1808 This is correct, but we can't do a case merge in this sweep
1809 because c2 /= a1. Reason: the binding c1=I# a1 went inwards
1810 without getting changed to c1=I# c2.
1812 I don't think this is worth fixing, even if I knew how. It'll
1813 all come out in the next pass anyway.