X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=compiler%2FsimplCore%2FSimplify.lhs;h=b38bdc8a7b15497283dde898a2049deafa2d7252;hb=7bb3d1fc79521d591cd9f824893963141a7997b6;hp=30cb32109f29a13aa960b8e4607fcd9411a48f5f;hpb=85289ab5670891aa19b19456072fe2a697da881d;p=ghc-hetmet.git diff --git a/compiler/simplCore/Simplify.lhs b/compiler/simplCore/Simplify.lhs index 30cb321..b38bdc8 100644 --- a/compiler/simplCore/Simplify.lhs +++ b/compiler/simplCore/Simplify.lhs @@ -13,9 +13,9 @@ import SimplMonad import Type hiding ( substTy, extendTvSubst ) import SimplEnv import SimplUtils -import MkId ( rUNTIME_ERROR_ID ) import FamInstEnv ( FamInstEnv ) import Id +import MkId ( mkImpossibleExpr, seqId ) import Var import IdInfo import Coercion @@ -26,8 +26,9 @@ import NewDemand ( isStrictDmd, splitStrictSig ) import PprCore ( pprParendExpr, pprCoreExpr ) import CoreUnfold ( mkUnfolding, callSiteInline, CallCtxt(..) ) import CoreUtils +import CoreArity ( exprArity ) import Rules ( lookupRule, getRules ) -import BasicTypes ( isMarkedStrict ) +import BasicTypes ( isMarkedStrict, Arity ) import CostCentre ( currentCCS ) import TysPrim ( realWorldStatePrimTy ) import PrelInfo ( realWorldPrimId ) @@ -35,8 +36,6 @@ import BasicTypes ( TopLevelFlag(..), isTopLevel, RecFlag(..), isNonRuleLoopBreaker ) import Maybes ( orElse ) import Data.List ( mapAccumL ) -import MonadUtils ( foldlM ) -import StaticFlags ( opt_PassCaseBndrToJoinPoints ) import Outputable import FastString \end{code} @@ -341,7 +340,7 @@ simplLazyBind env top_lvl is_rec bndr bndr1 rhs rhs_se ; (env', rhs') <- if not (doFloatFromRhs top_lvl is_rec False body2 body_env2) then -- No floating, just wrap up! - do { rhs' <- mkLam tvs' (wrapFloats body_env2 body2) + do { rhs' <- mkLam env tvs' (wrapFloats body_env2 body2) ; return (env, rhs') } else if null tvs then -- Simple floating @@ -351,8 +350,8 @@ simplLazyBind env top_lvl is_rec bndr bndr1 rhs rhs_se else -- Do type-abstraction first do { tick LetFloatFromLet ; (poly_binds, body3) <- abstractFloats tvs' body_env2 body2 - ; rhs' <- mkLam tvs' body3 - ; env' <- foldlM (addPolyBind top_lvl) env poly_binds + ; rhs' <- mkLam env tvs' body3 + ; let env' = foldl (addPolyBind top_lvl) env poly_binds ; return (env', rhs') } ; completeBind env' top_lvl bndr bndr1 rhs' } @@ -462,7 +461,7 @@ prepareRhs env0 rhs0 where is_val = n_val_args > 0 -- There is at least one arg -- ...and the fun a constructor or PAP - && (isDataConWorkId fun || n_val_args < idArity fun) + && (isConLikeId fun || n_val_args < idArity fun) go _ env other = return (False, env, other) \end{code} @@ -567,23 +566,29 @@ completeBind :: SimplEnv -- * or by adding to the floats in the envt completeBind env top_lvl old_bndr new_bndr new_rhs - = do { let old_info = idInfo old_bndr - old_unf = unfoldingInfo old_info - occ_info = occInfo old_info + | postInlineUnconditionally env top_lvl new_bndr occ_info new_rhs unfolding + -- Inline and discard the binding + = do { tick (PostInlineUnconditionally old_bndr) + ; -- pprTrace "postInlineUnconditionally" (ppr old_bndr <+> ppr new_bndr <+> ppr new_rhs) $ + return (extendIdSubst env old_bndr (DoneEx new_rhs)) } + -- Use the substitution to make quite, quite sure that the + -- substitution will happen, since we are going to discard the binding - ; new_unfolding <- simplUnfolding env top_lvl old_bndr occ_info old_unf new_rhs - - ; if postInlineUnconditionally env top_lvl new_bndr occ_info new_rhs new_unfolding - -- Inline and discard the binding - then do { tick (PostInlineUnconditionally old_bndr) - 
; return (extendIdSubst env old_bndr (DoneEx new_rhs)) } - -- Use the substitution to make quite, quite sure that the - -- substitution will happen, since we are going to discard the binding - - else return (addNonRecWithUnf env new_bndr new_rhs new_unfolding) } - ------------------------------- -addPolyBind :: TopLevelFlag -> SimplEnv -> OutBind -> SimplM SimplEnv + | otherwise + = return (addNonRecWithUnf env new_bndr new_rhs unfolding wkr) + where + unfolding | omit_unfolding = NoUnfolding + | otherwise = mkUnfolding (isTopLevel top_lvl) new_rhs + old_info = idInfo old_bndr + occ_info = occInfo old_info + wkr = substWorker env (workerInfo old_info) + omit_unfolding = isNonRuleLoopBreaker occ_info + -- or not (activeInline env old_bndr) + -- Do *not* trim the unfolding in SimplGently, else + -- the specialiser can't see it! + +----------------- +addPolyBind :: TopLevelFlag -> SimplEnv -> OutBind -> SimplEnv -- Add a new binding to the environment, complete with its unfolding -- but *do not* do postInlineUnconditionally, because we have already -- processed some of the scope of the binding @@ -596,92 +601,95 @@ addPolyBind :: TopLevelFlag -> SimplEnv -> OutBind -> SimplM SimplEnv -- opportunity to inline 'y' too. addPolyBind top_lvl env (NonRec poly_id rhs) - = do { unfolding <- simplUnfolding env top_lvl poly_id NoOccInfo noUnfolding rhs - -- Assumes that poly_id did not have an INLINE prag - -- which is perhaps wrong. ToDo: think about this - ; return (addNonRecWithUnf env poly_id rhs unfolding) } + = addNonRecWithUnf env poly_id rhs unfolding NoWorker + where + unfolding | not (activeInline env poly_id) = NoUnfolding + | otherwise = mkUnfolding (isTopLevel top_lvl) rhs + -- addNonRecWithInfo adds the new binding in the + -- proper way (ie complete with unfolding etc), + -- and extends the in-scope set -addPolyBind _ env bind@(Rec _) = return (extendFloats env bind) +addPolyBind _ env bind@(Rec _) = extendFloats env bind -- Hack: letrecs are more awkward, so we extend "by steam" -- without adding unfoldings etc. 
At worst this leads to -- more simplifier iterations ------------------------------- +----------------- addNonRecWithUnf :: SimplEnv - -> OutId -> OutExpr -- New binder and RHS - -> Unfolding -- New unfolding - -> SimplEnv -addNonRecWithUnf env new_bndr new_rhs new_unfolding - = let new_arity = exprArity new_rhs - old_arity = idArity new_bndr - info1 = idInfo new_bndr `setArityInfo` new_arity - - -- Unfolding info: Note [Setting the new unfolding] - info2 = info1 `setUnfoldingInfo` new_unfolding - - -- Demand info: Note [Setting the demand info] - info3 | isEvaldUnfolding new_unfolding = zapDemandInfo info2 `orElse` info2 - | otherwise = info2 - - final_id = new_bndr `setIdInfo` info3 - dmd_arity = length $ fst $ splitStrictSig $ idNewStrictness new_bndr - in - ASSERT( isId new_bndr ) + -> OutId -> OutExpr -- New binder and RHS + -> Unfolding -> WorkerInfo -- and unfolding + -> SimplEnv +-- Add suitable IdInfo to the Id, add the binding to the floats, and extend the in-scope set +addNonRecWithUnf env new_bndr rhs unfolding wkr + = ASSERT( isId new_bndr ) WARN( new_arity < old_arity || new_arity < dmd_arity, - (ppr final_id <+> ppr old_arity <+> ppr new_arity <+> ppr dmd_arity) $$ ppr new_rhs ) - - final_id `seq` -- This seq forces the Id, and hence its IdInfo, - -- and hence any inner substitutions - -- pprTrace "Binding" (ppr final_id <+> ppr unfolding) $ - addNonRec env final_id new_rhs - -- The addNonRec adds it to the in-scope set too - ------------------------------- -simplUnfolding :: SimplEnv-> TopLevelFlag - -> Id -- Debug output only - -> OccInfo -> Unfolding -> OutExpr - -> SimplM Unfolding -simplUnfolding env top_lvl bndr occ_info old_unf new_rhs -- Note [Setting the new unfolding] - | omit_unfolding = WARN( is_inline_rule, ppr bndr ) return NoUnfolding - | is_inline_rule = return (substUnfolding env is_top_lvl old_unf) - | otherwise = return (mkUnfolding is_top_lvl new_rhs) + (ptext (sLit "Arity decrease:") <+> ppr final_id <+> ppr old_arity + <+> ppr new_arity <+> ppr dmd_arity) $$ ppr rhs ) + -- Note [Arity decrease] + final_id `seq` -- This seq forces the Id, and hence its IdInfo, + -- and hence any inner substitutions + addNonRec env final_id rhs + -- The addNonRec adds it to the in-scope set too where - is_top_lvl = isTopLevel top_lvl - is_inline_rule = isInlineRule old_unf - omit_unfolding = isNonRuleLoopBreaker occ_info -\end{code} - -Note [Setting the new unfolding] -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -* If there's an INLINE pragma, we use substUnfolding to retain the - supplied inlining + dmd_arity = length $ fst $ splitStrictSig $ idNewStrictness new_bndr + old_arity = idArity new_bndr -* If not, we make an unfolding from the new RHS. But *only* for - non-loop-breakers. Making loop breakers not have an unfolding at all - means that we can avoid tests in exprIsConApp, for example. This is - important: if exprIsConApp says 'yes' for a recursive thing, then we - can get into an infinite loop + -- Arity info + new_arity = exprArity rhs + new_bndr_info = idInfo new_bndr `setArityInfo` new_arity + + -- Unfolding info + -- Add the unfolding *only* for non-loop-breakers + -- Making loop breakers not have an unfolding at all + -- means that we can avoid tests in exprIsConApp, for example. + -- This is important: if exprIsConApp says 'yes' for a recursive + -- thing, then we can get into an infinite loop + + -- Demand info + -- If the unfolding is a value, the demand info may + -- go pear-shaped, so we nuke it. 
Example:
+	--	let x = (a,b) in
+	--	case x of (p,q) -> h p q x
+	-- Here x is certainly demanded. But after we've nuked
+	-- the case, we'll get just
+	--	let x = (a,b) in h a b x
+	-- and now x is not demanded (I'm assuming h is lazy)
+	-- This really happens.  Similarly
+	--	let f = \x -> e in ...f..f...
+	-- After inlining f at some of its call sites the original binding may
+	-- (for example) be no longer strictly demanded.
+	-- The solution here is a bit ad hoc...
+    info_w_unf = new_bndr_info `setUnfoldingInfo` unfolding
+			       `setWorkerInfo` wkr
+
+    final_info | isEvaldUnfolding unfolding = zapDemandInfo info_w_unf `orElse` info_w_unf
+	       | otherwise		     = info_w_unf
+
+    final_id = new_bndr `setIdInfo` final_info
 \end{code}
 
-If there's an INLINE pragma on a loop breaker, we simply discard it
-(with a DEBUG warning).  The desugarer complains about binding groups
-that look likely to trigger this behaviour.
+Note [Arity decrease]
+~~~~~~~~~~~~~~~~~~~~~
+Generally speaking the arity of a binding should not decrease.  But it *can*
+legitimately happen because of RULES.  Eg
+	f = g Int
+where g has arity 2; then f will have arity 2 too.  But if there's a rewrite rule
+	g Int --> h
+where h has arity 1, then f's arity will decrease.  Here's a real-life example,
+which is in the output of Specialise:
+
+     Rec {
+	$dm {Arity 2} = \d.\x. op d
+	{-# RULES forall d. $dm Int d = $s$dm #-}
+
+	dInt = MkD .... opInt ...
+	opInt {Arity 1} = $dm dInt
+
+	$s$dm {Arity 0} = \x. op dInt }
 
-Note [Setting the demand info]
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-If the unfolding is a value, the demand info may
-go pear-shaped, so we nuke it.  Example:
-	let x = (a,b) in
-	case x of (p,q) -> h p q x
-Here x is certainly demanded. But after we've nuked
-the case, we'll get just
-	let x = (a,b) in h a b x
-and now x is not demanded (I'm assuming h is lazy)
-This really happens.  Similarly
-	let f = \x -> e in ...f..f...
-After inlining f at some of its call sites the original binding may
-(for example) be no longer strictly demanded.
-The solution here is a bit ad hoc...
+Here opInt has arity 1; but when we apply the rule its arity drops to 0.
+That's why Specialise goes to a little trouble to pin the right arity
+on specialised functions too.
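
The rewrite-rule mechanism itself can also be seen from source Haskell.  The
following is only an illustrative sketch (the module name and the names g, h,
f and the rule are invented; and unlike the Core example above, the partial
application here is a value application, so the sketch does not itself change
any recorded arity).  It shows the sense in which a RULE behaves like an
extra equation for g:

    module ArityDecreaseSketch where

    g :: Int -> Int -> Int        -- arity 2
    g x y = x + y
    {-# NOINLINE g #-}            -- keep g un-inlined so the rule has a chance to fire

    h :: Int -> Int               -- arity 1
    h y = y - 1
    {-# NOINLINE h #-}

    -- An extra "equation" for g, analogous to the rule  g Int --> h  above
    {-# RULES "g->h" forall x y. g x y = h y #-}

    f :: Int -> Int
    f = g 42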
%************************************************************************ @@ -935,12 +943,12 @@ simplLam env (bndr:bndrs) body (ApplyTo _ arg arg_se cont) simplLam env bndrs body cont = do { (env', bndrs') <- simplLamBndrs env bndrs ; body' <- simplExpr env' body - ; new_lam <- mkLam bndrs' body' + ; new_lam <- mkLam env' bndrs' body' ; rebuild env' new_lam cont } ------------------ simplNonRecE :: SimplEnv - -> InBndr -- The binder + -> InId -- The binder -> (InExpr, SimplEnv) -- Rhs of binding (or arg of lambda) -> ([InBndr], InExpr) -- Body of the let/lambda -- \xs.e @@ -999,9 +1007,21 @@ simplNote env (SCC cc) e cont = do { e' <- simplExpr (setEnclosingCC env currentCCS) e ; rebuild env (mkSCC cc e') cont } -simplNote env (CoreNote s) e cont - = do { e' <- simplExpr env e - ; rebuild env (Note (CoreNote s) e') cont } +-- See notes with SimplMonad.inlineMode +simplNote env InlineMe e cont + | Just (inside, outside) <- splitInlineCont cont -- Boring boring continuation; see notes above + = do { -- Don't inline inside an INLINE expression + e' <- simplExprC (setMode inlineMode env) e inside + ; rebuild env (mkInlineMe e') outside } + + | otherwise -- Dissolve the InlineMe note if there's + -- an interesting context of any kind to combine with + -- (even a type application -- anything except Stop) + = simplExprF env e cont + +simplNote env (CoreNote s) e cont = do + e' <- simplExpr env e + rebuild env (Note (CoreNote s) e') cont \end{code} @@ -1033,8 +1053,7 @@ simplVar env var cont completeCall :: SimplEnv -> Id -> SimplCont -> SimplM (SimplEnv, OutExpr) completeCall env var cont - = do { dflags <- getDOptsSmpl - ; let (args,call_cont) = contArgs cont + = do { let (args,call_cont) = contArgs cont -- The args are OutExprs, obtained by *lazily* substituting -- in the args found in cont. These args are only examined -- to limited depth (unless a rule fires). But we must do @@ -1050,45 +1069,18 @@ completeCall env var cont -- We used to use the black-listing mechanism to ensure that inlining of -- the wrapper didn't occur for things that have specialisations till a -- later phase, so but now we just try RULES first - -- - -- Note [Rules for recursive functions] - -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -- You might think that we shouldn't apply rules for a loop breaker: - -- doing so might give rise to an infinite loop, because a RULE is - -- rather like an extra equation for the function: - -- RULE: f (g x) y = x+y - -- Eqn: f a y = a-y - -- - -- But it's too drastic to disable rules for loop breakers. 
- -- Even the foldr/build rule would be disabled, because foldr - -- is recursive, and hence a loop breaker: - -- foldr k z (build g) = g k z - -- So it's up to the programmer: rules can cause divergence - ; rule_base <- getSimplRules - ; let in_scope = getInScope env - rules = getRules rule_base var - maybe_rule = case activeRule dflags env of - Nothing -> Nothing -- No rules apply - Just act_fn -> lookupRule act_fn in_scope - var args rules - ; case maybe_rule of { - Just (rule, rule_rhs) -> do - tick (RuleFired (ru_name rule)) - (if dopt Opt_D_dump_rule_firings dflags then - pprTrace "Rule fired" (vcat [ - text "Rule:" <+> ftext (ru_name rule), - text "Before:" <+> ppr var <+> sep (map pprParendExpr args), - text "After: " <+> pprCoreExpr rule_rhs, - text "Cont: " <+> ppr call_cont]) - else - id) $ - simplExprF env rule_rhs (dropArgs (ruleArity rule) cont) + -- + -- See also Note [Rules for recursive functions] + ; mb_rule <- tryRules env var args call_cont + ; case mb_rule of { + Just (n_args, rule_rhs) -> simplExprF env rule_rhs (dropArgs n_args cont) ; -- The ruleArity says how many args the rule consumed + ; Nothing -> do -- No rules - ; Nothing -> do -- No rules ------------- Next try inlining ---------------- - { let arg_infos = [interestingArg arg | arg <- args, isValArg arg] + { dflags <- getDOptsSmpl + ; let arg_infos = [interestingArg arg | arg <- args, isValArg arg] n_val_args = length arg_infos interesting_cont = interestingCallContext call_cont active_inline = activeInline env var @@ -1194,6 +1186,58 @@ to get the effect that finding (error "foo") in a strict arg position will discard the entire application and replace it with (error "foo"). Getting all this at once is TOO HARD! + +%************************************************************************ +%* * + Rewrite rules +%* * +%************************************************************************ + +\begin{code} +tryRules :: SimplEnv -> Id -> [OutExpr] -> SimplCont + -> SimplM (Maybe (Arity, CoreExpr)) -- The arity is the number of + -- args consumed by the rule +tryRules env fn args call_cont + = do { dflags <- getDOptsSmpl + ; rule_base <- getSimplRules + ; let in_scope = getInScope env + rules = getRules rule_base fn + maybe_rule = case activeRule dflags env of + Nothing -> Nothing -- No rules apply + Just act_fn -> lookupRule act_fn in_scope + fn args rules + ; case (rules, maybe_rule) of { + ([], _) -> return Nothing ; + (_, Nothing) -> return Nothing ; + (_, Just (rule, rule_rhs)) -> do + + { tick (RuleFired (ru_name rule)) + ; (if dopt Opt_D_dump_rule_firings dflags then + pprTrace "Rule fired" (vcat [ + text "Rule:" <+> ftext (ru_name rule), + text "Before:" <+> ppr fn <+> sep (map pprParendExpr args), + text "After: " <+> pprCoreExpr rule_rhs, + text "Cont: " <+> ppr call_cont]) + else + id) $ + return (Just (ruleArity rule, rule_rhs)) }}} +\end{code} + +Note [Rules for recursive functions] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +You might think that we shouldn't apply rules for a loop breaker: +doing so might give rise to an infinite loop, because a RULE is +rather like an extra equation for the function: + RULE: f (g x) y = x+y + Eqn: f a y = a-y + +But it's too drastic to disable rules for loop breakers. 
+Even the foldr/build rule would be disabled, because foldr +is recursive, and hence a loop breaker: + foldr k z (build g) = g k z +So it's up to the programmer: rules can cause divergence + + %************************************************************************ %* * Rebuilding a cse expression @@ -1290,12 +1334,13 @@ I don't really know how to improve this situation. --------------------------------------------------------- -- Eliminate the case if possible -rebuildCase :: SimplEnv - -> OutExpr -- Scrutinee - -> InId -- Case binder - -> [InAlt] -- Alternatives (inceasing order) - -> SimplCont - -> SimplM (SimplEnv, OutExpr) +rebuildCase, reallyRebuildCase + :: SimplEnv + -> OutExpr -- Scrutinee + -> InId -- Case binder + -> [InAlt] -- Alternatives (inceasing order) + -> SimplCont + -> SimplM (SimplEnv, OutExpr) -------------------------------------------------- -- 1. Eliminate the case if there's a known constructor @@ -1356,12 +1401,28 @@ rebuildCase env scrut case_bndr [(_, bndrs, rhs)] cont -- exprOkForSpeculation was intended for. var_demanded_later _ = False +rebuildCase env scrut case_bndr alts@[(_, bndrs, rhs)] cont + | all isDeadBinder (case_bndr : bndrs) -- So this is just 'seq' + = -- For this case, see Note [Rules for seq] in MkId + do { let rhs' = substExpr env rhs + out_args = [Type (substTy env (idType case_bndr)), + Type (exprType rhs'), scrut, rhs'] + -- Lazily evaluated, so we don't do most of this + ; mb_rule <- tryRules env seqId out_args cont + ; case mb_rule of + Just (n_args, res) -> simplExprF (zapSubstEnv env) + (mkApps res (drop n_args out_args)) + cont + Nothing -> reallyRebuildCase env scrut case_bndr alts cont } + +rebuildCase env scrut case_bndr alts cont + = reallyRebuildCase env scrut case_bndr alts cont -------------------------------------------------- -- 3. Catch-all case -------------------------------------------------- -rebuildCase env scrut case_bndr alts cont +reallyRebuildCase env scrut case_bndr alts cont = do { -- Prepare the continuation; -- The new subst_env is in place (env', dup_cont, nodup_cont) <- prepareCaseCont env alts cont @@ -1370,17 +1431,7 @@ rebuildCase env scrut case_bndr alts cont ; (scrut', case_bndr', alts') <- simplAlts env' scrut case_bndr alts dup_cont -- Check for empty alternatives - ; if null alts' then - -- This isn't strictly an error, although it is unusual. - -- It's possible that the simplifer might "see" that - -- an inner case has no accessible alternatives before - -- it "sees" that the entire branch of an outer case is - -- inaccessible. So we simply put an error case here instead. - pprTrace "mkCase: null alts" (ppr case_bndr <+> ppr scrut) $ - let res_ty' = contResultType env' (substTy env' (coreAltsType alts)) dup_cont - lit = mkStringLit "Impossible alternative" - in return (env', mkApps (Var rUNTIME_ERROR_ID) [Type res_ty', lit]) - + ; if null alts' then missingAlt env case_bndr alts cont else do { case_expr <- mkCase scrut' case_bndr' alts' @@ -1411,19 +1462,6 @@ The point is that we bring into the envt a binding after the outer case, and that makes (a,b) alive. At least we do unless the case binder is guaranteed dead. -In practice, the scrutinee is almost always a variable, so we pretty -much always zap the OccInfo of the binders. It doesn't matter much though. - - -Note [Case of cast] -~~~~~~~~~~~~~~~~~~~ -Consider case (v `cast` co) of x { I# -> - ... (case (v `cast` co) of {...}) ... -We'd like to eliminate the inner case. 
We can get this neatly by -arranging that inside the outer case we add the unfolding - v |-> x `cast` (sym co) -to v. Then we should inline v at the inner case, cancel the casts, and away we go - Note [Improving seq] ~~~~~~~~~~~~~~~~~~~ Consider @@ -1455,59 +1493,6 @@ At one point I did transformation in LiberateCase, but it's more robust here. LiberateCase gets to see it.) -Historical note [no-case-of-case] -~~~~~~~~~~~~~~~~~~~~~~ -We *used* to suppress the binder-swap in case expressoins when --fno-case-of-case is on. Old remarks: - "This happens in the first simplifier pass, - and enhances full laziness. Here's the bad case: - f = \ y -> ...(case x of I# v -> ...(case x of ...) ... ) - If we eliminate the inner case, we trap it inside the I# v -> arm, - which might prevent some full laziness happening. I've seen this - in action in spectral/cichelli/Prog.hs: - [(m,n) | m <- [1..max], n <- [1..max]] - Hence the check for NoCaseOfCase." -However, now the full-laziness pass itself reverses the binder-swap, so this -check is no longer necessary. - -Historical note [Suppressing the case binder-swap] -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -There is another situation when it might make sense to suppress the -case-expression binde-swap. If we have - - case x of w1 { DEFAULT -> case x of w2 { A -> e1; B -> e2 } - ...other cases .... } - -We'll perform the binder-swap for the outer case, giving - - case x of w1 { DEFAULT -> case w1 of w2 { A -> e1; B -> e2 } - ...other cases .... } - -But there is no point in doing it for the inner case, because w1 can't -be inlined anyway. Furthermore, doing the case-swapping involves -zapping w2's occurrence info (see paragraphs that follow), and that -forces us to bind w2 when doing case merging. So we get - - case x of w1 { A -> let w2 = w1 in e1 - B -> let w2 = w1 in e2 - ...other cases .... } - -This is plain silly in the common case where w2 is dead. - -Even so, I can't see a good way to implement this idea. I tried -not doing the binder-swap if the scrutinee was already evaluated -but that failed big-time: - - data T = MkT !Int - - case v of w { MkT x -> - case x of x1 { I# y1 -> - case x of x2 { I# y2 -> ... - -Notice that because MkT is strict, x is marked "evaluated". But to -eliminate the last case, we must either make sure that x (as well as -x1) has unfolding MkT y1. THe straightforward thing to do is to do -the binder-swap. So this whole note is a no-op. 
\begin{code} @@ -1733,23 +1718,15 @@ knownCon :: SimplEnv -> OutExpr -> AltCon knownCon env scrut con args bndr alts cont = do { tick (KnownBranch bndr) - ; knownAlt env scrut args bndr (findAlt con alts) cont } + ; case findAlt con alts of + Nothing -> missingAlt env bndr alts cont + Just alt -> knownAlt env scrut args bndr alt cont + } +------------------- knownAlt :: SimplEnv -> OutExpr -> [OutExpr] - -> InId -> (AltCon, [CoreBndr], InExpr) -> SimplCont + -> InId -> InAlt -> SimplCont -> SimplM (SimplEnv, OutExpr) -knownAlt env scrut _ bndr (DEFAULT, bs, rhs) cont - = ASSERT( null bs ) - do { env' <- simplNonRecX env bndr scrut - -- This might give rise to a binding with non-atomic args - -- like x = Node (f x) (g x) - -- but simplNonRecX will atomic-ify it - ; simplExprF env' rhs cont } - -knownAlt env scrut _ bndr (LitAlt _, bs, rhs) cont - = ASSERT( null bs ) - do { env' <- simplNonRecX env bndr scrut - ; simplExprF env' rhs cont } knownAlt env scrut the_args bndr (DataAlt dc, bs, rhs) cont = do { let n_drop_tys = length (dataConUnivTyVars dc) @@ -1795,6 +1772,25 @@ knownAlt env scrut the_args bndr (DataAlt dc, bs, rhs) cont bind_args _ _ _ = pprPanic "bind_args" $ ppr dc $$ ppr bs $$ ppr the_args $$ text "scrut:" <+> ppr scrut + +knownAlt env scrut _ bndr (_, bs, rhs) cont + = ASSERT( null bs ) -- Works for LitAlt and DEFAULT + do { env' <- simplNonRecX env bndr scrut + ; simplExprF env' rhs cont } + + +------------------- +missingAlt :: SimplEnv -> Id -> [InAlt] -> SimplCont -> SimplM (SimplEnv, OutExpr) + -- This isn't strictly an error, although it is unusual. + -- It's possible that the simplifer might "see" that + -- an inner case has no accessible alternatives before + -- it "sees" that the entire branch of an outer case is + -- inaccessible. So we simply put an error case here instead. +missingAlt env case_bndr alts cont + = WARN( True, ptext (sLit "missingAlt") <+> ppr case_bndr ) + return (env, mkImpossibleExpr res_ty) + where + res_ty = contResultType env (substTy env (coreAltsType alts)) cont \end{code} @@ -1833,11 +1829,20 @@ mkDupableCont env (CoerceIt ty cont) mkDupableCont env cont@(StrictBind {}) = return (env, mkBoringStop, cont) - -- See Note [Duplicating strict continuations] + -- See Note [Duplicating StrictBind] -mkDupableCont env cont@(StrictArg {}) - = return (env, mkBoringStop, cont) - -- See Note [Duplicating strict continuations] +mkDupableCont env (StrictArg fun cci ai cont) + -- See Note [Duplicating StrictArg] + = do { (env', dup, nodup) <- mkDupableCont env cont + ; (env'', fun') <- mk_dupable_call env' fun + ; return (env'', StrictArg fun' cci ai dup, nodup) } + where + mk_dupable_call env (Var v) = return (env, Var v) + mk_dupable_call env (App fun arg) = do { (env', fun') <- mk_dupable_call env fun + ; (env'', arg') <- makeTrivial env' arg + ; return (env'', fun' `App` arg') } + mk_dupable_call _ other = pprPanic "mk_dupable_call" (ppr other) + -- The invariant of StrictArg is that the first arg is always an App chain mkDupableCont env (ApplyTo _ arg se cont) = -- e.g. [...hole...] (...arg...) 
@@ -1906,97 +1911,40 @@ mkDupableAlts env case_bndr' the_alts mkDupableAlt :: SimplEnv -> OutId -> (AltCon, [CoreBndr], CoreExpr) -> SimplM (SimplEnv, (AltCon, [CoreBndr], CoreExpr)) -mkDupableAlt env case_bndr1 (con, bndrs1, rhs1) - | exprIsDupable rhs1 -- Note [Small alternative rhs] - = return (env, (con, bndrs1, rhs1)) +mkDupableAlt env case_bndr' (con, bndrs', rhs') + | exprIsDupable rhs' -- Note [Small alternative rhs] + = return (env, (con, bndrs', rhs')) | otherwise - = do { let abstract_over bndr + = do { let rhs_ty' = exprType rhs' + used_bndrs' = filter abstract_over (case_bndr' : bndrs') + abstract_over bndr | isTyVar bndr = True -- Abstract over all type variables just in case | otherwise = not (isDeadBinder bndr) -- The deadness info on the new Ids is preserved by simplBinders - inst_tys1 = tyConAppArgs (idType case_bndr1) - con_app dc = mkConApp dc (map Type inst_tys1 ++ varsToCoreExprs bndrs1) - - (rhs2, final_bndrs) -- See Note [Passing the case binder to join points] - | isDeadBinder case_bndr1 - = (rhs1, filter abstract_over bndrs1) - | opt_PassCaseBndrToJoinPoints, not (null bndrs1) - = (rhs1, (case_bndr1 : filter abstract_over bndrs1)) - | otherwise - = case con of - DataAlt dc -> (Let (NonRec case_bndr1 (con_app dc)) rhs1, bndrs1) - LitAlt lit -> ASSERT( null bndrs1 ) (Let (NonRec case_bndr1 (Lit lit)) rhs1, []) - DEFAULT -> ASSERT( null bndrs1 ) (rhs1, [case_bndr1]) - - ; (final_bndrs1, final_args) -- Note [Join point abstraction] - <- if (any isId final_bndrs) - then return (final_bndrs, varsToCoreExprs final_bndrs) + ; (final_bndrs', final_args) -- Note [Join point abstraction] + <- if (any isId used_bndrs') + then return (used_bndrs', varsToCoreExprs used_bndrs') else do { rw_id <- newId (fsLit "w") realWorldStatePrimTy - ; return (rw_id : final_bndrs, - Var realWorldPrimId : varsToCoreExprs final_bndrs) } + ; return ([rw_id], [Var realWorldPrimId]) } - ; let rhs_ty1 = exprType rhs1 - ; join_bndr <- newId (fsLit "$j") (mkPiTypes final_bndrs1 rhs_ty1) + ; join_bndr <- newId (fsLit "$j") (mkPiTypes final_bndrs' rhs_ty') -- Note [Funky mkPiTypes] ; let -- We make the lambdas into one-shot-lambdas. The -- join point is sure to be applied at most once, and doing so -- prevents the body of the join point being floated out by -- the full laziness pass - really_final_bndrs = map one_shot final_bndrs1 + really_final_bndrs = map one_shot final_bndrs' one_shot v | isId v = setOneShotLambda v | otherwise = v - join_rhs = mkLams really_final_bndrs rhs2 + join_rhs = mkLams really_final_bndrs rhs' join_call = mkApps (Var join_bndr) final_args - ; env1 <- addPolyBind NotTopLevel env (NonRec join_bndr join_rhs) - ; return (env1, (con, bndrs1, join_call)) } + ; return (addPolyBind NotTopLevel env (NonRec join_bndr join_rhs), (con, bndrs', join_call)) } -- See Note [Duplicated env] \end{code} -Note [Passing the case binder to join points] -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Suppose we have - case e of cb { C1 -> r1[cb]; C2 x y z -> r2[cb,x] } -and we want to make join points for the two alternatives, -which mention the case binder 'cb'. Should we pass 'cb' to -the join point, or reconstruct it? Here are the two alternatives -for the C2 alternative: - - Plan A(pass cb): j2 cb x = r2[cb,x] - - Plan B(reconstruct cb): j2 x y z = let cb = C2 x y z in r2[cb,x] - -The advantge of Plan B is that we can "see" the definition of cb -in r2, and that may be important when we inline stuff in r2. 
The -disadvantage is that if this optimisation doesn't happen, we end up -re-allocating C2, when it already exists. This does happen occasionally; -an example is the function nofib/spectral/cichelli/Auxil.$whinsert. - -Plan B is always better if the constructor is nullary. - -In both cases we don't have liveness info for cb on a branch-by-branch -basis, and it's possible that 'cb' is used in some branches but not -others. Well, the absence analyser will find that out later, so it's -not too bad. - -Sadly, at the time of writing, neither choice seems an unequivocal -win. Here are nofib results, for adding -fpass-case-bndr-to-join-points -(all others are zero effect): - - Program Size Allocs Runtime Elapsed --------------------------------------------------------------------------------- - cichelli +0.0% -4.4% 0.13 0.13 - pic +0.0% -0.7% 0.01 0.04 - transform -0.0% +2.8% -0.4% -9.1% - wave4main +0.0% +10.5% +3.1% +3.4% --------------------------------------------------------------------------------- - Min -0.0% -4.4% -7.0% -31.9% - Max +0.1% +10.5% +3.1% +15.0% - Geometric Mean +0.0% +0.1% -1.7% -6.1% - - Note [Duplicated env] ~~~~~~~~~~~~~~~~~~~~~ Some of the alternatives are simplified, but have not been turned into a join point @@ -2006,7 +1954,7 @@ we'd lose that when zapping the subst-env. We could have a per-alt subst-env, but zapping it (as we do in mkDupableCont, the Select case) is safe, and at worst delays the join-point inlining. -Note [Small alterantive rhs] +Note [Small alternative rhs] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It is worth checking for a small RHS because otherwise we get extra let bindings that may cause an extra iteration of the simplifier to @@ -2075,32 +2023,71 @@ It's a bit silly to add the realWorld dummy arg in this case, making True -> $j s (the \v alone is enough to make CPR happy) but I think it's rare -Note [Duplicating strict continuations] -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Do *not* duplicate StrictBind and StritArg continuations. We gain -nothing by propagating them into the expressions, and we do lose a -lot. Here's an example: - && (case x of { T -> F; F -> T }) E +Note [Duplicating StrictArg] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +The original plan had (where E is a big argument) +e.g. f E [..hole..] + ==> let $j = \a -> f E a + in $j [..hole..] + +But this is terrible! Here's an example: + && E (case x of { T -> F; F -> T }) Now, && is strict so we end up simplifying the case with an ArgOf continuation. If we let-bind it, we get - - let $j = \v -> && v E + let $j = \v -> && E v in simplExpr (case x of { T -> F; F -> T }) (ArgOf (\r -> $j r) And after simplifying more we get - - let $j = \v -> && v E + let $j = \v -> && E v in case x of { T -> $j F; F -> $j T } Which is a Very Bad Thing +What we do now is this + f E [..hole..] + ==> let a = E + in f a [..hole..] +Now if the thing in the hole is a case expression (which is when +we'll call mkDupableCont), we'll push the function call into the +branches, which is what we want. Now RULES for f may fire, and +call-pattern specialisation. Here's an example from Trac #3116 + go (n+1) (case l of + 1 -> bs' + _ -> Chunk p fpc (o+1) (l-1) bs') +If we can push the call for 'go' inside the case, we get +call-pattern specialisation for 'go', which is *crucial* for +this program. + +Here is the (&&) example: + && E (case x of { T -> F; F -> T }) + ==> let a = E in + case x of { T -> && a F; F -> && a T } +Much better! 
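
For a source-level picture of the let-bind-the-strict-argument idea, here is
a small sketch (demoBefore, demoAfter, f and expensive are invented names;
the real transformation is of course done by mkDupableCont on Core, not by
hand):

    module StrictArgSketch where

    f :: Int -> Int -> Int
    f a b = a + b
    {-# NOINLINE f #-}

    expensive :: Int               -- plays the role of the big argument E
    expensive = sum [1 .. 10000]

    -- Before: the case sits in f's second, strict argument position,
    -- with the big argument already supplied.
    demoBefore :: Bool -> Int
    demoBefore x = f expensive (case x of { True -> 0; False -> 1 })

    -- After let-binding E we can push the (now small) call into the
    -- branches, where RULES and call-pattern specialisation can see it.
    demoAfter :: Bool -> Int
    demoAfter x = let a = expensive
                  in case x of { True -> f a 0; False -> f a 1 }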
+
+Notice that
+  * Arguments to f *after* the strict one are handled by
+    the ApplyTo case of mkDupableCont.  Eg
+	f [..hole..] E
+
+  * We can only do the let-binding of E because the function
+    part of a StrictArg continuation is an explicit syntax
+    tree.  In earlier versions we represented it as a function
+    (CoreExpr -> CoreExpr) which we couldn't take apart.
+
+Do *not* duplicate StrictBind and StrictArg continuations.  We gain
+nothing by propagating them into the expressions, and we do lose a
+lot.
+
+The desire not to duplicate is the entire reason that
+mkDupableCont returns a pair of continuations.
+
+Note [Duplicating StrictBind]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Unlike StrictArg, there doesn't seem to be anything to gain from
+duplicating a StrictBind continuation, so we don't.
+
 The desire not to duplicate is the entire reason that
 mkDupableCont returns a pair of continuations.
 
-The original plan had:
-e.g. 	(...strict-fn...) [...hole...]
-	==>
-		let $j = \a -> ...strict-fn...
-		in $j [...hole...]
 
 Note [Single-alternative cases]
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~