X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=compiler%2FsimplCore%2FSimplify.lhs;h=7c88ad29e8c716340a03a13c5e4073cc526d7135;hb=63f6b0868f4948232f87bc4df52c9d3a2ec8f184;hp=22c7a5a409d91c5a1a9375de96f8a801a1558cf3;hpb=ccd0e382566940a508fcb1aa7487bc7a785fc329;p=ghc-hetmet.git diff --git a/compiler/simplCore/Simplify.lhs b/compiler/simplCore/Simplify.lhs index 22c7a5a..7c88ad2 100644 --- a/compiler/simplCore/Simplify.lhs +++ b/compiler/simplCore/Simplify.lhs @@ -26,6 +26,7 @@ import NewDemand ( isStrictDmd, splitStrictSig ) import PprCore ( pprParendExpr, pprCoreExpr ) import CoreUnfold ( mkUnfolding, callSiteInline, CallCtxt(..) ) import CoreUtils +import CoreArity ( exprArity ) import Rules ( lookupRule, getRules ) import BasicTypes ( isMarkedStrict ) import CostCentre ( currentCCS ) @@ -35,8 +36,6 @@ import BasicTypes ( TopLevelFlag(..), isTopLevel, RecFlag(..), isNonRuleLoopBreaker ) import Maybes ( orElse ) import Data.List ( mapAccumL ) -import MonadUtils ( foldlM ) -import StaticFlags ( opt_PassCaseBndrToJoinPoints ) import Outputable import FastString \end{code} @@ -341,7 +340,7 @@ simplLazyBind env top_lvl is_rec bndr bndr1 rhs rhs_se ; (env', rhs') <- if not (doFloatFromRhs top_lvl is_rec False body2 body_env2) then -- No floating, just wrap up! - do { rhs' <- mkLam tvs' (wrapFloats body_env2 body2) + do { rhs' <- mkLam env tvs' (wrapFloats body_env2 body2) ; return (env, rhs') } else if null tvs then -- Simple floating @@ -351,7 +350,7 @@ simplLazyBind env top_lvl is_rec bndr bndr1 rhs rhs_se else -- Do type-abstraction first do { tick LetFloatFromLet ; (poly_binds, body3) <- abstractFloats tvs' body_env2 body2 - ; rhs' <- mkLam tvs' body3 + ; rhs' <- mkLam env tvs' body3 ; let env' = foldl (addPolyBind top_lvl) env poly_binds ; return (env', rhs') } @@ -462,7 +461,7 @@ prepareRhs env0 rhs0 where is_val = n_val_args > 0 -- There is at least one arg -- ...and the fun a constructor or PAP - && (isDataConWorkId fun || n_val_args < idArity fun) + && (isConLikeId fun || n_val_args < idArity fun) go _ env other = return (False, env, other) \end{code} @@ -579,7 +578,7 @@ completeBind env top_lvl old_bndr new_bndr new_rhs = return (addNonRecWithUnf env new_bndr new_rhs unfolding wkr) where unfolding | omit_unfolding = NoUnfolding - | otherwise = mkUnfolding (isTopLevel top_lvl) new_rhs + | otherwise = mkUnfolding (isTopLevel top_lvl) new_rhs old_info = idInfo old_bndr occ_info = occInfo old_info wkr = substWorker env (workerInfo old_info) @@ -624,7 +623,9 @@ addNonRecWithUnf :: SimplEnv addNonRecWithUnf env new_bndr rhs unfolding wkr = ASSERT( isId new_bndr ) WARN( new_arity < old_arity || new_arity < dmd_arity, - (ppr final_id <+> ppr old_arity <+> ppr new_arity <+> ppr dmd_arity) $$ ppr rhs ) + (ptext (sLit "Arity decrease:") <+> ppr final_id <+> ppr old_arity + <+> ppr new_arity <+> ppr dmd_arity) $$ ppr rhs ) + -- Note [Arity decrease] final_id `seq` -- This seq forces the Id, and hence its IdInfo, -- and hence any inner substitutions addNonRec env final_id rhs @@ -667,6 +668,28 @@ addNonRecWithUnf env new_bndr rhs unfolding wkr final_id = new_bndr `setIdInfo` final_info \end{code} +Note [Arity decrease] +~~~~~~~~~~~~~~~~~~~~~ +Generally speaking the arity of a binding should not decrease. But it *can* +legitimately happen becuase of RULES. Eg + f = g Int +where g has arity 2, will have arity 2. But if there's a rewrite rule + g Int --> h +where h has arity 1, then f's arity will decrease. 
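Here is a small, self-contained source-level sketch of the same effect
(the module and names below are invented purely for illustration; this is
not GHC code, and the exact arities assigned depend on the arity analysis
described elsewhere in this file):

    module ArityDecreaseSketch where

    -- 'g' has two manifest lambdas, so it gets arity 2.
    g :: Int -> Int -> Int
    g x y = x * 2 + y
    {-# NOINLINE g #-}    -- stop 'g' inlining, so the rule below can fire

    -- 'h' has the same type, but only one manifest lambda; the expensive
    -- 'let' sitting between the two arguments should stop eta-expansion,
    -- so its arity stays 1.
    h :: Int -> Int -> Int
    h x = let big = sum [1 .. x + 1000000]
          in \y -> big + y
    {-# NOINLINE h #-}

    -- A source-level analogue of the "g Int --> h" rule above.
    {-# RULES "g->h" forall x. g x = h x #-}

    -- 'f' starts out as a partial application of the arity-2 'g', so it
    -- is given arity 1.  Once the rule rewrites its right-hand side to
    -- 'h 10', a saturated call of an arity-1 function, f's arity drops
    -- to 0: the benign kind of decrease the WARN above tolerates.
    f :: Int -> Int
    f = g 10
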
Here's a real-life example, +which is in the output of Specialise: + + Rec { + $dm {Arity 2} = \d.\x. op d + {-# RULES forall d. $dm Int d = $s$dm #-} + + dInt = MkD .... opInt ... + opInt {Arity 1} = $dm dInt + + $s$dm {Arity 0} = \x. op dInt } + +Here opInt has arity 1; but when we apply the rule its arity drops to 0. +That's why Specialise goes to a little trouble to pin the right arity +on specialised functions too. %************************************************************************ @@ -920,7 +943,7 @@ simplLam env (bndr:bndrs) body (ApplyTo _ arg arg_se cont) simplLam env bndrs body cont = do { (env', bndrs') <- simplLamBndrs env bndrs ; body' <- simplExpr env' body - ; new_lam <- mkLam bndrs' body' + ; new_lam <- mkLam env' bndrs' body' ; rebuild env' new_lam cont } ------------------ @@ -1095,7 +1118,7 @@ completeCall env var cont Just unfolding -- There is an inlining! -> do { tick (UnfoldingDone var) ; (if dopt Opt_D_dump_inlinings dflags then - pprTrace ("Inlining done" ++ showSDoc (ppr var)) (vcat [ + pprTrace ("Inlining done: " ++ showSDoc (ppr var)) (vcat [ text "Before:" <+> ppr var <+> sep (map pprParendExpr args), text "Inlined fn: " <+> nest 2 (ppr unfolding), text "Cont: " <+> ppr call_cont]) @@ -1439,59 +1462,6 @@ At one point I did transformation in LiberateCase, but it's more robust here. LiberateCase gets to see it.) -Historical note [no-case-of-case] -~~~~~~~~~~~~~~~~~~~~~~ -We *used* to suppress the binder-swap in case expressoins when --fno-case-of-case is on. Old remarks: - "This happens in the first simplifier pass, - and enhances full laziness. Here's the bad case: - f = \ y -> ...(case x of I# v -> ...(case x of ...) ... ) - If we eliminate the inner case, we trap it inside the I# v -> arm, - which might prevent some full laziness happening. I've seen this - in action in spectral/cichelli/Prog.hs: - [(m,n) | m <- [1..max], n <- [1..max]] - Hence the check for NoCaseOfCase." -However, now the full-laziness pass itself reverses the binder-swap, so this -check is no longer necessary. - -Historical note [Suppressing the case binder-swap] -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -There is another situation when it might make sense to suppress the -case-expression binde-swap. If we have - - case x of w1 { DEFAULT -> case x of w2 { A -> e1; B -> e2 } - ...other cases .... } - -We'll perform the binder-swap for the outer case, giving - - case x of w1 { DEFAULT -> case w1 of w2 { A -> e1; B -> e2 } - ...other cases .... } - -But there is no point in doing it for the inner case, because w1 can't -be inlined anyway. Furthermore, doing the case-swapping involves -zapping w2's occurrence info (see paragraphs that follow), and that -forces us to bind w2 when doing case merging. So we get - - case x of w1 { A -> let w2 = w1 in e1 - B -> let w2 = w1 in e2 - ...other cases .... } - -This is plain silly in the common case where w2 is dead. - -Even so, I can't see a good way to implement this idea. I tried -not doing the binder-swap if the scrutinee was already evaluated -but that failed big-time: - - data T = MkT !Int - - case v of w { MkT x -> - case x of x1 { I# y1 -> - case x of x2 { I# y2 -> ... - -Notice that because MkT is strict, x is marked "evaluated". But to -eliminate the last case, we must either make sure that x (as well as -x1) has unfolding MkT y1. THe straightforward thing to do is to do -the binder-swap. So this whole note is a no-op. 
\begin{code} @@ -1817,11 +1787,20 @@ mkDupableCont env (CoerceIt ty cont) mkDupableCont env cont@(StrictBind {}) = return (env, mkBoringStop, cont) - -- See Note [Duplicating strict continuations] + -- See Note [Duplicating StrictBind] -mkDupableCont env cont@(StrictArg {}) - = return (env, mkBoringStop, cont) - -- See Note [Duplicating strict continuations] +mkDupableCont env (StrictArg fun cci ai cont) + -- See Note [Duplicating StrictArg] + = do { (env', dup, nodup) <- mkDupableCont env cont + ; (env'', fun') <- mk_dupable_call env' fun + ; return (env'', StrictArg fun' cci ai dup, nodup) } + where + mk_dupable_call env (Var v) = return (env, Var v) + mk_dupable_call env (App fun arg) = do { (env', fun') <- mk_dupable_call env fun + ; (env'', arg') <- makeTrivial env' arg + ; return (env'', fun' `App` arg') } + mk_dupable_call _ other = pprPanic "mk_dupable_call" (ppr other) + -- The invariant of StrictArg is that the first arg is always an App chain mkDupableCont env (ApplyTo _ arg se cont) = -- e.g. [...hole...] (...arg...) @@ -1890,97 +1869,40 @@ mkDupableAlts env case_bndr' the_alts mkDupableAlt :: SimplEnv -> OutId -> (AltCon, [CoreBndr], CoreExpr) -> SimplM (SimplEnv, (AltCon, [CoreBndr], CoreExpr)) -mkDupableAlt env case_bndr1 (con, bndrs1, rhs1) - | exprIsDupable rhs1 -- Note [Small alternative rhs] - = return (env, (con, bndrs1, rhs1)) +mkDupableAlt env case_bndr' (con, bndrs', rhs') + | exprIsDupable rhs' -- Note [Small alternative rhs] + = return (env, (con, bndrs', rhs')) | otherwise - = do { let abstract_over bndr + = do { let rhs_ty' = exprType rhs' + used_bndrs' = filter abstract_over (case_bndr' : bndrs') + abstract_over bndr | isTyVar bndr = True -- Abstract over all type variables just in case | otherwise = not (isDeadBinder bndr) -- The deadness info on the new Ids is preserved by simplBinders - inst_tys1 = tyConAppArgs (idType case_bndr1) - con_app dc = mkConApp dc (map Type inst_tys1 ++ varsToCoreExprs bndrs1) - - (rhs2, final_bndrs) -- See Note [Passing the case binder to join points] - | isDeadBinder case_bndr1 - = (rhs1, filter abstract_over bndrs1) - | opt_PassCaseBndrToJoinPoints, not (null bndrs1) - = (rhs1, (case_bndr1 : filter abstract_over bndrs1)) - | otherwise - = case con of - DataAlt dc -> (Let (NonRec case_bndr1 (con_app dc)) rhs1, bndrs1) - LitAlt lit -> ASSERT( null bndrs1 ) (Let (NonRec case_bndr1 (Lit lit)) rhs1, []) - DEFAULT -> ASSERT( null bndrs1 ) (rhs1, [case_bndr1]) - - ; (final_bndrs1, final_args) -- Note [Join point abstraction] - <- if (any isId final_bndrs) - then return (final_bndrs, varsToCoreExprs final_bndrs) + ; (final_bndrs', final_args) -- Note [Join point abstraction] + <- if (any isId used_bndrs') + then return (used_bndrs', varsToCoreExprs used_bndrs') else do { rw_id <- newId (fsLit "w") realWorldStatePrimTy - ; return (rw_id : final_bndrs, - Var realWorldPrimId : varsToCoreExprs final_bndrs) } + ; return ([rw_id], [Var realWorldPrimId]) } - ; let rhs_ty1 = exprType rhs1 - ; join_bndr <- newId (fsLit "$j") (mkPiTypes final_bndrs1 rhs_ty1) + ; join_bndr <- newId (fsLit "$j") (mkPiTypes final_bndrs' rhs_ty') -- Note [Funky mkPiTypes] ; let -- We make the lambdas into one-shot-lambdas. 
The -- join point is sure to be applied at most once, and doing so -- prevents the body of the join point being floated out by -- the full laziness pass - really_final_bndrs = map one_shot final_bndrs1 + really_final_bndrs = map one_shot final_bndrs' one_shot v | isId v = setOneShotLambda v | otherwise = v - join_rhs = mkLams really_final_bndrs rhs2 + join_rhs = mkLams really_final_bndrs rhs' join_call = mkApps (Var join_bndr) final_args - ; env1 <- addPolyBind NotTopLevel env (NonRec join_bndr join_rhs) - ; return (env1, (con, bndrs1, join_call)) } + ; return (addPolyBind NotTopLevel env (NonRec join_bndr join_rhs), (con, bndrs', join_call)) } -- See Note [Duplicated env] \end{code} -Note [Passing the case binder to join points] -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Suppose we have - case e of cb { C1 -> r1[cb]; C2 x y z -> r2[cb,x] } -and we want to make join points for the two alternatives, -which mention the case binder 'cb'. Should we pass 'cb' to -the join point, or reconstruct it? Here are the two alternatives -for the C2 alternative: - - Plan A(pass cb): j2 cb x = r2[cb,x] - - Plan B(reconstruct cb): j2 x y z = let cb = C2 x y z in r2[cb,x] - -The advantge of Plan B is that we can "see" the definition of cb -in r2, and that may be important when we inline stuff in r2. The -disadvantage is that if this optimisation doesn't happen, we end up -re-allocating C2, when it already exists. This does happen occasionally; -an example is the function nofib/spectral/cichelli/Auxil.$whinsert. - -Plan B is always better if the constructor is nullary. - -In both cases we don't have liveness info for cb on a branch-by-branch -basis, and it's possible that 'cb' is used in some branches but not -others. Well, the absence analyser will find that out later, so it's -not too bad. - -Sadly, at the time of writing, neither choice seems an unequivocal -win. Here are nofib results, for adding -fpass-case-bndr-to-join-points -(all others are zero effect): - - Program Size Allocs Runtime Elapsed --------------------------------------------------------------------------------- - cichelli +0.0% -4.4% 0.13 0.13 - pic +0.0% -0.7% 0.01 0.04 - transform -0.0% +2.8% -0.4% -9.1% - wave4main +0.0% +10.5% +3.1% +3.4% --------------------------------------------------------------------------------- - Min -0.0% -4.4% -7.0% -31.9% - Max +0.1% +10.5% +3.1% +15.0% - Geometric Mean +0.0% +0.1% -1.7% -6.1% - - Note [Duplicated env] ~~~~~~~~~~~~~~~~~~~~~ Some of the alternatives are simplified, but have not been turned into a join point @@ -2059,32 +1981,71 @@ It's a bit silly to add the realWorld dummy arg in this case, making True -> $j s (the \v alone is enough to make CPR happy) but I think it's rare -Note [Duplicating strict continuations] -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Do *not* duplicate StrictBind and StritArg continuations. We gain -nothing by propagating them into the expressions, and we do lose a -lot. Here's an example: - && (case x of { T -> F; F -> T }) E +Note [Duplicating StrictArg] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +The original plan had (where E is a big argument) +e.g. f E [..hole..] + ==> let $j = \a -> f E a + in $j [..hole..] + +But this is terrible! Here's an example: + && E (case x of { T -> F; F -> T }) Now, && is strict so we end up simplifying the case with an ArgOf continuation. 
If we let-bind it, we get - - let $j = \v -> && v E + let $j = \v -> && E v in simplExpr (case x of { T -> F; F -> T }) (ArgOf (\r -> $j r) And after simplifying more we get - - let $j = \v -> && v E + let $j = \v -> && E v in case x of { T -> $j F; F -> $j T } Which is a Very Bad Thing +What we do now is this + f E [..hole..] + ==> let a = E + in f a [..hole..] +Now if the thing in the hole is a case expression (which is when +we'll call mkDupableCont), we'll push the function call into the +branches, which is what we want. Now RULES for f may fire, and +call-pattern specialisation. Here's an example from Trac #3116 + go (n+1) (case l of + 1 -> bs' + _ -> Chunk p fpc (o+1) (l-1) bs') +If we can push the call for 'go' inside the case, we get +call-pattern specialisation for 'go', which is *crucial* for +this program. + +Here is the (&&) example: + && E (case x of { T -> F; F -> T }) + ==> let a = E in + case x of { T -> && a F; F -> && a T } +Much better! + +Notice that + * Arguments to f *after* the strict one are handled by + the ApplyTo case of mkDupableCont. Eg + f [..hole..] E + + * We can only do the let-binding of E because the function + part of a StrictArg continuation is an explicit syntax + tree. In earlier versions we represented it as a function + (CoreExpr -> CoreEpxr) which we couldn't take apart. + +Do *not* duplicate StrictBind and StritArg continuations. We gain +nothing by propagating them into the expressions, and we do lose a +lot. + +The desire not to duplicate is the entire reason that +mkDupableCont returns a pair of continuations. + +Note [Duplicating StrictBind] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Unlike StrictArg, there doesn't seem anything to gain from +duplicating a StrictBind continuation, so we don't. + The desire not to duplicate is the entire reason that mkDupableCont returns a pair of continuations. -The original plan had: -e.g. (...strict-fn...) [...hole...] - ==> - let $j = \a -> ...strict-fn... - in $j [...hole...] Note [Single-alternative cases] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~