X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Fcompiler%2FcoreSyn%2FCoreUtils.lhs;h=5c26e0da780bba25a19ae73eb0c67395d0df04f6;hb=1cfc9faaa059b9b090971399e4eb8ae9d364335c;hp=0c01569b73ecb21cb92823e59c8a02fbbca66143;hpb=fe69f3c1d6062b90635963aa414c33951bf18427;p=ghc-hetmet.git diff --git a/ghc/compiler/coreSyn/CoreUtils.lhs b/ghc/compiler/coreSyn/CoreUtils.lhs index 0c01569..5c26e0d 100644 --- a/ghc/compiler/coreSyn/CoreUtils.lhs +++ b/ghc/compiler/coreSyn/CoreUtils.lhs @@ -6,19 +6,23 @@ \begin{code} module CoreUtils ( -- Construction - mkNote, mkInlineMe, mkSCC, mkCoerce, - bindNonRec, mkIfThenElse, mkAltExpr, - mkPiType, + mkNote, mkInlineMe, mkSCC, mkCoerce, mkCoerce2, + bindNonRec, needsCaseBinding, + mkIfThenElse, mkAltExpr, mkPiType, mkPiTypes, + + -- Taking expressions apart + findDefault, findAlt, hasDefault, -- Properties of expressions - exprType, coreAltsType, exprArity, + exprType, coreAltsType, exprIsBottom, exprIsDupable, exprIsTrivial, exprIsCheap, exprIsValue,exprOkForSpeculation, exprIsBig, - exprIsConApp_maybe, - idAppIsBottom, idAppIsCheap, + exprIsConApp_maybe, + rhsIsStatic, - -- Expr transformation - etaReduceExpr, exprEtaExpandArity, + -- Arity and eta expansion + manifestArity, exprArity, + exprEtaExpandArity, etaExpand, -- Size coreBindsSize, @@ -27,46 +31,47 @@ module CoreUtils ( hashExpr, -- Equality - cheapEqExpr, eqExpr, applyTypeToArgs + cheapEqExpr, eqExpr, applyTypeToArgs, applyTypeToArg ) where #include "HsVersions.h" -import GlaExts -- For `xori` +import GLAEXTS -- For `xori` import CoreSyn -import CoreFVs ( exprFreeVars ) import PprCore ( pprCoreExpr ) import Var ( Var, isId, isTyVar ) -import VarSet import VarEnv -import Name ( isLocallyDefined, hashName ) -import Literal ( Literal, hashLiteral, literalType, litIsDupable ) -import DataCon ( DataCon, dataConRepArity ) -import PrimOp ( primOpOkForSpeculation, primOpIsCheap, - primOpIsDupable ) -import Id ( Id, idType, idFlavour, idStrictness, idLBVarInfo, - mkWildId, idArity, idName, idUnfolding, idInfo, - isDataConId_maybe, isPrimOpId_maybe +import Name ( hashName, isDllName ) +import Literal ( hashLiteral, literalType, litIsDupable, + litIsTrivial, isZeroLit, isLitLitLit ) +import DataCon ( DataCon, dataConRepArity, dataConArgTys, + isExistentialDataCon, dataConTyCon, dataConName ) +import PrimOp ( PrimOp(..), primOpOkForSpeculation, primOpIsCheap ) +import Id ( Id, idType, globalIdDetails, idNewStrictness, + mkWildId, idArity, idName, idUnfolding, idInfo, + isOneShotLambda, isDataConWorkId_maybe, mkSysLocal, + isDataConWorkId, isBottomingId ) -import IdInfo ( arityLowerBound, InlinePragInfo(..), - LBVarInfo(..), - IdFlavour(..), - megaSeqIdInfo ) -import Demand ( appIsBottom ) -import Type ( Type, mkFunTy, mkForAllTy, - splitFunTy_maybe, tyVarsOfType, tyVarsOfTypes, - isNotUsgTy, mkUsgTy, unUsgTy, UsageAnn(..), - applyTys, isUnLiftedType, seqType +import IdInfo ( GlobalIdDetails(..), megaSeqIdInfo ) +import NewDemand ( appIsBottom ) +import Type ( Type, mkFunTy, mkForAllTy, splitFunTy_maybe, + splitFunTy, + applyTys, isUnLiftedType, seqType, mkTyVarTy, + splitForAllTy_maybe, isForAllTy, splitNewType_maybe, + splitTyConApp_maybe, eqType, funResultTy, applyTy, + funResultTy, applyTy ) -import TysWiredIn ( boolTy, stringTy, trueDataCon, falseDataCon ) +import TyCon ( tyConArity ) +import TysWiredIn ( boolTy, trueDataCon, falseDataCon ) import CostCentre ( CostCentre ) -import Unique ( buildIdKey, augmentIdKey ) -import Util ( zipWithEqual, mapAccumL ) -import Maybes ( maybeToBool ) +import 
BasicTypes ( Arity ) +import Unique ( Unique ) import Outputable import TysPrim ( alphaTy ) -- Debugging only +import Util ( equalLength, lengthAtLeast ) +import TysPrim ( statePrimTyCon ) \end{code} @@ -84,7 +89,6 @@ exprType (Lit lit) = literalType lit exprType (Let _ body) = exprType body exprType (Case _ _ alts) = coreAltsType alts exprType (Note (Coerce ty _) e) = ty -- **! should take usage from e -exprType (Note (TermUsg u) e) = mkUsgTy u (unUsgTy (exprType e)) exprType (Note other_note e) = exprType e exprType (Lam binder expr) = mkPiType binder (exprType expr) exprType e@(App _ _) @@ -103,29 +107,35 @@ lbvarinfo field to figure out the right annotation for the arrove in case of a term variable. \begin{code} -mkPiType :: Var -> Type -> Type -- The more polymorphic version doesn't work... -mkPiType v ty | isId v = (case idLBVarInfo v of - IsOneShotLambda -> mkUsgTy UsOnce - otherwise -> id) $ - mkFunTy (idType v) ty - | isTyVar v = mkForAllTy v ty +mkPiType :: Var -> Type -> Type -- The more polymorphic version +mkPiTypes :: [Var] -> Type -> Type -- doesn't work... + +mkPiTypes vs ty = foldr mkPiType ty vs + +mkPiType v ty + | isId v = mkFunTy (idType v) ty + | otherwise = mkForAllTy v ty \end{code} \begin{code} --- The first argument is just for debugging +applyTypeToArg :: Type -> CoreExpr -> Type +applyTypeToArg fun_ty (Type arg_ty) = applyTy fun_ty arg_ty +applyTypeToArg fun_ty other_arg = funResultTy fun_ty + applyTypeToArgs :: CoreExpr -> Type -> [CoreExpr] -> Type +-- A more efficient version of applyTypeToArg +-- when we have several args +-- The first argument is just for debugging applyTypeToArgs e op_ty [] = op_ty applyTypeToArgs e op_ty (Type ty : args) = -- Accumulate type arguments so we can instantiate all at once - ASSERT2( all isNotUsgTy tys, - ppr e <+> text "of" <+> ppr op_ty <+> text "to" <+> - ppr (Type ty : args) <+> text "i.e." <+> ppr tys ) - applyTypeToArgs e (applyTys op_ty tys) rest_args + go [ty] args where - (tys, rest_args) = go [ty] args - go tys (Type ty : args) = go (ty:tys) args - go tys rest_args = (reverse tys, rest_args) + go rev_tys (Type ty : args) = go (ty:rev_tys) args + go rev_tys rest_args = applyTypeToArgs e op_ty' rest_args + where + op_ty' = applyTys op_ty (reverse rev_tys) applyTypeToArgs e op_ty (other_arg : args) = case (splitFunTy_maybe op_ty) of @@ -145,7 +155,7 @@ mkNote removes redundant coercions, and SCCs where possible \begin{code} mkNote :: Note -> CoreExpr -> CoreExpr -mkNote (Coerce to_ty from_ty) expr = mkCoerce to_ty from_ty expr +mkNote (Coerce to_ty from_ty) expr = mkCoerce2 to_ty from_ty expr mkNote (SCC cc) expr = mkSCC cc expr mkNote InlineMe expr = mkInlineMe expr mkNote note expr = Note note expr @@ -161,34 +171,54 @@ Drop trivial InlineMe's. This is somewhat important, because if we have an unfo that looks like (Note InlineMe (Var v)), the InlineMe doesn't go away because it may not be *applied* to anything. +We don't use exprIsTrivial here, though, because we sometimes generate worker/wrapper +bindings like + fw = ... + f = inline_me (coerce t fw) +As usual, the inline_me prevents the worker from getting inlined back into the wrapper. +We want the split, so that the coerces can cancel at the call site. + +However, we can get left with tiresome type applications. Notably, consider + f = /\ a -> let t = e in (t, w) +Then lifting the let out of the big lambda gives + t' = /\a -> e + f = /\ a -> let t = inline_me (t' a) in (t, w) +The inline_me is to stop the simplifier inlining t' right back +into t's RHS. 
In the next phase we'll substitute for t (since +its rhs is trivial) and *then* we could get rid of the inline_me. +But it hardly seems worth it, so I don't bother. + \begin{code} -mkInlineMe e | exprIsTrivial e = e - | otherwise = Note InlineMe e +mkInlineMe (Var v) = Var v +mkInlineMe e = Note InlineMe e \end{code} \begin{code} -mkCoerce :: Type -> Type -> CoreExpr -> CoreExpr - -mkCoerce to_ty from_ty (Note (Coerce to_ty2 from_ty2) expr) - = ASSERT( from_ty == to_ty2 ) - mkCoerce to_ty from_ty2 expr - -mkCoerce to_ty from_ty expr - | to_ty == from_ty = expr - | otherwise = ASSERT( from_ty == exprType expr ) - Note (Coerce to_ty from_ty) expr +mkCoerce :: Type -> CoreExpr -> CoreExpr +mkCoerce to_ty expr = mkCoerce2 to_ty (exprType expr) expr + +mkCoerce2 :: Type -> Type -> CoreExpr -> CoreExpr +mkCoerce2 to_ty from_ty (Note (Coerce to_ty2 from_ty2) expr) + = ASSERT( from_ty `eqType` to_ty2 ) + mkCoerce2 to_ty from_ty2 expr + +mkCoerce2 to_ty from_ty expr + | to_ty `eqType` from_ty = expr + | otherwise = ASSERT( from_ty `eqType` exprType expr ) + Note (Coerce to_ty from_ty) expr \end{code} \begin{code} mkSCC :: CostCentre -> Expr b -> Expr b -- Note: Nested SCC's *are* preserved for the benefit of - -- cost centre stack profiling (Durham) - -mkSCC cc (Lit lit) = Lit lit -mkSCC cc (Lam x e) = Lam x (mkSCC cc e) -- Move _scc_ inside lambda -mkSCC cc expr = Note (SCC cc) expr + -- cost centre stack profiling +mkSCC cc (Lit lit) = Lit lit +mkSCC cc (Lam x e) = Lam x (mkSCC cc e) -- Move _scc_ inside lambda +mkSCC cc (Note (SCC cc') e) = Note (SCC cc) (Note (SCC cc') e) +mkSCC cc (Note n e) = Note n (mkSCC cc e) -- Move _scc_ inside notes +mkSCC cc expr = Note (SCC cc) expr \end{code} @@ -210,8 +240,13 @@ bindNonRec :: Id -> CoreExpr -> CoreExpr -> CoreExpr -- that give Core Lint a heart attack. Actually the simplifier -- deals with them perfectly well. bindNonRec bndr rhs body - | isUnLiftedType (idType bndr) = Case rhs bndr [(DEFAULT,[],body)] - | otherwise = Let (NonRec bndr rhs) body + | needsCaseBinding (idType bndr) rhs = Case rhs bndr [(DEFAULT,[],body)] + | otherwise = Let (NonRec bndr rhs) body + +needsCaseBinding ty rhs = isUnLiftedType ty && not (exprOkForSpeculation rhs) + -- Make a case expression instead of a let + -- These can arise either from the desugarer, + -- or from beta reductions: (\x.e) (x +# y) \end{code} \begin{code} @@ -230,6 +265,41 @@ mkIfThenElse guard then_expr else_expr (DataAlt falseDataCon, [], else_expr) ] \end{code} + +%************************************************************************ +%* * +\subsection{Taking expressions apart} +%* * +%************************************************************************ + +The default alternative must be first, if it exists at all. +This makes it easy to find, though it makes matching marginally harder. 
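As a small, self-contained illustration of the pay-off (toy stand-ins for
AltCon and the alternatives, not the compiler's own types), peeling a leading
default off the list needs just one pattern match, with no search; the real
findDefault and findAlt below rely on exactly this ordering.

\begin{code}
-- Toy sketch only: hypothetical types, not CoreSyn's.
data ToyAltCon = ToyDEFAULT | ToyCon String
  deriving (Eq, Show)

toyFindDefault :: [(ToyAltCon, rhs)] -> ([(ToyAltCon, rhs)], Maybe rhs)
toyFindDefault ((ToyDEFAULT, rhs) : alts) = (alts, Just rhs)
toyFindDefault alts                       = (alts, Nothing)

-- ghci> toyFindDefault [(ToyDEFAULT, "deflt"), (ToyCon "Just", "rhs1")]
-- ([(ToyCon "Just","rhs1")],Just "deflt")
\end{code}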
+ +\begin{code} +hasDefault :: [CoreAlt] -> Bool +hasDefault ((DEFAULT,_,_) : alts) = True +hasDefault _ = False + +findDefault :: [CoreAlt] -> ([CoreAlt], Maybe CoreExpr) +findDefault ((DEFAULT,args,rhs) : alts) = ASSERT( null args ) (alts, Just rhs) +findDefault alts = (alts, Nothing) + +findAlt :: AltCon -> [CoreAlt] -> CoreAlt +findAlt con alts + = case alts of + (deflt@(DEFAULT,_,_):alts) -> go alts deflt + other -> go alts panic_deflt + + where + panic_deflt = pprPanic "Missing alternative" (ppr con $$ vcat (map ppr alts)) + + go [] deflt = deflt + go (alt@(con1,_,_) : alts) deflt | con == con1 = alt + | otherwise = ASSERT( not (con1 == DEFAULT) ) + go alts deflt +\end{code} + + %************************************************************************ %* * \subsection{Figuring out things about expressions} @@ -244,16 +314,25 @@ mkIfThenElse guard then_expr else_expr @exprIsBottom@ is true of expressions that are guaranteed to diverge +There used to be a gruesome test for (hasNoBinding v) in the +Var case: + exprIsTrivial (Var v) | hasNoBinding v = idArity v == 0 +The idea here is that a constructor worker, like $wJust, is +really short for (\x -> $wJust x), becuase $wJust has no binding. +So it should be treated like a lambda. Ditto unsaturated primops. +But now constructor workers are not "have-no-binding" Ids. And +completely un-applied primops and foreign-call Ids are sufficiently +rare that I plan to allow them to be duplicated and put up with +saturating them. + \begin{code} -exprIsTrivial (Var v) - | Just op <- isPrimOpId_maybe v = primOpIsDupable op - | otherwise = True -exprIsTrivial (Type _) = True -exprIsTrivial (Lit lit) = True -exprIsTrivial (App e arg) = isTypeArg arg && exprIsTrivial e -exprIsTrivial (Note _ e) = exprIsTrivial e -exprIsTrivial (Lam b body) | isTyVar b = exprIsTrivial body -exprIsTrivial other = False +exprIsTrivial (Var v) = True -- See notes above +exprIsTrivial (Type _) = True +exprIsTrivial (Lit lit) = litIsTrivial lit +exprIsTrivial (App e arg) = not (isRuntimeArg arg) && exprIsTrivial e +exprIsTrivial (Note _ e) = exprIsTrivial e +exprIsTrivial (Lam b body) = not (isRuntimeVar b) && exprIsTrivial body +exprIsTrivial other = False \end{code} @@ -269,10 +348,11 @@ exprIsTrivial other = False \begin{code} -exprIsDupable (Type _) = True -exprIsDupable (Var v) = True -exprIsDupable (Lit lit) = litIsDupable lit -exprIsDupable (Note _ e) = exprIsDupable e +exprIsDupable (Type _) = True +exprIsDupable (Var v) = True +exprIsDupable (Lit lit) = litIsDupable lit +exprIsDupable (Note InlineMe e) = True +exprIsDupable (Note _ e) = exprIsDupable e exprIsDupable expr = go expr 0 where @@ -300,19 +380,16 @@ shared. The main examples of things which aren't WHNF but are * case e of pi -> ei + (where e, and all the ei are cheap) - where e, and all the ei are cheap; and - - * let x = e - in b - - where e and b are cheap; and + * let x = e in b + (where e and b are cheap) * op x1 ... xn - - where op is a cheap primitive operator + (where op is a cheap primitive operator) * error "foo" + (because we are happy to substitute it inside a lambda) Notice that a variable is considered 'cheap': we can push it inside a lambda, because sharing will make sure it is only evaluated once. 
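Tying those bullets to source level, here is a hedged illustration (invented
names, not from the compiler or its libraries) of a right-hand side that is
not a value but is cheap in this sense:

\begin{code}
-- Illustration only: ordinary source Haskell with made-up names.
data P = P { firstOf :: Int, secondOf :: Int }

pick :: Bool -> P -> Int
pick b p =
  case b of                 -- scrutinising a variable: cheap
    True  -> firstOf p      -- record selection, no construction: cheap
    False -> error "pick"   -- an application that is certainly bottom: cheap
-- The body is not a value, but duplicating or re-evaluating it costs little,
-- so it may safely be pushed inside a lambda.
\end{code}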
@@ -322,12 +399,21 @@ exprIsCheap :: CoreExpr -> Bool exprIsCheap (Lit lit) = True exprIsCheap (Type _) = True exprIsCheap (Var _) = True +exprIsCheap (Note InlineMe e) = True exprIsCheap (Note _ e) = exprIsCheap e -exprIsCheap (Lam x e) = if isId x then True else exprIsCheap e -exprIsCheap (Case (Var v) _ alts) = and [exprIsCheap rhs | (_,_,rhs) <- alts] +exprIsCheap (Lam x e) = isRuntimeVar x || exprIsCheap e +exprIsCheap (Case e _ alts) = exprIsCheap e && + and [exprIsCheap rhs | (_,_,rhs) <- alts] -- Experimentally, treat (case x of ...) as cheap + -- (and case __coerce x etc.) -- This improves arities of overloaded functions where -- there is only dictionary selection (no construction) involved +exprIsCheap (Let (NonRec x _) e) + | isUnLiftedType (idType x) = exprIsCheap e + | otherwise = False + -- strict lets always have cheap right hand sides, and + -- do no allocation. + exprIsCheap other_expr = go other_expr 0 True where @@ -337,13 +423,12 @@ exprIsCheap other_expr || idAppIsBottom f n_args -- Application of a function which - -- always gives bottom; we treat this as - -- a WHNF, because it certainly doesn't - -- need to be shared! + -- always gives bottom; we treat this as cheap + -- because it certainly doesn't need to be shared! go (App f a) n_args args_cheap - | isTypeArg a = go f n_args args_cheap - | otherwise = go f (n_args + 1) (exprIsCheap a && args_cheap) + | not (isRuntimeArg a) = go f n_args args_cheap + | otherwise = go f (n_args + 1) (exprIsCheap a && args_cheap) go other n_args args_cheap = False @@ -352,11 +437,11 @@ idAppIsCheap id n_val_args | n_val_args == 0 = True -- Just a type application of -- a variable (f t1 t2 t3) -- counts as WHNF - | otherwise = case idFlavour id of - DataConId _ -> True - RecordSelId _ -> True -- I'm experimenting with making record selection - -- look cheap, so we will substitute it inside a - -- lambda. Particularly for dictionary field selection + | otherwise = case globalIdDetails id of + DataConWorkId _ -> True + RecordSelId _ -> True -- I'm experimenting with making record selection + ClassOpId _ -> True -- look cheap, so we will substitute it inside a + -- lambda. Particularly for dictionary field selection PrimOpId op -> primOpIsCheap op -- In principle we should worry about primops -- that return a type variable, since the result @@ -394,28 +479,50 @@ side effects, and can't diverge or raise an exception. 
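Before the implementation of exprOkForSpeculation, a small standalone
illustration of the distinction it draws (the module and function names are
invented; only the primops are real), including the special treatment of
division that follows:

\begin{code}
{-# LANGUAGE MagicHash #-}
-- Standalone demo, not part of CoreUtils.
module SpecDemo where

import GHC.Exts (Int(I#), (+#), quotInt#)

alwaysSafe :: Int -> Int -> Int
alwaysSafe (I# x) (I# y) = I# (x +# y)
  -- (+#) cannot fail or diverge, so it is fine to evaluate it
  -- speculatively, before we know its result is demanded

divTwo :: Int -> Int
divTwo (I# x) = I# (x `quotInt#` 2#)
  -- quotInt# fails only when its divisor is zero; with a visibly
  -- non-zero literal divisor it is safe to speculate, which is the
  -- isDivOp special case below

divAny :: Int -> Int -> Int
divAny (I# x) (I# y) = I# (x `quotInt#` y)
  -- here the divisor may be zero, so the division must not be
  -- evaluated before its result is certainly needed
\end{code}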
\begin{code} exprOkForSpeculation :: CoreExpr -> Bool exprOkForSpeculation (Lit _) = True +exprOkForSpeculation (Type _) = True exprOkForSpeculation (Var v) = isUnLiftedType (idType v) exprOkForSpeculation (Note _ e) = exprOkForSpeculation e exprOkForSpeculation other_expr - = go other_expr 0 True + = case collectArgs other_expr of + (Var f, args) -> spec_ok (globalIdDetails f) args + other -> False + where - go (Var f) n_args args_ok - = case idFlavour f of - DataConId _ -> True -- The strictness of the constructor has already - -- been expressed by its "wrapper", so we don't need - -- to take the arguments into account - - PrimOpId op -> primOpOkForSpeculation op && args_ok + spec_ok (DataConWorkId _) args + = True -- The strictness of the constructor has already + -- been expressed by its "wrapper", so we don't need + -- to take the arguments into account + + spec_ok (PrimOpId op) args + | isDivOp op, -- Special case for dividing operations that fail + [arg1, Lit lit] <- args -- only if the divisor is zero + = not (isZeroLit lit) && exprOkForSpeculation arg1 + -- Often there is a literal divisor, and this + -- can get rid of a thunk in an inner looop + + | otherwise + = primOpOkForSpeculation op && + all exprOkForSpeculation args -- A bit conservative: we don't really need -- to care about lazy arguments, but this is easy - other -> False - - go (App f a) n_args args_ok - | isTypeArg a = go f n_args args_ok - | otherwise = go f (n_args + 1) (exprOkForSpeculation a && args_ok) - - go other n_args args_ok = False + spec_ok other args = False + +isDivOp :: PrimOp -> Bool +-- True of dyadic operators that can fail +-- only if the second arg is zero +-- This function probably belongs in PrimOp, or even in +-- an automagically generated file.. but it's such a +-- special case I thought I'd leave it here for now. +isDivOp IntQuotOp = True +isDivOp IntRemOp = True +isDivOp WordQuotOp = True +isDivOp WordRemOp = True +isDivOp IntegerQuotRemOp = True +isDivOp IntegerDivModOp = True +isDivOp FloatDivOp = True +isDivOp DoubleDivOp = True +isDivOp other = False \end{code} @@ -433,88 +540,138 @@ exprIsBottom e = go 0 e go n (Lam _ _) = False idAppIsBottom :: Id -> Int -> Bool -idAppIsBottom id n_val_args = appIsBottom (idStrictness id) n_val_args +idAppIsBottom id n_val_args = appIsBottom (idNewStrictness id) n_val_args \end{code} @exprIsValue@ returns true for expressions that are certainly *already* -evaluated to WHNF. This is used to decide wether it's ok to change +evaluated to *head* normal form. This is used to decide whether it's ok +to change + case x of _ -> e ===> e and to decide whether it's safe to discard a `seq` -So, it does *not* treat variables as evaluated, unless they say they are +So, it does *not* treat variables as evaluated, unless they say they are. -\begin{code} -exprIsValue :: CoreExpr -> Bool -- True => Value-lambda, constructor, PAP -exprIsValue (Type ty) = True -- Types are honorary Values; we don't mind - -- copying them -exprIsValue (Lit l) = True -exprIsValue (Lam b e) = isId b || exprIsValue e -exprIsValue (Note _ e) = exprIsValue e -exprIsValue other_expr - = go other_expr 0 - where - go (Var f) n_args = idAppIsValue f n_args - - go (App f a) n_args - | isTypeArg a = go f n_args - | otherwise = go f (n_args + 1) - - go (Note _ f) n_args = go f n_args +But it *does* treat partial applications and constructor applications +as values, even if their arguments are non-trivial, provided the argument +type is lifted; + e.g. (:) (f x) (map f xs) is a value + map (...redex...) 
is a value +Because `seq` on such things completes immediately - go other n_args = False +For unlifted argument types, we have to be careful: + C (f x :: Int#) +Suppose (f x) diverges; then C (f x) is not a value. True, but +this form is illegal (see the invariants in CoreSyn). Args of unboxed +type must be ok-for-speculation (or trivial). -idAppIsValue :: Id -> Int -> Bool -idAppIsValue id n_val_args - = case idFlavour id of - DataConId _ -> True - PrimOpId _ -> n_val_args < idArity id - other | n_val_args == 0 -> isEvaldUnfolding (idUnfolding id) - | otherwise -> n_val_args < idArity id +\begin{code} +exprIsValue :: CoreExpr -> Bool -- True => Value-lambda, constructor, PAP +exprIsValue (Var v) -- NB: There are no value args at this point + = isDataConWorkId v -- Catches nullary constructors, + -- so that [] and () are values, for example + || idArity v > 0 -- Catches (e.g.) primops that don't have unfoldings + || isEvaldUnfolding (idUnfolding v) + -- Check the thing's unfolding; it might be bound to a value -- A worry: what if an Id's unfolding is just itself: -- then we could get an infinite loop... -\end{code} -\begin{code} -exprArity :: CoreExpr -> Int -- How many value lambdas are at the top -exprArity (Lam b e) | isTyVar b = exprArity e - | otherwise = 1 + exprArity e - -exprArity (Note note e) | ok_note note = exprArity e - where - ok_note (Coerce _ _) = True - -- We *do* look through coerces when getting arities. - -- Reason: arities are to do with *representation* and - -- work duplication. - ok_note InlineMe = True - ok_note InlineCall = True - ok_note other = False - -- SCC and TermUsg might be over-conservative? - -exprArity other = 0 +exprIsValue (Lit l) = True +exprIsValue (Type ty) = True -- Types are honorary Values; + -- we don't mind copying them +exprIsValue (Lam b e) = isRuntimeVar b || exprIsValue e +exprIsValue (Note _ e) = exprIsValue e +exprIsValue (App e (Type _)) = exprIsValue e +exprIsValue (App e a) = app_is_value e [a] +exprIsValue other = False + +-- There is at least one value argument +app_is_value (Var fun) args + | isDataConWorkId fun -- Constructor apps are values + || idArity fun > valArgCount args -- Under-applied function + = check_args (idType fun) args +app_is_value (App f a) as = app_is_value f (a:as) +app_is_value other as = False + + -- 'check_args' checks that unlifted-type args + -- are in fact guaranteed non-divergent +check_args fun_ty [] = True +check_args fun_ty (Type _ : args) = case splitForAllTy_maybe fun_ty of + Just (_, ty) -> check_args ty args +check_args fun_ty (arg : args) + | isUnLiftedType arg_ty = exprOkForSpeculation arg + | otherwise = check_args res_ty args + where + (arg_ty, res_ty) = splitFunTy fun_ty \end{code} \begin{code} exprIsConApp_maybe :: CoreExpr -> Maybe (DataCon, [CoreExpr]) -exprIsConApp_maybe expr - = analyse (collectArgs expr) +exprIsConApp_maybe (Note (Coerce to_ty from_ty) expr) + = -- Maybe this is over the top, but here we try to turn + -- coerce (S,T) ( x, y ) + -- effectively into + -- ( coerce S x, coerce T y ) + -- This happens in anger in PrelArrExts which has a coerce + -- case coerce memcpy a b of + -- (# r, s #) -> ... 
+ -- where the memcpy is in the IO monad, but the call is in + -- the (ST s) monad + case exprIsConApp_maybe expr of { + Nothing -> Nothing ; + Just (dc, args) -> + + case splitTyConApp_maybe to_ty of { + Nothing -> Nothing ; + Just (tc, tc_arg_tys) | tc /= dataConTyCon dc -> Nothing + | isExistentialDataCon dc -> Nothing + | otherwise -> + -- Type constructor must match + -- We knock out existentials to keep matters simple(r) + let + arity = tyConArity tc + val_args = drop arity args + to_arg_tys = dataConArgTys dc tc_arg_tys + mk_coerce ty arg = mkCoerce ty arg + new_val_args = zipWith mk_coerce to_arg_tys val_args + in + ASSERT( all isTypeArg (take arity args) ) + ASSERT( equalLength val_args to_arg_tys ) + Just (dc, map Type tc_arg_tys ++ new_val_args) + }} + +exprIsConApp_maybe (Note _ expr) + = exprIsConApp_maybe expr + -- We ignore InlineMe notes in case we have + -- x = __inline_me__ (a,b) + -- All part of making sure that INLINE pragmas never hurt + -- Marcin tripped on this one when making dictionaries more inlinable + -- + -- In fact, we ignore all notes. For example, + -- case _scc_ "foo" (C a b) of + -- C a b -> e + -- should be optimised away, but it will be only if we look + -- through the SCC note. + +exprIsConApp_maybe expr = analyse (collectArgs expr) where analyse (Var fun, args) - | maybeToBool maybe_con_app = maybe_con_app - where - maybe_con_app = case isDataConId_maybe fun of - Just con | length args >= dataConRepArity con - -- Might be > because the arity excludes type args - -> Just (con, args) - other -> Nothing + | Just con <- isDataConWorkId_maybe fun, + args `lengthAtLeast` dataConRepArity con + -- Might be > because the arity excludes type args + = Just (con,args) + -- Look through unfoldings, but only cheap ones, because + -- we are effectively duplicating the unfolding analyse (Var fun, []) - = case maybeUnfoldingTemplate (idUnfolding fun) of - Nothing -> Nothing - Just unf -> exprIsConApp_maybe unf + | let unf = idUnfolding fun, + isCheapUnfolding unf + = exprIsConApp_maybe (unfoldingTemplate unf) analyse other = Nothing -\end{code} +\end{code} + %************************************************************************ @@ -523,91 +680,305 @@ exprIsConApp_maybe expr %* * %************************************************************************ -@etaReduceExpr@ trys an eta reduction at the top level of a Core Expr. - -e.g. \ x y -> f x y ===> f +\begin{code} +exprEtaExpandArity :: CoreExpr -> Arity +{- The Arity returned is the number of value args the + thing can be applied to without doing much work + +exprEtaExpandArity is used when eta expanding + e ==> \xy -> e x y + +It returns 1 (or more) to: + case x of p -> \s -> ... +because for I/O ish things we really want to get that \s to the top. +We are prepared to evaluate x each time round the loop in order to get that + +It's all a bit more subtle than it looks: + +1. One-shot lambdas + +Consider one-shot lambdas + let x = expensive in \y z -> E +We want this to have arity 2 if the \y-abstraction is a 1-shot lambda +Hence the ArityType returned by arityType + +2. The state-transformer hack + +The one-shot lambda special cause is particularly important/useful for +IO state transformers, where we often get + let x = E in \ s -> ... + +and the \s is a real-world state token abstraction. Such abstractions +are almost invariably 1-shot, so we want to pull the \s out, past the +let x=E, even if E is expensive. So we treat state-token lambdas as +one-shot even if they aren't really. 
The hack is in Id.isOneShotLambda. + +3. Dealing with bottom + +Consider also + f = \x -> error "foo" +Here, arity 1 is fine. But if it is + f = \x -> case x of + True -> error "foo" + False -> \y -> x+y +then we want to get arity 2. Tecnically, this isn't quite right, because + (f True) `seq` 1 +should diverge, but it'll converge if we eta-expand f. Nevertheless, we +do so; it improves some programs significantly, and increasing convergence +isn't a bad thing. Hence the ABot/ATop in ArityType. + +Actually, the situation is worse. Consider + f = \x -> case x of + True -> \y -> x+y + False -> \y -> x-y +Can we eta-expand here? At first the answer looks like "yes of course", but +consider + (f bot) `seq` 1 +This should diverge! But if we eta-expand, it won't. Again, we ignore this +"problem", because being scrupulous would lose an important transformation for +many programs. +-} + + +exprEtaExpandArity e = arityDepth (arityType e) + +-- A limited sort of function type +data ArityType = AFun Bool ArityType -- True <=> one-shot + | ATop -- Know nothing + | ABot -- Diverges + +arityDepth :: ArityType -> Arity +arityDepth (AFun _ ty) = 1 + arityDepth ty +arityDepth ty = 0 + +andArityType ABot at2 = at2 +andArityType ATop at2 = ATop +andArityType (AFun t1 at1) (AFun t2 at2) = AFun (t1 && t2) (andArityType at1 at2) +andArityType at1 at2 = andArityType at2 at1 + +arityType :: CoreExpr -> ArityType + -- (go1 e) = [b1,..,bn] + -- means expression can be rewritten \x_b1 -> ... \x_bn -> body + -- where bi is True <=> the lambda is one-shot + +arityType (Note n e) = arityType e +-- Not needed any more: etaExpand is cleverer +-- | ok_note n = arityType e +-- | otherwise = ATop + +arityType (Var v) + = mk (idArity v) + where + mk :: Arity -> ArityType + mk 0 | isBottomingId v = ABot + | otherwise = ATop + mk n = AFun False (mk (n-1)) + + -- When the type of the Id encodes one-shot-ness, + -- use the idinfo here + + -- Lambdas; increase arity +arityType (Lam x e) | isId x = AFun (isOneShotLambda x || isStateHack x) (arityType e) + | otherwise = arityType e + + -- Applications; decrease arity +arityType (App f (Type _)) = arityType f +arityType (App f a) = case arityType f of + AFun one_shot xs | exprIsCheap a -> xs + other -> ATop + + -- Case/Let; keep arity if either the expression is cheap + -- or it's a 1-shot lambda +arityType (Case scrut _ alts) = case foldr1 andArityType [arityType rhs | (_,_,rhs) <- alts] of + xs@(AFun one_shot _) | one_shot -> xs + xs | exprIsCheap scrut -> xs + | otherwise -> ATop + +arityType (Let b e) = case arityType e of + xs@(AFun one_shot _) | one_shot -> xs + xs | all exprIsCheap (rhssOfBind b) -> xs + | otherwise -> ATop + +arityType other = ATop + +isStateHack id = case splitTyConApp_maybe (idType id) of + Just (tycon,_) | tycon == statePrimTyCon -> True + other -> False + + -- The last clause is a gross hack. It claims that + -- every function over realWorldStatePrimTy is a one-shot + -- function. This is pretty true in practice, and makes a big + -- difference. For example, consider + -- a `thenST` \ r -> ...E... + -- The early full laziness pass, if it doesn't know that r is one-shot + -- will pull out E (let's say it doesn't mention r) to give + -- let lvl = E in a `thenST` \ r -> ...lvl... + -- When `thenST` gets inlined, we end up with + -- let lvl = E in \s -> case a s of (r, s') -> ...lvl... + -- and we don't re-inline E. + -- + -- It would be better to spot that r was one-shot to start with, but + -- I don't want to rely on that. 
+ -- + -- Another good example is in fill_in in PrelPack.lhs. We should be able to + -- spot that fill_in has arity 2 (and when Keith is done, we will) but we can't yet. + +{- NOT NEEDED ANY MORE: etaExpand is cleverer +ok_note InlineMe = False +ok_note other = True + -- Notice that we do not look through __inline_me__ + -- This may seem surprising, but consider + -- f = _inline_me (\x -> e) + -- We DO NOT want to eta expand this to + -- f = \x -> (_inline_me (\x -> e)) x + -- because the _inline_me gets dropped now it is applied, + -- giving just + -- f = \x -> e + -- A Bad Idea +-} +\end{code} -But we only do this if it gets rid of a whole lambda, not part. -The idea is that lambdas are often quite helpful: they indicate -head normal forms, so we don't want to chuck them away lightly. \begin{code} -etaReduceExpr :: CoreExpr -> CoreExpr - -- ToDo: we should really check that we don't turn a non-bottom - -- lambda into a bottom variable. Sigh - -etaReduceExpr expr@(Lam bndr body) - = check (reverse binders) body +etaExpand :: Arity -- Result should have this number of value args + -> [Unique] + -> CoreExpr -> Type -- Expression and its type + -> CoreExpr +-- (etaExpand n us e ty) returns an expression with +-- the same meaning as 'e', but with arity 'n'. +-- +-- Given e' = etaExpand n us e ty +-- We should have +-- ty = exprType e = exprType e' +-- +-- Note that SCCs are not treated specially. If we have +-- etaExpand 2 (\x -> scc "foo" e) +-- = (\xy -> (scc "foo" e) y) +-- So the costs of evaluating 'e' (not 'e y') are attributed to "foo" + +etaExpand n us expr ty + | manifestArity expr >= n = expr -- The no-op case + | otherwise = eta_expand n us expr ty where - (binders, body) = collectBinders expr - check [] body - | not (any (`elemVarSet` body_fvs) binders) - = body -- Success! - where - body_fvs = exprFreeVars body +-- manifestArity sees how many leading value lambdas there are +manifestArity :: CoreExpr -> Arity +manifestArity (Lam v e) | isId v = 1 + manifestArity e + | otherwise = manifestArity e +manifestArity (Note _ e) = manifestArity e +manifestArity e = 0 + +-- etaExpand deals with for-alls. For example: +-- etaExpand 1 E +-- where E :: forall a. a -> a +-- would return +-- (/\b. \y::a -> E b y) +-- +-- It deals with coerces too, though they are now rare +-- so perhaps the extra code isn't worth it + +eta_expand n us expr ty + | n == 0 && + -- The ILX code generator requires eta expansion for type arguments + -- too, but alas the 'n' doesn't tell us how many of them there + -- may be. So we eagerly eta expand any big lambdas, and just + -- cross our fingers about possible loss of sharing in the ILX case. + -- The Right Thing is probably to make 'arity' include + -- type variables throughout the compiler. (ToDo.) 
+ not (isForAllTy ty) + -- Saturated, so nothing to do + = expr + + -- Short cut for the case where there already + -- is a lambda; no point in gratuitously adding more +eta_expand n us (Lam v body) ty + | isTyVar v + = Lam v (eta_expand n us body (applyTy ty (mkTyVarTy v))) + + | otherwise + = Lam v (eta_expand (n-1) us body (funResultTy ty)) + +-- We used to have a special case that stepped inside Coerces here, +-- thus: eta_expand n us (Note note@(Coerce _ ty) e) _ +-- = Note note (eta_expand n us e ty) +-- BUT this led to an infinite loop +-- Example: newtype T = MkT (Int -> Int) +-- eta_expand 1 (coerce (Int->Int) e) +-- --> coerce (Int->Int) (eta_expand 1 T e) +-- by the bogus eqn +-- --> coerce (Int->Int) (coerce T +-- (\x::Int -> eta_expand 1 (coerce (Int->Int) e))) +-- by the splitNewType_maybe case below +-- and round we go + +eta_expand n us expr ty + = case splitForAllTy_maybe ty of { + Just (tv,ty') -> Lam tv (eta_expand n us (App expr (Type (mkTyVarTy tv))) ty') + + ; Nothing -> + + case splitFunTy_maybe ty of { + Just (arg_ty, res_ty) -> Lam arg1 (eta_expand (n-1) us2 (App expr (Var arg1)) res_ty) + where + arg1 = mkSysLocal FSLIT("eta") uniq arg_ty + (uniq:us2) = us + + ; Nothing -> + + -- Given this: + -- newtype T = MkT (Int -> Int) + -- Consider eta-expanding this + -- eta_expand 1 e T + -- We want to get + -- coerce T (\x::Int -> (coerce (Int->Int) e) x) + + case splitNewType_maybe ty of { + Just ty' -> mkCoerce2 ty ty' (eta_expand n us (mkCoerce2 ty' ty expr) ty') ; + Nothing -> pprTrace "Bad eta expand" (ppr expr $$ ppr ty) expr + }}} +\end{code} - check (b : bs) (App fun arg) - | (varToCoreExpr b `cheapEqExpr` arg) - = check bs fun +exprArity is a cheap-and-cheerful version of exprEtaExpandArity. +It tells how many things the expression can be applied to before doing +any work. It doesn't look inside cases, lets, etc. The idea is that +exprEtaExpandArity will do the hard work, leaving something that's easy +for exprArity to grapple with. In particular, Simplify uses exprArity to +compute the ArityInfo for the Id. - check _ _ = expr -- Bale out +Originally I thought that it was enough just to look for top-level lambdas, but +it isn't. I've seen this -etaReduceExpr expr = expr -- The common case -\end{code} - + foo = PrelBase.timesInt -\begin{code} -exprEtaExpandArity :: CoreExpr -> Int -- The number of args the thing can be applied to - -- without doing much work --- This is used when eta expanding --- e ==> \xy -> e x y --- --- It returns 1 (or more) to: --- case x of p -> \s -> ... --- because for I/O ish things we really want to get that \s to the top. --- We are prepared to evaluate x each time round the loop in order to get that --- Hence "generous" arity - -exprEtaExpandArity e - = go e - where - go (Var v) = idArity v - go (App f (Type _)) = go f - go (App f a) | exprIsCheap a = (go f - 1) `max` 0 -- Never go -ve! 
- go (Lam x e) | isId x = go e + 1 - | otherwise = go e - go (Note n e) | ok_note n = go e - go (Case scrut _ alts) - | exprIsCheap scrut = min_zero [go rhs | (_,_,rhs) <- alts] - go (Let b e) - | all exprIsCheap (rhssOfBind b) = go e - - go other = 0 - - ok_note (Coerce _ _) = True - ok_note InlineCall = True - ok_note other = False - -- Notice that we do not look through __inline_me__ - -- This one is a bit more surprising, but consider - -- f = _inline_me (\x -> e) - -- We DO NOT want to eta expand this to - -- f = \x -> (_inline_me (\x -> e)) x - -- because the _inline_me gets dropped now it is applied, - -- giving just - -- f = \x -> e - -- A Bad Idea - -min_zero :: [Int] -> Int -- Find the minimum, but zero is the smallest -min_zero (x:xs) = go x xs - where - go 0 xs = 0 -- Nothing beats zero - go min [] = min - go min (x:xs) | x < min = go x xs - | otherwise = go min xs +We want foo to get arity 2 even though the eta-expander will leave it +unchanged, in the expectation that it'll be inlined. But occasionally it +isn't, because foo is blacklisted (used in a rule). -\end{code} +Similarly, see the ok_note check in exprEtaExpandArity. So + f = __inline_me (\x -> e) +won't be eta-expanded. + +And in any case it seems more robust to have exprArity be a bit more intelligent. +But note that (\x y z -> f x y z) +should have arity 3, regardless of f's arity. +\begin{code} +exprArity :: CoreExpr -> Arity +exprArity e = go e + where + go (Var v) = idArity v + go (Lam x e) | isId x = go e + 1 + | otherwise = go e + go (Note n e) = go e + go (App e (Type t)) = go e + go (App f a) | exprIsCheap a = (go f - 1) `max` 0 + -- NB: exprIsCheap a! + -- f (fac x) does not have arity 2, + -- even if f has arity 3! + -- NB: `max 0`! (\x y -> f x) has arity 2, even if f is + -- unknown, hence arity 0 + go _ = 0 +\end{code} %************************************************************************ %* * @@ -624,7 +995,7 @@ cheapEqExpr :: Expr b -> Expr b -> Bool cheapEqExpr (Var v1) (Var v2) = v1==v2 cheapEqExpr (Lit lit1) (Lit lit2) = lit1 == lit2 -cheapEqExpr (Type t1) (Type t2) = t1 == t2 +cheapEqExpr (Type t1) (Type t2) = t1 `eqType` t2 cheapEqExpr (App f1 a1) (App f2 a2) = f1 `cheapEqExpr` f2 && a1 `cheapEqExpr` a2 @@ -644,6 +1015,9 @@ exprIsBig other = True \begin{code} eqExpr :: CoreExpr -> CoreExpr -> Bool -- Works ok at more general type, but only needed at CoreExpr + -- Used in rule matching, so when we find a type we use + -- eqTcType, which doesn't look through newtypes + -- [And it doesn't risk falling into a black hole either.] 
eqExpr e1 e2 = eq emptyVarEnv e1 e2 where @@ -660,7 +1034,7 @@ eqExpr e1 e2 eq env (Let (NonRec v1 r1) e1) (Let (NonRec v2 r2) e2) = eq env r1 r2 && eq (extendVarEnv env v1 v2) e1 e2 eq env (Let (Rec ps1) e1) - (Let (Rec ps2) e2) = length ps1 == length ps2 && + (Let (Rec ps2) e2) = equalLength ps1 ps2 && and (zipWith eq_rhs ps1 ps2) && eq env' e1 e2 where @@ -668,13 +1042,13 @@ eqExpr e1 e2 eq_rhs (_,r1) (_,r2) = eq env' r1 r2 eq env (Case e1 v1 a1) (Case e2 v2 a2) = eq env e1 e2 && - length a1 == length a2 && + equalLength a1 a2 && and (zipWith (eq_alt env') a1 a2) where env' = extendVarEnv env v1 v2 eq env (Note n1 e1) (Note n2 e2) = eq_note env n1 n2 && eq env e1 e2 - eq env (Type t1) (Type t2) = t1 == t2 + eq env (Type t1) (Type t2) = t1 `eqType` t2 eq env e1 e2 = False eq_list env [] [] = True @@ -685,8 +1059,9 @@ eqExpr e1 e2 eq (extendVarEnvList env (vs1 `zip` vs2)) r1 r2 eq_note env (SCC cc1) (SCC cc2) = cc1 == cc2 - eq_note env (Coerce t1 f1) (Coerce t2 f2) = t1==t2 && f1==f2 + eq_note env (Coerce t1 f1) (Coerce t2 f2) = t1 `eqType` t2 && f1 `eqType` f2 eq_note env InlineCall InlineCall = True + eq_note env (CoreNote s1) (CoreNote s2) = s1 == s2 eq_note env other1 other2 = False \end{code} @@ -704,7 +1079,7 @@ coreBindsSize bs = foldr ((+) . bindSize) 0 bs exprSize :: CoreExpr -> Int -- A measure of the size of the expressions -- It also forces the expression pretty drastically as a side effect -exprSize (Var v) = varSize v +exprSize (Var v) = v `seq` 1 exprSize (Lit lit) = lit `seq` 1 exprSize (App f a) = exprSize f + exprSize a exprSize (Lam b e) = varSize b + exprSize e @@ -717,9 +1092,7 @@ noteSize (SCC cc) = cc `seq` 1 noteSize (Coerce t1 t2) = seqType t1 `seq` seqType t2 `seq` 1 noteSize InlineCall = 1 noteSize InlineMe = 1 -noteSize (TermUsg usg) = usg `seq` 1 - -exprsSize = foldr ((+) . exprSize) 0 +noteSize (CoreNote s) = s `seq` 1 -- hdaume: core annotations varSize :: Var -> Int varSize b | isTyVar b = 1 @@ -771,3 +1144,127 @@ fast_hash_expr other = 1 hashId :: Id -> Int hashId id = hashName (idName id) \end{code} + +%************************************************************************ +%* * +\subsection{Determining non-updatable right-hand-sides} +%* * +%************************************************************************ + +Top-level constructor applications can usually be allocated +statically, but they can't if + a) the constructor, or any of the arguments, come from another DLL + b) any of the arguments are LitLits +(because we can't refer to static labels in other DLLs). + +If this happens we simply make the RHS into an updatable thunk, +and 'exectute' it rather than allocating it statically. + +\begin{code} +rhsIsStatic :: CoreExpr -> Bool +-- This function is called only on *top-level* right-hand sides +-- Returns True if the RHS can be allocated statically, with +-- no thunks involved at all. +-- +-- It's called (i) in TidyPgm.hasCafRefs to decide if the rhs is, or +-- refers to, CAFs; and (ii) in CoreToStg to decide whether to put an +-- update flag on it. +-- +-- The basic idea is that rhsIsStatic returns True only if the RHS is +-- (a) a value lambda +-- (b) a saturated constructor application with static args +-- +-- BUT watch out for +-- (i) Any cross-DLL references kill static-ness completely +-- because they must be 'executed' not statically allocated +-- +-- (ii) We treat partial applications as redexes, because in fact we +-- make a thunk for them that runs and builds a PAP +-- at run-time. 
The only appliations that are treated as +-- static are *saturated* applications of constructors. + +-- We used to try to be clever with nested structures like this: +-- ys = (:) w ((:) w []) +-- on the grounds that CorePrep will flatten ANF-ise it later. +-- But supporting this special case made the function much more +-- complicated, because the special case only applies if there are no +-- enclosing type lambdas: +-- ys = /\ a -> Foo (Baz ([] a)) +-- Here the nested (Baz []) won't float out to top level in CorePrep. +-- +-- But in fact, even without -O, nested structures at top level are +-- flattened by the simplifier, so we don't need to be super-clever here. +-- +-- Examples +-- +-- f = \x::Int. x+7 TRUE +-- p = (True,False) TRUE +-- +-- d = (fst p, False) FALSE because there's a redex inside +-- (this particular one doesn't happen but...) +-- +-- h = D# (1.0## /## 2.0##) FALSE (redex again) +-- n = /\a. Nil a TRUE +-- +-- t = /\a. (:) (case w a of ...) (Nil a) FALSE (redex) +-- +-- +-- This is a bit like CoreUtils.exprIsValue, with the following differences: +-- a) scc "foo" (\x -> ...) is updatable (so we catch the right SCC) +-- +-- b) (C x xs), where C is a contructors is updatable if the application is +-- dynamic +-- +-- c) don't look through unfolding of f in (f x). +-- +-- When opt_RuntimeTypes is on, we keep type lambdas and treat +-- them as making the RHS re-entrant (non-updatable). + +rhsIsStatic rhs = is_static False rhs + +is_static :: Bool -- True <=> in a constructor argument; must be atomic + -> CoreExpr -> Bool + +is_static False (Lam b e) = isRuntimeVar b || is_static False e + +is_static in_arg (Note (SCC _) e) = False +is_static in_arg (Note _ e) = is_static in_arg e + +is_static in_arg (Lit lit) = not (isLitLitLit lit) + -- lit-lit arguments cannot be used in static constructors either. + -- (litlits are deprecated, so I'm not going to bother cleaning up this infelicity --SDM). + +is_static in_arg other_expr = go other_expr 0 + where + go (Var f) n_val_args + | not (isDllName (idName f)) + = saturated_data_con f n_val_args + || (in_arg && n_val_args == 0) + -- A naked un-applied variable is *not* deemed a static RHS + -- E.g. f = g + -- Reason: better to update so that the indirection gets shorted + -- out, and the true value will be seen + -- NB: if you change this, you'll break the invariant that THUNK_STATICs + -- are always updatable. If you do so, make sure that non-updatable + -- ones have enough space for their static link field! + + go (App f a) n_val_args + | isTypeArg a = go f n_val_args + | not in_arg && is_static True a = go f (n_val_args + 1) + -- The (not in_arg) checks that we aren't in a constructor argument; + -- if we are, we don't allow (value) applications of any sort + -- + -- NB. In case you wonder, args are sometimes not atomic. eg. + -- x = D# (1.0## /## 2.0##) + -- can't float because /## can fail. + + go (Note (SCC _) f) n_val_args = False + go (Note _ f) n_val_args = go f n_val_args + + go other n_val_args = False + + saturated_data_con f n_val_args + = case isDataConWorkId_maybe f of + Just dc -> n_val_args == dataConRepArity dc + Nothing -> False +\end{code}
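
To make the little table of examples above concrete at source level, here is a
hedged companion (invented module and names); the comments record what
rhsIsStatic is expected to say about the corresponding top-level Core
right-hand sides:

\begin{code}
-- Standalone illustration, mirroring the examples in the comments above.
module StaticDemo where

pairStatic :: (Bool, Bool)
pairStatic = (True, False)
  -- saturated constructor application with static arguments: static

addSeven :: Int -> Int
addSeven x = x + 7
  -- a value lambda: static

notStatic :: (Bool, Bool)
notStatic = (fst pairStatic, False)
  -- a redex (fst pairStatic) inside a constructor argument:
  -- allocated as an updatable thunk instead

halfD :: Double
halfD = 1.0 / 2.0
  -- mirrors the D# (1.0## /## 2.0##) example: the division is a redex,
  -- so (unless it is constant-folded away first) this too is a thunk
\end{code}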