-%
-% (c) The University of Glasgow 2006
-% (c) The GRASP/AQUA Project, Glasgow University, 1997-1998
-%
-% Author: Juan J. Quintela <quintela@krilin.dc.fi.udc.es>
-
-\begin{code}
-{-# OPTIONS -fno-warn-incomplete-patterns #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and fix
--- any warnings in the module. See
--- http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#Warnings
--- for details
-
-module Check ( check , ExhaustivePat ) where
-
-#include "HsVersions.h"
-
-import HsSyn
-import TcHsSyn
-import DsUtils
-import MatchLit
-import Id
-import DataCon
-import Name
-import TysWiredIn
-import PrelNames
-import TyCon
-import Type
-import Unify( dataConCannotMatch )
-import SrcLoc
-import UniqSet
-import Util
-import Outputable
-import FastString
-\end{code}
-
-This module performs checks about if one list of equations are:
-\begin{itemize}
-\item Overlapped
-\item Non exhaustive
-\end{itemize}
-To discover that we go through the list of equations in a tree-like fashion.
-
-If you like theory, a similar algorithm is described in:
-\begin{quotation}
- {\em Two Techniques for Compiling Lazy Pattern Matching},
- Luc Maranguet,
- INRIA Rocquencourt (RR-2385, 1994)
-\end{quotation}
-The algorithm is based on the first technique, but there are some differences:
-\begin{itemize}
-\item We don't generate code
-\item We have constructors and literals (not only literals as in the
- article)
-\item We don't use directions, we must select the columns from
- left-to-right
-\end{itemize}
-(By the way the second technique is really similar to the one used in
- @Match.lhs@ to generate code)
-
-This function takes the equations of a pattern and returns:
-\begin{itemize}
-\item The patterns that are not recognized
-\item The equations that are not overlapped
-\end{itemize}
-It simplify the patterns and then call @check'@ (the same semantics), and it
-needs to reconstruct the patterns again ....
-
-The problem appear with things like:
-\begin{verbatim}
- f [x,y] = ....
- f (x:xs) = .....
-\end{verbatim}
-We want to put the two patterns with the same syntax, (prefix form) and
-then all the constructors are equal:
-\begin{verbatim}
- f (: x (: y [])) = ....
- f (: x xs) = .....
-\end{verbatim}
-(more about that in @tidy_eqns@)
-
-We would prefer to have a @WarningPat@ of type @String@, but Strings and the
-Pretty Printer are not friends.
-
-We use @InPat@ in @WarningPat@ instead of @OutPat@
-because we need to print the
-warning messages in the same way they are introduced, i.e. if the user
-wrote:
-\begin{verbatim}
- f [x,y] = ..
-\end{verbatim}
-He don't want a warning message written:
-\begin{verbatim}
- f (: x (: y [])) ........
-\end{verbatim}
-Then we need to use InPats.
-\begin{quotation}
- Juan Quintela 5 JUL 1998\\
- User-friendliness and compiler writers are no friends.
-\end{quotation}
-
-\begin{code}
-type WarningPat = InPat Name
-type ExhaustivePat = ([WarningPat], [(Name, [HsLit])])
-type EqnNo = Int
-type EqnSet = UniqSet EqnNo
-
-
-check :: [EquationInfo] -> ([ExhaustivePat], [EquationInfo])
- -- Second result is the shadowed equations
- -- if there are view patterns, just give up - don't know what the function is
-check qs = (untidy_warns, shadowed_eqns)
- where
- (warns, used_nos) = check' ([1..] `zip` map tidy_eqn qs)
- untidy_warns = map untidy_exhaustive warns
- shadowed_eqns = [eqn | (eqn,i) <- qs `zip` [1..],
- not (i `elementOfUniqSet` used_nos)]
-
-untidy_exhaustive :: ExhaustivePat -> ExhaustivePat
-untidy_exhaustive ([pat], messages) =
- ([untidy_no_pars pat], map untidy_message messages)
-untidy_exhaustive (pats, messages) =
- (map untidy_pars pats, map untidy_message messages)
-
-untidy_message :: (Name, [HsLit]) -> (Name, [HsLit])
-untidy_message (string, lits) = (string, map untidy_lit lits)
-\end{code}
-
-The function @untidy@ does the reverse work of the @tidy_pat@ funcion.
-
-\begin{code}
-
-type NeedPars = Bool
-
-untidy_no_pars :: WarningPat -> WarningPat
-untidy_no_pars p = untidy False p
-
-untidy_pars :: WarningPat -> WarningPat
-untidy_pars p = untidy True p
-
-untidy :: NeedPars -> WarningPat -> WarningPat
-untidy b (L loc p) = L loc (untidy' b p)
- where
- untidy' _ p@(WildPat _) = p
- untidy' _ p@(VarPat _) = p
- untidy' _ (LitPat lit) = LitPat (untidy_lit lit)
- untidy' _ p@(ConPatIn _ (PrefixCon [])) = p
- untidy' b (ConPatIn name ps) = pars b (L loc (ConPatIn name (untidy_con ps)))
- untidy' _ (ListPat pats ty) = ListPat (map untidy_no_pars pats) ty
- untidy' _ (TuplePat pats box ty) = TuplePat (map untidy_no_pars pats) box ty
- untidy' _ (PArrPat _ _) = panic "Check.untidy: Shouldn't get a parallel array here!"
- untidy' _ (SigPatIn _ _) = panic "Check.untidy: SigPat"
-
-untidy_con :: HsConPatDetails Name -> HsConPatDetails Name
-untidy_con (PrefixCon pats) = PrefixCon (map untidy_pars pats)
-untidy_con (InfixCon p1 p2) = InfixCon (untidy_pars p1) (untidy_pars p2)
-untidy_con (RecCon (HsRecFields flds dd))
- = RecCon (HsRecFields [ fld { hsRecFieldArg = untidy_pars (hsRecFieldArg fld) }
- | fld <- flds ] dd)
-
-pars :: NeedPars -> WarningPat -> Pat Name
-pars True p = ParPat p
-pars _ p = unLoc p
-
-untidy_lit :: HsLit -> HsLit
-untidy_lit (HsCharPrim c) = HsChar c
-untidy_lit lit = lit
-\end{code}
-
-This equation is the same that check, the only difference is that the
-boring work is done, that work needs to be done only once, this is
-the reason top have two functions, check is the external interface,
-@check'@ is called recursively.
-
-There are several cases:
-
-\begin{itemize}
-\item There are no equations: Everything is OK.
-\item There are only one equation, that can fail, and all the patterns are
- variables. Then that equation is used and the same equation is
- non-exhaustive.
-\item All the patterns are variables, and the match can fail, there are
- more equations then the results is the result of the rest of equations
- and this equation is used also.
-
-\item The general case, if all the patterns are variables (here the match
- can't fail) then the result is that this equation is used and this
- equation doesn't generate non-exhaustive cases.
-
-\item In the general case, there can exist literals ,constructors or only
- vars in the first column, we actuate in consequence.
-
-\end{itemize}
-
-
-\begin{code}
-
-check' :: [(EqnNo, EquationInfo)]
- -> ([ExhaustivePat], -- Pattern scheme that might not be matched at all
- EqnSet) -- Eqns that are used (others are overlapped)
-
-check' [] = ([([],[])],emptyUniqSet)
-
-check' ((n, EqnInfo { eqn_pats = ps, eqn_rhs = MatchResult can_fail _ }) : rs)
- | first_eqn_all_vars && case can_fail of { CantFail -> True; CanFail -> False }
- = ([], unitUniqSet n) -- One eqn, which can't fail
-
- | first_eqn_all_vars && null rs -- One eqn, but it can fail
- = ([(takeList ps (repeat nlWildPat),[])], unitUniqSet n)
-
- | first_eqn_all_vars -- Several eqns, first can fail
- = (pats, addOneToUniqSet indexs n)
- where
- first_eqn_all_vars = all_vars ps
- (pats,indexs) = check' rs
-
-check' qs
- | some_literals = split_by_literals qs
- | some_constructors = split_by_constructor qs
- | only_vars = first_column_only_vars qs
- | otherwise = pprPanic "Check.check': Not implemented :-(" (ppr first_pats)
- -- Shouldn't happen
- where
- -- Note: RecPats will have been simplified to ConPats
- -- at this stage.
- first_pats = ASSERT2( okGroup qs, pprGroup qs ) map firstPatN qs
- some_constructors = any is_con first_pats
- some_literals = any is_lit first_pats
- only_vars = all is_var first_pats
-\end{code}
-
-Here begins the code to deal with literals, we need to split the matrix
-in different matrix beginning by each literal and a last matrix with the
-rest of values.
-
-\begin{code}
-split_by_literals :: [(EqnNo, EquationInfo)] -> ([ExhaustivePat], EqnSet)
-split_by_literals qs = process_literals used_lits qs
- where
- used_lits = get_used_lits qs
-\end{code}
-
-@process_explicit_literals@ is a function that process each literal that appears
-in the column of the matrix.
-
-\begin{code}
-process_explicit_literals :: [HsLit] -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)
-process_explicit_literals lits qs = (concat pats, unionManyUniqSets indexs)
- where
- pats_indexs = map (\x -> construct_literal_matrix x qs) lits
- (pats,indexs) = unzip pats_indexs
-\end{code}
-
-
-@process_literals@ calls @process_explicit_literals@ to deal with the literals
-that appears in the matrix and deal also with the rest of the cases. It
-must be one Variable to be complete.
-
-\begin{code}
-
-process_literals :: [HsLit] -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)
-process_literals used_lits qs
- | null default_eqns = ASSERT( not (null qs) ) ([make_row_vars used_lits (head qs)] ++ pats,indexs)
- | otherwise = (pats_default,indexs_default)
- where
- (pats,indexs) = process_explicit_literals used_lits qs
- default_eqns = ASSERT2( okGroup qs, pprGroup qs )
- [remove_var q | q <- qs, is_var (firstPatN q)]
- (pats',indexs') = check' default_eqns
- pats_default = [(nlWildPat:ps,constraints) | (ps,constraints) <- (pats')] ++ pats
- indexs_default = unionUniqSets indexs' indexs
-\end{code}
-
-Here we have selected the literal and we will select all the equations that
-begins for that literal and create a new matrix.
-
-\begin{code}
-construct_literal_matrix :: HsLit -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)
-construct_literal_matrix lit qs =
- (map (\ (xs,ys) -> (new_lit:xs,ys)) pats,indexs)
- where
- (pats,indexs) = (check' (remove_first_column_lit lit qs))
- new_lit = nlLitPat lit
-
-remove_first_column_lit :: HsLit
- -> [(EqnNo, EquationInfo)]
- -> [(EqnNo, EquationInfo)]
-remove_first_column_lit lit qs
- = ASSERT2( okGroup qs, pprGroup qs )
- [(n, shift_pat eqn) | q@(n,eqn) <- qs, is_var_lit lit (firstPatN q)]
- where
- shift_pat eqn@(EqnInfo { eqn_pats = _:ps}) = eqn { eqn_pats = ps }
- shift_pat _ = panic "Check.shift_var: no patterns"
-\end{code}
-
-This function splits the equations @qs@ in groups that deal with the
-same constructor.
-
-\begin{code}
-split_by_constructor :: [(EqnNo, EquationInfo)] -> ([ExhaustivePat], EqnSet)
-split_by_constructor qs
- | notNull unused_cons = need_default_case used_cons unused_cons qs
- | otherwise = no_need_default_case used_cons qs
- where
- used_cons = get_used_cons qs
- unused_cons = get_unused_cons used_cons
-\end{code}
-
-The first column of the patterns matrix only have vars, then there is
-nothing to do.
-
-\begin{code}
-first_column_only_vars :: [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)
-first_column_only_vars qs = (map (\ (xs,ys) -> (nlWildPat:xs,ys)) pats,indexs)
- where
- (pats, indexs) = check' (map remove_var qs)
-\end{code}
-
-This equation takes a matrix of patterns and split the equations by
-constructor, using all the constructors that appears in the first column
-of the pattern matching.
-
-We can need a default clause or not ...., it depends if we used all the
-constructors or not explicitly. The reasoning is similar to @process_literals@,
-the difference is that here the default case is not always needed.
-
-\begin{code}
-no_need_default_case :: [Pat Id] -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)
-no_need_default_case cons qs = (concat pats, unionManyUniqSets indexs)
- where
- pats_indexs = map (\x -> construct_matrix x qs) cons
- (pats,indexs) = unzip pats_indexs
-
-need_default_case :: [Pat Id] -> [DataCon] -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)
-need_default_case used_cons unused_cons qs
- | null default_eqns = (pats_default_no_eqns,indexs)
- | otherwise = (pats_default,indexs_default)
- where
- (pats,indexs) = no_need_default_case used_cons qs
- default_eqns = ASSERT2( okGroup qs, pprGroup qs )
- [remove_var q | q <- qs, is_var (firstPatN q)]
- (pats',indexs') = check' default_eqns
- pats_default = [(make_whole_con c:ps,constraints) |
- c <- unused_cons, (ps,constraints) <- pats'] ++ pats
- new_wilds = ASSERT( not (null qs) ) make_row_vars_for_constructor (head qs)
- pats_default_no_eqns = [(make_whole_con c:new_wilds,[]) | c <- unused_cons] ++ pats
- indexs_default = unionUniqSets indexs' indexs
-
-construct_matrix :: Pat Id -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)
-construct_matrix con qs =
- (map (make_con con) pats,indexs)
- where
- (pats,indexs) = (check' (remove_first_column con qs))
-\end{code}
-
-Here remove first column is more difficult that with literals due to the fact
-that constructors can have arguments.
-
-For instance, the matrix
-\begin{verbatim}
- (: x xs) y
- z y
-\end{verbatim}
-is transformed in:
-\begin{verbatim}
- x xs y
- _ _ y
-\end{verbatim}
-
-\begin{code}
-remove_first_column :: Pat Id -- Constructor
- -> [(EqnNo, EquationInfo)]
- -> [(EqnNo, EquationInfo)]
-remove_first_column (ConPatOut{ pat_con = L _ con, pat_args = PrefixCon con_pats }) qs
- = ASSERT2( okGroup qs, pprGroup qs )
- [(n, shift_var eqn) | q@(n, eqn) <- qs, is_var_con con (firstPatN q)]
- where
- new_wilds = [WildPat (hsLPatType arg_pat) | arg_pat <- con_pats]
- shift_var eqn@(EqnInfo { eqn_pats = ConPatOut{ pat_args = PrefixCon ps' } : ps})
- = eqn { eqn_pats = map unLoc ps' ++ ps }
- shift_var eqn@(EqnInfo { eqn_pats = WildPat _ : ps })
- = eqn { eqn_pats = new_wilds ++ ps }
- shift_var _ = panic "Check.Shift_var:No done"
-
-make_row_vars :: [HsLit] -> (EqnNo, EquationInfo) -> ExhaustivePat
-make_row_vars used_lits (_, EqnInfo { eqn_pats = pats})
- = (nlVarPat new_var:takeList (tail pats) (repeat nlWildPat),[(new_var,used_lits)])
- where
- new_var = hash_x
-
-hash_x :: Name
-hash_x = mkInternalName unboundKey {- doesn't matter much -}
- (mkVarOccFS (fsLit "#x"))
- noSrcSpan
-
-make_row_vars_for_constructor :: (EqnNo, EquationInfo) -> [WarningPat]
-make_row_vars_for_constructor (_, EqnInfo { eqn_pats = pats})
- = takeList (tail pats) (repeat nlWildPat)
-
-compare_cons :: Pat Id -> Pat Id -> Bool
-compare_cons (ConPatOut{ pat_con = L _ id1 }) (ConPatOut { pat_con = L _ id2 }) = id1 == id2
-
-remove_dups :: [Pat Id] -> [Pat Id]
-remove_dups [] = []
-remove_dups (x:xs) | or (map (\y -> compare_cons x y) xs) = remove_dups xs
- | otherwise = x : remove_dups xs
-
-get_used_cons :: [(EqnNo, EquationInfo)] -> [Pat Id]
-get_used_cons qs = remove_dups [pat | q <- qs, let pat = firstPatN q,
- isConPatOut pat]
-
-isConPatOut :: Pat Id -> Bool
-isConPatOut (ConPatOut {}) = True
-isConPatOut _ = False
-
-remove_dups' :: [HsLit] -> [HsLit]
-remove_dups' [] = []
-remove_dups' (x:xs) | x `elem` xs = remove_dups' xs
- | otherwise = x : remove_dups' xs
-
-
-get_used_lits :: [(EqnNo, EquationInfo)] -> [HsLit]
-get_used_lits qs = remove_dups' all_literals
- where
- all_literals = get_used_lits' qs
-
-get_used_lits' :: [(EqnNo, EquationInfo)] -> [HsLit]
-get_used_lits' [] = []
-get_used_lits' (q:qs)
- | Just lit <- get_lit (firstPatN q) = lit : get_used_lits' qs
- | otherwise = get_used_lits qs
-
-get_lit :: Pat id -> Maybe HsLit
--- Get a representative HsLit to stand for the OverLit
--- It doesn't matter which one, because they will only be compared
--- with other HsLits gotten in the same way
-get_lit (LitPat lit) = Just lit
-get_lit (NPat (OverLit { ol_val = HsIntegral i}) mb _) = Just (HsIntPrim (mb_neg mb i))
-get_lit (NPat (OverLit { ol_val = HsFractional f }) mb _) = Just (HsFloatPrim (mb_neg mb f))
-get_lit (NPat (OverLit { ol_val = HsIsString s }) _ _) = Just (HsStringPrim s)
-get_lit _ = Nothing
-
-mb_neg :: Num a => Maybe b -> a -> a
-mb_neg Nothing v = v
-mb_neg (Just _) v = -v
-
-get_unused_cons :: [Pat Id] -> [DataCon]
-get_unused_cons used_cons = ASSERT( not (null used_cons) ) unused_cons
- where
- used_set :: UniqSet DataCon
- used_set = mkUniqSet [d | ConPatOut{ pat_con = L _ d} <- used_cons]
- (ConPatOut { pat_ty = ty }) = head used_cons
- Just (ty_con, inst_tys) = splitTyConApp_maybe ty
- unused_cons = filterOut is_used (tyConDataCons ty_con)
- is_used con = con `elementOfUniqSet` used_set
- || dataConCannotMatch inst_tys con
-
-all_vars :: [Pat Id] -> Bool
-all_vars [] = True
-all_vars (WildPat _:ps) = all_vars ps
-all_vars _ = False
-
-remove_var :: (EqnNo, EquationInfo) -> (EqnNo, EquationInfo)
-remove_var (n, eqn@(EqnInfo { eqn_pats = WildPat _ : ps})) = (n, eqn { eqn_pats = ps })
-remove_var _ = panic "Check.remove_var: equation does not begin with a variable"
-
------------------------
-eqnPats :: (EqnNo, EquationInfo) -> [Pat Id]
-eqnPats (_, eqn) = eqn_pats eqn
-
-okGroup :: [(EqnNo, EquationInfo)] -> Bool
--- True if all equations have at least one pattern, and
--- all have the same number of patterns
-okGroup [] = True
-okGroup (e:es) = n_pats > 0 && and [length (eqnPats e) == n_pats | e <- es]
- where
- n_pats = length (eqnPats e)
-
--- Half-baked print
-pprGroup :: [(EqnNo, EquationInfo)] -> SDoc
-pprEqnInfo :: (EqnNo, EquationInfo) -> SDoc
-pprGroup es = vcat (map pprEqnInfo es)
-pprEqnInfo e = ppr (eqnPats e)
-
-
-firstPatN :: (EqnNo, EquationInfo) -> Pat Id
-firstPatN (_, eqn) = firstPat eqn
-
-is_con :: Pat Id -> Bool
-is_con (ConPatOut {}) = True
-is_con _ = False
-
-is_lit :: Pat Id -> Bool
-is_lit (LitPat _) = True
-is_lit (NPat _ _ _) = True
-is_lit _ = False
-
-is_var :: Pat Id -> Bool
-is_var (WildPat _) = True
-is_var _ = False
-
-is_var_con :: DataCon -> Pat Id -> Bool
-is_var_con _ (WildPat _) = True
-is_var_con con (ConPatOut{ pat_con = L _ id }) | id == con = True
-is_var_con _ _ = False
-
-is_var_lit :: HsLit -> Pat Id -> Bool
-is_var_lit _ (WildPat _) = True
-is_var_lit lit pat
- | Just lit' <- get_lit pat = lit == lit'
- | otherwise = False
-\end{code}
-
-The difference beteewn @make_con@ and @make_whole_con@ is that
-@make_wole_con@ creates a new constructor with all their arguments, and
-@make_con@ takes a list of argumntes, creates the contructor getting their
-arguments from the list. See where \fbox{\ ???\ } are used for details.
-
-We need to reconstruct the patterns (make the constructors infix and
-similar) at the same time that we create the constructors.
-
-You can tell tuple constructors using
-\begin{verbatim}
- Id.isTupleCon
-\end{verbatim}
-You can see if one constructor is infix with this clearer code :-))))))))))
-\begin{verbatim}
- Lex.isLexConSym (Name.occNameString (Name.getOccName con))
-\end{verbatim}
-
- Rather clumsy but it works. (Simon Peyton Jones)
-
-
-We don't mind the @nilDataCon@ because it doesn't change the way to
-print the messsage, we are searching only for things like: @[1,2,3]@,
-not @x:xs@ ....
-
-In @reconstruct_pat@ we want to ``undo'' the work
-that we have done in @tidy_pat@.
-In particular:
-\begin{tabular}{lll}
- @((,) x y)@ & returns to be & @(x, y)@
-\\ @((:) x xs)@ & returns to be & @(x:xs)@
-\\ @(x:(...:[])@ & returns to be & @[x,...]@
-\end{tabular}
-%
-The difficult case is the third one becouse we need to follow all the
-contructors until the @[]@ to know that we need to use the second case,
-not the second. \fbox{\ ???\ }
-%
-\begin{code}
-isInfixCon :: DataCon -> Bool
-isInfixCon con = isDataSymOcc (getOccName con)
-
-is_nil :: Pat Name -> Bool
-is_nil (ConPatIn con (PrefixCon [])) = unLoc con == getName nilDataCon
-is_nil _ = False
-
-is_list :: Pat Name -> Bool
-is_list (ListPat _ _) = True
-is_list _ = False
-
-return_list :: DataCon -> Pat Name -> Bool
-return_list id q = id == consDataCon && (is_nil q || is_list q)
-
-make_list :: LPat Name -> Pat Name -> Pat Name
-make_list p q | is_nil q = ListPat [p] placeHolderType
-make_list p (ListPat ps ty) = ListPat (p:ps) ty
-make_list _ _ = panic "Check.make_list: Invalid argument"
-
-make_con :: Pat Id -> ExhaustivePat -> ExhaustivePat
-make_con (ConPatOut{ pat_con = L _ id }) (lp:lq:ps, constraints)
- | return_list id q = (noLoc (make_list lp q) : ps, constraints)
- | isInfixCon id = (nlInfixConPat (getName id) lp lq : ps, constraints)
- where q = unLoc lq
-
-make_con (ConPatOut{ pat_con = L _ id, pat_args = PrefixCon pats, pat_ty = ty }) (ps, constraints)
- | isTupleTyCon tc = (noLoc (TuplePat pats_con (tupleTyConBoxity tc) ty) : rest_pats, constraints)
- | isPArrFakeCon id = (noLoc (PArrPat pats_con placeHolderType) : rest_pats, constraints)
- | otherwise = (nlConPat name pats_con : rest_pats, constraints)
- where
- name = getName id
- (pats_con, rest_pats) = splitAtList pats ps
- tc = dataConTyCon id
-
--- reconstruct parallel array pattern
---
--- * don't check for the type only; we need to make sure that we are really
--- dealing with one of the fake constructors and not with the real
--- representation
-
-make_whole_con :: DataCon -> WarningPat
-make_whole_con con | isInfixCon con = nlInfixConPat name nlWildPat nlWildPat
- | otherwise = nlConPat name pats
- where
- name = getName con
- pats = [nlWildPat | _ <- dataConOrigArgTys con]
-\end{code}
-
-------------------------------------------------------------------------
- Tidying equations
-------------------------------------------------------------------------
-
-tidy_eqn does more or less the same thing as @tidy@ in @Match.lhs@;
-that is, it removes syntactic sugar, reducing the number of cases that
-must be handled by the main checking algorithm. One difference is
-that here we can do *all* the tidying at once (recursively), rather
-than doing it incrementally.
-
-\begin{code}
-tidy_eqn :: EquationInfo -> EquationInfo
-tidy_eqn eqn = eqn { eqn_pats = map tidy_pat (eqn_pats eqn),
- eqn_rhs = tidy_rhs (eqn_rhs eqn) }
- where
- -- Horrible hack. The tidy_pat stuff converts "might-fail" patterns to
- -- WildPats which of course loses the info that they can fail to match.
- -- So we stick in a CanFail as if it were a guard.
- tidy_rhs (MatchResult can_fail body)
- | any might_fail_pat (eqn_pats eqn) = MatchResult CanFail body
- | otherwise = MatchResult can_fail body
-
---------------
-might_fail_pat :: Pat Id -> Bool
--- Returns True of patterns that might fail (i.e. fall through) in a way
--- that is not covered by the checking algorithm. Specifically:
--- NPlusKPat
--- ViewPat (if refutable)
-
--- First the two special cases
-might_fail_pat (NPlusKPat {}) = True
-might_fail_pat (ViewPat _ p _) = not (isIrrefutableHsPat p)
-
--- Now the recursive stuff
-might_fail_pat (ParPat p) = might_fail_lpat p
-might_fail_pat (AsPat _ p) = might_fail_lpat p
-might_fail_pat (SigPatOut p _ ) = might_fail_lpat p
-might_fail_pat (ListPat ps _) = any might_fail_lpat ps
-might_fail_pat (TuplePat ps _ _) = any might_fail_lpat ps
-might_fail_pat (PArrPat ps _) = any might_fail_lpat ps
-might_fail_pat (BangPat p) = might_fail_lpat p
-might_fail_pat (ConPatOut { pat_args = ps }) = any might_fail_lpat (hsConPatArgs ps)
-
--- Finally the ones that are sure to succeed, or which are covered by the checking algorithm
-might_fail_pat (LazyPat _) = False -- Always succeeds
-might_fail_pat _ = False -- VarPat, WildPat, LitPat, NPat, TypePat
-
---------------
-might_fail_lpat :: LPat Id -> Bool
-might_fail_lpat (L _ p) = might_fail_pat p
-
---------------
-tidy_lpat :: LPat Id -> LPat Id
-tidy_lpat p = fmap tidy_pat p
-
---------------
-tidy_pat :: Pat Id -> Pat Id
-tidy_pat pat@(WildPat _) = pat
-tidy_pat (VarPat id) = WildPat (idType id)
-tidy_pat (ParPat p) = tidy_pat (unLoc p)
-tidy_pat (LazyPat p) = WildPat (hsLPatType p) -- For overlap and exhaustiveness checking
- -- purposes, a ~pat is like a wildcard
-tidy_pat (BangPat p) = tidy_pat (unLoc p)
-tidy_pat (AsPat _ p) = tidy_pat (unLoc p)
-tidy_pat (SigPatOut p _) = tidy_pat (unLoc p)
-tidy_pat (CoPat _ pat _) = tidy_pat pat
-
--- These two are might_fail patterns, so we map them to
--- WildPats. The might_fail_pat stuff arranges that the
--- guard says "this equation might fall through".
-tidy_pat (NPlusKPat id _ _ _) = WildPat (idType (unLoc id))
-tidy_pat (ViewPat _ _ ty) = WildPat ty
-
-tidy_pat (NPat lit mb_neg eq) = tidyNPat lit mb_neg eq
-
-tidy_pat pat@(ConPatOut { pat_con = L _ id, pat_args = ps })
- = pat { pat_args = tidy_con id ps }
-
-tidy_pat (ListPat ps ty)
- = unLoc $ foldr (\ x y -> mkPrefixConPat consDataCon [x,y] list_ty)
- (mkNilPat list_ty)
- (map tidy_lpat ps)
- where list_ty = mkListTy ty
-
--- introduce fake parallel array constructors to be able to handle parallel
--- arrays with the existing machinery for constructor pattern
---
-tidy_pat (PArrPat ps ty)
- = unLoc $ mkPrefixConPat (parrFakeCon (length ps))
- (map tidy_lpat ps)
- (mkPArrTy ty)
-
-tidy_pat (TuplePat ps boxity ty)
- = unLoc $ mkPrefixConPat (tupleCon boxity arity)
- (map tidy_lpat ps) ty
- where
- arity = length ps
-
--- Unpack string patterns fully, so we can see when they overlap with
--- each other, or even explicit lists of Chars.
-tidy_pat (LitPat lit)
- | HsString s <- lit
- = unLoc $ foldr (\c pat -> mkPrefixConPat consDataCon [mk_char_lit c, pat] stringTy)
- (mkPrefixConPat nilDataCon [] stringTy) (unpackFS s)
- | otherwise
- = tidyLitPat lit
- where
- mk_char_lit c = mkPrefixConPat charDataCon [nlLitPat (HsCharPrim c)] charTy
-
------------------
-tidy_con :: DataCon -> HsConPatDetails Id -> HsConPatDetails Id
-tidy_con _ (PrefixCon ps) = PrefixCon (map tidy_lpat ps)
-tidy_con _ (InfixCon p1 p2) = PrefixCon [tidy_lpat p1, tidy_lpat p2]
-tidy_con con (RecCon (HsRecFields fs _))
- | null fs = PrefixCon [nlWildPat | _ <- dataConOrigArgTys con]
- -- Special case for null patterns; maybe not a record at all
- | otherwise = PrefixCon (map (tidy_lpat.snd) all_pats)
- where
- -- pad out all the missing fields with WildPats.
- field_pats = map (\ f -> (f, nlWildPat)) (dataConFieldLabels con)
- all_pats = foldr (\(HsRecField id p _) acc -> insertNm (getName (unLoc id)) p acc)
- field_pats fs
-
- insertNm nm p [] = [(nm,p)]
- insertNm nm p (x@(n,_):xs)
- | nm == n = (nm,p):xs
- | otherwise = x : insertNm nm p xs
-\end{code}
+%\r
+% (c) The University of Glasgow 2006\r
+% (c) The GRASP/AQUA Project, Glasgow University, 1997-1998\r
+%\r
+% Author: Juan J. Quintela <quintela@krilin.dc.fi.udc.es>\r
+\r
+\begin{code}\r
+{-# OPTIONS -fno-warn-incomplete-patterns #-}\r
+-- The above warning suppression flag is a temporary kludge.\r
+-- While working on this module you are encouraged to remove it and fix\r
+-- any warnings in the module. See\r
+-- http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#Warnings\r
+-- for details\r
+\r
+module Check ( check , ExhaustivePat ) where\r
+\r
+#include "HsVersions.h"\r
+\r
+import HsSyn \r
+import TcHsSyn\r
+import DsUtils\r
+import MatchLit\r
+import Id\r
+import DataCon\r
+import Name\r
+import TysWiredIn\r
+import PrelNames\r
+import TyCon\r
+import Type\r
+import Unify( dataConCannotMatch )\r
+import SrcLoc\r
+import UniqSet\r
+import Util\r
+import Outputable\r
+import FastString\r
+\end{code}\r
+\r
+This module performs checks about if one list of equations are:\r
+\begin{itemize}\r
+\item Overlapped\r
+\item Non exhaustive\r
+\end{itemize}\r
+To discover that we go through the list of equations in a tree-like fashion.\r
+\r
+If you like theory, a similar algorithm is described in:\r
+\begin{quotation}\r
+ {\em Two Techniques for Compiling Lazy Pattern Matching},\r
+ Luc Maranguet,\r
+ INRIA Rocquencourt (RR-2385, 1994)\r
+\end{quotation}\r
+The algorithm is based on the first technique, but there are some differences:\r
+\begin{itemize}\r
+\item We don't generate code\r
+\item We have constructors and literals (not only literals as in the \r
+ article)\r
+\item We don't use directions, we must select the columns from \r
+ left-to-right\r
+\end{itemize}\r
+(By the way the second technique is really similar to the one used in \r
+ @Match.lhs@ to generate code)\r
+\r
+This function takes the equations of a pattern and returns:\r
+\begin{itemize}\r
+\item The patterns that are not recognized\r
+\item The equations that are not overlapped\r
+\end{itemize}\r
+It simplify the patterns and then call @check'@ (the same semantics), and it \r
+needs to reconstruct the patterns again ....\r
+\r
+The problem appear with things like:\r
+\begin{verbatim}\r
+ f [x,y] = ....\r
+ f (x:xs) = .....\r
+\end{verbatim}\r
+We want to put the two patterns with the same syntax, (prefix form) and \r
+then all the constructors are equal:\r
+\begin{verbatim}\r
+ f (: x (: y [])) = ....\r
+ f (: x xs) = .....\r
+\end{verbatim}\r
+(more about that in @tidy_eqns@)\r
+\r
+We would prefer to have a @WarningPat@ of type @String@, but Strings and the \r
+Pretty Printer are not friends.\r
+\r
+We use @InPat@ in @WarningPat@ instead of @OutPat@\r
+because we need to print the \r
+warning messages in the same way they are introduced, i.e. if the user \r
+wrote:\r
+\begin{verbatim}\r
+ f [x,y] = ..\r
+\end{verbatim}\r
+He don't want a warning message written:\r
+\begin{verbatim}\r
+ f (: x (: y [])) ........\r
+\end{verbatim}\r
+Then we need to use InPats.\r
+\begin{quotation}\r
+ Juan Quintela 5 JUL 1998\\\r
+ User-friendliness and compiler writers are no friends.\r
+\end{quotation}\r
+\r
+\begin{code}\r
+type WarningPat = InPat Name\r
+type ExhaustivePat = ([WarningPat], [(Name, [HsLit])])\r
+type EqnNo = Int\r
+type EqnSet = UniqSet EqnNo\r
+\r
+\r
+check :: [EquationInfo] -> ([ExhaustivePat], [EquationInfo])\r
+ -- Second result is the shadowed equations\r
+ -- if there are view patterns, just give up - don't know what the function is\r
+check qs = (untidy_warns, shadowed_eqns)\r
+      where\r
+        tidy_qs           = map tidy_eqn qs\r
+        (warns, used_nos) = check' ([1..] `zip` tidy_qs)\r
+ untidy_warns = map untidy_exhaustive warns \r
+ shadowed_eqns = [eqn | (eqn,i) <- qs `zip` [1..], \r
+ not (i `elementOfUniqSet` used_nos)]\r
+\r
+untidy_exhaustive :: ExhaustivePat -> ExhaustivePat\r
+untidy_exhaustive ([pat], messages) = \r
+ ([untidy_no_pars pat], map untidy_message messages)\r
+untidy_exhaustive (pats, messages) = \r
+ (map untidy_pars pats, map untidy_message messages)\r
+\r
+untidy_message :: (Name, [HsLit]) -> (Name, [HsLit])\r
+untidy_message (string, lits) = (string, map untidy_lit lits)\r
+\end{code}\r
+\r
+The function @untidy@ does the reverse work of the @tidy_pat@ function.\r
+\r
+\begin{code}\r
+\r
+type NeedPars = Bool \r
+\r
+untidy_no_pars :: WarningPat -> WarningPat\r
+untidy_no_pars p = untidy False p\r
+\r
+untidy_pars :: WarningPat -> WarningPat\r
+untidy_pars p = untidy True p\r
+\r
+untidy :: NeedPars -> WarningPat -> WarningPat\r
+untidy b (L loc p) = L loc (untidy' b p)\r
+ where\r
+ untidy' _ p@(WildPat _) = p\r
+ untidy' _ p@(VarPat _) = p\r
+ untidy' _ (LitPat lit) = LitPat (untidy_lit lit)\r
+ untidy' _ p@(ConPatIn _ (PrefixCon [])) = p\r
+ untidy' b (ConPatIn name ps) = pars b (L loc (ConPatIn name (untidy_con ps)))\r
+ untidy' _ (ListPat pats ty) = ListPat (map untidy_no_pars pats) ty\r
+ untidy' _ (TuplePat pats box ty) = TuplePat (map untidy_no_pars pats) box ty\r
+ untidy' _ (PArrPat _ _) = panic "Check.untidy: Shouldn't get a parallel array here!"\r
+ untidy' _ (SigPatIn _ _) = panic "Check.untidy: SigPat"\r
+\r
+untidy_con :: HsConPatDetails Name -> HsConPatDetails Name\r
+untidy_con (PrefixCon pats) = PrefixCon (map untidy_pars pats) \r
+untidy_con (InfixCon p1 p2) = InfixCon (untidy_pars p1) (untidy_pars p2)\r
+untidy_con (RecCon (HsRecFields flds dd)) \r
+ = RecCon (HsRecFields [ fld { hsRecFieldArg = untidy_pars (hsRecFieldArg fld) }\r
+ | fld <- flds ] dd)\r
+\r
+pars :: NeedPars -> WarningPat -> Pat Name\r
+pars True p = ParPat p\r
+pars _ p = unLoc p\r
+\r
+untidy_lit :: HsLit -> HsLit\r
+untidy_lit (HsCharPrim c) = HsChar c\r
+untidy_lit lit = lit\r
+\end{code}\r
+\r
+This equation is the same that check, the only difference is that the\r
+boring work is done, that work needs to be done only once, this is\r
+the reason to have two functions, check is the external interface,\r
+@check'@ is called recursively.\r
+\r
+There are several cases:\r
+\r
+\begin{itemize} \r
+\item There are no equations: Everything is OK. \r
+\item There is only one equation, which can fail, and all the patterns are\r
+ variables. Then that equation is used and the same equation is\r
+ non-exhaustive.\r
+\item All the patterns are variables, the match can fail, and there are\r
+ more equations; then the result is the result of the rest of the equations,\r
+ and this equation is used as well.\r
+\r
+\item The general case, if all the patterns are variables (here the match \r
+ can't fail) then the result is that this equation is used and this \r
+ equation doesn't generate non-exhaustive cases.\r
+\r
+\item In the general case, there can be literals, constructors, or only\r
+ variables in the first column; we act accordingly.\r
+\r
+\end{itemize}\r
+\r
+\r
+\begin{code}\r
+\r
+check' :: [(EqnNo, EquationInfo)] \r
+ -> ([ExhaustivePat], -- Pattern scheme that might not be matched at all\r
+ EqnSet) -- Eqns that are used (others are overlapped)\r
+\r
+check' [] = ([([],[])],emptyUniqSet)\r
+\r
+check' ((n, EqnInfo { eqn_pats = ps, eqn_rhs = MatchResult can_fail _ }) : rs) \r
+ | first_eqn_all_vars && case can_fail of { CantFail -> True; CanFail -> False }\r
+ = ([], unitUniqSet n) -- One eqn, which can't fail\r
+\r
+ | first_eqn_all_vars && null rs -- One eqn, but it can fail\r
+ = ([(takeList ps (repeat nlWildPat),[])], unitUniqSet n)\r
+\r
+ | first_eqn_all_vars -- Several eqns, first can fail\r
+ = (pats, addOneToUniqSet indexs n)\r
+ where\r
+ first_eqn_all_vars = all_vars ps\r
+ (pats,indexs) = check' rs\r
+\r
+check' qs\r
+ | some_literals = split_by_literals qs\r
+ | some_constructors = split_by_constructor qs\r
+ | only_vars = first_column_only_vars qs\r
+ | otherwise = pprPanic "Check.check': Not implemented :-(" (ppr first_pats)\r
+ -- Shouldn't happen\r
+ where\r
+ -- Note: RecPats will have been simplified to ConPats\r
+ -- at this stage.\r
+ first_pats = ASSERT2( okGroup qs, pprGroup qs ) map firstPatN qs\r
+ some_constructors = any is_con first_pats\r
+ some_literals = any is_lit first_pats\r
+ only_vars = all is_var first_pats\r
+\end{code}\r
+\r
+Here begins the code to deal with literals; we need to split the matrix\r
+into different matrices, one beginning with each literal, and a last matrix\r
+with the rest of the values.\r
+\r
+\begin{code}\r
+split_by_literals :: [(EqnNo, EquationInfo)] -> ([ExhaustivePat], EqnSet)\r
+split_by_literals qs = process_literals used_lits qs\r
+ where\r
+ used_lits = get_used_lits qs\r
+\end{code}\r
+\r
+@process_explicit_literals@ is a function that processes each literal that\r
+appears in the column of the matrix.\r
+\r
+\begin{code}\r
+process_explicit_literals :: [HsLit] -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)\r
+process_explicit_literals lits qs = (concat pats, unionManyUniqSets indexs)\r
+ where \r
+ pats_indexs = map (\x -> construct_literal_matrix x qs) lits\r
+ (pats,indexs) = unzip pats_indexs \r
+\end{code}\r
+\r
+\r
+@process_literals@ calls @process_explicit_literals@ to deal with the literals\r
+that appear in the matrix, and it also deals with the rest of the cases. There\r
+must be one variable for the match to be complete.\r
+\r
+\begin{code}\r
+\r
+process_literals :: [HsLit] -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)\r
+process_literals used_lits qs \r
+ | null default_eqns = ASSERT( not (null qs) ) ([make_row_vars used_lits (head qs)] ++ pats,indexs)\r
+ | otherwise = (pats_default,indexs_default)\r
+ where\r
+ (pats,indexs) = process_explicit_literals used_lits qs\r
+ default_eqns = ASSERT2( okGroup qs, pprGroup qs ) \r
+ [remove_var q | q <- qs, is_var (firstPatN q)]\r
+ (pats',indexs') = check' default_eqns \r
+ pats_default = [(nlWildPat:ps,constraints) | (ps,constraints) <- (pats')] ++ pats \r
+ indexs_default = unionUniqSets indexs' indexs\r
+\end{code}\r
+\r
+Here we have selected the literal; we will select all the equations that\r
+begin with that literal and create a new matrix.\r
+\r
+\begin{code}\r
+construct_literal_matrix :: HsLit -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)\r
+construct_literal_matrix lit qs =\r
+ (map (\ (xs,ys) -> (new_lit:xs,ys)) pats,indexs) \r
+ where\r
+ (pats,indexs) = (check' (remove_first_column_lit lit qs)) \r
+ new_lit = nlLitPat lit\r
+\r
+remove_first_column_lit :: HsLit\r
+ -> [(EqnNo, EquationInfo)] \r
+ -> [(EqnNo, EquationInfo)]\r
+remove_first_column_lit lit qs\r
+ = ASSERT2( okGroup qs, pprGroup qs ) \r
+ [(n, shift_pat eqn) | q@(n,eqn) <- qs, is_var_lit lit (firstPatN q)]\r
+ where\r
+ shift_pat eqn@(EqnInfo { eqn_pats = _:ps}) = eqn { eqn_pats = ps }\r
+ shift_pat _ = panic "Check.shift_var: no patterns"\r
+\end{code}\r
+\r
+This function splits the equations @qs@ into groups that deal with the\r
+same constructor.\r
+\r
+\begin{code}\r
+split_by_constructor :: [(EqnNo, EquationInfo)] -> ([ExhaustivePat], EqnSet)\r
+split_by_constructor qs \r
+ | notNull unused_cons = need_default_case used_cons unused_cons qs \r
+ | otherwise = no_need_default_case used_cons qs \r
+ where \r
+ used_cons = get_used_cons qs \r
+ unused_cons = get_unused_cons used_cons \r
+\end{code}\r
+\r
+The first column of the pattern matrix has only variables, so there is\r
+nothing to do.\r
+\r
+\begin{code}\r
+first_column_only_vars :: [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)\r
+first_column_only_vars qs = (map (\ (xs,ys) -> (nlWildPat:xs,ys)) pats,indexs)\r
+ where\r
+ (pats, indexs) = check' (map remove_var qs)\r
+\end{code}\r
+\r
+This function takes a matrix of patterns and splits the equations by\r
+constructor, using all the constructors that appear in the first column\r
+of the pattern matching.\r
+\r
+We may or may not need a default clause; it depends on whether we explicitly\r
+used all the constructors. The reasoning is similar to @process_literals@;\r
+the difference is that here the default case is not always needed.\r
+\r
+\begin{code}\r
+no_need_default_case :: [Pat Id] -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)\r
+no_need_default_case cons qs = (concat pats, unionManyUniqSets indexs)\r
+ where \r
+ pats_indexs = map (\x -> construct_matrix x qs) cons\r
+ (pats,indexs) = unzip pats_indexs \r
+\r
+need_default_case :: [Pat Id] -> [DataCon] -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)\r
+need_default_case used_cons unused_cons qs \r
+ | null default_eqns = (pats_default_no_eqns,indexs)\r
+ | otherwise = (pats_default,indexs_default)\r
+ where\r
+ (pats,indexs) = no_need_default_case used_cons qs\r
+ default_eqns = ASSERT2( okGroup qs, pprGroup qs ) \r
+ [remove_var q | q <- qs, is_var (firstPatN q)]\r
+ (pats',indexs') = check' default_eqns \r
+ pats_default = [(make_whole_con c:ps,constraints) | \r
+ c <- unused_cons, (ps,constraints) <- pats'] ++ pats\r
+ new_wilds = ASSERT( not (null qs) ) make_row_vars_for_constructor (head qs)\r
+ pats_default_no_eqns = [(make_whole_con c:new_wilds,[]) | c <- unused_cons] ++ pats\r
+ indexs_default = unionUniqSets indexs' indexs\r
+\r
+construct_matrix :: Pat Id -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)\r
+construct_matrix con qs =\r
+ (map (make_con con) pats,indexs) \r
+ where\r
+ (pats,indexs) = (check' (remove_first_column con qs)) \r
+\end{code}\r
+\r
+Here removing the first column is more difficult than with literals, due to\r
+the fact that constructors can have arguments.\r
+\r
+For instance, the matrix\r
+\begin{verbatim}\r
+ (: x xs) y\r
+ z y\r
+\end{verbatim}\r
+is transformed into:\r
+\begin{verbatim}\r
+ x xs y\r
+ _ _ y\r
+\end{verbatim}\r
+\r
+\begin{code}\r
+remove_first_column :: Pat Id -- Constructor \r
+ -> [(EqnNo, EquationInfo)] \r
+ -> [(EqnNo, EquationInfo)]\r
+remove_first_column (ConPatOut{ pat_con = L _ con, pat_args = PrefixCon con_pats }) qs\r
+ = ASSERT2( okGroup qs, pprGroup qs ) \r
+ [(n, shift_var eqn) | q@(n, eqn) <- qs, is_var_con con (firstPatN q)]\r
+ where\r
+ new_wilds = [WildPat (hsLPatType arg_pat) | arg_pat <- con_pats]\r
+ shift_var eqn@(EqnInfo { eqn_pats = ConPatOut{ pat_args = PrefixCon ps' } : ps}) \r
+ = eqn { eqn_pats = map unLoc ps' ++ ps }\r
+ shift_var eqn@(EqnInfo { eqn_pats = WildPat _ : ps })\r
+ = eqn { eqn_pats = new_wilds ++ ps }\r
+ shift_var _ = panic "Check.Shift_var:No done"\r
+\r
+make_row_vars :: [HsLit] -> (EqnNo, EquationInfo) -> ExhaustivePat\r
+make_row_vars used_lits (_, EqnInfo { eqn_pats = pats})\r
+ = (nlVarPat new_var:takeList (tail pats) (repeat nlWildPat),[(new_var,used_lits)])\r
+ where \r
+ new_var = hash_x\r
+\r
+hash_x :: Name\r
+hash_x = mkInternalName unboundKey {- doesn't matter much -}\r
+ (mkVarOccFS (fsLit "#x"))\r
+ noSrcSpan\r
+\r
+make_row_vars_for_constructor :: (EqnNo, EquationInfo) -> [WarningPat]\r
+make_row_vars_for_constructor (_, EqnInfo { eqn_pats = pats}) \r
+ = takeList (tail pats) (repeat nlWildPat)\r
+\r
+compare_cons :: Pat Id -> Pat Id -> Bool\r
+compare_cons (ConPatOut{ pat_con = L _ id1 }) (ConPatOut { pat_con = L _ id2 }) = id1 == id2 \r
+\r
+remove_dups :: [Pat Id] -> [Pat Id]\r
+remove_dups [] = []\r
+remove_dups (x:xs) | or (map (\y -> compare_cons x y) xs) = remove_dups xs\r
+ | otherwise = x : remove_dups xs\r
+\r
+get_used_cons :: [(EqnNo, EquationInfo)] -> [Pat Id]\r
+get_used_cons qs = remove_dups [pat | q <- qs, let pat = firstPatN q, \r
+ isConPatOut pat]\r
+\r
+isConPatOut :: Pat Id -> Bool\r
+isConPatOut (ConPatOut {}) = True\r
+isConPatOut _ = False\r
+\r
+remove_dups' :: [HsLit] -> [HsLit] \r
+remove_dups' [] = []\r
+remove_dups' (x:xs) | x `elem` xs = remove_dups' xs\r
+ | otherwise = x : remove_dups' xs \r
+\r
+\r
+get_used_lits :: [(EqnNo, EquationInfo)] -> [HsLit]\r
+get_used_lits qs = remove_dups' all_literals\r
+ where\r
+ all_literals = get_used_lits' qs\r
+\r
+get_used_lits' :: [(EqnNo, EquationInfo)] -> [HsLit]\r
+get_used_lits' [] = []\r
+get_used_lits' (q:qs) \r
+ | Just lit <- get_lit (firstPatN q) = lit : get_used_lits' qs\r
+ | otherwise = get_used_lits qs\r
+\r
+get_lit :: Pat id -> Maybe HsLit \r
+-- Get a representative HsLit to stand for the OverLit\r
+-- It doesn't matter which one, because they will only be compared\r
+-- with other HsLits gotten in the same way\r
+get_lit (LitPat lit) = Just lit\r
+get_lit (NPat (OverLit { ol_val = HsIntegral i}) mb _) = Just (HsIntPrim (mb_neg mb i))\r
+get_lit (NPat (OverLit { ol_val = HsFractional f }) mb _) = Just (HsFloatPrim (mb_neg mb f))\r
+get_lit (NPat (OverLit { ol_val = HsIsString s }) _ _) = Just (HsStringPrim s)\r
+get_lit _ = Nothing\r
+\r
+mb_neg :: Num a => Maybe b -> a -> a\r
+mb_neg Nothing v = v\r
+mb_neg (Just _) v = -v\r
+\r
+get_unused_cons :: [Pat Id] -> [DataCon]\r
+get_unused_cons used_cons = ASSERT( not (null used_cons) ) unused_cons\r
+ where\r
+ used_set :: UniqSet DataCon\r
+ used_set = mkUniqSet [d | ConPatOut{ pat_con = L _ d} <- used_cons]\r
+ (ConPatOut { pat_ty = ty }) = head used_cons\r
+ Just (ty_con, inst_tys) = splitTyConApp_maybe ty\r
+ unused_cons = filterOut is_used (tyConDataCons ty_con)\r
+ is_used con = con `elementOfUniqSet` used_set\r
+ || dataConCannotMatch inst_tys con\r
+\r
+all_vars :: [Pat Id] -> Bool\r
+all_vars [] = True\r
+all_vars (WildPat _:ps) = all_vars ps\r
+all_vars _ = False\r
+\r
+remove_var :: (EqnNo, EquationInfo) -> (EqnNo, EquationInfo)\r
+remove_var (n, eqn@(EqnInfo { eqn_pats = WildPat _ : ps})) = (n, eqn { eqn_pats = ps })\r
+remove_var _ = panic "Check.remove_var: equation does not begin with a variable"\r
+\r
+-----------------------\r
+eqnPats :: (EqnNo, EquationInfo) -> [Pat Id]\r
+eqnPats (_, eqn) = eqn_pats eqn\r
+\r
+okGroup :: [(EqnNo, EquationInfo)] -> Bool\r
+-- True if all equations have at least one pattern, and\r
+-- all have the same number of patterns\r
+okGroup [] = True\r
+okGroup (e:es) = n_pats > 0 && and [length (eqnPats e) == n_pats | e <- es]\r
+ where\r
+ n_pats = length (eqnPats e)\r
+\r
+-- Half-baked print\r
+pprGroup :: [(EqnNo, EquationInfo)] -> SDoc\r
+pprEqnInfo :: (EqnNo, EquationInfo) -> SDoc\r
+pprGroup es = vcat (map pprEqnInfo es)\r
+pprEqnInfo e = ppr (eqnPats e)\r
+\r
+\r
+firstPatN :: (EqnNo, EquationInfo) -> Pat Id\r
+firstPatN (_, eqn) = firstPat eqn\r
+\r
+is_con :: Pat Id -> Bool\r
+is_con (ConPatOut {}) = True\r
+is_con _ = False\r
+\r
+is_lit :: Pat Id -> Bool\r
+is_lit (LitPat _) = True\r
+is_lit (NPat _ _ _) = True\r
+is_lit _ = False\r
+\r
+is_var :: Pat Id -> Bool\r
+is_var (WildPat _) = True\r
+is_var _ = False\r
+\r
+is_var_con :: DataCon -> Pat Id -> Bool\r
+is_var_con _ (WildPat _) = True\r
+is_var_con con (ConPatOut{ pat_con = L _ id }) | id == con = True\r
+is_var_con _ _ = False\r
+\r
+is_var_lit :: HsLit -> Pat Id -> Bool\r
+is_var_lit _ (WildPat _) = True\r
+is_var_lit lit pat \r
+ | Just lit' <- get_lit pat = lit == lit'\r
+ | otherwise = False\r
+\end{code}\r
+\r
+The difference between @make_con@ and @make_whole_con@ is that\r
+@make_whole_con@ creates a new constructor with all its arguments, while\r
+@make_con@ takes a list of arguments and creates the constructor, getting its\r
+arguments from the list. See where \fbox{\ ???\ } are used for details.\r
+\r
+We need to reconstruct the patterns (make the constructors infix and\r
+similar) at the same time that we create the constructors.\r
+\r
+You can tell tuple constructors using\r
+\begin{verbatim}\r
+ Id.isTupleCon\r
+\end{verbatim}\r
+You can see if one constructor is infix with this clearer code :-))))))))))\r
+\begin{verbatim}\r
+ Lex.isLexConSym (Name.occNameString (Name.getOccName con))\r
+\end{verbatim}\r
+\r
+ Rather clumsy but it works. (Simon Peyton Jones)\r
+\r
+\r
+We don't mind the @nilDataCon@ because it doesn't change the way the\r
+message is printed; we are searching only for things like @[1,2,3]@,\r
+not @x:xs@ ....\r
+\r
+In @reconstruct_pat@ we want to ``undo'' the work\r
+that we have done in @tidy_pat@.\r
+In particular:\r
+\begin{tabular}{lll}\r
+ @((,) x y)@ & returns to be & @(x, y)@\r
+\\ @((:) x xs)@ & returns to be & @(x:xs)@\r
+\\ @(x:(...:[])@ & returns to be & @[x,...]@\r
+\end{tabular}\r
+%\r
+The difficult case is the third one, because we need to follow all the\r
+constructors until the @[]@ to know that we need to use the third case,\r
+not the second. \fbox{\ ???\ }\r
+%\r
+\begin{code}\r
+isInfixCon :: DataCon -> Bool\r
+isInfixCon con = isDataSymOcc (getOccName con)\r
+\r
+is_nil :: Pat Name -> Bool\r
+is_nil (ConPatIn con (PrefixCon [])) = unLoc con == getName nilDataCon\r
+is_nil _ = False\r
+\r
+is_list :: Pat Name -> Bool\r
+is_list (ListPat _ _) = True\r
+is_list _ = False\r
+\r
+return_list :: DataCon -> Pat Name -> Bool\r
+return_list id q = id == consDataCon && (is_nil q || is_list q) \r
+\r
+make_list :: LPat Name -> Pat Name -> Pat Name\r
+make_list p q | is_nil q = ListPat [p] placeHolderType\r
+make_list p (ListPat ps ty) = ListPat (p:ps) ty\r
+make_list _ _ = panic "Check.make_list: Invalid argument"\r
+\r
+make_con :: Pat Id -> ExhaustivePat -> ExhaustivePat \r
+make_con (ConPatOut{ pat_con = L _ id }) (lp:lq:ps, constraints) \r
+ | return_list id q = (noLoc (make_list lp q) : ps, constraints)\r
+ | isInfixCon id = (nlInfixConPat (getName id) lp lq : ps, constraints) \r
+ where q = unLoc lq \r
+\r
+make_con (ConPatOut{ pat_con = L _ id, pat_args = PrefixCon pats, pat_ty = ty }) (ps, constraints) \r
+ | isTupleTyCon tc = (noLoc (TuplePat pats_con (tupleTyConBoxity tc) ty) : rest_pats, constraints) \r
+ | isPArrFakeCon id = (noLoc (PArrPat pats_con placeHolderType) : rest_pats, constraints) \r
+ | otherwise = (nlConPat name pats_con : rest_pats, constraints)\r
+ where \r
+ name = getName id\r
+ (pats_con, rest_pats) = splitAtList pats ps\r
+ tc = dataConTyCon id\r
+\r
+-- reconstruct parallel array pattern\r
+--\r
+-- * don't check for the type only; we need to make sure that we are really\r
+-- dealing with one of the fake constructors and not with the real\r
+-- representation \r
+\r
+make_whole_con :: DataCon -> WarningPat\r
+make_whole_con con | isInfixCon con = nlInfixConPat name nlWildPat nlWildPat\r
+ | otherwise = nlConPat name pats\r
+ where \r
+ name = getName con\r
+ pats = [nlWildPat | _ <- dataConOrigArgTys con]\r
+\end{code}\r
+\r
+------------------------------------------------------------------------\r
+ Tidying equations\r
+------------------------------------------------------------------------\r
+\r
+tidy_eqn does more or less the same thing as @tidy@ in @Match.lhs@;\r
+that is, it removes syntactic sugar, reducing the number of cases that\r
+must be handled by the main checking algorithm. One difference is\r
+that here we can do *all* the tidying at once (recursively), rather \r
+than doing it incrementally.\r
+\r
+\begin{code}\r
+tidy_eqn :: EquationInfo -> EquationInfo\r
+tidy_eqn eqn = eqn { eqn_pats = map tidy_pat (eqn_pats eqn), \r
+ eqn_rhs = tidy_rhs (eqn_rhs eqn) }\r
+ where\r
+ -- Horrible hack. The tidy_pat stuff converts "might-fail" patterns to \r
+ -- WildPats which of course loses the info that they can fail to match. \r
+ -- So we stick in a CanFail as if it were a guard.\r
+ tidy_rhs (MatchResult can_fail body)\r
+ | any might_fail_pat (eqn_pats eqn) = MatchResult CanFail body\r
+ | otherwise = MatchResult can_fail body\r
+\r
+--------------\r
+might_fail_pat :: Pat Id -> Bool\r
+-- Returns True of patterns that might fail (i.e. fall through) in a way \r
+-- that is not covered by the checking algorithm. Specifically:\r
+-- NPlusKPat \r
+-- ViewPat (if refutable)\r
+\r
+-- First the two special cases\r
+might_fail_pat (NPlusKPat {}) = True\r
+might_fail_pat (ViewPat _ p _) = not (isIrrefutableHsPat p)\r
+\r
+-- Now the recursive stuff\r
+might_fail_pat (ParPat p) = might_fail_lpat p\r
+might_fail_pat (AsPat _ p) = might_fail_lpat p\r
+might_fail_pat (SigPatOut p _ ) = might_fail_lpat p\r
+might_fail_pat (ListPat ps _) = any might_fail_lpat ps\r
+might_fail_pat (TuplePat ps _ _) = any might_fail_lpat ps\r
+might_fail_pat (PArrPat ps _) = any might_fail_lpat ps\r
+might_fail_pat (BangPat p) = might_fail_lpat p\r
+might_fail_pat (ConPatOut { pat_args = ps }) = any might_fail_lpat (hsConPatArgs ps)\r
+\r
+-- Finally the ones that are sure to succeed, or which are covered by the checking algorithm\r
+might_fail_pat (LazyPat _) = False -- Always succeeds\r
+might_fail_pat _ = False -- VarPat, WildPat, LitPat, NPat, TypePat\r
+\r
+--------------\r
+might_fail_lpat :: LPat Id -> Bool\r
+might_fail_lpat (L _ p) = might_fail_pat p\r
+\r
+--------------\r
+tidy_lpat :: LPat Id -> LPat Id \r
+tidy_lpat p = fmap tidy_pat p\r
+\r
+--------------\r
+tidy_pat :: Pat Id -> Pat Id\r
+tidy_pat pat@(WildPat _) = pat\r
+tidy_pat (VarPat id) = WildPat (idType id) \r
+tidy_pat (ParPat p) = tidy_pat (unLoc p)\r
+tidy_pat (LazyPat p) = WildPat (hsLPatType p) -- For overlap and exhaustiveness checking\r
+ -- purposes, a ~pat is like a wildcard\r
+tidy_pat (BangPat p) = tidy_pat (unLoc p)\r
+tidy_pat (AsPat _ p) = tidy_pat (unLoc p)\r
+tidy_pat (SigPatOut p _) = tidy_pat (unLoc p)\r
+tidy_pat (CoPat _ pat _) = tidy_pat pat\r
+\r
+-- These two are might_fail patterns, so we map them to\r
+-- WildPats. The might_fail_pat stuff arranges that the\r
+-- guard says "this equation might fall through".\r
+tidy_pat (NPlusKPat id _ _ _) = WildPat (idType (unLoc id))\r
+tidy_pat (ViewPat _ _ ty) = WildPat ty\r
+\r
+tidy_pat pat@(ConPatOut { pat_con = L _ id, pat_args = ps })\r
+ = pat { pat_args = tidy_con id ps }\r
+\r
+tidy_pat (ListPat ps ty) \r
+ = unLoc $ foldr (\ x y -> mkPrefixConPat consDataCon [x,y] list_ty)\r
+ (mkNilPat list_ty)\r
+ (map tidy_lpat ps)\r
+ where list_ty = mkListTy ty\r
+\r
+-- introduce fake parallel array constructors to be able to handle parallel\r
+-- arrays with the existing machinery for constructor pattern\r
+--\r
+tidy_pat (PArrPat ps ty)\r
+ = unLoc $ mkPrefixConPat (parrFakeCon (length ps))\r
+ (map tidy_lpat ps) \r
+ (mkPArrTy ty)\r
+\r
+tidy_pat (TuplePat ps boxity ty)\r
+ = unLoc $ mkPrefixConPat (tupleCon boxity arity)\r
+ (map tidy_lpat ps) ty\r
+ where\r
+ arity = length ps\r
+\r
+tidy_pat (NPat lit mb_neg eq) = tidyNPat lit mb_neg eq\r
+\r
+-- Unpack string patterns fully, so we can see when they overlap with\r
+-- each other, or even explicit lists of Chars.\r
+tidy_pat (LitPat lit)\r
+ | HsString s <- lit\r
+ = unLoc $ foldr (\c pat -> mkPrefixConPat consDataCon [mk_char_lit c, pat] stringTy)\r
+ (mkPrefixConPat nilDataCon [] stringTy) (unpackFS s)\r
+ | otherwise\r
+ = tidyLitPat lit \r
+ where\r
+ mk_char_lit c = mkPrefixConPat charDataCon [nlLitPat (HsCharPrim c)] charTy\r
+\r
+-----------------\r
+tidy_con :: DataCon -> HsConPatDetails Id -> HsConPatDetails Id\r
+tidy_con _ (PrefixCon ps) = PrefixCon (map tidy_lpat ps)\r
+tidy_con _ (InfixCon p1 p2) = PrefixCon [tidy_lpat p1, tidy_lpat p2]\r
+tidy_con con (RecCon (HsRecFields fs _)) \r
+ | null fs = PrefixCon [nlWildPat | _ <- dataConOrigArgTys con]\r
+ -- Special case for null patterns; maybe not a record at all\r
+ | otherwise = PrefixCon (map (tidy_lpat.snd) all_pats)\r
+ where\r
+ -- pad out all the missing fields with WildPats.\r
+ field_pats = map (\ f -> (f, nlWildPat)) (dataConFieldLabels con)\r
+ all_pats = foldr (\(HsRecField id p _) acc -> insertNm (getName (unLoc id)) p acc)\r
+ field_pats fs\r
+ \r
+ insertNm nm p [] = [(nm,p)]\r
+ insertNm nm p (x@(n,_):xs)\r
+ | nm == n = (nm,p):xs\r
+ | otherwise = x : insertNm nm p xs\r
+\end{code}\r
(addTickSyntaxExpr hpcSrcSpan bindExpr)
(addTickSyntaxExpr hpcSrcSpan returnExpr)
-addTickStmt isGuard (TransformStmt stmts ids usingExpr maybeByExpr returnExpr bindExpr) = do
- t_s <- (addTickLStmts isGuard stmts)
- t_u <- (addTickLHsExprAlways usingExpr)
- t_m <- (addTickMaybeByLHsExpr maybeByExpr)
- t_r <- (addTickSyntaxExpr hpcSrcSpan returnExpr)
- t_b <- (addTickSyntaxExpr hpcSrcSpan bindExpr)
- return $ TransformStmt t_s ids t_u t_m t_r t_b
-
-addTickStmt isGuard stmt@(GroupStmt { grpS_stmts = stmts
- , grpS_by = by, grpS_using = using
- , grpS_ret = returnExpr, grpS_bind = bindExpr
- , grpS_fmap = liftMExpr }) = do
+addTickStmt isGuard stmt@(TransStmt { trS_stmts = stmts
+ , trS_by = by, trS_using = using
+ , trS_ret = returnExpr, trS_bind = bindExpr
+ , trS_fmap = liftMExpr }) = do
t_s <- addTickLStmts isGuard stmts
t_y <- fmapMaybeM addTickLHsExprAlways by
t_u <- addTickLHsExprAlways using
t_f <- addTickSyntaxExpr hpcSrcSpan returnExpr
t_b <- addTickSyntaxExpr hpcSrcSpan bindExpr
t_m <- addTickSyntaxExpr hpcSrcSpan liftMExpr
- return $ stmt { grpS_stmts = t_s, grpS_by = t_y, grpS_using = t_u
- , grpS_ret = t_f, grpS_bind = t_b, grpS_fmap = t_m }
+ return $ stmt { trS_stmts = t_s, trS_by = t_y, trS_using = t_u
+ , trS_ret = t_f, trS_bind = t_b, trS_fmap = t_m }
addTickStmt isGuard stmt@(RecStmt {})
= do { stmts' <- addTickLStmts isGuard (recS_stmts stmt)
(addTickLStmts isGuard stmts)
(return ids)
-addTickMaybeByLHsExpr :: Maybe (LHsExpr Id) -> TM (Maybe (LHsExpr Id))
-addTickMaybeByLHsExpr maybeByExpr =
- case maybeByExpr of
- Nothing -> return Nothing
- Just byExpr -> addTickLHsExprAlways byExpr >>= (return . Just)
-
addTickHsLocalBinds :: HsLocalBinds Id -> TM (HsLocalBinds Id)
addTickHsLocalBinds (HsValBinds binds) =
liftM HsValBinds
where
bndrs_tuple_type = mkBigCoreVarTupTy bndrs
--- This function factors out commonality between the desugaring strategies for TransformStmt.
--- Given such a statement it gives you back an expression representing how to compute the transformed
--- list and the tuple that you need to bind from that list in order to proceed with your desugaring
-dsTransformStmt :: Stmt Id -> DsM (CoreExpr, LPat Id)
-dsTransformStmt (TransformStmt stmts binders usingExpr maybeByExpr _ _)
- = do { (expr, binders_tuple_type) <- dsInnerListComp (stmts, binders)
- ; usingExpr' <- dsLExpr usingExpr
-
- ; using_args <-
- case maybeByExpr of
- Nothing -> return [expr]
- Just byExpr -> do
- byExpr' <- dsLExpr byExpr
-
- us <- newUniqueSupply
- [tuple_binder] <- newSysLocalsDs [binders_tuple_type]
- let byExprWrapper = mkTupleCase us binders byExpr' tuple_binder (Var tuple_binder)
-
- return [Lam tuple_binder byExprWrapper, expr]
-
- ; let inner_list_expr = mkApps usingExpr' ((Type binders_tuple_type) : using_args)
- pat = mkBigLHsVarPatTup binders
- ; return (inner_list_expr, pat) }
-
-- This function factors out commonality between the desugaring strategies for GroupStmt.
-- Given such a statement it gives you back an expression representing how to compute the transformed
-- list and the tuple that you need to bind from that list in order to proceed with your desugaring
-dsGroupStmt :: Stmt Id -> DsM (CoreExpr, LPat Id)
-dsGroupStmt (GroupStmt { grpS_stmts = stmts, grpS_bndrs = binderMap
- , grpS_by = by, grpS_using = using }) = do
- let (fromBinders, toBinders) = unzip binderMap
-
- fromBindersTypes = map idType fromBinders
- toBindersTypes = map idType toBinders
-
- toBindersTupleType = mkBigCoreTupTy toBindersTypes
+dsTransStmt :: Stmt Id -> DsM (CoreExpr, LPat Id)
+dsTransStmt (TransStmt { trS_form = form, trS_stmts = stmts, trS_bndrs = binderMap
+ , trS_by = by, trS_using = using }) = do
+ let (from_bndrs, to_bndrs) = unzip binderMap
+ from_bndrs_tys = map idType from_bndrs
+ to_bndrs_tys = map idType to_bndrs
+ to_bndrs_tup_ty = mkBigCoreTupTy to_bndrs_tys
-- Desugar an inner comprehension which outputs a list of tuples of the "from" binders
- (expr, from_tup_ty) <- dsInnerListComp (stmts, fromBinders)
+ (expr, from_tup_ty) <- dsInnerListComp (stmts, from_bndrs)
-- Work out what arguments should be supplied to that expression: i.e. is an extraction
-- function required? If so, create that desugared function and add to arguments
usingArgs <- case by of
Nothing -> return [expr]
Just by_e -> do { by_e' <- dsLExpr by_e
- ; us <- newUniqueSupply
- ; [from_tup_id] <- newSysLocalsDs [from_tup_ty]
- ; let by_wrap = mkTupleCase us fromBinders by_e'
- from_tup_id (Var from_tup_id)
- ; return [Lam from_tup_id by_wrap, expr] }
+ ; lam <- matchTuple from_bndrs by_e'
+ ; return [lam, expr] }
-- Create an unzip function for the appropriate arity and element types and find "map"
- (unzip_fn, unzip_rhs) <- mkUnzipBind fromBindersTypes
+ unzip_stuff <- mkUnzipBind form from_bndrs_tys
map_id <- dsLookupGlobalId mapName
-- Generate the expressions to build the grouped list
let -- First we apply the grouping function to the inner list
- inner_list_expr = mkApps usingExpr' ((Type from_tup_ty) : usingArgs)
+ inner_list_expr = mkApps usingExpr' (Type from_tup_ty : usingArgs)
-- Then we map our "unzip" across it to turn the lists of tuples into tuples of lists
-- We make sure we instantiate the type variable "a" to be a list of "from" tuples and
-- the "b" to be a tuple of "to" lists!
- unzipped_inner_list_expr = mkApps (Var map_id)
- [Type (mkListTy from_tup_ty), Type toBindersTupleType, Var unzip_fn, inner_list_expr]
-- Then finally we bind the unzip function around that expression
- bound_unzipped_inner_list_expr = Let (Rec [(unzip_fn, unzip_rhs)]) unzipped_inner_list_expr
-
- -- Build a pattern that ensures the consumer binds into the NEW binders, which hold lists rather than single values
- let pat = mkBigLHsVarPatTup toBinders
+ bound_unzipped_inner_list_expr
+ = case unzip_stuff of
+ Nothing -> inner_list_expr
+ Just (unzip_fn, unzip_rhs) -> Let (Rec [(unzip_fn, unzip_rhs)]) $
+ mkApps (Var map_id) $
+ [ Type (mkListTy from_tup_ty)
+ , Type to_bndrs_tup_ty
+ , Var unzip_fn
+ , inner_list_expr]
+
+ -- Build a pattern that ensures the consumer binds into the NEW binders,
+ -- which hold lists rather than single values
+ let pat = mkBigLHsVarPatTup to_bndrs
return (bound_unzipped_inner_list_expr, pat)
-
\end{code}
%************************************************************************
core_rest <- deListComp quals list
dsLocalBinds binds core_rest
-deListComp (stmt@(TransformStmt {}) : quals) list = do
- (inner_list_expr, pat) <- dsTransformStmt stmt
- deBindComp pat inner_list_expr quals list
-
-deListComp (stmt@(GroupStmt {}) : quals) list = do
- (inner_list_expr, pat) <- dsGroupStmt stmt
+deListComp (stmt@(TransStmt {}) : quals) list = do
+ (inner_list_expr, pat) <- dsTransStmt stmt
deBindComp pat inner_list_expr quals list
deListComp (BindStmt pat list1 _ _ : quals) core_list2 = do -- rule A' above
deBindComp pat core_list1 quals core_list2
deListComp (ParStmt stmtss_w_bndrs _ _ _ : quals) list
- = do
- exps_and_qual_tys <- mapM dsInnerListComp stmtss_w_bndrs
- let (exps, qual_tys) = unzip exps_and_qual_tys
+ = do { exps_and_qual_tys <- mapM dsInnerListComp stmtss_w_bndrs
+ ; let (exps, qual_tys) = unzip exps_and_qual_tys
- (zip_fn, zip_rhs) <- mkZipBind qual_tys
+ ; (zip_fn, zip_rhs) <- mkZipBind qual_tys
-- Deal with [e | pat <- zip l1 .. ln] in example above
- deBindComp pat (Let (Rec [(zip_fn, zip_rhs)]) (mkApps (Var zip_fn) exps))
- quals list
-
+ ; deBindComp pat (Let (Rec [(zip_fn, zip_rhs)]) (mkApps (Var zip_fn) exps))
+ quals list }
where
bndrs_s = map snd stmtss_w_bndrs
core_rest <- dfListComp c_id n_id quals
dsLocalBinds binds core_rest
-dfListComp c_id n_id (stmt@(TransformStmt {}) : quals) = do
- (inner_list_expr, pat) <- dsTransformStmt stmt
- -- Anyway, we bind the newly transformed list via the generic binding function
- dfBindComp c_id n_id (pat, inner_list_expr) quals
-
-dfListComp c_id n_id (stmt@(GroupStmt {}) : quals) = do
- (inner_list_expr, pat) <- dsGroupStmt stmt
+dfListComp c_id n_id (stmt@(TransStmt {}) : quals) = do
+ (inner_list_expr, pat) <- dsTransStmt stmt
-- Anyway, we bind the newly grouped list via the generic binding function
dfBindComp c_id n_id (pat, inner_list_expr) quals
-- Increasing order of tag
-mkUnzipBind :: [Type] -> DsM (Id, CoreExpr)
+mkUnzipBind :: TransForm -> [Type] -> DsM (Maybe (Id, CoreExpr))
-- mkUnzipBind [t1, t2]
-- = (unzip, \ys :: [(t1, t2)] -> foldr (\ax :: (t1, t2) axs :: ([t1], [t2])
-- -> case ax of
-- ys)
--
-- We use foldr here in all cases, even if rules are turned off, because we may as well!
-mkUnzipBind elt_tys = do
- ax <- newSysLocalDs elt_tuple_ty
- axs <- newSysLocalDs elt_list_tuple_ty
- ys <- newSysLocalDs elt_tuple_list_ty
- xs <- mapM newSysLocalDs elt_tys
- xss <- mapM newSysLocalDs elt_list_tys
+mkUnzipBind ThenForm _
+ = return Nothing -- No unzipping for ThenForm
+mkUnzipBind _ elt_tys
+ = do { ax <- newSysLocalDs elt_tuple_ty
+ ; axs <- newSysLocalDs elt_list_tuple_ty
+ ; ys <- newSysLocalDs elt_tuple_list_ty
+ ; xs <- mapM newSysLocalDs elt_tys
+ ; xss <- mapM newSysLocalDs elt_list_tys
- unzip_fn <- newSysLocalDs unzip_fn_ty
-
- [us1, us2] <- sequence [newUniqueSupply, newUniqueSupply]
-
- let nil_tuple = mkBigCoreTup (map mkNilExpr elt_tys)
-
- concat_expressions = map mkConcatExpression (zip3 elt_tys (map Var xs) (map Var xss))
- tupled_concat_expression = mkBigCoreTup concat_expressions
-
- folder_body_inner_case = mkTupleCase us1 xss tupled_concat_expression axs (Var axs)
- folder_body_outer_case = mkTupleCase us2 xs folder_body_inner_case ax (Var ax)
- folder_body = mkLams [ax, axs] folder_body_outer_case
-
- unzip_body <- mkFoldrExpr elt_tuple_ty elt_list_tuple_ty folder_body nil_tuple (Var ys)
- return (unzip_fn, mkLams [ys] unzip_body)
+ ; unzip_fn <- newSysLocalDs unzip_fn_ty
+
+ ; [us1, us2] <- sequence [newUniqueSupply, newUniqueSupply]
+
+ ; let nil_tuple = mkBigCoreTup (map mkNilExpr elt_tys)
+ concat_expressions = map mkConcatExpression (zip3 elt_tys (map Var xs) (map Var xss))
+ tupled_concat_expression = mkBigCoreTup concat_expressions
+
+ folder_body_inner_case = mkTupleCase us1 xss tupled_concat_expression axs (Var axs)
+ folder_body_outer_case = mkTupleCase us2 xs folder_body_inner_case ax (Var ax)
+ folder_body = mkLams [ax, axs] folder_body_outer_case
+
+ ; unzip_body <- mkFoldrExpr elt_tuple_ty elt_list_tuple_ty folder_body nil_tuple (Var ys)
+ ; return (Just (unzip_fn, mkLams [ys] unzip_body)) }
where
elt_tuple_ty = mkBigCoreTupTy elt_tys
elt_tuple_list_ty = mkListTy elt_tuple_ty
; return $ mkApps then_exp' [ mkApps guard_exp' [exp']
, rest ] }
--- Transform statements desugar like this:
---
--- [ .. | qs, then f by e ] -> f (\q_v -> e) [| qs |]
---
--- where [| qs |] is the desugared inner monad comprehenion generated by the
--- statements `qs`.
-dsMcStmt (TransformStmt stmts binders usingExpr maybeByExpr return_op bind_op) stmts_rest
- = do { expr <- dsInnerMonadComp stmts binders return_op
- ; let binders_tup_type = mkBigCoreTupTy $ map idType binders
- ; usingExpr' <- dsLExpr usingExpr
- ; using_args <- case maybeByExpr of
- Nothing -> return [expr]
- Just byExpr -> do
- byExpr' <- dsLExpr byExpr
- us <- newUniqueSupply
- tup_binder <- newSysLocalDs binders_tup_type
- let byExprWrapper = mkTupleCase us binders byExpr' tup_binder (Var tup_binder)
- return [Lam tup_binder byExprWrapper, expr]
-
- ; let pat = mkBigLHsVarPatTup binders
- rhs = mkApps usingExpr' ((Type binders_tup_type) : using_args)
-
- ; dsMcBindStmt pat rhs bind_op noSyntaxExpr stmts_rest }
-
-- Group statements desugar like this:
--
-- [| (q, then group by e using f); rest |]
-- n_tup :: n qt
-- unzip :: n qt -> (n t1, ..., n tk) (needs Functor n)
-dsMcStmt (GroupStmt { grpS_stmts = stmts, grpS_bndrs = bndrs
- , grpS_by = by, grpS_using = using
- , grpS_ret = return_op, grpS_bind = bind_op
- , grpS_fmap = fmap_op }) stmts_rest
+dsMcStmt (TransStmt { trS_stmts = stmts, trS_bndrs = bndrs
+ , trS_by = by, trS_using = using
+ , trS_ret = return_op, trS_bind = bind_op
+ , trS_fmap = fmap_op, trS_form = form }) stmts_rest
= do { let (from_bndrs, to_bndrs) = unzip bndrs
from_bndr_tys = map idType from_bndrs -- Types ty
-- Generate the expressions to build the grouped list
-- Build a pattern that ensures the consumer binds into the NEW binders,
-- which hold monads rather than single values
- ; fmap_op' <- dsExpr fmap_op
; bind_op' <- dsExpr bind_op
- ; let bind_ty = exprType bind_op' -- m2 (n (a,b,c)) -> (n (a,b,c) -> r1) -> r2
+ ; let bind_ty = exprType bind_op' -- m2 (n (a,b,c)) -> (n (a,b,c) -> r1) -> r2
n_tup_ty = funArgTy $ funArgTy $ funResultTy bind_ty -- n (a,b,c)
tup_n_ty = mkBigCoreVarTupTy to_bndrs
; body <- dsMcStmts stmts_rest
; n_tup_var <- newSysLocalDs n_tup_ty
; tup_n_var <- newSysLocalDs tup_n_ty
- ; tup_n_expr <- mkMcUnzipM fmap_op' n_tup_var from_bndr_tys
+ ; tup_n_expr <- mkMcUnzipM form fmap_op n_tup_var from_bndr_tys
; us <- newUniqueSupply
; let rhs' = mkApps usingExpr' usingArgs
body' = mkTupleCase us to_bndrs body tup_n_var tup_n_expr
-- = ( fmap (selN1 :: (t1, t2) -> t1) ys
-- , fmap (selN2 :: (t1, t2) -> t2) ys )
-mkMcUnzipM :: CoreExpr -- fmap
+mkMcUnzipM :: TransForm
+ -> SyntaxExpr TcId -- fmap
-> Id -- Of type n (a,b,c)
-> [Type] -- [a,b,c]
-> DsM CoreExpr -- Of type (n a, n b, n c)
-mkMcUnzipM fmap_op ys elt_tys
- = do { xs <- mapM newSysLocalDs elt_tys
- ; tup_xs <- newSysLocalDs (mkBigCoreTupTy elt_tys)
+mkMcUnzipM ThenForm _ ys _
+ = return (Var ys) -- No unzipping to do
+
+mkMcUnzipM _ fmap_op ys elt_tys
+ = do { fmap_op' <- dsExpr fmap_op
+ ; xs <- mapM newSysLocalDs elt_tys
+ ; tup_xs <- newSysLocalDs (mkBigCoreTupTy elt_tys)
; let arg_ty = idType ys
- mk_elt i = mkApps fmap_op -- fmap :: forall a b. (a -> b) -> n a -> n b
+ mk_elt i = mkApps fmap_op' -- fmap :: forall a b. (a -> b) -> n a -> n b
[ Type arg_ty, Type (elt_tys !! i)
, mk_sel i, Var ys]
-- with type (forall a. a -> m a)
-- See notes [Monad Comprehensions]
-- After renaming, the ids are the binders
- -- bound by the stmts and used after them
+					-- bound by the stmts and used after them
- -- "qs, then f by e" ==> TransformStmt qs binders f (Just e) (return) (>>=)
- -- "qs, then f" ==> TransformStmt qs binders f Nothing (return) (>>=)
- | TransformStmt
- [LStmt idL] -- Stmts are the ones to the left of the 'then'
-
- [idR] -- After renaming, the Ids are the binders occurring
- -- within this transform statement that are used after it
-
- (LHsExpr idR) -- "then f"
-
- (Maybe (LHsExpr idR)) -- "by e" (optional)
-
- (SyntaxExpr idR) -- The 'return' function for inner monad
- -- comprehensions
- (SyntaxExpr idR) -- The '(>>=)' operator.
- -- See Note [Monad Comprehensions]
-
- | GroupStmt {
- grpS_stmts :: [LStmt idL], -- Stmts to the *left* of the 'group'
+ | TransStmt {
+ trS_form :: TransForm,
+ trS_stmts :: [LStmt idL], -- Stmts to the *left* of the 'group'
-- which generates the tuples to be grouped
- grpS_bndrs :: [(idR, idR)], -- See Note [GroupStmt binder map]
+ trS_bndrs :: [(idR, idR)], -- See Note [TransStmt binder map]
- grpS_by :: Maybe (LHsExpr idR), -- "by e" (optional)
-
- grpS_using :: LHsExpr idR,
- grpS_explicit :: Bool, -- True <=> explicit "using f"
- -- False <=> implicit; grpS_using is filled in with
- -- 'groupWith' (list comprehensions) or
- -- 'groupM' (monad comprehensions)
-
- -- Invariant: if grpS_explicit = False, then grp_by = Just e
- -- That is, we can have group by e
- -- group using f
- -- group by e using f
-
- grpS_ret :: SyntaxExpr idR, -- The 'return' function for inner monad
- -- comprehensions
- grpS_bind :: SyntaxExpr idR, -- The '(>>=)' operator
- grpS_fmap :: SyntaxExpr idR -- The polymorphic 'fmap' function for desugaring
+ trS_using :: LHsExpr idR,
+ trS_by :: Maybe (LHsExpr idR), -- "by e" (optional)
+                                  -- Invariant: if trS_form = GroupFormB, then trS_by = Just e
+
+ trS_ret :: SyntaxExpr idR, -- The monomorphic 'return' function for
+ -- the inner monad comprehensions
+ trS_bind :: SyntaxExpr idR, -- The '(>>=)' operator
+ trS_fmap :: SyntaxExpr idR -- The polymorphic 'fmap' function for desugaring
+ -- Only for 'group' forms
} -- See Note [Monad Comprehensions]
-- Recursive statement (see Note [How RecStmt works] below)
-- be quite as simple as (m (tya, tyb, tyc)).
}
deriving (Data, Typeable)
+
+data TransForm -- The 'f' below is the 'using' function, 'e' is the by function
+ = ThenForm -- then f or then f by e
+  | GroupFormU	  -- group using f or group by e using f
+ | GroupFormB -- group by e
+	-- In the GroupFormB case, trS_using is filled in with
+ -- 'groupWith' (list comprehensions) or
+ -- 'groupM' (monad comprehensions)
+ deriving (Data, Typeable)
\end{code}
Note [The type of bind in Stmts]
So we must be careful not to make assumptions about the type.
In particular, the monad may not be uniform throughout.
-Note [GroupStmt binder map]
+Note [TransStmt binder map]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The [(idR,idR)] in a GroupStmt behaves as follows:
+The [(idR,idR)] in a TransStmt behaves as follows:
* Before renaming: []
pprStmt (ParStmt stmtss _ _ _) = hsep (map doStmts stmtss)
where doStmts stmts = ptext (sLit "| ") <> ppr stmts
-pprStmt (TransformStmt stmts bndrs using by _ _)
- = sep (ppr_lc_stmts stmts ++ [pprTransformStmt bndrs using by])
-
-pprStmt (GroupStmt { grpS_stmts = stmts, grpS_by = by, grpS_using = using, grpS_explicit = explicit })
- = sep (ppr_lc_stmts stmts ++ [pprGroupStmt by using explicit])
+pprStmt (TransStmt { trS_stmts = stmts, trS_by = by, trS_using = using, trS_form = form })
+ = sep (ppr_lc_stmts stmts ++ [pprTransStmt by using form])
pprStmt (RecStmt { recS_stmts = segment, recS_rec_ids = rec_ids
, recS_later_ids = later_ids })
, nest 2 (ppr using)
, nest 2 (pprBy by)]
-pprGroupStmt :: OutputableBndr id => Maybe (LHsExpr id)
- -> LHsExpr id -> Bool
+pprTransStmt :: OutputableBndr id => Maybe (LHsExpr id)
+ -> LHsExpr id -> TransForm
-> SDoc
-pprGroupStmt by using explicit
- = sep [ ptext (sLit "then group"), nest 2 (pprBy by), nest 2 pp_using ]
- where
- pp_using | explicit = ptext (sLit "using") <+> ppr using
- | otherwise = empty
+pprTransStmt by using ThenForm
+ = sep [ ptext (sLit "then"), nest 2 (ppr using), nest 2 (pprBy by)]
+pprTransStmt by _ GroupFormB
+ = sep [ ptext (sLit "then group"), nest 2 (pprBy by) ]
+pprTransStmt by using GroupFormU
+ = sep [ ptext (sLit "then group"), nest 2 (pprBy by), nest 2 (ptext (sLit "using") <+> ppr using)]
pprBy :: OutputableBndr id => Maybe (LHsExpr id) -> SDoc
pprBy Nothing = empty
2 (ppr_stmt stmt)
where
-- For Group and Transform Stmts, don't print the nested stmts!
- ppr_stmt (GroupStmt { grpS_by = by, grpS_using = using
- , grpS_explicit = explicit }) = pprGroupStmt by using explicit
- ppr_stmt (TransformStmt _ bndrs using by _ _) = pprTransformStmt bndrs using by
- ppr_stmt stmt = pprStmt stmt
+ ppr_stmt (TransStmt { trS_by = by, trS_using = using
+ , trS_form = form }) = pprTransStmt by using form
+ ppr_stmt stmt = pprStmt stmt
\end{code}
-- Stmts
mkTransformStmt, mkTransformByStmt, mkExprStmt, mkBindStmt, mkLastStmt,
- emptyGroupStmt, mkGroupUsingStmt, mkGroupByStmt, mkGroupByUsingStmt,
+ emptyTransStmt, mkGroupUsingStmt, mkGroupByStmt, mkGroupByUsingStmt,
emptyRecStmt, mkRecStmt,
-- Template Haskell
mkNPat :: HsOverLit id -> Maybe (SyntaxExpr id) -> Pat id
mkNPlusKPat :: Located id -> HsOverLit id -> Pat id
-mkTransformStmt :: [LStmt idL] -> LHsExpr idR -> StmtLR idL idR
-mkTransformByStmt :: [LStmt idL] -> LHsExpr idR -> LHsExpr idR -> StmtLR idL idR
-
mkLastStmt :: LHsExpr idR -> StmtLR idL idR
mkExprStmt :: LHsExpr idR -> StmtLR idL idR
mkBindStmt :: LPat idL -> LHsExpr idR -> StmtLR idL idR
mkNPat lit neg = NPat lit neg noSyntaxExpr
mkNPlusKPat id lit = NPlusKPat id lit noSyntaxExpr noSyntaxExpr
-mkTransformStmt stmts usingExpr = TransformStmt stmts [] usingExpr Nothing noSyntaxExpr noSyntaxExpr
-mkTransformByStmt stmts usingExpr byExpr = TransformStmt stmts [] usingExpr (Just byExpr) noSyntaxExpr noSyntaxExpr
-
+mkTransformStmt :: [LStmt idL] -> LHsExpr idR -> StmtLR idL idR
+mkTransformByStmt :: [LStmt idL] -> LHsExpr idR -> LHsExpr idR -> StmtLR idL idR
mkGroupUsingStmt :: [LStmt idL] -> LHsExpr idR -> StmtLR idL idR
mkGroupByStmt :: [LStmt idL] -> LHsExpr idR -> StmtLR idL idR
mkGroupByUsingStmt :: [LStmt idL] -> LHsExpr idR -> LHsExpr idR -> StmtLR idL idR
-emptyGroupStmt :: StmtLR idL idR
-emptyGroupStmt = GroupStmt { grpS_stmts = [], grpS_bndrs = [], grpS_explicit = False
- , grpS_by = Nothing, grpS_using = noLoc noSyntaxExpr
- , grpS_ret = noSyntaxExpr, grpS_bind = noSyntaxExpr
- , grpS_fmap = noSyntaxExpr }
-mkGroupUsingStmt ss u = emptyGroupStmt { grpS_stmts = ss, grpS_explicit = True, grpS_using = u }
-mkGroupByStmt ss b = emptyGroupStmt { grpS_stmts = ss, grpS_by = Just b }
-mkGroupByUsingStmt ss b u = emptyGroupStmt { grpS_stmts = ss, grpS_by = Just b
- , grpS_explicit = True, grpS_using = u }
+emptyTransStmt :: StmtLR idL idR
+emptyTransStmt = TransStmt { trS_form = undefined, trS_stmts = [], trS_bndrs = []
+ , trS_by = Nothing, trS_using = noLoc noSyntaxExpr
+ , trS_ret = noSyntaxExpr, trS_bind = noSyntaxExpr
+ , trS_fmap = noSyntaxExpr }
+mkTransformStmt ss u = emptyTransStmt { trS_form = ThenForm, trS_stmts = ss, trS_using = u }
+mkTransformByStmt ss u b = emptyTransStmt { trS_form = ThenForm, trS_stmts = ss, trS_using = u, trS_by = Just b }
+mkGroupByStmt ss b = emptyTransStmt { trS_form = GroupFormB, trS_stmts = ss, trS_by = Just b }
+mkGroupUsingStmt ss u = emptyTransStmt { trS_form = GroupFormU, trS_stmts = ss, trS_using = u }
+mkGroupByUsingStmt ss b u = emptyTransStmt { trS_form = GroupFormU, trS_stmts = ss
+ , trS_by = Just b, trS_using = u }
mkLastStmt expr = LastStmt expr noSyntaxExpr
mkExprStmt expr = ExprStmt expr noSyntaxExpr noSyntaxExpr placeHolderType
collectStmtBinders (LastStmt {}) = []
collectStmtBinders (ParStmt xs _ _ _) = collectLStmtsBinders
$ concatMap fst xs
-collectStmtBinders (TransformStmt stmts _ _ _ _ _) = collectLStmtsBinders stmts
-collectStmtBinders (GroupStmt { grpS_stmts = stmts }) = collectLStmtsBinders stmts
-collectStmtBinders (RecStmt { recS_stmts = ss }) = collectLStmtsBinders ss
+collectStmtBinders (TransStmt { trS_stmts = stmts }) = collectLStmtsBinders stmts
+collectStmtBinders (RecStmt { recS_stmts = ss }) = collectLStmtsBinders ss
----------------- Patterns --------------------------
hs_stmt (LastStmt {}) = emptyNameSet
hs_stmt (ParStmt xs _ _ _) = hs_lstmts $ concatMap fst xs
- hs_stmt (TransformStmt stmts _ _ _ _ _) = hs_lstmts stmts
- hs_stmt (GroupStmt { grpS_stmts = stmts }) = hs_lstmts stmts
- hs_stmt (RecStmt { recS_stmts = ss }) = hs_lstmts ss
+ hs_stmt (TransStmt { trS_stmts = stmts }) = hs_lstmts stmts
+ hs_stmt (RecStmt { recS_stmts = ss }) = hs_lstmts ss
hs_local_binds (HsValBinds val_binds) = hsValBindsImplicits val_binds
hs_local_binds (HsIPBinds _) = emptyNameSet
methodNamesStmt (RecStmt { recS_stmts = stmts }) = methodNamesStmts stmts `addOneFV` loopAName
methodNamesStmt (LetStmt _) = emptyFVs
methodNamesStmt (ParStmt _ _ _ _) = emptyFVs
-methodNamesStmt (TransformStmt {}) = emptyFVs
-methodNamesStmt (GroupStmt {}) = emptyFVs
- -- ParStmt, TransformStmt and GroupStmt can't occur in commands, but it's not convenient to error
+methodNamesStmt (TransStmt {}) = emptyFVs
+ -- ParStmt and TransStmt can't occur in commands, but it's not convenient to error
-- here so we just do what's convenient
\end{code}
; return ( ([L loc (ParStmt segs' mzip_op bind_op return_op)], thing)
, fvs1 `plusFV` fvs2 `plusFV` fvs3 `plusFV` fvs4) }
-rnStmt ctxt (L loc (TransformStmt stmts _ using by _ _)) thing_inside
- = do { (using', fvs1) <- rnLExpr using
-
- ; ((stmts', (by', used_bndrs, thing)), fvs2)
- <- rnStmts (TransformStmtCtxt ctxt) stmts $ \ bndrs ->
- do { (by', fvs_by) <- case by of
- Nothing -> return (Nothing, emptyFVs)
- Just e -> do { (e', fvs) <- rnLExpr e; return (Just e', fvs) }
- ; (thing, fvs_thing) <- thing_inside bndrs
- ; let fvs = fvs_by `plusFV` fvs_thing
- used_bndrs = filter (`elemNameSet` fvs) bndrs
- -- The paper (Fig 5) has a bug here; we must treat any free varaible of
- -- the "thing inside", **or of the by-expression**, as used
- ; return ((by', used_bndrs, thing), fvs) }
-
- -- Lookup `(>>=)` and `fail` for monad comprehensions
- ; ((return_op, fvs3), (bind_op, fvs4)) <-
- if isMonadCompExpr ctxt
- then (,) <$> lookupSyntaxName returnMName
- <*> lookupSyntaxName bindMName
- else return ( (noSyntaxExpr, emptyFVs)
- , (noSyntaxExpr, emptyFVs) )
-
- ; return (([L loc (TransformStmt stmts' used_bndrs using' by' return_op bind_op)], thing),
- fvs1 `plusFV` fvs2 `plusFV` fvs3 `plusFV` fvs4) }
-
-rnStmt ctxt (L loc (GroupStmt { grpS_stmts = stmts, grpS_by = by, grpS_explicit = explicit
- , grpS_using = using })) thing_inside
+rnStmt ctxt (L loc (TransStmt { trS_stmts = stmts, trS_by = by, trS_form = form
+ , trS_using = using })) thing_inside
= do { -- Rename the 'using' expression in the context before the transform is begun
let implicit_name | isMonadCompExpr ctxt = groupMName
| otherwise = groupWithName
- ; (using', fvs1) <- if explicit
- then rnLExpr using
- else do { (e,fvs) <- lookupSyntaxName implicit_name
- ; return (noLoc e, fvs) }
+ ; (using', fvs1) <- case form of
+ GroupFormB -> do { (e,fvs) <- lookupSyntaxName implicit_name
+ ; return (noLoc e, fvs) }
+ _ -> rnLExpr using
-- Rename the stmts and the 'by' expression
-- Keep track of the variables mentioned in the 'by' expression
; (thing, fvs_thing) <- thing_inside bndrs
; let fvs = fvs_by `plusFV` fvs_thing
used_bndrs = filter (`elemNameSet` fvs) bndrs
+	       -- The paper (Fig 5) has a bug here; we must treat any free variable of
+ -- the "thing inside", **or of the by-expression**, as used
; return ((by', used_bndrs, thing), fvs) }
-- Lookup `return`, `(>>=)` and `liftM` for monad comprehensions
- ; ((return_op, fvs3), (bind_op, fvs4), (fmap_op, fvs5)) <-
- if isMonadCompExpr ctxt
- then (,,) <$> lookupSyntaxName returnMName
- <*> lookupSyntaxName bindMName
- <*> lookupSyntaxName fmapName
- else return ( (noSyntaxExpr, emptyFVs)
- , (noSyntaxExpr, emptyFVs)
- , (noSyntaxExpr, emptyFVs) )
-
- ; let all_fvs = fvs1 `plusFV` fvs2 `plusFV` fvs3 `plusFV` fvs4
- `plusFV` fvs5
+ ; (return_op, fvs3) <- lookupSyntaxName returnMName
+ ; (bind_op, fvs4) <- lookupSyntaxName bindMName
+ ; (fmap_op, fvs5) <- case form of
+ ThenForm -> return (noSyntaxExpr, emptyFVs)
+ _ -> lookupSyntaxName fmapName
+
+ ; let all_fvs = fvs1 `plusFV` fvs2 `plusFV` fvs3
+ `plusFV` fvs4 `plusFV` fvs5
bndr_map = used_bndrs `zip` used_bndrs
- -- See Note [GroupStmt binder map] in HsExpr
+ -- See Note [TransStmt binder map] in HsExpr
; traceRn (text "rnStmt: implicitly rebound these used binders:" <+> ppr bndr_map)
- ; return (([L loc (GroupStmt { grpS_stmts = stmts', grpS_bndrs = bndr_map
- , grpS_by = by', grpS_using = using', grpS_explicit = explicit
- , grpS_ret = return_op, grpS_bind = bind_op
- , grpS_fmap = fmap_op })], thing), all_fvs) }
+ ; return (([L loc (TransStmt { trS_stmts = stmts', trS_bndrs = bndr_map
+ , trS_by = by', trS_using = using', trS_form = form
+ , trS_ret = return_op, trS_bind = bind_op
+ , trS_fmap = fmap_op })], thing), all_fvs) }
type ParSeg id = ([LStmt id], [id]) -- The Names are bound by the Stmts
rn_rec_stmt_lhs _ stmt@(L _ (ParStmt _ _ _ _)) -- Syntactically illegal in mdo
= pprPanic "rn_rec_stmt" (ppr stmt)
-rn_rec_stmt_lhs _ stmt@(L _ (TransformStmt {})) -- Syntactically illegal in mdo
- = pprPanic "rn_rec_stmt" (ppr stmt)
-
-rn_rec_stmt_lhs _ stmt@(L _ (GroupStmt {})) -- Syntactically illegal in mdo
+rn_rec_stmt_lhs _ stmt@(L _ (TransStmt {})) -- Syntactically illegal in mdo
= pprPanic "rn_rec_stmt" (ppr stmt)
rn_rec_stmt_lhs _ (L _ (LetStmt EmptyLocalBinds))
rn_rec_stmt _ stmt@(L _ (ParStmt {})) _ -- Syntactically illegal in mdo
= pprPanic "rn_rec_stmt: ParStmt" (ppr stmt)
-rn_rec_stmt _ stmt@(L _ (TransformStmt {})) _ -- Syntactically illegal in mdo
- = pprPanic "rn_rec_stmt: TransformStmt" (ppr stmt)
-
-rn_rec_stmt _ stmt@(L _ (GroupStmt {})) _ -- Syntactically illegal in mdo
- = pprPanic "rn_rec_stmt: GroupStmt" (ppr stmt)
+rn_rec_stmt _ stmt@(L _ (TransStmt {})) _ -- Syntactically illegal in mdo
+ = pprPanic "rn_rec_stmt: TransStmt" (ppr stmt)
rn_rec_stmt _ (L _ (LetStmt EmptyLocalBinds)) _
= panic "rn_rec_stmt: LetStmt EmptyLocalBinds"
, ptext (sLit "in") <+> pprAStmtContext ctxt ]
pprStmtCat :: Stmt a -> SDoc
-pprStmtCat (TransformStmt {}) = ptext (sLit "transform")
-pprStmtCat (GroupStmt {}) = ptext (sLit "group")
+pprStmtCat (TransStmt {}) = ptext (sLit "transform")
pprStmtCat (LastStmt {}) = ptext (sLit "return expression")
pprStmtCat (ExprStmt {}) = ptext (sLit "exprssion")
pprStmtCat (BindStmt {}) = ptext (sLit "binding")
ParStmt {}
| Opt_ParallelListComp `xopt` dflags -> isOK
| otherwise -> Just (ptext (sLit "Use -XParallelListComp"))
- TransformStmt {}
- | Opt_TransformListComp `xopt` dflags -> isOK
- | otherwise -> Just (ptext (sLit "Use -XTransformListComp"))
- GroupStmt {}
+ TransStmt {}
| Opt_TransformListComp `xopt` dflags -> isOK
| otherwise -> Just (ptext (sLit "Use -XTransformListComp"))
LastStmt {} -> notOK
zonkExpr env ret_op `thenM` \ new_ret ->
returnM (env, LastStmt new_expr new_ret)
-zonkStmt env (TransformStmt stmts binders usingExpr maybeByExpr return_op bind_op)
- = do { (env', stmts') <- zonkStmts env stmts
- ; let binders' = zonkIdOccs env' binders
- ; usingExpr' <- zonkLExpr env' usingExpr
- ; maybeByExpr' <- zonkMaybeLExpr env' maybeByExpr
- ; return_op' <- zonkExpr env' return_op
- ; bind_op' <- zonkExpr env' bind_op
- ; return (env', TransformStmt stmts' binders' usingExpr' maybeByExpr' return_op' bind_op') }
-
-zonkStmt env (GroupStmt { grpS_stmts = stmts, grpS_bndrs = binderMap
- , grpS_by = by, grpS_explicit = explicit, grpS_using = using
- , grpS_ret = return_op, grpS_bind = bind_op, grpS_fmap = liftM_op })
+zonkStmt env (TransStmt { trS_stmts = stmts, trS_bndrs = binderMap
+ , trS_by = by, trS_form = form, trS_using = using
+ , trS_ret = return_op, trS_bind = bind_op, trS_fmap = liftM_op })
= do { (env', stmts') <- zonkStmts env stmts
; binderMap' <- mappM (zonkBinderMapEntry env') binderMap
- ; by' <- fmapMaybeM (zonkLExpr env') by
- ; using' <- zonkLExpr env using
+ ; by' <- fmapMaybeM (zonkLExpr env') by
+ ; using' <- zonkLExpr env using
; return_op' <- zonkExpr env' return_op
- ; bind_op' <- zonkExpr env' bind_op
- ; liftM_op' <- zonkExpr env' liftM_op
+ ; bind_op' <- zonkExpr env' bind_op
+ ; liftM_op' <- zonkExpr env' liftM_op
; let env'' = extendZonkEnv env' (map snd binderMap')
- ; return (env'', GroupStmt { grpS_stmts = stmts', grpS_bndrs = binderMap'
- , grpS_by = by', grpS_explicit = explicit, grpS_using = using'
- , grpS_ret = return_op', grpS_bind = bind_op', grpS_fmap = liftM_op' }) }
+ ; return (env'', TransStmt { trS_stmts = stmts', trS_bndrs = binderMap'
+ , trS_by = by', trS_form = form, trS_using = using'
+ , trS_ret = return_op', trS_bind = bind_op', trS_fmap = liftM_op' }) }
where
zonkBinderMapEntry env (oldBinder, newBinder) = do
let oldBinder' = zonkIdOcc env oldBinder
; new_fail <- zonkExpr env fail_op
; return (env1, BindStmt new_pat new_expr new_bind new_fail) }
-zonkMaybeLExpr :: ZonkEnv -> Maybe (LHsExpr TcId) -> TcM (Maybe (LHsExpr Id))
-zonkMaybeLExpr _ Nothing = return Nothing
-zonkMaybeLExpr env (Just e) = (zonkLExpr env e) >>= (return . Just)
-
-
-------------------------------------------------------------------------
zonkRecFields :: ZonkEnv -> HsRecordBinds TcId -> TcM (HsRecordBinds TcId)
zonkRecFields env (HsRecFields flds dd)
tcDoStmt, tcMDoStmt, tcGuardStmt
) where
-import {-# SOURCE #-} TcExpr( tcSyntaxOp, tcInferRhoNC, tcCheckId,
+import {-# SOURCE #-} TcExpr( tcSyntaxOp, tcInferRhoNC, tcInferRho, tcCheckId,
tcMonoExpr, tcMonoExprNC, tcPolyExpr )
import HsSyn
; return (ids, pairs', thing) }
; return ( (stmts', ids) : pairs', thing ) }
-tcLcStmt m_tc ctxt (TransformStmt stmts binders usingExpr maybeByExpr _ _) elt_ty thing_inside = do
- (stmts', (binders', usingExpr', maybeByExpr', thing)) <-
- tcStmtsAndThen (TransformStmtCtxt ctxt) (tcLcStmt m_tc) stmts elt_ty $ \elt_ty' -> do
- let alphaListTy = mkTyConApp m_tc [alphaTy]
-
- (usingExpr', maybeByExpr') <-
- case maybeByExpr of
- Nothing -> do
- -- We must validate that usingExpr :: forall a. [a] -> [a]
- let using_ty = mkForAllTy alphaTyVar (alphaListTy `mkFunTy` alphaListTy)
- usingExpr' <- tcPolyExpr usingExpr using_ty
- return (usingExpr', Nothing)
- Just byExpr -> do
- -- We must infer a type such that e :: t and then check that
- -- usingExpr :: forall a. (a -> t) -> [a] -> [a]
- (byExpr', tTy) <- tcInferRhoNC byExpr
- let using_ty = mkForAllTy alphaTyVar $
- (alphaTy `mkFunTy` tTy)
- `mkFunTy` alphaListTy `mkFunTy` alphaListTy
- usingExpr' <- tcPolyExpr usingExpr using_ty
- return (usingExpr', Just byExpr')
-
- binders' <- tcLookupLocalIds binders
- thing <- thing_inside elt_ty'
-
- return (binders', usingExpr', maybeByExpr', thing)
-
- return (TransformStmt stmts' binders' usingExpr' maybeByExpr' noSyntaxExpr noSyntaxExpr, thing)
-
-tcLcStmt m_tc ctxt (GroupStmt { grpS_stmts = stmts, grpS_bndrs = bindersMap
- , grpS_by = by, grpS_using = using
- , grpS_explicit = explicit }) elt_ty thing_inside
- = do { let (bndr_names, list_bndr_names) = unzip bindersMap
-
- ; (stmts', (bndr_ids, by', using_ty, elt_ty')) <-
- tcStmtsAndThen (TransformStmtCtxt ctxt) (tcLcStmt m_tc) stmts elt_ty $ \elt_ty' -> do
- (by', using_ty) <-
- case by of
- Nothing -> -- check that using :: forall a. [a] -> [[a]]
- return (Nothing, mkForAllTy alphaTyVar $
- alphaListTy `mkFunTy` alphaListListTy)
-
- Just by_e -> -- check that using :: forall a. (a -> t) -> [a] -> [[a]]
- -- where by :: t
- do { (by_e', t_ty) <- tcInferRhoNC by_e
- ; return (Just by_e', mkForAllTy alphaTyVar $
- (alphaTy `mkFunTy` t_ty)
- `mkFunTy` alphaListTy
- `mkFunTy` alphaListListTy) }
- -- Find the Ids (and hence types) of all old binders
- bndr_ids <- tcLookupLocalIds bndr_names
-
- return (bndr_ids, by', using_ty, elt_ty')
-
- -- Ensure that every old binder of type b is linked up with
- -- its new binder which should have type [b]
- ; let list_bndr_ids = zipWith mk_list_bndr list_bndr_names bndr_ids
- bindersMap' = bndr_ids `zip` list_bndr_ids
+tcLcStmt m_tc ctxt (TransStmt { trS_form = form, trS_stmts = stmts
+ , trS_bndrs = bindersMap
+ , trS_by = by, trS_using = using }) elt_ty thing_inside
+ = do { let (bndr_names, n_bndr_names) = unzip bindersMap
+ unused_ty = pprPanic "tcLcStmt: inner ty" (ppr bindersMap)
+ -- The inner 'stmts' lack a LastStmt, so the element type
+ -- passed in to tcStmtsAndThen is never looked at
+ ; (stmts', (bndr_ids, by'))
+ <- tcStmtsAndThen (TransformStmtCtxt ctxt) (tcLcStmt m_tc) stmts unused_ty $ \_ -> do
+ { by' <- case by of
+ Nothing -> return Nothing
+ Just e -> do { e_ty <- tcInferRho e; return (Just e_ty) }
+ ; bndr_ids <- tcLookupLocalIds bndr_names
+ ; return (bndr_ids, by') }
+
+ ; let m_app ty = mkTyConApp m_tc [ty]
+
+ --------------- Typecheck the 'using' function -------------
+ -- using :: ((a,b,c)->t) -> m (a,b,c) -> m (a,b,c) (ThenForm)
+ -- :: ((a,b,c)->t) -> m (a,b,c) -> m (m (a,b,c)) (GroupForm)
+
+ -- n_app :: Type -> Type -- Wraps a 'ty' into '[ty]' for GroupForm
+ ; let n_app = case form of
+ ThenForm -> (\ty -> ty)
+ _ -> m_app
+
+ by_arrow :: Type -> Type -- Wraps 'ty' to '(a->t) -> ty' if the By is present
+ by_arrow = case by' of
+ Nothing -> \ty -> ty
+ Just (_,e_ty) -> \ty -> e_ty `mkFunTy` ty
+
+ tup_ty = mkBigCoreVarTupTy bndr_ids
+ poly_arg_ty = m_app alphaTy
+ poly_res_ty = m_app (n_app alphaTy)
+ using_poly_ty = mkForAllTy alphaTyVar $ by_arrow $
+ poly_arg_ty `mkFunTy` poly_res_ty
+
+ ; using' <- tcPolyExpr using using_poly_ty
+ ; let final_using = fmap (HsWrap (WpTyApp tup_ty)) using'
+
+ -- 'stmts' returns a result of type (m1_ty tuple_ty),
+ -- typically something like [(Int,Bool,Int)]
+ -- We don't know what tuple_ty is yet, so we use a variable
+ ; let mk_n_bndr :: Name -> TcId -> TcId
+ mk_n_bndr n_bndr_name bndr_id = mkLocalId n_bndr_name (n_app (idType bndr_id))
+
+ -- Ensure that every old binder of type `b` is linked up with its
+ -- new binder which should have type `n b`
-- See Note [GroupStmt binder map] in HsExpr
-
- ; using' <- tcPolyExpr using using_ty
-
- -- Type check the thing in the environment with
- -- these new binders and return the result
- ; thing <- tcExtendIdEnv list_bndr_ids (thing_inside elt_ty')
- ; return (emptyGroupStmt { grpS_stmts = stmts', grpS_bndrs = bindersMap'
- , grpS_by = by', grpS_using = using'
- , grpS_explicit = explicit }, thing) }
- where
- alphaListTy = mkTyConApp m_tc [alphaTy]
- alphaListListTy = mkTyConApp m_tc [alphaListTy]
-
- mk_list_bndr :: Name -> TcId -> TcId
- mk_list_bndr list_bndr_name bndr_id
- = mkLocalId list_bndr_name (mkTyConApp m_tc [idType bndr_id])
+ n_bndr_ids = zipWith mk_n_bndr n_bndr_names bndr_ids
+ bindersMap' = bndr_ids `zip` n_bndr_ids
+
+ -- Type check the thing in the environment with
+ -- these new binders and return the result
+ ; thing <- tcExtendIdEnv n_bndr_ids (thing_inside elt_ty)
+
+ ; return (emptyTransStmt { trS_stmts = stmts', trS_bndrs = bindersMap'
+ , trS_by = fmap fst by', trS_using = final_using
+ , trS_form = form }, thing) }
tcLcStmt _ _ stmt _ _
= pprPanic "tcLcStmt: unexpected Stmt" (ppr stmt)
; thing <- thing_inside new_res_ty
; return (ExprStmt rhs' then_op' guard_op' rhs_ty, thing) }
--- Transform statements.
---
--- [ body | stmts, then f ] -> f :: forall a. m a -> m a
--- [ body | stmts, then f by e ] -> f :: forall a. (a -> t) -> m a -> m a
---
-tcMcStmt ctxt (TransformStmt stmts binders usingExpr maybeByExpr return_op bind_op) res_ty thing_inside
- = do { let star_star_kind = liftedTypeKind `mkArrowKind` liftedTypeKind
- ; m1_ty <- newFlexiTyVarTy star_star_kind
- ; m2_ty <- newFlexiTyVarTy star_star_kind
- ; n_ty <- newFlexiTyVarTy star_star_kind
- ; tup_ty_var <- newFlexiTyVarTy liftedTypeKind
- ; new_res_ty <- newFlexiTyVarTy liftedTypeKind
- ; let m1_tup_ty = m1_ty `mkAppTy` tup_ty_var
-
- -- 'stmts' returns a result of type (m1_ty tuple_ty),
- -- typically something like [(Int,Bool,Int)]
- -- We don't know what tuple_ty is yet, so we use a variable
- ; (stmts', (binders', usingExpr', maybeByExpr', return_op', bind_op', thing)) <-
- tcStmtsAndThen (TransformStmtCtxt ctxt) tcMcStmt stmts m1_tup_ty $ \res_ty' -> do
- { (usingExpr', maybeByExpr') <-
- case maybeByExpr of
- Nothing -> do
- -- We must validate that usingExpr :: forall a. m a -> m a
- let using_ty = mkForAllTy alphaTyVar $
- (m_ty `mkAppTy` alphaTy)
- `mkFunTy`
- (m_ty `mkAppTy` alphaTy)
- usingExpr' <- tcPolyExpr usingExpr using_ty
- return (usingExpr', Nothing)
- Just byExpr -> do
- -- We must infer a type such that e :: t and then check that
- -- usingExpr :: forall a. (a -> t) -> m a -> m a
- (byExpr', tTy) <- tcInferRhoNC byExpr
- let using_ty = mkForAllTy alphaTyVar $
- (alphaTy `mkFunTy` tTy)
- `mkFunTy`
- (m_ty `mkAppTy` alphaTy)
- `mkFunTy`
- (m_ty `mkAppTy` alphaTy)
- usingExpr' <- tcPolyExpr usingExpr using_ty
- return (usingExpr', Just byExpr')
-
- ; bndr_ids <- tcLookupLocalIds binders
-
- -- `return` and `>>=` are used to pass around/modify our
- -- binders, so we know their types:
- --
- -- return :: (a,b,c,..) -> m (a,b,c,..)
- -- (>>=) :: m (a,b,c,..)
- -- -> ( (a,b,c,..) -> m (a,b,c,..) )
- -- -> m (a,b,c,..)
- --
- ; let bndr_ty = mkBigCoreVarTupTy bndr_ids
- m_bndr_ty = m_ty `mkAppTy` bndr_ty
-
- ; return_op' <- tcSyntaxOp MCompOrigin return_op
- (bndr_ty `mkFunTy` m_bndr_ty)
-
- ; bind_op' <- tcSyntaxOp MCompOrigin bind_op $
- m_bndr_ty `mkFunTy` (bndr_ty `mkFunTy` res_ty)
- `mkFunTy` res_ty
-
- -- Unify types of the inner comprehension and the binders type
- ; _ <- unifyType res_ty' m_bndr_ty
-
- -- Typecheck the `thing` with out old type (which is the type
- -- of the final result of our comprehension)
- ; thing <- thing_inside res_ty
-
- ; return (bndr_ids, usingExpr', maybeByExpr', return_op', bind_op', thing) }
-
- ; return (TransformStmt stmts' binders' usingExpr' maybeByExpr' return_op' bind_op', thing) }
-
-- Grouping statements
--
-- [ body | stmts, then group by e ]
-- f :: forall a. (a -> t) -> m a -> m (m a)
-- [ body | stmts, then group using f ]
-- -> f :: forall a. m a -> m (m a)
+
+-- We type [ body | (stmts, group by e using f), ... ]
+-- f <optional by> [ (a,b,c) | stmts ] >>= \(a,b,c) -> ...body....
--
-tcMcStmt ctxt (GroupStmt { grpS_stmts = stmts, grpS_bndrs = bindersMap
- , grpS_by = by, grpS_using = using, grpS_explicit = explicit
- , grpS_ret = return_op, grpS_bind = bind_op
- , grpS_fmap = fmap_op }) res_ty thing_inside
+-- We type the functions as follows:
+-- f <optional by> :: m1 (a,b,c) -> m2 (a,b,c) (ThenForm)
+-- :: m1 (a,b,c) -> m2 (n (a,b,c)) (GroupForm)
+-- (>>=) :: m2 (a,b,c) -> ((a,b,c) -> res) -> res (ThenForm)
+-- :: m2 (n (a,b,c)) -> (n (a,b,c) -> res) -> res (GroupForm)
+--
+tcMcStmt ctxt (TransStmt { trS_stmts = stmts, trS_bndrs = bindersMap
+ , trS_by = by, trS_using = using, trS_form = form
+ , trS_ret = return_op, trS_bind = bind_op
+ , trS_fmap = fmap_op }) res_ty thing_inside
= do { let star_star_kind = liftedTypeKind `mkArrowKind` liftedTypeKind
- ; m1_ty <- newFlexiTyVarTy star_star_kind
- ; m2_ty <- newFlexiTyVarTy star_star_kind
- ; n_ty <- newFlexiTyVarTy star_star_kind
- ; tup_ty_var <- newFlexiTyVarTy liftedTypeKind
+ ; m1_ty <- newFlexiTyVarTy star_star_kind
+ ; m2_ty <- newFlexiTyVarTy star_star_kind
+ ; tup_ty <- newFlexiTyVarTy liftedTypeKind
+ ; by_e_ty <- newFlexiTyVarTy liftedTypeKind -- The type of the 'by' expression (if any)
+
+ --------------- Typecheck the 'using' function -------------
+ -- using :: ((a,b,c)->t) -> m1 (a,b,c) -> m2 (n (a,b,c))
+
+ -- n_app :: Type -> Type -- Wraps a 'ty' into '(n ty)' for GroupForm
+ ; n_app <- case form of
+ ThenForm -> return (\ty -> ty)
+ _ -> do { n_ty <- newFlexiTyVarTy star_star_kind
+ ; return (n_ty `mkAppTy`) }
+ ; let by_arrow :: Type -> Type -- Wraps 'ty' to '(a->t) -> ty' if the By is present
+ by_arrow = case by of
+ Nothing -> \ty -> ty
+ Just {} -> \ty -> by_e_ty `mkFunTy` ty
+
+ poly_arg_ty = m1_ty `mkAppTy` alphaTy
+ using_arg_ty = m1_ty `mkAppTy` tup_ty
+ poly_res_ty = m2_ty `mkAppTy` n_app alphaTy
+ using_res_ty = m2_ty `mkAppTy` n_app tup_ty
+ using_poly_ty = mkForAllTy alphaTyVar $ by_arrow $
+ poly_arg_ty `mkFunTy` poly_res_ty
+
+ ; using' <- tcPolyExpr using using_poly_ty
+ ; let final_using = fmap (HsWrap (WpTyApp tup_ty)) using'
+
+ --------------- Typecheck the 'bind' function -------------
+ -- (>>=) :: m2 (n (a,b,c)) -> ( n (a,b,c) -> new_res_ty ) -> res_ty
; new_res_ty <- newFlexiTyVarTy liftedTypeKind
- ; let (bndr_names, n_bndr_names) = unzip bindersMap
- m1_tup_ty = m1_ty `mkAppTy` tup_ty_var
-
+ ; let n_tup_ty = n_app tup_ty -- n (a,b,c)
+ ; bind_op' <- tcSyntaxOp MCompOrigin bind_op $
+ using_res_ty `mkFunTy` (n_tup_ty `mkFunTy` new_res_ty)
+ `mkFunTy` res_ty
+
+ --------------- Typecheck the 'fmap' function -------------
+ ; fmap_op' <- case form of
+ ThenForm -> return noSyntaxExpr
+ _ -> fmap unLoc . tcPolyExpr (noLoc fmap_op) $
+ mkForAllTy alphaTyVar $ mkForAllTy betaTyVar $
+ (alphaTy `mkFunTy` betaTy)
+ `mkFunTy` (n_app alphaTy)
+ `mkFunTy` (n_app betaTy)
+
-- 'stmts' returns a result of type (m1_ty tuple_ty),
-- typically something like [(Int,Bool,Int)]
-- We don't know what tuple_ty is yet, so we use a variable
- ; (stmts', (bndr_ids, by_e_ty, return_op')) <-
- tcStmtsAndThen (TransformStmtCtxt ctxt) tcMcStmt stmts m1_tup_ty $ \res_ty' -> do
- { by_e_ty <- case by of
- Nothing -> return Nothing
- Just e -> do { e_ty <- tcInferRhoNC e; return (Just e_ty) }
+ ; let (bndr_names, n_bndr_names) = unzip bindersMap
+ ; (stmts', (bndr_ids, by', return_op')) <-
+ tcStmtsAndThen (TransformStmtCtxt ctxt) tcMcStmt stmts using_arg_ty $ \res_ty' -> do
+ { by' <- case by of
+ Nothing -> return Nothing
+ Just e -> do { e' <- tcMonoExpr e by_e_ty; return (Just e') }
-- Find the Ids (and hence types) of all old binders
; bndr_ids <- tcLookupLocalIds bndr_names
-- 'return' is only used for the binders, so we know its type.
- --
-- return :: (a,b,c,..) -> m (a,b,c,..)
; return_op' <- tcSyntaxOp MCompOrigin return_op $
(mkBigCoreVarTupTy bndr_ids) `mkFunTy` res_ty'
- ; return (bndr_ids, by_e_ty, return_op') }
-
-
-
- ; let tup_ty = mkBigCoreVarTupTy bndr_ids -- (a,b,c)
- using_arg_ty = m1_ty `mkAppTy` tup_ty -- m1 (a,b,c)
- n_tup_ty = n_ty `mkAppTy` tup_ty -- n (a,b,c)
- using_res_ty = m2_ty `mkAppTy` n_tup_ty -- m2 (n (a,b,c))
- using_fun_ty = using_arg_ty `mkFunTy` using_arg_ty
-
- -- (>>=) :: m2 (n (a,b,c)) -> ( n (a,b,c) -> new_res_ty ) -> res_ty
- -- using :: ((a,b,c)->t) -> m1 (a,b,c) -> m2 (n (a,b,c))
-
- --------------- Typecheck the 'bind' function -------------
- ; bind_op' <- tcSyntaxOp MCompOrigin bind_op $
- using_res_ty `mkFunTy` (n_tup_ty `mkFunTy` new_res_ty)
- `mkFunTy` res_ty
-
- --------------- Typecheck the 'using' function -------------
- ; let poly_fun_ty = (m1_ty `mkAppTy` alphaTy) `mkFunTy`
- (m2_ty `mkAppTy` (n_ty `mkAppTy` alphaTy))
- using_poly_ty = case by_e_ty of
- Nothing -> mkForAllTy alphaTyVar poly_fun_ty
- -- using :: forall a. m1 a -> m2 (n a)
-
- Just (_,t_ty) -> mkForAllTy alphaTyVar $
- (alphaTy `mkFunTy` t_ty) `mkFunTy` poly_fun_ty
- -- using :: forall a. (a->t) -> m1 a -> m2 (n a)
- -- where by :: t
-
- ; using' <- tcPolyExpr using using_poly_ty
- ; coi <- unifyType (applyTy using_poly_ty tup_ty)
- (case by_e_ty of
- Nothing -> using_fun_ty
- Just (_,t_ty) -> (tup_ty `mkFunTy` t_ty) `mkFunTy` using_fun_ty)
- ; let final_using = fmap (mkHsWrapCoI coi . HsWrap (WpTyApp tup_ty)) using'
-
- --------------- Typecheck the 'fmap' function -------------
- ; fmap_op' <- fmap unLoc . tcPolyExpr (noLoc fmap_op) $
- mkForAllTy alphaTyVar $ mkForAllTy betaTyVar $
- (alphaTy `mkFunTy` betaTy)
- `mkFunTy` (n_ty `mkAppTy` alphaTy)
- `mkFunTy` (n_ty `mkAppTy` betaTy)
+ ; return (bndr_ids, by', return_op') }
; let mk_n_bndr :: Name -> TcId -> TcId
- mk_n_bndr n_bndr_name bndr_id
- = mkLocalId n_bndr_name (n_ty `mkAppTy` idType bndr_id)
+ mk_n_bndr n_bndr_name bndr_id = mkLocalId n_bndr_name (n_app (idType bndr_id))
-- Ensure that every old binder of type `b` is linked up with its
-- new binder which should have type `n b`
n_bndr_ids = zipWith mk_n_bndr n_bndr_names bndr_ids
bindersMap' = bndr_ids `zip` n_bndr_ids
- -- Type check the thing in the environment with these new binders and
- -- return the result
+ -- Type check the thing in the environment with
+ -- these new binders and return the result
; thing <- tcExtendIdEnv n_bndr_ids (thing_inside res_ty)
- ; return (GroupStmt { grpS_stmts = stmts', grpS_bndrs = bindersMap'
- , grpS_by = fmap fst by_e_ty, grpS_using = final_using
- , grpS_ret = return_op', grpS_bind = bind_op'
- , grpS_fmap = fmap_op', grpS_explicit = explicit }, thing) }
+ ; return (TransStmt { trS_stmts = stmts', trS_bndrs = bindersMap'
+ , trS_by = by', trS_using = final_using
+ , trS_ret = return_op', trS_bind = bind_op'
+ , trS_fmap = fmap_op', trS_form = form }, thing) }
-- Typecheck `ParStmt`. See `tcLcStmt` for more informations about typechecking
-- of `ParStmt`s.