-%
-% (c) The University of Glasgow 2006
-% (c) The GRASP/AQUA Project, Glasgow University, 1997-1998
-%
-% Author: Juan J. Quintela <quintela@krilin.dc.fi.udc.es>
-
-\begin{code}
-{-# OPTIONS -fno-warn-incomplete-patterns #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and fix
--- any warnings in the module. See
--- http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#Warnings
--- for details
-
-module Check ( check , ExhaustivePat ) where
-
-#include "HsVersions.h"
-
-import HsSyn
-import TcHsSyn
-import DsUtils
-import MatchLit
-import Id
-import DataCon
-import Name
-import TysWiredIn
-import PrelNames
-import TyCon
-import Type
-import Unify( dataConCannotMatch )
-import SrcLoc
-import UniqSet
-import Util
-import Outputable
-import FastString
-\end{code}
-
-This module performs checks about if one list of equations are:
-\begin{itemize}
-\item Overlapped
-\item Non exhaustive
-\end{itemize}
-To discover that we go through the list of equations in a tree-like fashion.
-
-If you like theory, a similar algorithm is described in:
-\begin{quotation}
- {\em Two Techniques for Compiling Lazy Pattern Matching},
- Luc Maranguet,
- INRIA Rocquencourt (RR-2385, 1994)
-\end{quotation}
-The algorithm is based on the first technique, but there are some differences:
-\begin{itemize}
-\item We don't generate code
-\item We have constructors and literals (not only literals as in the
- article)
-\item We don't use directions, we must select the columns from
- left-to-right
-\end{itemize}
-(By the way the second technique is really similar to the one used in
- @Match.lhs@ to generate code)
-
-This function takes the equations of a pattern and returns:
-\begin{itemize}
-\item The patterns that are not recognized
-\item The equations that are not overlapped
-\end{itemize}
-It simplify the patterns and then call @check'@ (the same semantics), and it
-needs to reconstruct the patterns again ....
-
-The problem appear with things like:
-\begin{verbatim}
- f [x,y] = ....
- f (x:xs) = .....
-\end{verbatim}
-We want to put the two patterns with the same syntax, (prefix form) and
-then all the constructors are equal:
-\begin{verbatim}
- f (: x (: y [])) = ....
- f (: x xs) = .....
-\end{verbatim}
-(more about that in @tidy_eqns@)
-
-We would prefer to have a @WarningPat@ of type @String@, but Strings and the
-Pretty Printer are not friends.
-
-We use @InPat@ in @WarningPat@ instead of @OutPat@
-because we need to print the
-warning messages in the same way they are introduced, i.e. if the user
-wrote:
-\begin{verbatim}
- f [x,y] = ..
-\end{verbatim}
-He don't want a warning message written:
-\begin{verbatim}
- f (: x (: y [])) ........
-\end{verbatim}
-Then we need to use InPats.
-\begin{quotation}
- Juan Quintela 5 JUL 1998\\
- User-friendliness and compiler writers are no friends.
-\end{quotation}
-
-\begin{code}
-type WarningPat = InPat Name
-type ExhaustivePat = ([WarningPat], [(Name, [HsLit])])
-type EqnNo = Int
-type EqnSet = UniqSet EqnNo
-
-
-check :: [EquationInfo] -> ([ExhaustivePat], [EquationInfo])
- -- Second result is the shadowed equations
- -- if there are view patterns, just give up - don't know what the function is
-check qs = (untidy_warns, shadowed_eqns)
- where
- (warns, used_nos) = check' ([1..] `zip` map tidy_eqn qs)
- untidy_warns = map untidy_exhaustive warns
- shadowed_eqns = [eqn | (eqn,i) <- qs `zip` [1..],
- not (i `elementOfUniqSet` used_nos)]
-
-untidy_exhaustive :: ExhaustivePat -> ExhaustivePat
-untidy_exhaustive ([pat], messages) =
- ([untidy_no_pars pat], map untidy_message messages)
-untidy_exhaustive (pats, messages) =
- (map untidy_pars pats, map untidy_message messages)
-
-untidy_message :: (Name, [HsLit]) -> (Name, [HsLit])
-untidy_message (string, lits) = (string, map untidy_lit lits)
-\end{code}
-
-The function @untidy@ does the reverse work of the @tidy_pat@ funcion.
-
-\begin{code}
-
-type NeedPars = Bool
-
-untidy_no_pars :: WarningPat -> WarningPat
-untidy_no_pars p = untidy False p
-
-untidy_pars :: WarningPat -> WarningPat
-untidy_pars p = untidy True p
-
-untidy :: NeedPars -> WarningPat -> WarningPat
-untidy b (L loc p) = L loc (untidy' b p)
- where
- untidy' _ p@(WildPat _) = p
- untidy' _ p@(VarPat _) = p
- untidy' _ (LitPat lit) = LitPat (untidy_lit lit)
- untidy' _ p@(ConPatIn _ (PrefixCon [])) = p
- untidy' b (ConPatIn name ps) = pars b (L loc (ConPatIn name (untidy_con ps)))
- untidy' _ (ListPat pats ty) = ListPat (map untidy_no_pars pats) ty
- untidy' _ (TuplePat pats box ty) = TuplePat (map untidy_no_pars pats) box ty
- untidy' _ (PArrPat _ _) = panic "Check.untidy: Shouldn't get a parallel array here!"
- untidy' _ (SigPatIn _ _) = panic "Check.untidy: SigPat"
-
-untidy_con :: HsConPatDetails Name -> HsConPatDetails Name
-untidy_con (PrefixCon pats) = PrefixCon (map untidy_pars pats)
-untidy_con (InfixCon p1 p2) = InfixCon (untidy_pars p1) (untidy_pars p2)
-untidy_con (RecCon (HsRecFields flds dd))
- = RecCon (HsRecFields [ fld { hsRecFieldArg = untidy_pars (hsRecFieldArg fld) }
- | fld <- flds ] dd)
-
-pars :: NeedPars -> WarningPat -> Pat Name
-pars True p = ParPat p
-pars _ p = unLoc p
-
-untidy_lit :: HsLit -> HsLit
-untidy_lit (HsCharPrim c) = HsChar c
-untidy_lit lit = lit
-\end{code}
-
-This equation is the same that check, the only difference is that the
-boring work is done, that work needs to be done only once, this is
-the reason top have two functions, check is the external interface,
-@check'@ is called recursively.
-
-There are several cases:
-
-\begin{itemize}
-\item There are no equations: Everything is OK.
-\item There are only one equation, that can fail, and all the patterns are
- variables. Then that equation is used and the same equation is
- non-exhaustive.
-\item All the patterns are variables, and the match can fail, there are
- more equations then the results is the result of the rest of equations
- and this equation is used also.
-
-\item The general case, if all the patterns are variables (here the match
- can't fail) then the result is that this equation is used and this
- equation doesn't generate non-exhaustive cases.
-
-\item In the general case, there can exist literals ,constructors or only
- vars in the first column, we actuate in consequence.
-
-\end{itemize}
-
-
-\begin{code}
-
-check' :: [(EqnNo, EquationInfo)]
- -> ([ExhaustivePat], -- Pattern scheme that might not be matched at all
- EqnSet) -- Eqns that are used (others are overlapped)
-
-check' [] = ([([],[])],emptyUniqSet)
-
-check' ((n, EqnInfo { eqn_pats = ps, eqn_rhs = MatchResult can_fail _ }) : rs)
- | first_eqn_all_vars && case can_fail of { CantFail -> True; CanFail -> False }
- = ([], unitUniqSet n) -- One eqn, which can't fail
-
- | first_eqn_all_vars && null rs -- One eqn, but it can fail
- = ([(takeList ps (repeat nlWildPat),[])], unitUniqSet n)
-
- | first_eqn_all_vars -- Several eqns, first can fail
- = (pats, addOneToUniqSet indexs n)
- where
- first_eqn_all_vars = all_vars ps
- (pats,indexs) = check' rs
-
-check' qs
- | some_literals = split_by_literals qs
- | some_constructors = split_by_constructor qs
- | only_vars = first_column_only_vars qs
- | otherwise = pprPanic "Check.check': Not implemented :-(" (ppr first_pats)
- -- Shouldn't happen
- where
- -- Note: RecPats will have been simplified to ConPats
- -- at this stage.
- first_pats = ASSERT2( okGroup qs, pprGroup qs ) map firstPatN qs
- some_constructors = any is_con first_pats
- some_literals = any is_lit first_pats
- only_vars = all is_var first_pats
-\end{code}
-
-Here begins the code to deal with literals, we need to split the matrix
-in different matrix beginning by each literal and a last matrix with the
-rest of values.
-
-\begin{code}
-split_by_literals :: [(EqnNo, EquationInfo)] -> ([ExhaustivePat], EqnSet)
-split_by_literals qs = process_literals used_lits qs
- where
- used_lits = get_used_lits qs
-\end{code}
-
-@process_explicit_literals@ is a function that process each literal that appears
-in the column of the matrix.
-
-\begin{code}
-process_explicit_literals :: [HsLit] -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)
-process_explicit_literals lits qs = (concat pats, unionManyUniqSets indexs)
- where
- pats_indexs = map (\x -> construct_literal_matrix x qs) lits
- (pats,indexs) = unzip pats_indexs
-\end{code}
-
-
-@process_literals@ calls @process_explicit_literals@ to deal with the literals
-that appears in the matrix and deal also with the rest of the cases. It
-must be one Variable to be complete.
-
-\begin{code}
-
-process_literals :: [HsLit] -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)
-process_literals used_lits qs
- | null default_eqns = ASSERT( not (null qs) ) ([make_row_vars used_lits (head qs)] ++ pats,indexs)
- | otherwise = (pats_default,indexs_default)
- where
- (pats,indexs) = process_explicit_literals used_lits qs
- default_eqns = ASSERT2( okGroup qs, pprGroup qs )
- [remove_var q | q <- qs, is_var (firstPatN q)]
- (pats',indexs') = check' default_eqns
- pats_default = [(nlWildPat:ps,constraints) | (ps,constraints) <- (pats')] ++ pats
- indexs_default = unionUniqSets indexs' indexs
-\end{code}
-
-Here we have selected the literal and we will select all the equations that
-begins for that literal and create a new matrix.
-
-\begin{code}
-construct_literal_matrix :: HsLit -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)
-construct_literal_matrix lit qs =
- (map (\ (xs,ys) -> (new_lit:xs,ys)) pats,indexs)
- where
- (pats,indexs) = (check' (remove_first_column_lit lit qs))
- new_lit = nlLitPat lit
-
-remove_first_column_lit :: HsLit
- -> [(EqnNo, EquationInfo)]
- -> [(EqnNo, EquationInfo)]
-remove_first_column_lit lit qs
- = ASSERT2( okGroup qs, pprGroup qs )
- [(n, shift_pat eqn) | q@(n,eqn) <- qs, is_var_lit lit (firstPatN q)]
- where
- shift_pat eqn@(EqnInfo { eqn_pats = _:ps}) = eqn { eqn_pats = ps }
- shift_pat _ = panic "Check.shift_var: no patterns"
-\end{code}
-
-This function splits the equations @qs@ in groups that deal with the
-same constructor.
-
-\begin{code}
-split_by_constructor :: [(EqnNo, EquationInfo)] -> ([ExhaustivePat], EqnSet)
-split_by_constructor qs
- | notNull unused_cons = need_default_case used_cons unused_cons qs
- | otherwise = no_need_default_case used_cons qs
- where
- used_cons = get_used_cons qs
- unused_cons = get_unused_cons used_cons
-\end{code}
-
-The first column of the patterns matrix only have vars, then there is
-nothing to do.
-
-\begin{code}
-first_column_only_vars :: [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)
-first_column_only_vars qs = (map (\ (xs,ys) -> (nlWildPat:xs,ys)) pats,indexs)
- where
- (pats, indexs) = check' (map remove_var qs)
-\end{code}
-
-This equation takes a matrix of patterns and split the equations by
-constructor, using all the constructors that appears in the first column
-of the pattern matching.
-
-We can need a default clause or not ...., it depends if we used all the
-constructors or not explicitly. The reasoning is similar to @process_literals@,
-the difference is that here the default case is not always needed.
-
-\begin{code}
-no_need_default_case :: [Pat Id] -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)
-no_need_default_case cons qs = (concat pats, unionManyUniqSets indexs)
- where
- pats_indexs = map (\x -> construct_matrix x qs) cons
- (pats,indexs) = unzip pats_indexs
-
-need_default_case :: [Pat Id] -> [DataCon] -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)
-need_default_case used_cons unused_cons qs
- | null default_eqns = (pats_default_no_eqns,indexs)
- | otherwise = (pats_default,indexs_default)
- where
- (pats,indexs) = no_need_default_case used_cons qs
- default_eqns = ASSERT2( okGroup qs, pprGroup qs )
- [remove_var q | q <- qs, is_var (firstPatN q)]
- (pats',indexs') = check' default_eqns
- pats_default = [(make_whole_con c:ps,constraints) |
- c <- unused_cons, (ps,constraints) <- pats'] ++ pats
- new_wilds = ASSERT( not (null qs) ) make_row_vars_for_constructor (head qs)
- pats_default_no_eqns = [(make_whole_con c:new_wilds,[]) | c <- unused_cons] ++ pats
- indexs_default = unionUniqSets indexs' indexs
-
-construct_matrix :: Pat Id -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)
-construct_matrix con qs =
- (map (make_con con) pats,indexs)
- where
- (pats,indexs) = (check' (remove_first_column con qs))
-\end{code}
-
-Here remove first column is more difficult that with literals due to the fact
-that constructors can have arguments.
-
-For instance, the matrix
-\begin{verbatim}
- (: x xs) y
- z y
-\end{verbatim}
-is transformed in:
-\begin{verbatim}
- x xs y
- _ _ y
-\end{verbatim}
-
-\begin{code}
-remove_first_column :: Pat Id -- Constructor
- -> [(EqnNo, EquationInfo)]
- -> [(EqnNo, EquationInfo)]
-remove_first_column (ConPatOut{ pat_con = L _ con, pat_args = PrefixCon con_pats }) qs
- = ASSERT2( okGroup qs, pprGroup qs )
- [(n, shift_var eqn) | q@(n, eqn) <- qs, is_var_con con (firstPatN q)]
- where
- new_wilds = [WildPat (hsLPatType arg_pat) | arg_pat <- con_pats]
- shift_var eqn@(EqnInfo { eqn_pats = ConPatOut{ pat_args = PrefixCon ps' } : ps})
- = eqn { eqn_pats = map unLoc ps' ++ ps }
- shift_var eqn@(EqnInfo { eqn_pats = WildPat _ : ps })
- = eqn { eqn_pats = new_wilds ++ ps }
- shift_var _ = panic "Check.Shift_var:No done"
-
-make_row_vars :: [HsLit] -> (EqnNo, EquationInfo) -> ExhaustivePat
-make_row_vars used_lits (_, EqnInfo { eqn_pats = pats})
- = (nlVarPat new_var:takeList (tail pats) (repeat nlWildPat),[(new_var,used_lits)])
- where
- new_var = hash_x
-
-hash_x :: Name
-hash_x = mkInternalName unboundKey {- doesn't matter much -}
- (mkVarOccFS (fsLit "#x"))
- noSrcSpan
-
-make_row_vars_for_constructor :: (EqnNo, EquationInfo) -> [WarningPat]
-make_row_vars_for_constructor (_, EqnInfo { eqn_pats = pats})
- = takeList (tail pats) (repeat nlWildPat)
-
-compare_cons :: Pat Id -> Pat Id -> Bool
-compare_cons (ConPatOut{ pat_con = L _ id1 }) (ConPatOut { pat_con = L _ id2 }) = id1 == id2
-
-remove_dups :: [Pat Id] -> [Pat Id]
-remove_dups [] = []
-remove_dups (x:xs) | or (map (\y -> compare_cons x y) xs) = remove_dups xs
- | otherwise = x : remove_dups xs
-
-get_used_cons :: [(EqnNo, EquationInfo)] -> [Pat Id]
-get_used_cons qs = remove_dups [pat | q <- qs, let pat = firstPatN q,
- isConPatOut pat]
-
-isConPatOut :: Pat Id -> Bool
-isConPatOut (ConPatOut {}) = True
-isConPatOut _ = False
-
-remove_dups' :: [HsLit] -> [HsLit]
-remove_dups' [] = []
-remove_dups' (x:xs) | x `elem` xs = remove_dups' xs
- | otherwise = x : remove_dups' xs
-
-
-get_used_lits :: [(EqnNo, EquationInfo)] -> [HsLit]
-get_used_lits qs = remove_dups' all_literals
- where
- all_literals = get_used_lits' qs
-
-get_used_lits' :: [(EqnNo, EquationInfo)] -> [HsLit]
-get_used_lits' [] = []
-get_used_lits' (q:qs)
- | Just lit <- get_lit (firstPatN q) = lit : get_used_lits' qs
- | otherwise = get_used_lits qs
-
-get_lit :: Pat id -> Maybe HsLit
--- Get a representative HsLit to stand for the OverLit
--- It doesn't matter which one, because they will only be compared
--- with other HsLits gotten in the same way
-get_lit (LitPat lit) = Just lit
-get_lit (NPat (OverLit { ol_val = HsIntegral i}) mb _) = Just (HsIntPrim (mb_neg mb i))
-get_lit (NPat (OverLit { ol_val = HsFractional f }) mb _) = Just (HsFloatPrim (mb_neg mb f))
-get_lit (NPat (OverLit { ol_val = HsIsString s }) _ _) = Just (HsStringPrim s)
-get_lit _ = Nothing
-
-mb_neg :: Num a => Maybe b -> a -> a
-mb_neg Nothing v = v
-mb_neg (Just _) v = -v
-
-get_unused_cons :: [Pat Id] -> [DataCon]
-get_unused_cons used_cons = ASSERT( not (null used_cons) ) unused_cons
- where
- used_set :: UniqSet DataCon
- used_set = mkUniqSet [d | ConPatOut{ pat_con = L _ d} <- used_cons]
- (ConPatOut { pat_ty = ty }) = head used_cons
- Just (ty_con, inst_tys) = splitTyConApp_maybe ty
- unused_cons = filterOut is_used (tyConDataCons ty_con)
- is_used con = con `elementOfUniqSet` used_set
- || dataConCannotMatch inst_tys con
-
-all_vars :: [Pat Id] -> Bool
-all_vars [] = True
-all_vars (WildPat _:ps) = all_vars ps
-all_vars _ = False
-
-remove_var :: (EqnNo, EquationInfo) -> (EqnNo, EquationInfo)
-remove_var (n, eqn@(EqnInfo { eqn_pats = WildPat _ : ps})) = (n, eqn { eqn_pats = ps })
-remove_var _ = panic "Check.remove_var: equation does not begin with a variable"
-
------------------------
-eqnPats :: (EqnNo, EquationInfo) -> [Pat Id]
-eqnPats (_, eqn) = eqn_pats eqn
-
-okGroup :: [(EqnNo, EquationInfo)] -> Bool
--- True if all equations have at least one pattern, and
--- all have the same number of patterns
-okGroup [] = True
-okGroup (e:es) = n_pats > 0 && and [length (eqnPats e) == n_pats | e <- es]
- where
- n_pats = length (eqnPats e)
-
--- Half-baked print
-pprGroup :: [(EqnNo, EquationInfo)] -> SDoc
-pprEqnInfo :: (EqnNo, EquationInfo) -> SDoc
-pprGroup es = vcat (map pprEqnInfo es)
-pprEqnInfo e = ppr (eqnPats e)
-
-
-firstPatN :: (EqnNo, EquationInfo) -> Pat Id
-firstPatN (_, eqn) = firstPat eqn
-
-is_con :: Pat Id -> Bool
-is_con (ConPatOut {}) = True
-is_con _ = False
-
-is_lit :: Pat Id -> Bool
-is_lit (LitPat _) = True
-is_lit (NPat _ _ _) = True
-is_lit _ = False
-
-is_var :: Pat Id -> Bool
-is_var (WildPat _) = True
-is_var _ = False
-
-is_var_con :: DataCon -> Pat Id -> Bool
-is_var_con _ (WildPat _) = True
-is_var_con con (ConPatOut{ pat_con = L _ id }) | id == con = True
-is_var_con _ _ = False
-
-is_var_lit :: HsLit -> Pat Id -> Bool
-is_var_lit _ (WildPat _) = True
-is_var_lit lit pat
- | Just lit' <- get_lit pat = lit == lit'
- | otherwise = False
-\end{code}
-
-The difference beteewn @make_con@ and @make_whole_con@ is that
-@make_wole_con@ creates a new constructor with all their arguments, and
-@make_con@ takes a list of argumntes, creates the contructor getting their
-arguments from the list. See where \fbox{\ ???\ } are used for details.
-
-We need to reconstruct the patterns (make the constructors infix and
-similar) at the same time that we create the constructors.
-
-You can tell tuple constructors using
-\begin{verbatim}
- Id.isTupleCon
-\end{verbatim}
-You can see if one constructor is infix with this clearer code :-))))))))))
-\begin{verbatim}
- Lex.isLexConSym (Name.occNameString (Name.getOccName con))
-\end{verbatim}
-
- Rather clumsy but it works. (Simon Peyton Jones)
-
-
-We don't mind the @nilDataCon@ because it doesn't change the way to
-print the messsage, we are searching only for things like: @[1,2,3]@,
-not @x:xs@ ....
-
-In @reconstruct_pat@ we want to ``undo'' the work
-that we have done in @tidy_pat@.
-In particular:
-\begin{tabular}{lll}
- @((,) x y)@ & returns to be & @(x, y)@
-\\ @((:) x xs)@ & returns to be & @(x:xs)@
-\\ @(x:(...:[])@ & returns to be & @[x,...]@
-\end{tabular}
-%
-The difficult case is the third one becouse we need to follow all the
-contructors until the @[]@ to know that we need to use the second case,
-not the second. \fbox{\ ???\ }
-%
-\begin{code}
-isInfixCon :: DataCon -> Bool
-isInfixCon con = isDataSymOcc (getOccName con)
-
-is_nil :: Pat Name -> Bool
-is_nil (ConPatIn con (PrefixCon [])) = unLoc con == getName nilDataCon
-is_nil _ = False
-
-is_list :: Pat Name -> Bool
-is_list (ListPat _ _) = True
-is_list _ = False
-
-return_list :: DataCon -> Pat Name -> Bool
-return_list id q = id == consDataCon && (is_nil q || is_list q)
-
-make_list :: LPat Name -> Pat Name -> Pat Name
-make_list p q | is_nil q = ListPat [p] placeHolderType
-make_list p (ListPat ps ty) = ListPat (p:ps) ty
-make_list _ _ = panic "Check.make_list: Invalid argument"
-
-make_con :: Pat Id -> ExhaustivePat -> ExhaustivePat
-make_con (ConPatOut{ pat_con = L _ id }) (lp:lq:ps, constraints)
- | return_list id q = (noLoc (make_list lp q) : ps, constraints)
- | isInfixCon id = (nlInfixConPat (getName id) lp lq : ps, constraints)
- where q = unLoc lq
-
-make_con (ConPatOut{ pat_con = L _ id, pat_args = PrefixCon pats, pat_ty = ty }) (ps, constraints)
- | isTupleTyCon tc = (noLoc (TuplePat pats_con (tupleTyConBoxity tc) ty) : rest_pats, constraints)
- | isPArrFakeCon id = (noLoc (PArrPat pats_con placeHolderType) : rest_pats, constraints)
- | otherwise = (nlConPat name pats_con : rest_pats, constraints)
- where
- name = getName id
- (pats_con, rest_pats) = splitAtList pats ps
- tc = dataConTyCon id
-
--- reconstruct parallel array pattern
---
--- * don't check for the type only; we need to make sure that we are really
--- dealing with one of the fake constructors and not with the real
--- representation
-
-make_whole_con :: DataCon -> WarningPat
-make_whole_con con | isInfixCon con = nlInfixConPat name nlWildPat nlWildPat
- | otherwise = nlConPat name pats
- where
- name = getName con
- pats = [nlWildPat | _ <- dataConOrigArgTys con]
-\end{code}
-
-------------------------------------------------------------------------
- Tidying equations
-------------------------------------------------------------------------
-
-tidy_eqn does more or less the same thing as @tidy@ in @Match.lhs@;
-that is, it removes syntactic sugar, reducing the number of cases that
-must be handled by the main checking algorithm. One difference is
-that here we can do *all* the tidying at once (recursively), rather
-than doing it incrementally.
-
-\begin{code}
-tidy_eqn :: EquationInfo -> EquationInfo
-tidy_eqn eqn = eqn { eqn_pats = map tidy_pat (eqn_pats eqn),
- eqn_rhs = tidy_rhs (eqn_rhs eqn) }
- where
- -- Horrible hack. The tidy_pat stuff converts "might-fail" patterns to
- -- WildPats which of course loses the info that they can fail to match.
- -- So we stick in a CanFail as if it were a guard.
- tidy_rhs (MatchResult can_fail body)
- | any might_fail_pat (eqn_pats eqn) = MatchResult CanFail body
- | otherwise = MatchResult can_fail body
-
---------------
-might_fail_pat :: Pat Id -> Bool
--- Returns True of patterns that might fail (i.e. fall through) in a way
--- that is not covered by the checking algorithm. Specifically:
--- NPlusKPat
--- ViewPat (if refutable)
-
--- First the two special cases
-might_fail_pat (NPlusKPat {}) = True
-might_fail_pat (ViewPat _ p _) = not (isIrrefutableHsPat p)
-
--- Now the recursive stuff
-might_fail_pat (ParPat p) = might_fail_lpat p
-might_fail_pat (AsPat _ p) = might_fail_lpat p
-might_fail_pat (SigPatOut p _ ) = might_fail_lpat p
-might_fail_pat (ListPat ps _) = any might_fail_lpat ps
-might_fail_pat (TuplePat ps _ _) = any might_fail_lpat ps
-might_fail_pat (PArrPat ps _) = any might_fail_lpat ps
-might_fail_pat (BangPat p) = might_fail_lpat p
-might_fail_pat (ConPatOut { pat_args = ps }) = any might_fail_lpat (hsConPatArgs ps)
-
--- Finally the ones that are sure to succeed, or which are covered by the checking algorithm
-might_fail_pat (LazyPat _) = False -- Always succeeds
-might_fail_pat _ = False -- VarPat, WildPat, LitPat, NPat, TypePat
-
---------------
-might_fail_lpat :: LPat Id -> Bool
-might_fail_lpat (L _ p) = might_fail_pat p
-
---------------
-tidy_lpat :: LPat Id -> LPat Id
-tidy_lpat p = fmap tidy_pat p
-
---------------
-tidy_pat :: Pat Id -> Pat Id
-tidy_pat pat@(WildPat _) = pat
-tidy_pat (VarPat id) = WildPat (idType id)
-tidy_pat (ParPat p) = tidy_pat (unLoc p)
-tidy_pat (LazyPat p) = WildPat (hsLPatType p) -- For overlap and exhaustiveness checking
- -- purposes, a ~pat is like a wildcard
-tidy_pat (BangPat p) = tidy_pat (unLoc p)
-tidy_pat (AsPat _ p) = tidy_pat (unLoc p)
-tidy_pat (SigPatOut p _) = tidy_pat (unLoc p)
-tidy_pat (CoPat _ pat _) = tidy_pat pat
-
--- These two are might_fail patterns, so we map them to
--- WildPats. The might_fail_pat stuff arranges that the
--- guard says "this equation might fall through".
-tidy_pat (NPlusKPat id _ _ _) = WildPat (idType (unLoc id))
-tidy_pat (ViewPat _ _ ty) = WildPat ty
-
-tidy_pat (NPat lit mb_neg eq) = tidyNPat lit mb_neg eq
-
-tidy_pat pat@(ConPatOut { pat_con = L _ id, pat_args = ps })
- = pat { pat_args = tidy_con id ps }
-
-tidy_pat (ListPat ps ty)
- = unLoc $ foldr (\ x y -> mkPrefixConPat consDataCon [x,y] list_ty)
- (mkNilPat list_ty)
- (map tidy_lpat ps)
- where list_ty = mkListTy ty
-
--- introduce fake parallel array constructors to be able to handle parallel
--- arrays with the existing machinery for constructor pattern
---
-tidy_pat (PArrPat ps ty)
- = unLoc $ mkPrefixConPat (parrFakeCon (length ps))
- (map tidy_lpat ps)
- (mkPArrTy ty)
-
-tidy_pat (TuplePat ps boxity ty)
- = unLoc $ mkPrefixConPat (tupleCon boxity arity)
- (map tidy_lpat ps) ty
- where
- arity = length ps
-
--- Unpack string patterns fully, so we can see when they overlap with
--- each other, or even explicit lists of Chars.
-tidy_pat (LitPat lit)
- | HsString s <- lit
- = unLoc $ foldr (\c pat -> mkPrefixConPat consDataCon [mk_char_lit c, pat] stringTy)
- (mkPrefixConPat nilDataCon [] stringTy) (unpackFS s)
- | otherwise
- = tidyLitPat lit
- where
- mk_char_lit c = mkPrefixConPat charDataCon [nlLitPat (HsCharPrim c)] charTy
-
------------------
-tidy_con :: DataCon -> HsConPatDetails Id -> HsConPatDetails Id
-tidy_con _ (PrefixCon ps) = PrefixCon (map tidy_lpat ps)
-tidy_con _ (InfixCon p1 p2) = PrefixCon [tidy_lpat p1, tidy_lpat p2]
-tidy_con con (RecCon (HsRecFields fs _))
- | null fs = PrefixCon [nlWildPat | _ <- dataConOrigArgTys con]
- -- Special case for null patterns; maybe not a record at all
- | otherwise = PrefixCon (map (tidy_lpat.snd) all_pats)
- where
- -- pad out all the missing fields with WildPats.
- field_pats = map (\ f -> (f, nlWildPat)) (dataConFieldLabels con)
- all_pats = foldr (\(HsRecField id p _) acc -> insertNm (getName (unLoc id)) p acc)
- field_pats fs
-
- insertNm nm p [] = [(nm,p)]
- insertNm nm p (x@(n,_):xs)
- | nm == n = (nm,p):xs
- | otherwise = x : insertNm nm p xs
-\end{code}
+%\r
+% (c) The University of Glasgow 2006\r
+% (c) The GRASP/AQUA Project, Glasgow University, 1997-1998\r
+%\r
+% Author: Juan J. Quintela <quintela@krilin.dc.fi.udc.es>\r
+\r
+\begin{code}\r
+{-# OPTIONS -fno-warn-incomplete-patterns #-}\r
+-- The above warning suppression flag is a temporary kludge.\r
+-- While working on this module you are encouraged to remove it and fix\r
+-- any warnings in the module. See\r
+-- http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#Warnings\r
+-- for details\r
+\r
+module Check ( check , ExhaustivePat ) where\r
+\r
+#include "HsVersions.h"\r
+\r
+import HsSyn \r
+import TcHsSyn\r
+import DsUtils\r
+import MatchLit\r
+import Id\r
+import DataCon\r
+import Name\r
+import TysWiredIn\r
+import PrelNames\r
+import TyCon\r
+import Type\r
+import Unify( dataConCannotMatch )\r
+import SrcLoc\r
+import UniqSet\r
+import Util\r
+import Outputable\r
+import FastString\r
+\end{code}\r
+\r
+This module performs checks about if one list of equations are:\r
+\begin{itemize}\r
+\item Overlapped\r
+\item Non exhaustive\r
+\end{itemize}\r
+To discover that we go through the list of equations in a tree-like fashion.\r
+\r
+If you like theory, a similar algorithm is described in:\r
+\begin{quotation}\r
+ {\em Two Techniques for Compiling Lazy Pattern Matching},\r
+ Luc Maranget,\r
+ INRIA Rocquencourt (RR-2385, 1994)\r
+\end{quotation}\r
+The algorithm is based on the first technique, but there are some differences:\r
+\begin{itemize}\r
+\item We don't generate code\r
+\item We have constructors and literals (not only literals as in the \r
+ article)\r
+\item We don't use directions, we must select the columns from \r
+ left-to-right\r
+\end{itemize}\r
+(By the way the second technique is really similar to the one used in \r
+ @Match.lhs@ to generate code)\r
+\r
+This function takes the equations of a pattern and returns:\r
+\begin{itemize}\r
+\item The patterns that are not recognized\r
+\item The equations that are not overlapped\r
+\end{itemize}\r
+It simplifies the patterns and then calls @check'@ (the same semantics), and it \r
+needs to reconstruct the patterns again ....\r
+\r
+The problem appear with things like:\r
+\begin{verbatim}\r
+ f [x,y] = ....\r
+ f (x:xs) = .....\r
+\end{verbatim}\r
+We want to put the two patterns with the same syntax, (prefix form) and \r
+then all the constructors are equal:\r
+\begin{verbatim}\r
+ f (: x (: y [])) = ....\r
+ f (: x xs) = .....\r
+\end{verbatim}\r
+(more about that in @tidy_eqns@)\r
+\r
+We would prefer to have a @WarningPat@ of type @String@, but Strings and the \r
+Pretty Printer are not friends.\r
+\r
+We use @InPat@ in @WarningPat@ instead of @OutPat@\r
+because we need to print the \r
+warning messages in the same way they are introduced, i.e. if the user \r
+wrote:\r
+\begin{verbatim}\r
+ f [x,y] = ..\r
+\end{verbatim}\r
+The user doesn't want a warning message written:\r
+\begin{verbatim}\r
+ f (: x (: y [])) ........\r
+\end{verbatim}\r
+Then we need to use InPats.\r
+\begin{quotation}\r
+ Juan Quintela 5 JUL 1998\\\r
+ User-friendliness and compiler writers are no friends.\r
+\end{quotation}\r
+\r
+\begin{code}\r
+type WarningPat = InPat Name\r
+type ExhaustivePat = ([WarningPat], [(Name, [HsLit])])\r
+type EqnNo = Int\r
+type EqnSet = UniqSet EqnNo\r
+\r
+\r
+check :: [EquationInfo] -> ([ExhaustivePat], [EquationInfo])\r
+ -- Second result is the shadowed equations\r
+ -- if there are view patterns, just give up - don't know what the function is\r
+check qs = (untidy_warns, shadowed_eqns)\r
+           -- NOTE(review): removed accidental 'pprTrace "check"' debug output\r
+ where\r
+ tidy_qs = map tidy_eqn qs\r
+ (warns, used_nos) = check' ([1..] `zip` tidy_qs)\r
+ untidy_warns = map untidy_exhaustive warns \r
+ shadowed_eqns = [eqn | (eqn,i) <- qs `zip` [1..], \r
+ not (i `elementOfUniqSet` used_nos)]\r
+\r
+untidy_exhaustive :: ExhaustivePat -> ExhaustivePat\r
+untidy_exhaustive ([pat], messages) = \r
+ ([untidy_no_pars pat], map untidy_message messages)\r
+untidy_exhaustive (pats, messages) = \r
+ (map untidy_pars pats, map untidy_message messages)\r
+\r
+untidy_message :: (Name, [HsLit]) -> (Name, [HsLit])\r
+untidy_message (string, lits) = (string, map untidy_lit lits)\r
+\end{code}\r
+\r
+The function @untidy@ does the reverse work of the @tidy_pat@ function.\r
+\r
+\begin{code}\r
+\r
+type NeedPars = Bool \r
+\r
+untidy_no_pars :: WarningPat -> WarningPat\r
+untidy_no_pars p = untidy False p\r
+\r
+untidy_pars :: WarningPat -> WarningPat\r
+untidy_pars p = untidy True p\r
+\r
+untidy :: NeedPars -> WarningPat -> WarningPat\r
+untidy b (L loc p) = L loc (untidy' b p)\r
+ where\r
+ untidy' _ p@(WildPat _) = p\r
+ untidy' _ p@(VarPat _) = p\r
+ untidy' _ (LitPat lit) = LitPat (untidy_lit lit)\r
+ untidy' _ p@(ConPatIn _ (PrefixCon [])) = p\r
+ untidy' b (ConPatIn name ps) = pars b (L loc (ConPatIn name (untidy_con ps)))\r
+ untidy' _ (ListPat pats ty) = ListPat (map untidy_no_pars pats) ty\r
+ untidy' _ (TuplePat pats box ty) = TuplePat (map untidy_no_pars pats) box ty\r
+ untidy' _ (PArrPat _ _) = panic "Check.untidy: Shouldn't get a parallel array here!"\r
+ untidy' _ (SigPatIn _ _) = panic "Check.untidy: SigPat"\r
+\r
+untidy_con :: HsConPatDetails Name -> HsConPatDetails Name\r
+untidy_con (PrefixCon pats) = PrefixCon (map untidy_pars pats) \r
+untidy_con (InfixCon p1 p2) = InfixCon (untidy_pars p1) (untidy_pars p2)\r
+untidy_con (RecCon (HsRecFields flds dd)) \r
+ = RecCon (HsRecFields [ fld { hsRecFieldArg = untidy_pars (hsRecFieldArg fld) }\r
+ | fld <- flds ] dd)\r
+\r
+pars :: NeedPars -> WarningPat -> Pat Name\r
+pars True p = ParPat p\r
+pars _ p = unLoc p\r
+\r
+untidy_lit :: HsLit -> HsLit\r
+untidy_lit (HsCharPrim c) = HsChar c\r
+untidy_lit lit = lit\r
+\end{code}\r
+\r
+This equation is the same that check, the only difference is that the\r
+boring work is done, that work needs to be done only once, this is\r
+the reason to have two functions, check is the external interface,\r
+@check'@ is called recursively.\r
+\r
+There are several cases:\r
+\r
+\begin{itemize} \r
+\item There are no equations: Everything is OK. \r
+\item There is only one equation, which can fail, and all the patterns are\r
+ variables. Then that equation is used and the same equation is \r
+ non-exhaustive.\r
+\item All the patterns are variables, and the match can fail, there are \r
+ more equations then the results is the result of the rest of equations \r
+ and this equation is used also.\r
+\r
+\item The general case, if all the patterns are variables (here the match \r
+ can't fail) then the result is that this equation is used and this \r
+ equation doesn't generate non-exhaustive cases.\r
+\r
+\item In the general case, there can be literals, constructors or only 
+ vars in the first column; we act accordingly.
+\r
+\end{itemize}\r
+\r
+\r
+\begin{code}\r
+\r
+-- | The workhorse of the checker: same semantics as @check@, but assumes
+-- the equations have already been tidied and numbered.
+check' :: [(EqnNo, EquationInfo)] 
+ -> ([ExhaustivePat], -- Pattern scheme that might not be matched at all
+ EqnSet) -- Eqns that are used (others are overlapped)
+
+-- No equations: the empty row is missing and no equation is used
+check' [] = ([([],[])],emptyUniqSet)
+
+check' ((n, EqnInfo { eqn_pats = ps, eqn_rhs = MatchResult can_fail _ }) : rs) 
+ | first_eqn_all_vars && case can_fail of { CantFail -> True; CanFail -> False }
+ = ([], unitUniqSet n) -- One eqn, which can't fail
+
+ | first_eqn_all_vars && null rs -- One eqn, but it can fail
+ = ([(takeList ps (repeat nlWildPat),[])], unitUniqSet n)
+
+ | first_eqn_all_vars -- Several eqns, first can fail
+ = (pats, addOneToUniqSet indexs n)
+ where
+ first_eqn_all_vars = all_vars ps
+ (pats,indexs) = check' rs
+
+-- General case: dispatch on the contents of the first column
+check' qs
+ | some_literals = split_by_literals qs
+ | some_constructors = split_by_constructor qs
+ | only_vars = first_column_only_vars qs
+ | otherwise = pprPanic "Check.check': Not implemented :-(" (ppr first_pats)
+ -- Shouldn't happen
+ where
+ -- Note: RecPats will have been simplified to ConPats
+ -- at this stage.
+ first_pats = ASSERT2( okGroup qs, pprGroup qs ) map firstPatN qs
+ some_constructors = any is_con first_pats
+ some_literals = any is_lit first_pats
+ only_vars = all is_var first_pats
+\end{code}\r
+\r
+Here begins the code to deal with literals; we need to split the matrix
+into several matrices, one beginning with each literal, plus a last matrix 
+for the rest of the values.
+\r
+\begin{code}\r
+-- | Entry point for a group whose first column contains literals:
+-- check one sub-matrix per distinct literal, plus the default case.
+split_by_literals :: [(EqnNo, EquationInfo)] -> ([ExhaustivePat], EqnSet)
+split_by_literals qs = process_literals used_lits qs
+ where
+ used_lits = get_used_lits qs
+\end{code}\r
+\r
+@process_explicit_literals@ is a function that processes each literal that appears 
+in the column of the matrix. 
+\r
+\begin{code}\r
+-- | Check one sub-matrix per explicit literal, and combine the
+-- missing-pattern rows and used-equation sets of all of them.
+process_explicit_literals :: [HsLit] -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)
+process_explicit_literals lits qs = (concat pats, unionManyUniqSets indexs)
+ where 
+ pats_indexs = map (\x -> construct_literal_matrix x qs) lits
+ (pats,indexs) = unzip pats_indexs 
+\end{code}\r
+\r
+\r
+@process_literals@ calls @process_explicit_literals@ to deal with the literals 
+that appear in the matrix, and also deals with the rest of the cases. There 
+must be a variable pattern for the match to be complete.
+\r
+\begin{code}\r
+\r
+-- | Deal with each explicit literal, then with the default (variable)
+-- equations. If there are no variable equations the literal match cannot
+-- be exhaustive, so we also report a fresh variable row constrained to
+-- differ from all the used literals (see make_row_vars).
+process_literals :: [HsLit] -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)
+process_literals used_lits qs 
+ | null default_eqns = ASSERT( not (null qs) ) ([make_row_vars used_lits (head qs)] ++ pats,indexs)
+ | otherwise = (pats_default,indexs_default)
+ where
+ (pats,indexs) = process_explicit_literals used_lits qs
+ default_eqns = ASSERT2( okGroup qs, pprGroup qs ) 
+ [remove_var q | q <- qs, is_var (firstPatN q)]
+ (pats',indexs') = check' default_eqns 
+ pats_default = [(nlWildPat:ps,constraints) | (ps,constraints) <- (pats')] ++ pats 
+ indexs_default = unionUniqSets indexs' indexs
+\end{code}\r
+\r
+Here we have selected the literal, and we will select all the equations that 
+begin with that literal and create a new matrix.
+\r
+\begin{code}\r
+-- | Check the sub-matrix of equations whose first pattern is the given
+-- literal (or a variable), prefixing the literal onto every
+-- missing-pattern row that comes back.
+construct_literal_matrix :: HsLit -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)
+construct_literal_matrix lit qs =
+ (map (\ (xs,ys) -> (new_lit:xs,ys)) pats,indexs) 
+ where
+ (pats,indexs) = (check' (remove_first_column_lit lit qs)) 
+ new_lit = nlLitPat lit
+
+-- | Keep only the equations whose first pattern is the given literal or
+-- a variable, and drop that first column.
+remove_first_column_lit :: HsLit
+ -> [(EqnNo, EquationInfo)] 
+ -> [(EqnNo, EquationInfo)]
+remove_first_column_lit lit qs
+ = ASSERT2( okGroup qs, pprGroup qs ) 
+ [(n, shift_pat eqn) | q@(n,eqn) <- qs, is_var_lit lit (firstPatN q)]
+ where
+ shift_pat eqn@(EqnInfo { eqn_pats = _:ps}) = eqn { eqn_pats = ps }
+ shift_pat _ = panic "Check.shift_var: no patterns"
+\end{code}\r
+\r
+This function splits the equations @qs@ in groups that deal with the \r
+same constructor.\r
+\r
+\begin{code}\r
+-- | Split a group whose first column contains constructor patterns.
+-- If some constructors of the type are never mentioned we also need a
+-- default case for them.
+split_by_constructor :: [(EqnNo, EquationInfo)] -> ([ExhaustivePat], EqnSet)
+split_by_constructor qs 
+ | notNull unused_cons = need_default_case used_cons unused_cons qs 
+ | otherwise = no_need_default_case used_cons qs 
+ where 
+ used_cons = get_used_cons qs 
+ unused_cons = get_unused_cons used_cons 
+\end{code}\r
+\r
+The first column of the pattern matrix has only variables, so there is 
+nothing to do.
+\r
+\begin{code}\r
+-- | First column is all variables: drop it, check the rest, and put a
+-- wildcard back in front of every missing-pattern row.
+first_column_only_vars :: [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)
+first_column_only_vars qs = (map (\ (xs,ys) -> (nlWildPat:xs,ys)) pats,indexs)
+ where
+ (pats, indexs) = check' (map remove_var qs)
+\end{code}\r
+\r
+This function takes a matrix of patterns and splits the equations by 
+constructor, using all the constructors that appear in the first column 
+of the pattern matching.
+
+We may or may not need a default clause; it depends on whether all the 
+constructors were used explicitly. The reasoning is similar to @process_literals@;
+the difference is that here the default case is not always needed.
+\r
+\begin{code}\r
+-- | Every constructor is mentioned explicitly: one sub-matrix per
+-- constructor, results concatenated.
+no_need_default_case :: [Pat Id] -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)
+no_need_default_case cons qs = (concat pats, unionManyUniqSets indexs)
+ where 
+ pats_indexs = map (\x -> construct_matrix x qs) cons
+ (pats,indexs) = unzip pats_indexs 
+
+-- | Some constructors are missing. Besides the per-constructor
+-- sub-matrices we report rows for the unused constructors: built from
+-- the default (variable) equations if there are any, otherwise as
+-- whole-constructor rows padded with wildcards.
+need_default_case :: [Pat Id] -> [DataCon] -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)
+need_default_case used_cons unused_cons qs 
+ | null default_eqns = (pats_default_no_eqns,indexs)
+ | otherwise = (pats_default,indexs_default)
+ where
+ (pats,indexs) = no_need_default_case used_cons qs
+ default_eqns = ASSERT2( okGroup qs, pprGroup qs ) 
+ [remove_var q | q <- qs, is_var (firstPatN q)]
+ (pats',indexs') = check' default_eqns 
+ pats_default = [(make_whole_con c:ps,constraints) | 
+ c <- unused_cons, (ps,constraints) <- pats'] ++ pats
+ new_wilds = ASSERT( not (null qs) ) make_row_vars_for_constructor (head qs)
+ pats_default_no_eqns = [(make_whole_con c:new_wilds,[]) | c <- unused_cons] ++ pats
+ indexs_default = unionUniqSets indexs' indexs
+
+-- | Check the sub-matrix for one constructor, reconstructing the
+-- constructor in front of every missing-pattern row (see make_con).
+construct_matrix :: Pat Id -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)
+construct_matrix con qs =
+ (map (make_con con) pats,indexs) 
+ where
+ (pats,indexs) = (check' (remove_first_column con qs)) 
+\end{code}\r
+\r
+Here removing the first column is more difficult than with literals, due to the fact 
+that constructors can have arguments.
+\r
+For instance, the matrix\r
+\begin{verbatim}\r
+ (: x xs) y\r
+ z y\r
+\end{verbatim}\r
+is transformed into:
+\begin{verbatim}\r
+ x xs y\r
+ _ _ y\r
+\end{verbatim}\r
+\r
+\begin{code}\r
+-- | Keep only equations whose first pattern matches the given constructor
+-- (or is a wildcard), replacing that column by the constructor's argument
+-- patterns (wildcards of the right types for a wildcard row).
+remove_first_column :: Pat Id -- Constructor 
+ -> [(EqnNo, EquationInfo)] 
+ -> [(EqnNo, EquationInfo)]
+remove_first_column (ConPatOut{ pat_con = L _ con, pat_args = PrefixCon con_pats }) qs
+ = ASSERT2( okGroup qs, pprGroup qs ) 
+ [(n, shift_var eqn) | q@(n, eqn) <- qs, is_var_con con (firstPatN q)]
+ where
+ -- One wildcard per constructor argument, typed from the template pattern
+ new_wilds = [WildPat (hsLPatType arg_pat) | arg_pat <- con_pats]
+ shift_var eqn@(EqnInfo { eqn_pats = ConPatOut{ pat_args = PrefixCon ps' } : ps}) 
+ = eqn { eqn_pats = map unLoc ps' ++ ps }
+ shift_var eqn@(EqnInfo { eqn_pats = WildPat _ : ps })
+ = eqn { eqn_pats = new_wilds ++ ps }
+ shift_var _ = panic "Check.Shift_var:No done"
+
+-- | A missing-pattern row for an inexhaustive literal match: a fresh
+-- variable constrained to differ from all used literals, then wildcards.
+make_row_vars :: [HsLit] -> (EqnNo, EquationInfo) -> ExhaustivePat
+make_row_vars used_lits (_, EqnInfo { eqn_pats = pats})
+ = (nlVarPat new_var:takeList (tail pats) (repeat nlWildPat),[(new_var,used_lits)])
+ where 
+ new_var = hash_x
+
+-- | A throwaway name for the fresh variable in warning messages.
+hash_x :: Name
+hash_x = mkInternalName unboundKey {- doesn't matter much -}
+ (mkVarOccFS (fsLit "#x"))
+ noSrcSpan
+
+-- | Wildcards for every column but the first of the given equation.
+make_row_vars_for_constructor :: (EqnNo, EquationInfo) -> [WarningPat]
+make_row_vars_for_constructor (_, EqnInfo { eqn_pats = pats}) 
+ = takeList (tail pats) (repeat nlWildPat)
+\r
+-- | Equality of constructor patterns, comparing the data constructors only.
+-- Partial: both arguments must be ConPatOuts (callers guarantee this).
+compare_cons :: Pat Id -> Pat Id -> Bool
+compare_cons (ConPatOut{ pat_con = L _ id1 }) (ConPatOut { pat_con = L _ id2 }) = id1 == id2 
+\r
+-- | Remove duplicate constructor patterns (same data constructor),
+-- keeping the last occurrence of each.
+remove_dups :: [Pat Id] -> [Pat Id]
+remove_dups [] = []
+remove_dups (x:xs) | any (compare_cons x) xs = remove_dups xs
+ | otherwise = x : remove_dups xs
+\r
+-- | The constructor patterns used in the first column, one per distinct
+-- data constructor.
+get_used_cons :: [(EqnNo, EquationInfo)] -> [Pat Id]
+get_used_cons qs = remove_dups [pat | q <- qs, let pat = firstPatN q, 
+ isConPatOut pat]
+
+isConPatOut :: Pat Id -> Bool
+isConPatOut (ConPatOut {}) = True
+isConPatOut _ = False
+
+-- | Remove duplicate literals, keeping the last occurrence of each.
+remove_dups' :: [HsLit] -> [HsLit] 
+remove_dups' [] = []
+remove_dups' (x:xs) | x `elem` xs = remove_dups' xs
+ | otherwise = x : remove_dups' xs 
+
+
+-- | The distinct literals used in the first column.
+get_used_lits :: [(EqnNo, EquationInfo)] -> [HsLit]
+get_used_lits qs = remove_dups' all_literals
+ where
+ all_literals = get_used_lits' qs
+\r
+-- | Collect (with duplicates) the literals of the first column.
+-- De-duplication happens once, in get_used_lits; the original 'otherwise'
+-- branch recursed via get_used_lits, re-running remove_dups' on every
+-- tail (quadratic, and confusingly mutually recursive) for the same
+-- final result.
+get_used_lits' :: [(EqnNo, EquationInfo)] -> [HsLit]
+get_used_lits' [] = []
+get_used_lits' (q:qs) 
+ | Just lit <- get_lit (firstPatN q) = lit : get_used_lits' qs
+ | otherwise = get_used_lits' qs
+\r
+get_lit :: Pat id -> Maybe HsLit 
+-- Get a representative HsLit to stand for the OverLit
+-- It doesn't matter which one, because they will only be compared
+-- with other HsLits gotten in the same way
+get_lit (LitPat lit) = Just lit
+get_lit (NPat (OverLit { ol_val = HsIntegral i}) mb _) = Just (HsIntPrim (mb_neg mb i))
+get_lit (NPat (OverLit { ol_val = HsFractional f }) mb _) = Just (HsFloatPrim (mb_neg mb f))
+get_lit (NPat (OverLit { ol_val = HsIsString s }) _ _) = Just (HsStringPrim s)
+get_lit _ = Nothing
+
+-- | Negate the value when the negation marker (Just _) is present.
+mb_neg :: Num a => Maybe b -> a -> a
+mb_neg Nothing v = v
+mb_neg (Just _) v = -v
+\r
+-- | All data constructors of the scrutinee's type that are neither used
+-- nor statically unmatchable. Partial: relies on used_cons being
+-- non-empty (ASSERTed) and its head being a ConPatOut whose type is a
+-- saturated TyCon application.
+get_unused_cons :: [Pat Id] -> [DataCon]
+get_unused_cons used_cons = ASSERT( not (null used_cons) ) unused_cons
+ where
+ used_set :: UniqSet DataCon
+ used_set = mkUniqSet [d | ConPatOut{ pat_con = L _ d} <- used_cons]
+ (ConPatOut { pat_ty = ty }) = head used_cons
+ Just (ty_con, inst_tys) = splitTyConApp_maybe ty
+ unused_cons = filterOut is_used (tyConDataCons ty_con)
+ is_used con = con `elementOfUniqSet` used_set
+ || dataConCannotMatch inst_tys con
+
+-- | True when every pattern is a wildcard (variables were tidied to
+-- wildcards by tidy_pat).
+all_vars :: [Pat Id] -> Bool
+all_vars [] = True
+all_vars (WildPat _:ps) = all_vars ps
+all_vars _ = False
+
+-- | Drop the leading wildcard of an equation known to start with one.
+remove_var :: (EqnNo, EquationInfo) -> (EqnNo, EquationInfo)
+remove_var (n, eqn@(EqnInfo { eqn_pats = WildPat _ : ps})) = (n, eqn { eqn_pats = ps })
+remove_var _ = panic "Check.remove_var: equation does not begin with a variable"
+\r
+-----------------------
+-- Small helpers over the numbered equations used by check'
+
+-- | The patterns of a numbered equation.
+eqnPats :: (EqnNo, EquationInfo) -> [Pat Id]
+eqnPats (_, eqn) = eqn_pats eqn
+
+okGroup :: [(EqnNo, EquationInfo)] -> Bool
+-- True if all equations have at least one pattern, and
+-- all have the same number of patterns
+okGroup [] = True
+okGroup (e:es) = n_pats > 0 && and [length (eqnPats e) == n_pats | e <- es]
+ where
+ n_pats = length (eqnPats e)
+
+-- Half-baked print
+pprGroup :: [(EqnNo, EquationInfo)] -> SDoc
+pprEqnInfo :: (EqnNo, EquationInfo) -> SDoc
+pprGroup es = vcat (map pprEqnInfo es)
+pprEqnInfo e = ppr (eqnPats e)
+\r
+\r
+-- | The first pattern of a numbered equation.
+firstPatN :: (EqnNo, EquationInfo) -> Pat Id
+firstPatN (_, eqn) = firstPat eqn
+
+is_con :: Pat Id -> Bool
+is_con (ConPatOut {}) = True
+is_con _ = False
+
+is_lit :: Pat Id -> Bool
+is_lit (LitPat _) = True
+is_lit (NPat _ _ _) = True
+is_lit _ = False
+
+is_var :: Pat Id -> Bool
+is_var (WildPat _) = True
+is_var _ = False
+
+-- | Does the pattern match this constructor, or everything (wildcard)?
+is_var_con :: DataCon -> Pat Id -> Bool
+is_var_con _ (WildPat _) = True
+is_var_con con (ConPatOut{ pat_con = L _ id }) | id == con = True
+is_var_con _ _ = False
+
+-- | Does the pattern match this literal, or everything (wildcard)?
+is_var_lit :: HsLit -> Pat Id -> Bool
+is_var_lit _ (WildPat _) = True
+is_var_lit lit pat 
+ | Just lit' <- get_lit pat = lit == lit'
+ | otherwise = False
+\end{code}\r
+\r
+The difference between @make_con@ and @make_whole_con@ is that
+@make_whole_con@ creates a new constructor with all its arguments, while
+@make_con@ takes a list of arguments and creates the constructor, getting its
+arguments from the list. See where \fbox{\ ???\ } are used for details.
+\r
+We need to reconstruct the patterns (make the constructors infix and\r
+similar) at the same time that we create the constructors.\r
+\r
+You can tell tuple constructors using\r
+\begin{verbatim}\r
+ Id.isTupleCon\r
+\end{verbatim}\r
+You can see if one constructor is infix with this clearer code :-))))))))))\r
+\begin{verbatim}\r
+ Lex.isLexConSym (Name.occNameString (Name.getOccName con))\r
+\end{verbatim}\r
+\r
+ Rather clumsy but it works. (Simon Peyton Jones)\r
+\r
+\r
+We don't mind the @nilDataCon@ because it doesn't change the way to
+print the message; we are searching only for things like @[1,2,3]@,
+not @x:xs@ ....
+\r
+In @reconstruct_pat@ we want to ``undo'' the work\r
+that we have done in @tidy_pat@.\r
+In particular:\r
+\begin{tabular}{lll}\r
+ @((,) x y)@ & returns to be & @(x, y)@\r
+\\ @((:) x xs)@ & returns to be & @(x:xs)@\r
+\\ @(x:(...:[])@ & returns to be & @[x,...]@\r
+\end{tabular}\r
+%\r
+The difficult case is the third one, because we need to follow all the
+constructors until the @[]@ to know that we need to use the third case,
+not the second. \fbox{\ ???\ }
+%\r
+\begin{code}\r
+-- | Is the data constructor written infix (symbolic occurrence name)?
+isInfixCon :: DataCon -> Bool
+isInfixCon con = isDataSymOcc (getOccName con)
+
+is_nil :: Pat Name -> Bool
+is_nil (ConPatIn con (PrefixCon [])) = unLoc con == getName nilDataCon
+is_nil _ = False
+
+is_list :: Pat Name -> Bool
+is_list (ListPat _ _) = True
+is_list _ = False
+
+-- | True when a cons whose tail is nil or an explicit list should be
+-- rebuilt as explicit list syntax.
+return_list :: DataCon -> Pat Name -> Bool
+return_list id q = id == consDataCon && (is_nil q || is_list q) 
+
+-- | Prepend a head pattern onto a nil / explicit-list tail.
+make_list :: LPat Name -> Pat Name -> Pat Name
+make_list p q | is_nil q = ListPat [p] placeHolderType
+make_list p (ListPat ps ty) = ListPat (p:ps) ty
+make_list _ _ = panic "Check.make_list: Invalid argument"
+
+-- | Rebuild a constructor pattern in front of a missing-pattern row,
+-- taking the constructor's arguments from the front of the row and
+-- restoring list / infix / tuple / parallel-array syntax.
+-- Partial: the first equation needs at least two row elements, which
+-- holds for the cons/infix cases it guards.
+make_con :: Pat Id -> ExhaustivePat -> ExhaustivePat 
+make_con (ConPatOut{ pat_con = L _ id }) (lp:lq:ps, constraints) 
+ | return_list id q = (noLoc (make_list lp q) : ps, constraints)
+ | isInfixCon id = (nlInfixConPat (getName id) lp lq : ps, constraints) 
+ where q = unLoc lq 
+
+make_con (ConPatOut{ pat_con = L _ id, pat_args = PrefixCon pats, pat_ty = ty }) (ps, constraints) 
+ | isTupleTyCon tc = (noLoc (TuplePat pats_con (tupleTyConBoxity tc) ty) : rest_pats, constraints) 
+ | isPArrFakeCon id = (noLoc (PArrPat pats_con placeHolderType) : rest_pats, constraints) 
+ | otherwise = (nlConPat name pats_con : rest_pats, constraints)
+ where 
+ name = getName id
+ (pats_con, rest_pats) = splitAtList pats ps
+ tc = dataConTyCon id
+
+-- reconstruct parallel array pattern
+--
+-- * don't check for the type only; we need to make sure that we are really
+-- dealing with one of the fake constructors and not with the real
+-- representation 
+
+-- | A pattern for a whole (unused) constructor, with wildcard arguments.
+make_whole_con :: DataCon -> WarningPat
+make_whole_con con | isInfixCon con = nlInfixConPat name nlWildPat nlWildPat
+ | otherwise = nlConPat name pats
+ where 
+ name = getName con
+ pats = [nlWildPat | _ <- dataConOrigArgTys con]
+\end{code}\r
+\r
+------------------------------------------------------------------------\r
+ Tidying equations\r
+------------------------------------------------------------------------\r
+\r
+tidy_eqn does more or less the same thing as @tidy@ in @Match.lhs@;\r
+that is, it removes syntactic sugar, reducing the number of cases that\r
+must be handled by the main checking algorithm. One difference is\r
+that here we can do *all* the tidying at once (recursively), rather \r
+than doing it incrementally.\r
+\r
+\begin{code}\r
+-- | Tidy all the patterns of one equation, recording in the RHS whether
+-- any tidied-away pattern could fail to match.
+tidy_eqn :: EquationInfo -> EquationInfo
+tidy_eqn eqn = eqn { eqn_pats = map tidy_pat (eqn_pats eqn), 
+ eqn_rhs = tidy_rhs (eqn_rhs eqn) }
+ where
+ -- Horrible hack. The tidy_pat stuff converts "might-fail" patterns to 
+ -- WildPats which of course loses the info that they can fail to match. 
+ -- So we stick in a CanFail as if it were a guard.
+ tidy_rhs (MatchResult can_fail body)
+ | any might_fail_pat (eqn_pats eqn) = MatchResult CanFail body
+ | otherwise = MatchResult can_fail body
+
+--------------
+might_fail_pat :: Pat Id -> Bool
+-- Returns True of patterns that might fail (i.e. fall through) in a way 
+-- that is not covered by the checking algorithm. Specifically:
+-- NPlusKPat 
+-- ViewPat (if refutable)
+
+-- First the two special cases
+might_fail_pat (NPlusKPat {}) = True
+might_fail_pat (ViewPat _ p _) = not (isIrrefutableHsPat p)
+
+-- Now the recursive stuff
+might_fail_pat (ParPat p) = might_fail_lpat p
+might_fail_pat (AsPat _ p) = might_fail_lpat p
+might_fail_pat (SigPatOut p _ ) = might_fail_lpat p
+might_fail_pat (ListPat ps _) = any might_fail_lpat ps
+might_fail_pat (TuplePat ps _ _) = any might_fail_lpat ps
+might_fail_pat (PArrPat ps _) = any might_fail_lpat ps
+might_fail_pat (BangPat p) = might_fail_lpat p
+might_fail_pat (ConPatOut { pat_args = ps }) = any might_fail_lpat (hsConPatArgs ps)
+
+-- Finally the ones that are sure to succeed, or which are covered by the checking algorithm
+might_fail_pat (LazyPat _) = False -- Always succeeds
+might_fail_pat _ = False -- VarPat, WildPat, LitPat, NPat, TypePat
+
+--------------
+might_fail_lpat :: LPat Id -> Bool
+might_fail_lpat (L _ p) = might_fail_pat p
+
+--------------
+-- | Tidy a located pattern, keeping its location.
+tidy_lpat :: LPat Id -> LPat Id 
+tidy_lpat p = fmap tidy_pat p
+\r
+--------------\r
+--------------
+-- | Remove syntactic sugar from one pattern: strip parens, as-patterns,
+-- signatures, bangs and coercions; turn binders and lazy patterns into
+-- wildcards; desugar lists, tuples, parallel arrays, string literals and
+-- overloaded literals into plain constructor/literal form.
+tidy_pat :: Pat Id -> Pat Id
+tidy_pat pat@(WildPat _) = pat
+tidy_pat (VarPat id) = WildPat (idType id) 
+tidy_pat (ParPat p) = tidy_pat (unLoc p)
+tidy_pat (LazyPat p) = WildPat (hsLPatType p) -- For overlap and exhaustiveness checking
+ -- purposes, a ~pat is like a wildcard
+tidy_pat (BangPat p) = tidy_pat (unLoc p)
+tidy_pat (AsPat _ p) = tidy_pat (unLoc p)
+tidy_pat (SigPatOut p _) = tidy_pat (unLoc p)
+tidy_pat (CoPat _ pat _) = tidy_pat pat
+
+-- These two are might_fail patterns, so we map them to
+-- WildPats. The might_fail_pat stuff arranges that the
+-- guard says "this equation might fall through".
+tidy_pat (NPlusKPat id _ _ _) = WildPat (idType (unLoc id))
+tidy_pat (ViewPat _ _ ty) = WildPat ty
+
+tidy_pat pat@(ConPatOut { pat_con = L _ id, pat_args = ps })
+ = pat { pat_args = tidy_con id ps }
+
+-- Explicit lists become right-nested cons chains
+tidy_pat (ListPat ps ty) 
+ = unLoc $ foldr (\ x y -> mkPrefixConPat consDataCon [x,y] list_ty)
+ (mkNilPat list_ty)
+ (map tidy_lpat ps)
+ where list_ty = mkListTy ty
+
+-- introduce fake parallel array constructors to be able to handle parallel
+-- arrays with the existing machinery for constructor pattern
+--
+tidy_pat (PArrPat ps ty)
+ = unLoc $ mkPrefixConPat (parrFakeCon (length ps))
+ (map tidy_lpat ps) 
+ (mkPArrTy ty)
+
+tidy_pat (TuplePat ps boxity ty)
+ = unLoc $ mkPrefixConPat (tupleCon boxity arity)
+ (map tidy_lpat ps) ty
+ where
+ arity = length ps
+
+tidy_pat (NPat lit mb_neg eq) = tidyNPat lit mb_neg eq
+
+-- Unpack string patterns fully, so we can see when they overlap with
+-- each other, or even explicit lists of Chars.
+tidy_pat (LitPat lit)
+ | HsString s <- lit
+ = unLoc $ foldr (\c pat -> mkPrefixConPat consDataCon [mk_char_lit c, pat] stringTy)
+ (mkPrefixConPat nilDataCon [] stringTy) (unpackFS s)
+ | otherwise
+ = tidyLitPat lit 
+ where
+ mk_char_lit c = mkPrefixConPat charDataCon [nlLitPat (HsCharPrim c)] charTy
+\r
+-----------------\r
+-- | Normalise constructor argument details to prefix form: infix becomes
+-- prefix, and record patterns are expanded to positional arguments in
+-- field-label order, padding missing fields with wildcards.
+tidy_con :: DataCon -> HsConPatDetails Id -> HsConPatDetails Id
+tidy_con _ (PrefixCon ps) = PrefixCon (map tidy_lpat ps)
+tidy_con _ (InfixCon p1 p2) = PrefixCon [tidy_lpat p1, tidy_lpat p2]
+tidy_con con (RecCon (HsRecFields fs _)) 
+ | null fs = PrefixCon [nlWildPat | _ <- dataConOrigArgTys con]
+ -- Special case for null patterns; maybe not a record at all
+ | otherwise = PrefixCon (map (tidy_lpat.snd) all_pats)
+ where
+ -- pad out all the missing fields with WildPats.
+ field_pats = map (\ f -> (f, nlWildPat)) (dataConFieldLabels con)
+ all_pats = foldr (\(HsRecField id p _) acc -> insertNm (getName (unLoc id)) p acc)
+ field_pats fs
+ 
+ -- Overwrite the wildcard for a field that was given explicitly
+ insertNm nm p [] = [(nm,p)]
+ insertNm nm p (x@(n,_):xs)
+ | nm == n = (nm,p):xs
+ | otherwise = x : insertNm nm p xs
+\end{code}\r