# ensure we don't clash with any pre-supplied autoconf ones.
+# FPTOOLS_SET_PLATFORM_VARS
+# ----------------------------------
+# Set the platform variables
+AC_DEFUN([FPTOOLS_SET_PLATFORM_VARS],
+[
+ # If no argument was given for a configuration variable, then discard
+ # the guessed canonical system and use the configuration of the
+ # bootstrapping ghc. If an argument was given, map it from gnu format
+ # to ghc format.
+ #
+ # For why we do it this way, see: #3637, #1717, #2951
+ #
+ # In bindists, we haven't called AC_CANONICAL_{BUILD,HOST,TARGET}
+ # so this just uses $bootstrap_target.
+
+ if test "$build_alias" = ""
+ then
+ if test "$bootstrap_target" != ""
+ then
+ build=$bootstrap_target
+ echo "Build platform inferred as: $build"
+ else
+ echo "Can't work out build platform"
+ exit 1
+ fi
+
+ BuildArch=`echo "$build" | sed 's/-.*//'`
+ BuildVendor=`echo "$build" | sed -e 's/.*-\(.*\)-.*/\1/'`
+ BuildOS=`echo "$build" | sed 's/.*-//'`
+ else
+ GHC_CONVERT_CPU([$build_cpu], [BuildArch])
+ GHC_CONVERT_VENDOR([$build_vendor], [BuildVendor])
+ GHC_CONVERT_OS([$build_os], [BuildOS])
+ fi
+
+ if test "$host_alias" = ""
+ then
+ if test "$bootstrap_target" != ""
+ then
+ host=$bootstrap_target
+ echo "Host platform inferred as: $host"
+ else
+ echo "Can't work out host platform"
+ exit 1
+ fi
+
+ HostArch=`echo "$host" | sed 's/-.*//'`
+ HostVendor=`echo "$host" | sed -e 's/.*-\(.*\)-.*/\1/'`
+ HostOS=`echo "$host" | sed 's/.*-//'`
+ else
+ GHC_CONVERT_CPU([$host_cpu], [HostArch])
+ GHC_CONVERT_VENDOR([$host_vendor], [HostVendor])
+ GHC_CONVERT_OS([$host_os], [HostOS])
+ fi
+
+ if test "$target_alias" = ""
+ then
+ if test "$bootstrap_target" != ""
+ then
+ target=$bootstrap_target
+ echo "Target platform inferred as: $target"
+ else
+ echo "Can't work out target platform"
+ exit 1
+ fi
+
+ TargetArch=`echo "$target" | sed 's/-.*//'`
+ TargetVendor=`echo "$target" | sed -e 's/.*-\(.*\)-.*/\1/'`
+ TargetOS=`echo "$target" | sed 's/.*-//'`
+ else
+ GHC_CONVERT_CPU([$target_cpu], [TargetArch])
+ GHC_CONVERT_VENDOR([$target_vendor], [TargetVendor])
+ GHC_CONVERT_OS([$target_os], [TargetOS])
+ fi
+])
+
+
# FPTOOLS_SET_C_LD_FLAGS
# ----------------------------------
# Set the C, LD and CPP flags for a given platform
(_, NativeDirectCall) -> getRegsWithoutNode
([_], NativeReturn) -> allRegs
(_, NativeReturn) -> getRegsWithNode
- (_, GC) -> getRegsWithNode
+ -- GC calling convention *must* put values in registers
+ (_, GC) -> allRegs
(_, PrimOpCall) -> allRegs
([_], PrimOpReturn) -> allRegs
(_, PrimOpReturn) -> getRegsWithNode
(reg_assts, stk_args) = assign_regs [] reps regs
stk_args' = case conv of NativeReturn -> part
PrimOpReturn -> part
+ GC | length stk_args /= 0 -> panic "Failed to allocate registers for GC call"
_ -> stk_args
where part = uncurry (++)
(L.partition (not . isGcPtrType . arg_ty) stk_args)
-- PicBaseReg from the corresponding label (or label difference).
--
cmmMachOpFold mop1 [CmmMachOp mop2 [arg1,arg2], arg3]
- | mop1 == mop2 && isAssociativeMachOp mop1
+ | mop2 `associates_with` mop1
&& not (isLit arg1) && not (isPicReg arg1)
- = cmmMachOpFold mop1 [arg1, cmmMachOpFold mop2 [arg2,arg3]]
+ = cmmMachOpFold mop2 [arg1, cmmMachOpFold mop1 [arg2,arg3]]
+ where
+ MO_Add{} `associates_with` MO_Sub{} = True
+ mop1 `associates_with` mop2 =
+ mop1 == mop2 && isAssociativeMachOp mop1
+
+-- special case: (a - b) + c ==> a + (c - b)
+cmmMachOpFold mop1@(MO_Add{}) [CmmMachOp mop2@(MO_Sub{}) [arg1,arg2], arg3]
+ | not (isLit arg1) && not (isPicReg arg1)
+ = cmmMachOpFold mop1 [arg1, cmmMachOpFold mop2 [arg3,arg2]]
-- Make a RegOff if we can
cmmMachOpFold (MO_Add _) [CmmReg reg, CmmLit (CmmInt n rep)]
= case mop of
MO_Mul rep
| Just p <- exactLog2 n ->
- CmmMachOp (MO_Shl rep) [x, CmmLit (CmmInt p rep)]
+ cmmMachOpFold (MO_Shl rep) [x, CmmLit (CmmInt p rep)]
MO_U_Quot rep
| Just p <- exactLog2 n ->
- CmmMachOp (MO_U_Shr rep) [x, CmmLit (CmmInt p rep)]
+ cmmMachOpFold (MO_U_Shr rep) [x, CmmLit (CmmInt p rep)]
MO_S_Quot rep
| Just p <- exactLog2 n,
CmmReg _ <- x -> -- We duplicate x below, hence require
CmmMachOp (MO_And rep) [x1, CmmLit (CmmInt (n-1) rep)]
x3 = CmmMachOp (MO_Add rep) [x, x2]
in
- CmmMachOp (MO_S_Shr rep) [x3, CmmLit (CmmInt p rep)]
+ cmmMachOpFold (MO_S_Shr rep) [x3, CmmLit (CmmInt p rep)]
other
-> unchanged
where
emitPrimOp [r] IndexArrayOp [obj,ix] _ = doReadPtrArrayOp r obj ix
emitPrimOp [] WriteArrayOp [obj,ix,v] _ = doWritePtrArrayOp obj ix v
+emitPrimOp [res] SizeofArrayOp [arg] _
+ = stmtC $ CmmAssign (CmmLocal res) (cmmLoadIndexW arg (fixedHdrSize + oFFSET_StgMutArrPtrs_ptrs) bWord)
+emitPrimOp [res] SizeofMutableArrayOp [arg] live
+ = emitPrimOp [res] SizeofArrayOp [arg] live
+
-- IndexXXXoffAddr
emitPrimOp res IndexOffAddrOp_Char args _ = doIndexOffAddrOp (Just mo_u_8ToWord) b8 res args
emitPrimOp [r] IndexArrayOp [obj,ix] = doReadPtrArrayOp r obj ix
emitPrimOp [] WriteArrayOp [obj,ix,v] = doWritePtrArrayOp obj ix v
+emitPrimOp [res] SizeofArrayOp [arg]
+ = emit $ mkAssign (CmmLocal res) (cmmLoadIndexW arg (fixedHdrSize + oFFSET_StgMutArrPtrs_ptrs) bWord)
+emitPrimOp [res] SizeofMutableArrayOp [arg]
+ = emitPrimOp [res] SizeofArrayOp [arg]
+
-- IndexXXXoffAddr
emitPrimOp res IndexOffAddrOp_Char args = doIndexOffAddrOp (Just mo_u_8ToWord) b8 res args
-- ** Operations on 'CoreRule's
seqRules, ruleArity, ruleName, ruleIdName, ruleActivation,
setRuleIdName,
- isBuiltinRule, isLocalRule
+ isBuiltinRule, isLocalRule,
+
+ -- * Core vectorisation declarations data type
+ CoreVect(..)
) where
#include "HsVersions.h"
%************************************************************************
+%* *
+\subsection{Vectorisation declarations}
+%* *
+%************************************************************************
+
+Representation of desugared vectorisation declarations that are fed to the vectoriser (via
+'ModGuts').
+
+\begin{code}
+data CoreVect = Vect Id (Maybe CoreExpr)
+\end{code}
+
+
+%************************************************************************
%* *
Unfoldings
%* *
import Unique
import Outputable
import TysPrim
-import PrelNames( absentErrorIdKey )
import FastString
import Maybes
import Util
\begin{code}
exprIsDupable :: CoreExpr -> Bool
-exprIsDupable (Type _) = True
-exprIsDupable (Var _) = True
-exprIsDupable (Lit lit) = litIsDupable lit
-exprIsDupable (Note _ e) = exprIsDupable e
-exprIsDupable (Cast e _) = exprIsDupable e
-exprIsDupable expr
- = go expr 0
+exprIsDupable e
+ = isJust (go dupAppSize e)
where
- go (Var _) _ = True
- go (App f a) n_args = n_args < dupAppSize
- && exprIsDupable a
- && go f (n_args+1)
- go _ _ = False
+ go :: Int -> CoreExpr -> Maybe Int
+ go n (Type {}) = Just n
+ go n (Var {}) = decrement n
+ go n (Note _ e) = go n e
+ go n (Cast e _) = go n e
+ go n (App f a) | Just n' <- go n a = go n' f
+ go n (Lit lit) | litIsDupable lit = decrement n
+ go _ _ = Nothing
+
+ decrement :: Int -> Maybe Int
+ decrement 0 = Nothing
+ decrement n = Just (n-1)
dupAppSize :: Int
-dupAppSize = 4 -- Size of application we are prepared to duplicate
+dupAppSize = 8 -- Size of term we are prepared to duplicate
+ -- This is *just* big enough to make test MethSharing
+ -- inline enough join points. Really it should be
+ -- smaller, and could be if we fixed Trac #4960.
\end{code}
%************************************************************************
= go other_expr []
where
-- Accumulate value arguments, then decide
+ go (Cast e _) val_args = go e val_args
go (App f a) val_args | isRuntimeArg a = go f (a:val_args)
| otherwise = go f val_args
exprOkForSpeculation other_expr
= case collectArgs other_expr of
- (Var f, args) | f `hasKey` absentErrorIdKey -- Note [Absent error Id]
- -> all exprOkForSpeculation args -- in WwLib
- | otherwise
- -> spec_ok (idDetails f) args
+ (Var f, args) -> spec_ok (idDetails f) args
_ -> False
where
rEC_SEL_ERROR_ID, rUNTIME_ERROR_ID, iRREFUT_PAT_ERROR_ID, rEC_CON_ERROR_ID :: Id
pAT_ERROR_ID, nO_METHOD_BINDING_ERROR_ID, nON_EXHAUSTIVE_GUARDS_ERROR_ID :: Id
+aBSENT_ERROR_ID :: Id
rEC_SEL_ERROR_ID = mkRuntimeErrorId recSelErrorName
rUNTIME_ERROR_ID = mkRuntimeErrorId runtimeErrorName
iRREFUT_PAT_ERROR_ID = mkRuntimeErrorId irrefutPatErrorName
pAT_ERROR_ID = mkRuntimeErrorId patErrorName
nO_METHOD_BINDING_ERROR_ID = mkRuntimeErrorId noMethodBindingErrorName
nON_EXHAUSTIVE_GUARDS_ERROR_ID = mkRuntimeErrorId nonExhaustiveGuardsErrorName
-
-aBSENT_ERROR_ID :: Id
--- Not bottoming; no unfolding! See Note [Absent error Id] in WwLib
-aBSENT_ERROR_ID = mkVanillaGlobal absentErrorName runtimeErrorTy
+aBSENT_ERROR_ID = mkRuntimeErrorId absentErrorName
mkRuntimeErrorId :: Name -> Id
mkRuntimeErrorId name = pc_bottoming_Id name runtimeErrorTy
tcg_anns = anns,
tcg_binds = binds,
tcg_imp_specs = imp_specs,
- tcg_ev_binds = ev_binds,
- tcg_fords = fords,
- tcg_rules = rules,
- tcg_insts = insts,
- tcg_fam_insts = fam_insts,
- tcg_hpc = other_hpc_info })
+ tcg_ev_binds = ev_binds,
+ tcg_fords = fords,
+ tcg_rules = rules,
+ tcg_vects = vects,
+ tcg_insts = insts,
+ tcg_fam_insts = fam_insts,
+ tcg_hpc = other_hpc_info })
= do { let dflags = hsc_dflags hsc_env
; showPass dflags "Desugar"
<- case target of
HscNothing ->
return (emptyMessages,
- Just ([], nilOL, [], NoStubs, hpcInfo, emptyModBreaks))
+ Just ([], nilOL, [], [], NoStubs, hpcInfo, emptyModBreaks))
_ -> do
(binds_cvr,ds_hpc_info, modBreaks)
<- if (opt_Hpc
(typeEnvTyCons type_env) binds
else return (binds, hpcInfo, emptyModBreaks)
initDs hsc_env mod rdr_env type_env $ do
- do { ds_ev_binds <- dsEvBinds ev_binds
- ; core_prs <- dsTopLHsBinds auto_scc binds_cvr
+ do { ds_ev_binds <- dsEvBinds ev_binds
+ ; core_prs <- dsTopLHsBinds auto_scc binds_cvr
; (spec_prs, spec_rules) <- dsImpSpecs imp_specs
- ; (ds_fords, foreign_prs) <- dsForeigns fords
- ; rules <- mapMaybeM dsRule rules
- ; return ( ds_ev_binds
+ ; (ds_fords, foreign_prs) <- dsForeigns fords
+ ; ds_rules <- mapMaybeM dsRule rules
+ ; ds_vects <- mapM dsVect vects
+ ; return ( ds_ev_binds
, foreign_prs `appOL` core_prs `appOL` spec_prs
- , spec_rules ++ rules
+ , spec_rules ++ ds_rules, ds_vects
, ds_fords, ds_hpc_info, modBreaks) }
- ; case mb_res of {
- Nothing -> return (msgs, Nothing) ;
- Just (ds_ev_binds, all_prs, all_rules, ds_fords,ds_hpc_info, modBreaks) -> do
+ ; case mb_res of {
+ Nothing -> return (msgs, Nothing) ;
+ Just (ds_ev_binds, all_prs, all_rules, ds_vects, ds_fords,ds_hpc_info, modBreaks) -> do
{ -- Add export flags to bindings
keep_alive <- readIORef keep_var
mg_foreign = ds_fords,
mg_hpc_info = ds_hpc_info,
mg_modBreaks = modBreaks,
+ mg_vect_decls = ds_vects,
mg_vect_info = noVectInfo
}
; return (msgs, Just mod_guts)
Nor do we want to warn of conversion identities on the LHS;
the rule is precisly to optimise them:
{-# RULES "fromRational/id" fromRational = id :: Rational -> Rational #-}
+
+
+%************************************************************************
+%* *
+%* Desugaring vectorisation declarations
+%* *
+%************************************************************************
+
+\begin{code}
+dsVect :: LVectDecl Id -> DsM CoreVect
+dsVect (L loc (HsVect v rhs))
+ = putSrcSpanDs loc $
+ do { rhs' <- fmapMaybeM dsLExpr rhs
+ ; return $ Vect (unLoc v) rhs'
+ }
+-- dsVect (L loc (HsVect v Nothing))
+-- = return $ Vect v Nothing
+-- dsVect (L loc (HsVect v (Just rhs)))
+-- = putSrcSpanDs loc $
+-- do { rhs' <- dsLExpr rhs
+-- ; return $ Vect v (Just rhs')
+-- }
+\end{code}
-- singletonP x1 +:+ ... +:+ singletonP xn
--
dsExpr (ExplicitPArr ty []) = do
- emptyP <- dsLookupGlobalId emptyPName
+ emptyP <- dsLookupDPHId emptyPName
return (Var emptyP `App` Type ty)
dsExpr (ExplicitPArr ty xs) = do
- singletonP <- dsLookupGlobalId singletonPName
- appP <- dsLookupGlobalId appPName
+ singletonP <- dsLookupDPHId singletonPName
+ appP <- dsLookupDPHId appPName
xs' <- mapM dsLExpr xs
return . foldr1 (binary appP) $ map (unary singletonP) xs'
where
-- <<[:e' | qs:]>> p (filterP (\x -> case x of {p -> True; _ -> False}) e)
--
dsPArrComp (BindStmt p e _ _ : qs) body _ = do
- filterP <- dsLookupGlobalId filterPName
+ filterP <- dsLookupDPHId filterPName
ce <- dsLExpr e
let ety'ce = parrElemType ce
false = Var falseDataConId
dePArrComp qs body p gen
dsPArrComp qs body _ = do -- no ParStmt in `qs'
- sglP <- dsLookupGlobalId singletonPName
+ sglP <- dsLookupDPHId singletonPName
let unitArray = mkApps (Var sglP) [Type unitTy, mkCoreTup []]
dePArrComp qs body (noLoc $ WildPat unitTy) unitArray
-- <<[:e' | :]>> pa ea = mapP (\pa -> e') ea
--
dePArrComp [] e' pa cea = do
- mapP <- dsLookupGlobalId mapPName
+ mapP <- dsLookupDPHId mapPName
let ty = parrElemType cea
(clam, ty'e') <- deLambda ty pa e'
return $ mkApps (Var mapP) [Type ty, Type ty'e', clam, cea]
-- <<[:e' | b, qs:]>> pa ea = <<[:e' | qs:]>> pa (filterP (\pa -> b) ea)
--
dePArrComp (ExprStmt b _ _ : qs) body pa cea = do
- filterP <- dsLookupGlobalId filterPName
+ filterP <- dsLookupDPHId filterPName
let ty = parrElemType cea
(clam,_) <- deLambda ty pa b
dePArrComp qs body pa (mkApps (Var filterP) [Type ty, clam, cea])
-- <<[:e' | qs:]>> (pa, p) (crossMapP ea ef)
--
dePArrComp (BindStmt p e _ _ : qs) body pa cea = do
- filterP <- dsLookupGlobalId filterPName
- crossMapP <- dsLookupGlobalId crossMapPName
+ filterP <- dsLookupDPHId filterPName
+ crossMapP <- dsLookupDPHId crossMapPName
ce <- dsLExpr e
let ety'cea = parrElemType cea
ety'ce = parrElemType ce
-- {x_1, ..., x_n} = DV (ds) -- Defined Variables
--
dePArrComp (LetStmt ds : qs) body pa cea = do
- mapP <- dsLookupGlobalId mapPName
+ mapP <- dsLookupDPHId mapPName
let xs = collectLocalBinders ds
ty'cea = parrElemType cea
v <- newSysLocalDs ty'cea
---
parStmts [] pa cea = return (pa, cea)
parStmts ((qs, xs):qss) pa cea = do -- subsequent statements (zip'ed)
- zipP <- dsLookupGlobalId zipPName
+ zipP <- dsLookupDPHId zipPName
let pa' = mkLHsPatTup [pa, mkLHsVarPatTup xs]
ty'cea = parrElemType cea
res_expr = mkLHsVarTuple xs
t1 <- repLTy t
tcon <- repTy (HsTyVar (tyConName parrTyCon))
repTapp tcon t1
-repTy (HsTupleTy _ tys) = do
+repTy (HsTupleTy Boxed tys) = do
tys1 <- repLTys tys
tcon <- repTupleTyCon (length tys)
repTapps tcon tys1
+repTy (HsTupleTy Unboxed tys) = do
+ tys1 <- repLTys tys
+ tcon <- repUnboxedTupleTyCon (length tys)
+ repTapps tcon tys1
repTy (HsOpTy ty1 n ty2) = repLTy ((nlHsTyVar (unLoc n) `nlHsAppTy` ty1)
`nlHsAppTy` ty2)
repTy (HsParTy t) = repLTy t
repE (ExplicitList _ es) = do { xs <- repLEs es; repListExp xs }
repE e@(ExplicitPArr _ _) = notHandled "Parallel arrays" (ppr e)
repE e@(ExplicitTuple es boxed)
- | not (isBoxed boxed) = notHandled "Unboxed tuples" (ppr e)
| not (all tupArgPresent es) = notHandled "Tuple sections" (ppr e)
- | otherwise = do { xs <- repLEs [e | Present e <- es]; repTup xs }
+ | isBoxed boxed = do { xs <- repLEs [e | Present e <- es]; repTup xs }
+ | otherwise = do { xs <- repLEs [e | Present e <- es]; repUnboxedTup xs }
repE (RecordCon c _ flds)
= do { x <- lookupLOcc c;
repP (AsPat x p) = do { x' <- lookupLBinder x; p1 <- repLP p; repPaspat x' p1 }
repP (ParPat p) = repLP p
repP (ListPat ps _) = do { qs <- repLPs ps; repPlist qs }
-repP p@(TuplePat ps boxed _)
- | not (isBoxed boxed) = notHandled "Unboxed tuples" (ppr p)
- | otherwise = do { qs <- repLPs ps; repPtup qs }
+repP (TuplePat ps boxed _)
+ | isBoxed boxed = do { qs <- repLPs ps; repPtup qs }
+ | otherwise = do { qs <- repLPs ps; repPunboxedTup qs }
repP (ConPatIn dc details)
= do { con_str <- lookupLOcc dc
; case details of
repPtup :: Core [TH.PatQ] -> DsM (Core TH.PatQ)
repPtup (MkC ps) = rep2 tupPName [ps]
+repPunboxedTup :: Core [TH.PatQ] -> DsM (Core TH.PatQ)
+repPunboxedTup (MkC ps) = rep2 unboxedTupPName [ps]
+
repPcon :: Core TH.Name -> Core [TH.PatQ] -> DsM (Core TH.PatQ)
repPcon (MkC s) (MkC ps) = rep2 conPName [s, ps]
repTup :: Core [TH.ExpQ] -> DsM (Core TH.ExpQ)
repTup (MkC es) = rep2 tupEName [es]
+repUnboxedTup :: Core [TH.ExpQ] -> DsM (Core TH.ExpQ)
+repUnboxedTup (MkC es) = rep2 unboxedTupEName [es]
+
repCond :: Core TH.ExpQ -> Core TH.ExpQ -> Core TH.ExpQ -> DsM (Core TH.ExpQ)
repCond (MkC x) (MkC y) (MkC z) = rep2 condEName [x,y,z]
-- Note: not Core Int; it's easier to be direct here
repTupleTyCon i = rep2 tupleTName [mkIntExprInt i]
+repUnboxedTupleTyCon :: Int -> DsM (Core TH.TypeQ)
+-- Note: not Core Int; it's easier to be direct here
+repUnboxedTupleTyCon i = rep2 unboxedTupleTName [mkIntExprInt i]
+
repArrowTyCon :: DsM (Core TH.TypeQ)
repArrowTyCon = rep2 arrowTName []
charLName, stringLName, integerLName, intPrimLName, wordPrimLName,
floatPrimLName, doublePrimLName, rationalLName,
-- Pat
- litPName, varPName, tupPName, conPName, tildePName, bangPName, infixPName,
+ litPName, varPName, tupPName, unboxedTupPName,
+ conPName, tildePName, bangPName, infixPName,
asPName, wildPName, recPName, listPName, sigPName, viewPName,
-- FieldPat
fieldPatName,
clauseName,
-- Exp
varEName, conEName, litEName, appEName, infixEName,
- infixAppName, sectionLName, sectionRName, lamEName, tupEName,
+ infixAppName, sectionLName, sectionRName, lamEName,
+ tupEName, unboxedTupEName,
condEName, letEName, caseEName, doEName, compEName,
fromEName, fromThenEName, fromToEName, fromThenToEName,
listEName, sigEName, recConEName, recUpdEName,
rationalLName = libFun (fsLit "rationalL") rationalLIdKey
-- data Pat = ...
-litPName, varPName, tupPName, conPName, infixPName, tildePName, bangPName,
+litPName, varPName, tupPName, unboxedTupPName, conPName, infixPName, tildePName, bangPName,
asPName, wildPName, recPName, listPName, sigPName, viewPName :: Name
litPName = libFun (fsLit "litP") litPIdKey
varPName = libFun (fsLit "varP") varPIdKey
tupPName = libFun (fsLit "tupP") tupPIdKey
+unboxedTupPName = libFun (fsLit "unboxedTupP") unboxedTupPIdKey
conPName = libFun (fsLit "conP") conPIdKey
infixPName = libFun (fsLit "infixP") infixPIdKey
tildePName = libFun (fsLit "tildeP") tildePIdKey
-- data Exp = ...
varEName, conEName, litEName, appEName, infixEName, infixAppName,
- sectionLName, sectionRName, lamEName, tupEName, condEName,
+ sectionLName, sectionRName, lamEName, tupEName, unboxedTupEName, condEName,
letEName, caseEName, doEName, compEName :: Name
varEName = libFun (fsLit "varE") varEIdKey
conEName = libFun (fsLit "conE") conEIdKey
sectionRName = libFun (fsLit "sectionR") sectionRIdKey
lamEName = libFun (fsLit "lamE") lamEIdKey
tupEName = libFun (fsLit "tupE") tupEIdKey
+unboxedTupEName = libFun (fsLit "unboxedTupE") unboxedTupEIdKey
condEName = libFun (fsLit "condE") condEIdKey
letEName = libFun (fsLit "letE") letEIdKey
caseEName = libFun (fsLit "caseE") caseEIdKey
varStrictTypeName = libFun (fsLit "varStrictType") varStrictTKey
-- data Type = ...
-forallTName, varTName, conTName, tupleTName, arrowTName,
+forallTName, varTName, conTName, tupleTName, unboxedTupleTName, arrowTName,
listTName, appTName, sigTName :: Name
forallTName = libFun (fsLit "forallT") forallTIdKey
varTName = libFun (fsLit "varT") varTIdKey
conTName = libFun (fsLit "conT") conTIdKey
tupleTName = libFun (fsLit "tupleT") tupleTIdKey
+unboxedTupleTName = libFun (fsLit "unboxedTupleT") unboxedTupleTIdKey
arrowTName = libFun (fsLit "arrowT") arrowTIdKey
listTName = libFun (fsLit "listT") listTIdKey
appTName = libFun (fsLit "appT") appTIdKey
liftStringIdKey = mkPreludeMiscIdUnique 218
-- data Pat = ...
-litPIdKey, varPIdKey, tupPIdKey, conPIdKey, infixPIdKey, tildePIdKey, bangPIdKey,
+litPIdKey, varPIdKey, tupPIdKey, unboxedTupPIdKey, conPIdKey, infixPIdKey, tildePIdKey, bangPIdKey,
asPIdKey, wildPIdKey, recPIdKey, listPIdKey, sigPIdKey, viewPIdKey :: Unique
litPIdKey = mkPreludeMiscIdUnique 220
varPIdKey = mkPreludeMiscIdUnique 221
tupPIdKey = mkPreludeMiscIdUnique 222
+unboxedTupPIdKey = mkPreludeMiscIdUnique 362
conPIdKey = mkPreludeMiscIdUnique 223
infixPIdKey = mkPreludeMiscIdUnique 312
tildePIdKey = mkPreludeMiscIdUnique 224
-- data Exp = ...
varEIdKey, conEIdKey, litEIdKey, appEIdKey, infixEIdKey, infixAppIdKey,
- sectionLIdKey, sectionRIdKey, lamEIdKey, tupEIdKey, condEIdKey,
+ sectionLIdKey, sectionRIdKey, lamEIdKey, tupEIdKey, unboxedTupEIdKey,
+ condEIdKey,
letEIdKey, caseEIdKey, doEIdKey, compEIdKey,
fromEIdKey, fromThenEIdKey, fromToEIdKey, fromThenToEIdKey,
listEIdKey, sigEIdKey, recConEIdKey, recUpdEIdKey :: Unique
sectionRIdKey = mkPreludeMiscIdUnique 247
lamEIdKey = mkPreludeMiscIdUnique 248
tupEIdKey = mkPreludeMiscIdUnique 249
+unboxedTupEIdKey = mkPreludeMiscIdUnique 263
condEIdKey = mkPreludeMiscIdUnique 250
letEIdKey = mkPreludeMiscIdUnique 251
caseEIdKey = mkPreludeMiscIdUnique 252
varStrictTKey = mkPreludeMiscIdUnique 287
-- data Type = ...
-forallTIdKey, varTIdKey, conTIdKey, tupleTIdKey, arrowTIdKey,
+forallTIdKey, varTIdKey, conTIdKey, tupleTIdKey, unboxedTupleTIdKey, arrowTIdKey,
listTIdKey, appTIdKey, sigTIdKey :: Unique
forallTIdKey = mkPreludeMiscIdUnique 290
varTIdKey = mkPreludeMiscIdUnique 291
conTIdKey = mkPreludeMiscIdUnique 292
tupleTIdKey = mkPreludeMiscIdUnique 294
+unboxedTupleTIdKey = mkPreludeMiscIdUnique 361
arrowTIdKey = mkPreludeMiscIdUnique 295
listTIdKey = mkPreludeMiscIdUnique 296
appTIdKey = mkPreludeMiscIdUnique 293
foldlM, foldrM, ifDOptM, unsetOptM,
Applicative(..),(<$>),
- newLocalName,
- duplicateLocalDs, newSysLocalDs, newSysLocalsDs, newUniqueId,
- newFailLocalDs, newPredVarDs,
- getSrcSpanDs, putSrcSpanDs,
- getModuleDs,
- newUnique,
- UniqSupply, newUniqueSupply,
- getDOptsDs, getGhcModeDs, doptDs,
- dsLookupGlobal, dsLookupGlobalId, dsLookupTyCon, dsLookupDataCon,
+ newLocalName,
+ duplicateLocalDs, newSysLocalDs, newSysLocalsDs, newUniqueId,
+ newFailLocalDs, newPredVarDs,
+ getSrcSpanDs, putSrcSpanDs,
+ getModuleDs,
+ mkPrintUnqualifiedDs,
+ newUnique,
+ UniqSupply, newUniqueSupply,
+ getDOptsDs, getGhcModeDs, doptDs,
+ dsLookupGlobal, dsLookupGlobalId, dsLookupDPHId, dsLookupTyCon, dsLookupDataCon,
dsLookupClass,
DsMetaEnv, DsMetaVal(..), dsLookupMetaEnv, dsExtendMetaEnv,
; let msg = mkErrMsg loc (ds_unqual env) err
; updMutVar (ds_msgs env) (\ (w,e) -> (w, e `snocBag` msg))
; failM }
+
+mkPrintUnqualifiedDs :: DsM PrintUnqualified
+mkPrintUnqualifiedDs = ds_unqual <$> getGblEnv
\end{code}
\begin{code}
dsLookupGlobalId name
= tyThingId <$> dsLookupGlobal name
+-- Looking up a global DPH 'Id' is like 'dsLookupGlobalId', but the package in which the
+-- looked-up name is located varies with the active DPH backend.
+--
+dsLookupDPHId :: (PackageId -> Name) -> DsM Id
+dsLookupDPHId nameInPkg
+ = do { dflags <- getDOpts
+ ; case dphPackageMaybe dflags of
+ Just pkg -> tyThingId <$> dsLookupGlobal (nameInPkg pkg)
+ Nothing -> failWithDs $ ptext err
+ }
+ where
+ err = sLit "To use -XParallelArrays select a DPH backend with -fdph-par or -fdph-seq"
+
dsLookupTyCon :: Name -> DsM TyCon
dsLookupTyCon name
= tyThingTyCon <$> dsLookupGlobal name
isPArrFakeAlts [] = panic "DsUtils: unexpectedly found an empty list of PArr fake alternatives"
--
mk_parrCase fail = do
- lengthP <- dsLookupGlobalId lengthPName
+ lengthP <- dsLookupDPHId lengthPName
alt <- unboxAlt
return (mkWildCase (len lengthP) intTy ty [alt])
where
--
unboxAlt = do
l <- newSysLocalDs intPrimTy
- indexP <- dsLookupGlobalId indexPName
+ indexP <- dsLookupDPHId indexPName
alts <- mapM (mkAlt indexP) sorted_alts
return (DataAlt intDataCon, [l], mkWildCase (Var l) intPrimTy ty (dft : alts))
where
@echo 'cLdLinkerOpts = words "$(CONF_LD_LINKER_OPTS_STAGE$*)"' >> $@
@echo 'cIntegerLibrary :: String' >> $@
@echo 'cIntegerLibrary = "$(INTEGER_LIBRARY)"' >> $@
- @echo 'cSplitObjs :: String' >> $@
- @echo 'cSplitObjs = "$(SupportsSplitObjs)"' >> $@
+ @echo 'cSupportsSplitObjs :: String' >> $@
+ @echo 'cSupportsSplitObjs = "$(SupportsSplitObjs)"' >> $@
@echo 'cGhcWithInterpreter :: String' >> $@
@echo 'cGhcWithInterpreter = "$(GhcWithInterpreter)"' >> $@
@echo 'cGhcWithNativeCodeGen :: String' >> $@
collect (VarT tv) = return [PlainTV tv]
collect (ConT _) = return []
collect (TupleT _) = return []
+ collect (UnboxedTupleT _) = return []
collect ArrowT = return []
collect ListT = return []
collect (AppT t1 t2)
; return $ HsLam (mkMatchGroup [mkSimpleMatch ps' e']) }
cvt (TupE [e]) = cvt e -- Singleton tuples treated like nothing (just parens)
cvt (TupE es) = do { es' <- mapM cvtl es; return $ ExplicitTuple (map Present es') Boxed }
+ cvt (UnboxedTupE [e]) = cvt e -- Singleton tuples treated like nothing (just parens)
+ cvt (UnboxedTupE es) = do { es' <- mapM cvtl es; return $ ExplicitTuple (map Present es') Unboxed }
cvt (CondE x y z) = do { x' <- cvtl x; y' <- cvtl y; z' <- cvtl z;
; return $ HsIf (Just noSyntaxExpr) x' y' z' }
cvt (LetE ds e) = do { ds' <- cvtLocalDecs (ptext (sLit "a let expression")) ds
cvtp (TH.VarP s) = do { s' <- vName s; return $ Hs.VarPat s' }
cvtp (TupP [p]) = cvtp p
cvtp (TupP ps) = do { ps' <- cvtPats ps; return $ TuplePat ps' Boxed void }
+cvtp (UnboxedTupP [p]) = cvtp p
+cvtp (UnboxedTupP ps) = do { ps' <- cvtPats ps; return $ TuplePat ps' Unboxed void }
cvtp (ConP s ps) = do { s' <- cNameL s; ps' <- cvtPats ps; return $ ConPatIn s' (PrefixCon ps') }
cvtp (InfixP p1 s p2) = do { s' <- cNameL s; p1' <- cvtPat p1; p2' <- cvtPat p2
; return $ ConPatIn s' (InfixCon p1' p2') }
-> failWith (ptext (sLit "Illegal 1-tuple type constructor"))
| otherwise
-> mk_apps (HsTyVar (getRdrName (tupleTyCon Boxed n))) tys'
+ UnboxedTupleT n
+ | length tys' == n -- Saturated
+ -> if n==1 then return (head tys') -- Singleton tuples treated
+ -- like nothing (ie just parens)
+ else returnL (HsTupleTy Unboxed tys')
+ | n == 1
+ -> failWith (ptext (sLit "Illegal 1-unboxed-tuple type constructor"))
+ | otherwise
+ -> mk_apps (HsTyVar (getRdrName (tupleTyCon Unboxed n))) tys'
ArrowT
| [x',y'] <- tys' -> returnL (HsFunTy x' y')
| otherwise -> mk_apps (HsTyVar (getRdrName funTyCon)) tys'
-- If it's just defaultInlinePragma, then we said
-- SPECIALISE, not SPECIALISE_INLINE
- -- A specialisation pragma for instance declarations only
- -- {-# SPECIALISE instance Eq [Int] #-}
- | SpecInstSig (LHsType name) -- (Class tys); should be a specialisation of the
- -- current instance decl
+ -- A specialisation pragma for instance declarations only
+ -- {-# SPECIALISE instance Eq [Int] #-}
+ | SpecInstSig (LHsType name) -- (Class tys); should be a specialisation of the
+ -- current instance decl
deriving (Data, Typeable)
-- ** @RULE@ declarations
RuleDecl(..), LRuleDecl, RuleBndr(..),
collectRuleBndrSigTys,
+ -- ** @VECTORISE@ declarations
+ VectDecl(..), LVectDecl,
-- ** @default@ declarations
DefaultDecl(..), LDefaultDecl,
-- ** Top-level template haskell splice
) where
-- friends:
-import {-# SOURCE #-} HsExpr( HsExpr, pprExpr )
+import {-# SOURCE #-} HsExpr( LHsExpr, HsExpr, pprExpr )
-- Because Expr imports Decls via HsBracket
import HsBinds
| WarningD (WarnDecl id)
| AnnD (AnnDecl id)
| RuleD (RuleDecl id)
+ | VectD (VectDecl id)
| SpliceD (SpliceDecl id)
| DocD (DocDecl)
| QuasiQuoteD (HsQuasiQuote id)
-- Snaffled out of both top-level fixity signatures,
-- and those in class declarations
- hs_defds :: [LDefaultDecl id],
- hs_fords :: [LForeignDecl id],
- hs_warnds :: [LWarnDecl id],
- hs_annds :: [LAnnDecl id],
- hs_ruleds :: [LRuleDecl id],
+ hs_defds :: [LDefaultDecl id],
+ hs_fords :: [LForeignDecl id],
+ hs_warnds :: [LWarnDecl id],
+ hs_annds :: [LAnnDecl id],
+ hs_ruleds :: [LRuleDecl id],
+ hs_vects :: [LVectDecl id],
- hs_docs :: [LDocDecl]
+ hs_docs :: [LDocDecl]
} deriving (Data, Typeable)
emptyGroup, emptyRdrGroup, emptyRnGroup :: HsGroup a
emptyGroup = HsGroup { hs_tyclds = [], hs_instds = [], hs_derivds = [],
hs_fixds = [], hs_defds = [], hs_annds = [],
- hs_fords = [], hs_warnds = [], hs_ruleds = [],
+ hs_fords = [], hs_warnds = [], hs_ruleds = [], hs_vects = [],
hs_valds = error "emptyGroup hs_valds: Can't happen",
hs_docs = [] }
appendGroups :: HsGroup a -> HsGroup a -> HsGroup a
appendGroups
HsGroup {
- hs_valds = val_groups1,
- hs_tyclds = tyclds1,
- hs_instds = instds1,
+ hs_valds = val_groups1,
+ hs_tyclds = tyclds1,
+ hs_instds = instds1,
hs_derivds = derivds1,
- hs_fixds = fixds1,
- hs_defds = defds1,
- hs_annds = annds1,
- hs_fords = fords1,
- hs_warnds = warnds1,
- hs_ruleds = rulds1,
+ hs_fixds = fixds1,
+ hs_defds = defds1,
+ hs_annds = annds1,
+ hs_fords = fords1,
+ hs_warnds = warnds1,
+ hs_ruleds = rulds1,
+ hs_vects = vects1,
hs_docs = docs1 }
HsGroup {
- hs_valds = val_groups2,
- hs_tyclds = tyclds2,
- hs_instds = instds2,
+ hs_valds = val_groups2,
+ hs_tyclds = tyclds2,
+ hs_instds = instds2,
hs_derivds = derivds2,
- hs_fixds = fixds2,
- hs_defds = defds2,
- hs_annds = annds2,
- hs_fords = fords2,
- hs_warnds = warnds2,
- hs_ruleds = rulds2,
- hs_docs = docs2 }
+ hs_fixds = fixds2,
+ hs_defds = defds2,
+ hs_annds = annds2,
+ hs_fords = fords2,
+ hs_warnds = warnds2,
+ hs_ruleds = rulds2,
+ hs_vects = vects2,
+ hs_docs = docs2 }
=
HsGroup {
- hs_valds = val_groups1 `plusHsValBinds` val_groups2,
- hs_tyclds = tyclds1 ++ tyclds2,
- hs_instds = instds1 ++ instds2,
+ hs_valds = val_groups1 `plusHsValBinds` val_groups2,
+ hs_tyclds = tyclds1 ++ tyclds2,
+ hs_instds = instds1 ++ instds2,
hs_derivds = derivds1 ++ derivds2,
- hs_fixds = fixds1 ++ fixds2,
- hs_annds = annds1 ++ annds2,
- hs_defds = defds1 ++ defds2,
- hs_fords = fords1 ++ fords2,
- hs_warnds = warnds1 ++ warnds2,
- hs_ruleds = rulds1 ++ rulds2,
- hs_docs = docs1 ++ docs2 }
+ hs_fixds = fixds1 ++ fixds2,
+ hs_annds = annds1 ++ annds2,
+ hs_defds = defds1 ++ defds2,
+ hs_fords = fords1 ++ fords2,
+ hs_warnds = warnds1 ++ warnds2,
+ hs_ruleds = rulds1 ++ rulds2,
+ hs_vects = vects1 ++ vects2,
+ hs_docs = docs1 ++ docs2 }
\end{code}
\begin{code}
ppr (ForD fd) = ppr fd
ppr (SigD sd) = ppr sd
ppr (RuleD rd) = ppr rd
+ ppr (VectD vect) = ppr vect
ppr (WarningD wd) = ppr wd
ppr (AnnD ad) = ppr ad
ppr (SpliceD dd) = ppr dd
hs_annds = ann_decls,
hs_fords = foreign_decls,
hs_defds = default_decls,
- hs_ruleds = rule_decls })
+ hs_ruleds = rule_decls,
+ hs_vects = vect_decls })
= vcat_mb empty
[ppr_ds fix_decls, ppr_ds default_decls,
ppr_ds deprec_decls, ppr_ds ann_decls,
ppr_ds rule_decls,
+ ppr_ds vect_decls,
if isEmptyValBinds val_decls
then Nothing
else Just (ppr val_decls),
ppr (RuleBndrSig name ty) = ppr name <> dcolon <> ppr ty
\end{code}
+
+%************************************************************************
+%* *
+\subsection{Vectorisation declarations}
+%* *
+%************************************************************************
+
+A vectorisation pragma
+
+ {-# VECTORISE f = closure1 g (scalar_map g) #-} OR
+ {-# VECTORISE SCALAR f #-}
+
+Note [Typechecked vectorisation pragmas]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+In the case of the first variant of vectorisation pragmas (with an explicit expression),
+we need to infer the type of that expression during type checking and then keep that type
+around until vectorisation, so that it can be checked against the *vectorised* type of 'f'.
+(We cannot determine vectorised types during type checking due to internal information of
+the vectoriser being needed.)
+
+To this end, we annotate the 'Id' of 'f' (the variable mentioned in the PRAGMA) with the
+inferred type of the expression. This is slightly dodgy, as this is really the type of
+'$v_f' (the name of the vectorised function).
+
+\begin{code}
+type LVectDecl name = Located (VectDecl name)
+
+data VectDecl name
+ = HsVect
+ (Located name)
+ (Maybe (LHsExpr name)) -- 'Nothing' => SCALAR declaration
+ deriving (Data, Typeable)
+
+instance OutputableBndr name => Outputable (VectDecl name) where
+ ppr (HsVect v rhs)
+ = sep [text "{-# VECTORISE" <+> ppr v,
+ nest 4 (case rhs of
+ Nothing -> text "SCALAR #-}"
+ Just rhs -> pprExpr (unLoc rhs) <+> text "#-}") ]
+\end{code}
+
%************************************************************************
%* *
\subsection[DocDecl]{Document comments}
mkSimpleMatch, unguardedGRHSs, unguardedRHS,
mkMatchGroup, mkMatch, mkHsLam, mkHsIf,
mkHsWrap, mkLHsWrap, mkHsWrapCoI, mkLHsWrapCoI,
- coiToHsWrapper, mkHsDictLet,
+ coiToHsWrapper, mkHsLams, mkHsDictLet,
mkHsOpApp, mkHsDo, mkHsWrapPat, mkHsWrapPatCoI,
nlHsTyApp, nlHsVar, nlHsLit, nlHsApp, nlHsApps, nlHsIntLit, nlHsVarApps,
mkMatchGroup :: [LMatch id] -> MatchGroup id
mkMatchGroup matches = MatchGroup matches placeHolderType
+mkHsLams :: [TyVar] -> [EvVar] -> LHsExpr Id -> LHsExpr Id
+mkHsLams tyvars dicts expr = mkLHsWrap (mkWpTyLams tyvars <.> mkWpLams dicts) expr
+
mkHsDictLet :: TcEvBinds -> LHsExpr Id -> LHsExpr Id
-mkHsDictLet ev_binds expr = mkLHsWrap (WpLet ev_binds) expr
+mkHsDictLet ev_binds expr = mkLHsWrap (mkWpLet ev_binds) expr
mkHsConApp :: DataCon -> [Type] -> [HsExpr Id] -> LHsExpr Id
-- Used for constructing dictionary terms etc, so no locations
freeNamesIfLetBndr :: IfaceLetBndr -> NameSet
-- Remember IfaceLetBndr is used only for *nested* bindings
--- The cut-down IdInfo never contains any Names, but the type may!
-freeNamesIfLetBndr (IfLetBndr _name ty _info) = freeNamesIfType ty
+-- The IdInfo can contain an unfolding (in the case of
+-- local INLINE pragmas), so look there too
+freeNamesIfLetBndr (IfLetBndr _name ty info) = freeNamesIfType ty
+ &&& freeNamesIfIdInfo info
freeNamesIfTvBndr :: IfaceTvBndr -> NameSet
freeNamesIfTvBndr (_fs,k) = freeNamesIfType k
-{-# OPTIONS_GHC -fno-warn-warnings-deprecations #-}
-
-- -----------------------------------------------------------------------------
-- | GHC LLVM Mangler
--
-- This script processes the assembly produced by LLVM, rearranging the code
-- so that an info table appears before its corresponding function. We also
-- use it to fix up the stack alignment, which needs to be 16 byte aligned
--- but always ends up off by 4 bytes because GHC sets it to the wrong starting
--- value in the RTS.
+-- but always ends up off by 4 bytes because GHC sets it to the 'wrong'
+-- starting value in the RTS.
--
-- We only need this for Mac OS X, other targets don't use it.
--
module LlvmMangler ( llvmFixupAsm ) where
-import Data.ByteString.Char8 ( ByteString )
-import qualified Data.ByteString.Char8 as BS
-
-import LlvmCodeGen.Ppr ( infoSection, iTableSuf )
-
+import Control.Exception
+import qualified Data.ByteString.Char8 as B
import Data.Char
-import Outputable
-import Util
-
-
-{- Configuration. -}
-newSection, oldSection, functionSuf, tableSuf, funDivider, eol :: ByteString
-newSection = BS.pack "\n.text\n"
-oldSection = BS.pack infoSection
-functionSuf = BS.pack $ if ghciTablesNextToCode then "_info:" else "\n_"
-tableSuf = BS.pack $ "_info" ++ iTableSuf ++ ":"
-funDivider = BS.pack "\n\n"
-eol = BS.pack "\n"
-
-
+import qualified Data.IntMap as I
+import System.IO
+
+-- Magic Strings
+infoSec, newInfoSec, newLine, spInst, jmpInst :: B.ByteString
+infoSec = B.pack "\t.section\t__STRIP,__me"
+newInfoSec = B.pack "\n\t.text"
+newLine = B.pack "\n"
+spInst = B.pack ", %esp\n"
+jmpInst = B.pack "jmp"
+
+infoLen, spFix :: Int
+infoLen = B.length infoSec
+spFix = 4
+
+-- Search Predicates
eolPred, dollarPred, commaPred :: Char -> Bool
-eolPred = ((==) '\n')
+eolPred = ((==) '\n')
dollarPred = ((==) '$')
-commaPred = ((==) ',')
+commaPred = ((==) ',')
-- | Read in assembly file and process
llvmFixupAsm :: FilePath -> FilePath -> IO ()
llvmFixupAsm f1 f2 = do
- asm <- BS.readFile f1
- BS.writeFile f2 BS.empty
- allTables f2 asm
+ r <- openBinaryFile f1 ReadMode
+ w <- openBinaryFile f2 WriteMode
+ fixTables r w I.empty
+ B.hPut w (B.pack "\n\n")
+ hClose r
+ hClose w
return ()
--- | Run over whole assembly file
-allTables :: FilePath -> ByteString -> IO ()
-allTables f str = do
- rem <- oneTable f str
- if BS.null rem
- then return ()
- else allTables f rem
-
{- |
- Look for the next function that needs to have its info table
- arranged to be before it and process it. This will print out
- any code before this function, then the info table, then the
- function. It will return the remainder of the assembly code
- to process.
-
- We rely here on the fact that LLVM prints all global variables
- at the end of the file, so an info table will always appear
- after its function.
-
- To try to help explain the string searches, here is some
- assembly code that would be processed by this program, with
- split markers placed in it like so, <split marker>:
-
- [ ...asm code... ]
- jmp *%eax
- <before|fheader>
- .def Main_main_info
- .section TEXT
- .globl _Main_main_info
- _Main_main<bl|al>_info:
- sub $12, %esp
- [ ...asm code... ]
- jmp *%eax
- <fun|after>
- .def .....
-
- [ ...asm code... ]
-
- .long 231231
- <bit'|itable_h>
- .section TEXT
- .global _Main_main_entry
- .align 4
- <bit|itable>_Main_main_entry:
- .long 0
- [ ...asm code... ]
- <itable'|ait>
- .section TEXT
+ Here we process the assembly file one function and data
+ definition at a time. When a function is encountered that
+ should have an info table we store it in a map. Otherwise
+ we print it. When an info table is found we retrieve its
+ function from the map and print them both.
+
+ For all functions we fix up the stack alignment. We also
+ fix up the section definition for functions and info tables.
-}
-oneTable :: FilePath -> ByteString -> IO ByteString
-oneTable f str =
- let last' xs = if (null xs) then 0 else last xs
-
- -- get the function
- (bl, al) = BS.breakSubstring functionSuf str
- start = last' $ BS.findSubstrings funDivider bl
- (before, fheader) = BS.splitAt start bl
- (fun, after) = BS.breakSubstring funDivider al
- label = snd $ BS.breakEnd eolPred bl
-
- -- get the info table
- ilabel = label `BS.append` tableSuf
- (bit, itable) = BS.breakSubstring ilabel after
- (itable', ait) = BS.breakSubstring funDivider itable
- istart = last' $ BS.findSubstrings funDivider bit
- (bit', iheader) = BS.splitAt istart bit
-
- -- fixup stack alignment
- fun' = fixupStack fun BS.empty
-
- -- fix up sections
- fheader' = replaceSection fheader
- iheader' = replaceSection iheader
-
- function = [before, eol, iheader', itable', eol, fheader', fun', eol]
- remainder = bit' `BS.append` ait
- in if BS.null al
- then do
- BS.appendFile f bl
- return BS.empty
-
- else if ghciTablesNextToCode
- then if BS.null itable
- then error $ "Function without matching info table! ("
- ++ (BS.unpack label) ++ ")"
- else do
- mapM_ (BS.appendFile f) function
- return remainder
-
- else do
- -- TNTC not turned on so just fix up stack
- mapM_ (BS.appendFile f) [before, fheader, fun']
- return after
-
--- | Replace the current section in a function or table header with the
--- text section specifier.
-replaceSection :: ByteString -> ByteString
-replaceSection sec =
- let (s1, s2) = BS.breakSubstring oldSection sec
- s1' = fst $ BS.breakEnd eolPred s1
- s2' = snd $ BS.break eolPred s2
- in s1' `BS.append` newSection `BS.append` s2'
-
-
--- | Mac OS X requires that the stack be 16 byte aligned when making a function
--- call (only really required though when making a call that will pass through
--- the dynamic linker). During code generation we marked any points where we
--- make a call that requires this alignment. The alignment isn't correctly
--- generated by LLVM as LLVM rightly assumes that the stack wil be aligned to
--- 16n + 12 on entry (since the function call was 16 byte aligned and the return
--- address should have been pushed, so sub 4). GHC though since it always uses
--- jumps keeps the stack 16 byte aligned on both function calls and function
--- entry. We correct LLVM's alignment then by putting inline assembly in that
--- subtracts and adds 4 to the sp as required.
-fixupStack :: ByteString -> ByteString -> ByteString
-fixupStack fun nfun | BS.null nfun =
+fixTables :: Handle -> Handle -> I.IntMap B.ByteString -> IO ()
+fixTables r w m = do
+ f <- getFun r B.empty
+ if B.null f
+ then return ()
+ else let fun = fixupStack f B.empty
+ (a,b) = B.breakSubstring infoSec fun
+ (x,c) = B.break eolPred b
+ fun' = a `B.append` newInfoSec `B.append` c
+ n = readInt $ B.drop infoLen x
+ (bs, m') | B.null b = ([fun], m)
+ | even n = ([], I.insert n fun' m)
+ | otherwise = case I.lookup (n+1) m of
+ Just xf' -> ([fun',xf'], m)
+ Nothing -> ([fun'], m)
+ in mapM_ (B.hPut w) bs >> fixTables r w m'
+
+-- | Read in the next function/data definition
+getFun :: Handle -> B.ByteString -> IO B.ByteString
+getFun r f = do
+ l <- (try (B.hGetLine r))::IO (Either IOError B.ByteString)
+ case l of
+ Right l' | B.null l' -> return f
+ | otherwise -> getFun r (f `B.append` newLine `B.append` l')
+ Left _ -> return B.empty
+
+{-|
+ Mac OS X requires that the stack be 16 byte aligned when making a function
+ call (only really required though when making a call that will pass through
+ the dynamic linker). The alignment isn't correctly generated by LLVM as
+ LLVM rightly assumes that the stack will be aligned to 16n + 12 on entry
+ (since the function call was 16 byte aligned and the return address should
+ have been pushed, so sub 4). GHC though since it always uses jumps keeps
+ the stack 16 byte aligned on both function calls and function entry.
+
+ We correct the alignment here.
+-}
+fixupStack :: B.ByteString -> B.ByteString -> B.ByteString
+fixupStack f f' | B.null f' =
let -- fixup sub op
- (a, b) = BS.breakSubstring (BS.pack ", %esp\n") fun
- (a', strNum) = BS.breakEnd dollarPred a
- Just num = readInt (BS.unpack strNum)
- num' = BS.pack $ show (num + 4::Int)
- fix = a' `BS.append` num'
- in if BS.null b
- then nfun `BS.append` a
- else fixupStack b (nfun `BS.append` fix)
-
-fixupStack fun nfun =
+ (a, c) = B.breakSubstring spInst f
+ (b, n) = B.breakEnd dollarPred a
+ num = B.pack $ show $ readInt n + spFix
+ in if B.null c
+ then f' `B.append` f
+ else fixupStack c $ f' `B.append` b `B.append` num
+
+fixupStack f f' =
let -- fixup add ops
- (a, b) = BS.breakSubstring (BS.pack "jmp") fun
- -- We need to avoid processing jumps to labels, they are of the form:
- -- jmp\tL..., jmp\t_f..., jmpl\t_f..., jmpl\t*%eax...
- labelJump = BS.index b 4 == 'L'
- (jmp, b') = BS.break eolPred b
- (a', numx) = BS.breakEnd dollarPred a
- (strNum, x) = BS.break commaPred numx
- Just num = readInt (BS.unpack strNum)
- num' = BS.pack $ show (num + 4::Int)
- fix = a' `BS.append` num' `BS.append` x `BS.append` jmp
- in if BS.null b
- then nfun `BS.append` a
- else if labelJump
- then fixupStack b' (nfun `BS.append` a `BS.append` jmp)
- else fixupStack b' (nfun `BS.append` fix)
-
-
--- | 'read' is one of my least favourite functions.
-readInt :: String -> Maybe Int
-readInt str
- | not $ null $ filter (not . isDigit) str
- = pprTrace "LLvmMangler"
- (text "Cannot read" <+> text (show str) <+> text "as it's not an Int")
- Nothing
-
- | otherwise
- = Just $ read str
+ (a, c) = B.breakSubstring jmpInst f
+ (l, b) = B.break eolPred c
+ (a', n) = B.breakEnd dollarPred a
+ (n', x) = B.break commaPred n
+ num = B.pack $ show $ readInt n' + spFix
+ in if B.null c
+ then f' `B.append` f
+ -- We need to avoid processing jumps to labels, they are of the form:
+ -- jmp\tL..., jmp\t_f..., jmpl\t_f..., jmpl\t*%eax...
+ else if B.index c 4 == 'L'
+ then fixupStack b $ f' `B.append` a `B.append` l
+ else fixupStack b $ f' `B.append` a' `B.append` num `B.append`
+ x `B.append` l
+
+-- | read an int or error
+readInt :: B.ByteString -> Int
+readInt str | B.all isDigit str = (read . B.unpack) str
+ | otherwise = error $ "LLvmMangler Cannot read" ++ show str
+ ++ "as it's not an Int"
++ o_files
++ [ "-shared" ]
++ bsymbolicFlag
- ++ [ "-Wl,-soname," ++ takeFileName output_fn ] -- set the library soname
+ -- Set the library soname. We use -h rather than -soname as
+ -- Solaris 10 doesn't support the latter:
+ ++ [ "-Wl,-h," ++ takeFileName output_fn ]
++ extra_ld_inputs
++ lib_path_opts
++ extra_ld_opts
Option(..), showOpt,
DynLibLoader(..),
fFlags, fLangFlags, xFlags,
- DPHBackend(..), dphPackage,
+ DPHBackend(..), dphPackageMaybe,
wayNames,
-- ** Manipulating DynFlags
import Data.List
import Data.Map (Map)
import qualified Data.Map as Map
+import Data.Maybe
import System.FilePath
import System.IO ( stderr, hPutChar )
| Opt_D_dump_rn_stats
| Opt_D_dump_opt_cmm
| Opt_D_dump_simpl_stats
+ | Opt_D_dump_cs_trace -- Constraint solver in type checker
| Opt_D_dump_tc_trace
| Opt_D_dump_if_trace
+ | Opt_D_dump_vt_trace
| Opt_D_dump_splices
| Opt_D_dump_BCOs
| Opt_D_dump_vect
, Flag "ddump-worker-wrapper" (setDumpFlag Opt_D_dump_worker_wrapper)
, Flag "ddump-rn-trace" (setDumpFlag Opt_D_dump_rn_trace)
, Flag "ddump-if-trace" (setDumpFlag Opt_D_dump_if_trace)
+ , Flag "ddump-cs-trace" (setDumpFlag Opt_D_dump_cs_trace)
, Flag "ddump-tc-trace" (setDumpFlag Opt_D_dump_tc_trace)
+ , Flag "ddump-vt-trace" (setDumpFlag Opt_D_dump_vt_trace)
, Flag "ddump-splices" (setDumpFlag Opt_D_dump_splices)
, Flag "ddump-rn-stats" (setDumpFlag Opt_D_dump_rn_stats)
, Flag "ddump-opt-cmm" (setDumpFlag Opt_D_dump_opt_cmm)
setDPHBackend :: DPHBackend -> DynP ()
setDPHBackend backend = upd $ \dflags -> dflags { dphBackend = backend }
--- Query the DPH backend package to be used by the vectoriser.
+-- Query the DPH backend package to be used by the vectoriser and desugaring of DPH syntax.
--
-dphPackage :: DynFlags -> PackageId
-dphPackage dflags
+dphPackageMaybe :: DynFlags -> Maybe PackageId
+dphPackageMaybe dflags
= case dphBackend dflags of
- DPHPar -> dphParPackageId
- DPHSeq -> dphSeqPackageId
- DPHThis -> thisPackage dflags
- DPHNone -> ghcError (CmdLineError dphBackendError)
-
-dphBackendError :: String
-dphBackendError = "To use -fvectorise select a DPH backend with -fdph-par or -fdph-seq"
+ DPHPar -> Just dphParPackageId
+ DPHSeq -> Just dphSeqPackageId
+ DPHThis -> Just (thisPackage dflags)
+ DPHNone -> Nothing
setMainIs :: String -> DynP ()
setMainIs arg
-- Splitting
can_split :: Bool
-can_split = cSplitObjs == "YES"
+can_split = cSupportsSplitObjs == "YES"
-- -----------------------------------------------------------------------------
-- Compiler Info
("Host platform", String cHostPlatformString),
("Target platform", String cTargetPlatformString),
("Have interpreter", String cGhcWithInterpreter),
- ("Object splitting", String cSplitObjs),
+ ("Object splitting supported", String cSupportsSplitObjs),
("Have native code generator", String cGhcWithNativeCodeGen),
("Support SMP", String cGhcWithSMP),
("Unregisterised", String cGhcUnregisterised),
typeKind,
parseName,
RunResult(..),
- runStmt, parseImportDecl, SingleStep(..),
+ runStmt, runStmtWithLocation,
+ parseImportDecl, SingleStep(..),
resume,
Resume(resumeStmt, resumeThreadId, resumeBreakInfo, resumeSpan,
resumeHistory, resumeHistoryIx),
#ifdef GHCI
, hscGetModuleExports
, hscTcRnLookupRdrName
- , hscStmt, hscTcExpr, hscImport, hscKcType
+ , hscStmt, hscStmtWithLocation
+ , hscTcExpr, hscImport, hscKcType
, hscCompileCoreExpr
#endif
newHscEnv :: DynFlags -> IO HscEnv
newHscEnv dflags
= do { eps_var <- newIORef initExternalPackageState
- ; us <- mkSplitUniqSupply 'r'
- ; nc_var <- newIORef (initNameCache us knownKeyNames)
- ; fc_var <- newIORef emptyUFM
+ ; us <- mkSplitUniqSupply 'r'
+ ; nc_var <- newIORef (initNameCache us knownKeyNames)
+ ; fc_var <- newIORef emptyUFM
; mlc_var <- newIORef emptyModuleEnv
; optFuel <- initOptFuelState
; return (HscEnv { hsc_dflags = dflags,
hsc_type_env_var = Nothing } ) }
-knownKeyNames :: [Name] -- Put here to avoid loops involving DsMeta,
- -- where templateHaskellNames are defined
-knownKeyNames = map getName wiredInThings
- ++ basicKnownKeyNames
+knownKeyNames :: [Name] -- Put here to avoid loops involving DsMeta,
+ -- where templateHaskellNames are defined
+knownKeyNames
+ = map getName wiredInThings
+ ++ basicKnownKeyNames
#ifdef GHCI
- ++ templateHaskellNames
+ ++ templateHaskellNames
#endif
-- -----------------------------------------------------------------------------
-> String -- The statement
-> IO (Maybe ([Id], HValue))
-- ^ 'Nothing' <==> empty statement (or comment only), but no parse error
-hscStmt hsc_env stmt = runHsc hsc_env $ do
- maybe_stmt <- hscParseStmt stmt
+hscStmt hsc_env stmt = hscStmtWithLocation hsc_env stmt "<interactive>" 1
+
+hscStmtWithLocation -- Compile a stmt all the way to an HValue, but don't run it
+ :: HscEnv
+ -> String -- The statement
+ -> String -- the source
+ -> Int -- ^ starting line
+ -> IO (Maybe ([Id], HValue))
+ -- ^ 'Nothing' <==> empty statement (or comment only), but no parse error
+hscStmtWithLocation hsc_env stmt source linenumber = runHsc hsc_env $ do
+ maybe_stmt <- hscParseStmtWithLocation source linenumber stmt
case maybe_stmt of
Nothing -> return Nothing
Just parsed_stmt -> do -- The real stuff
hscParseStmt :: String -> Hsc (Maybe (LStmt RdrName))
hscParseStmt = hscParseThing parseStmt
+hscParseStmtWithLocation :: String -> Int
+ -> String -> Hsc (Maybe (LStmt RdrName))
+hscParseStmtWithLocation source linenumber stmt =
+ hscParseThingWithLocation source linenumber parseStmt stmt
+
hscParseType :: String -> Hsc (LHsType RdrName)
hscParseType = hscParseThing parseType
#endif
hscParseIdentifier hsc_env str = runHsc hsc_env $
hscParseThing parseIdentifier str
-
hscParseThing :: (Outputable thing)
=> Lexer.P thing
-> String
-> Hsc thing
+hscParseThing = hscParseThingWithLocation "<interactive>" 1
-hscParseThing parser str
+hscParseThingWithLocation :: (Outputable thing)
+ => String -> Int
+ -> Lexer.P thing
+ -> String
+ -> Hsc thing
+hscParseThingWithLocation source linenumber parser str
= {-# SCC "Parser" #-} do
dflags <- getDynFlags
liftIO $ showPass dflags "Parser"
-
+
let buf = stringToStringBuffer str
- loc = mkSrcLoc (fsLit "<interactive>") 1 1
+ loc = mkSrcLoc (fsLit source) linenumber 1
case unP parser (mkPState dflags buf loc) of
mg_insts = [],
mg_fam_insts = [],
mg_rules = [],
+ mg_vect_decls = [],
mg_binds = binds,
mg_foreign = NoStubs,
mg_warns = NoWarnings,
import BasicTypes ( IPName, defaultFixity, WarningTxt(..) )
import OptimizationFuel ( OptFuelState )
import IfaceSyn
-import CoreSyn ( CoreRule )
+import CoreSyn ( CoreRule, CoreVect )
import Maybes ( orElse, expectJust, catMaybes )
import Outputable
import BreakArray
mg_binds :: ![CoreBind], -- ^ Bindings for this module
mg_foreign :: !ForeignStubs, -- ^ Foreign exports declared in this module
mg_warns :: !Warnings, -- ^ Warnings declared in the module
- mg_anns :: [Annotation], -- ^ Annotations declared in this module
- mg_hpc_info :: !HpcInfo, -- ^ Coverage tick boxes in the module
+ mg_anns :: [Annotation], -- ^ Annotations declared in this module
+ mg_hpc_info :: !HpcInfo, -- ^ Coverage tick boxes in the module
mg_modBreaks :: !ModBreaks, -- ^ Breakpoints for the module
+ mg_vect_decls:: ![CoreVect], -- ^ Vectorisation declarations in this module
+ -- (produced by desugarer & consumed by vectoriser)
mg_vect_info :: !VectInfo, -- ^ Pool of vectorised declarations in the module
-- The next two fields are unusual, because they give instance
module InteractiveEval (
#ifdef GHCI
RunResult(..), Status(..), Resume(..), History(..),
- runStmt, parseImportDecl, SingleStep(..),
+ runStmt, runStmtWithLocation,
+ parseImportDecl, SingleStep(..),
resume,
abandon, abandonAll,
getResumeContext,
-- | Run a statement in the current interactive context. Statement
-- may bind multple values.
runStmt :: GhcMonad m => String -> SingleStep -> m RunResult
-runStmt expr step =
+runStmt = runStmtWithLocation "<interactive>" 1
+
+-- | Run a statement in the current interactive context, passing in source
+-- location information. Statement may bind multiple values.
+runStmtWithLocation :: GhcMonad m => String -> Int ->
+ String -> SingleStep -> m RunResult
+runStmtWithLocation source linenumber expr step =
do
hsc_env <- getSession
let dflags' = dopt_unset (hsc_dflags hsc_env) Opt_WarnUnusedBinds
hsc_env' = hsc_env{ hsc_dflags = dflags' }
- r <- liftIO $ hscStmt hsc_env' expr
+ r <- liftIO $ hscStmtWithLocation hsc_env' expr source linenumber
case r of
Nothing -> return RunFailed -- empty statement / comment
| IToptions_prag String
| ITinclude_prag String
| ITlanguage_prag
+ | ITvect_prag
+ | ITvect_scalar_prag
| ITdotdot -- reserved symbols
| ITcolon
("generated", token ITgenerated_prag),
("core", token ITcore_prag),
("unpack", token ITunpack_prag),
- ("ann", token ITann_prag)])
+ ("ann", token ITann_prag),
+ ("vectorize", token ITvect_prag)])
twoWordPrags = Map.fromList([("inline conlike", token (ITinline_prag Inline ConLike)),
("notinline conlike", token (ITinline_prag NoInline ConLike)),
("specialize inline", token (ITspec_inline_prag True)),
- ("specialize notinline", token (ITspec_inline_prag False))])
-
+ ("specialize notinline", token (ITspec_inline_prag False)),
+ ("vectorize scalar", token ITvect_scalar_prag)])
dispatch_pragmas :: Map String Action -> Action
dispatch_pragmas prags span buf len = case Map.lookup (clean_pragma (lexemeToString buf len)) prags of
canonical prag' = case prag' of
"noinline" -> "notinline"
"specialise" -> "specialize"
+ "vectorise" -> "vectorize"
"constructorlike" -> "conlike"
_ -> prag'
canon_ws s = unwords (map canonical (words s))
'{-# WARNING' { L _ ITwarning_prag }
'{-# UNPACK' { L _ ITunpack_prag }
'{-# ANN' { L _ ITann_prag }
+ '{-# VECTORISE' { L _ ITvect_prag }
+ '{-# VECTORISE_SCALAR' { L _ ITvect_scalar_prag }
'#-}' { L _ ITclose_prag }
'..' { L _ ITdotdot } -- reserved symbols
| '{-# DEPRECATED' deprecations '#-}' { $2 }
| '{-# WARNING' warnings '#-}' { $2 }
| '{-# RULES' rules '#-}' { $2 }
+ | '{-# VECTORISE_SCALAR' qvar '#-}' { unitOL $ LL $ VectD (HsVect $2 Nothing) }
+ | '{-# VECTORISE' qvar '=' exp '#-}' { unitOL $ LL $ VectD (HsVect $2 (Just $4)) }
| annotation { unitOL $1 }
| decl { unLoc $1 }
wiredInIds, ghcPrimIds,
primOpRules, builtinRules,
- ghcPrimExports,
- wiredInThings, basicKnownKeyNames,
- primOpId,
-
- -- Random other things
- maybeCharLikeCon, maybeIntLikeCon,
+ ghcPrimExports,
+ wiredInThings, basicKnownKeyNames,
+ primOpId,
+
+ -- Random other things
+ maybeCharLikeCon, maybeIntLikeCon,
- -- Class categories
- isNumericClass, isStandardClass
+ -- Class categories
+ isNumericClass, isStandardClass
) where
#include "HsVersions.h"
-import PrelNames ( basicKnownKeyNames,
- hasKey, charDataConKey, intDataConKey,
- numericClassKeys, standardClassKeys )
+import PrelNames ( basicKnownKeyNames,
+ hasKey, charDataConKey, intDataConKey,
+ numericClassKeys, standardClassKeys )
import PrelRules
import PrimOp ( PrimOp, allThePrimOps, primOpOcc, primOpTag, maxPrimOpTag )
import DataCon ( DataCon )
%************************************************************************
-%* *
+%* *
\subsection{Known key Names}
-%* *
+%* *
%************************************************************************
This section tells what the compiler knows about the assocation of
names with uniques. These ones are the *non* wired-in ones. The
wired in ones are defined in TysWiredIn etc.
+The names for DPH can come from one of multiple backend packages. At the point where
+'basicKnownKeyNames' is used, we don't know which backend it will be. Hence, we list
+the names for multiple backends. That works out fine, although they use the same uniques,
+as we are guaranteed to only load one backend; hence, only one of the different names
+sharing a unique will be used.
+
\begin{code}
basicKnownKeyNames :: [Name]
basicKnownKeyNames
= genericTyConNames
++ typeableClassNames
+ ++ dphKnownKeyNames dphSeqPackageId ++ dphKnownKeyNames dphParPackageId
++ [ -- Type constructors (synonyms especially)
ioTyConName, ioDataConName,
runMainIOName,
-- Enum stuff
enumFromName, enumFromThenName,
enumFromThenToName, enumFromToName,
- enumFromToPName, enumFromThenToPName,
-- Monad stuff
thenIOName, bindIOName, returnIOName, failIOName,
dollarName, -- The ($) apply function
- -- Parallel array operations
- nullPName, lengthPName, replicatePName, singletonPName, mapPName,
- filterPName, zipPName, crossMapPName, indexPName,
- toPName, emptyPName, appPName,
-
-- FFI primitive types that are not wired-in.
stablePtrTyConName, ptrTyConName, funPtrTyConName,
int8TyConName, int16TyConName, int32TyConName, int64TyConName,
genericTyConNames :: [Name]
genericTyConNames = [crossTyConName, plusTyConName, genUnitTyConName]
+
+-- Known names from the DPH package which vary depending on the selected DPH backend.
+--
+dphKnownKeyNames :: PackageId -> [Name]
+dphKnownKeyNames dphPkg
+ = map ($ dphPkg)
+ [
+ -- Parallel array operations
+ nullPName, lengthPName, replicatePName, singletonPName, mapPName,
+ filterPName, zipPName, crossMapPName, indexPName,
+ toPName, emptyPName, appPName,
+ enumFromToPName, enumFromThenToPName
+
+ ]
\end{code}
gHC_PRIM, gHC_TYPES, gHC_UNIT, gHC_ORDERING, gHC_GENERICS,
gHC_MAGIC,
gHC_CLASSES, gHC_BASE, gHC_ENUM,
- gHC_SHOW, gHC_READ, gHC_NUM, gHC_INTEGER, gHC_INTEGER_TYPE, gHC_LIST, gHC_PARR,
+ gHC_SHOW, gHC_READ, gHC_NUM, gHC_INTEGER, gHC_INTEGER_TYPE, gHC_LIST,
gHC_HETMET_CODETYPES,
gHC_TUPLE, dATA_TUPLE, dATA_EITHER, dATA_STRING, dATA_FOLDABLE, dATA_TRAVERSABLE,
gHC_PACK, gHC_CONC, gHC_IO, gHC_IO_Exception,
gHC_NUM = mkBaseModule (fsLit "GHC.Num")
gHC_INTEGER = mkIntegerModule (fsLit "GHC.Integer")
gHC_INTEGER_TYPE= mkIntegerModule (fsLit "GHC.Integer.Type")
-gHC_LIST = mkBaseModule (fsLit "GHC.List")
-gHC_PARR = mkBaseModule (fsLit "GHC.PArr")
gHC_HETMET_CODETYPES = mkBaseModule (fsLit "GHC.HetMet.CodeTypes")
-gHC_TUPLE = mkPrimModule (fsLit "GHC.Tuple")
-dATA_TUPLE = mkBaseModule (fsLit "Data.Tuple")
+gHC_LIST = mkBaseModule (fsLit "GHC.List")
+gHC_TUPLE = mkPrimModule (fsLit "GHC.Tuple")
+dATA_TUPLE = mkBaseModule (fsLit "Data.Tuple")
dATA_EITHER = mkBaseModule (fsLit "Data.Either")
dATA_STRING = mkBaseModule (fsLit "Data.String")
dATA_FOLDABLE = mkBaseModule (fsLit "Data.Foldable")
gHC_EXTS = mkBaseModule (fsLit "GHC.Exts")
cONTROL_EXCEPTION_BASE = mkBaseModule (fsLit "Control.Exception.Base")
+gHC_PARR :: PackageId -> Module
+gHC_PARR pkg = mkModule pkg (mkModuleNameFS (fsLit "Data.Array.Parallel"))
+
+gHC_PARR' :: Module
+gHC_PARR' = mkBaseModule (fsLit "GHC.PArr")
+
mAIN, rOOT_MAIN :: Module
mAIN = mkMainModule_ mAIN_NAME
rOOT_MAIN = mkMainModule (fsLit ":Main") -- Root module for initialisation
enumFromToPName, enumFromThenToPName, nullPName, lengthPName,
singletonPName, replicatePName, mapPName, filterPName,
zipPName, crossMapPName, indexPName, toPName,
- emptyPName, appPName :: Name
-enumFromToPName = varQual gHC_PARR (fsLit "enumFromToP") enumFromToPIdKey
-enumFromThenToPName= varQual gHC_PARR (fsLit "enumFromThenToP") enumFromThenToPIdKey
-nullPName = varQual gHC_PARR (fsLit "nullP") nullPIdKey
-lengthPName = varQual gHC_PARR (fsLit "lengthP") lengthPIdKey
-singletonPName = varQual gHC_PARR (fsLit "singletonP") singletonPIdKey
-replicatePName = varQual gHC_PARR (fsLit "replicateP") replicatePIdKey
-mapPName = varQual gHC_PARR (fsLit "mapP") mapPIdKey
-filterPName = varQual gHC_PARR (fsLit "filterP") filterPIdKey
-zipPName = varQual gHC_PARR (fsLit "zipP") zipPIdKey
-crossMapPName = varQual gHC_PARR (fsLit "crossMapP") crossMapPIdKey
-indexPName = varQual gHC_PARR (fsLit "!:") indexPIdKey
-toPName = varQual gHC_PARR (fsLit "toP") toPIdKey
-emptyPName = varQual gHC_PARR (fsLit "emptyP") emptyPIdKey
-appPName = varQual gHC_PARR (fsLit "+:+") appPIdKey
+ emptyPName, appPName :: PackageId -> Name
+enumFromToPName pkg = varQual (gHC_PARR pkg) (fsLit "enumFromToP") enumFromToPIdKey
+enumFromThenToPName pkg = varQual (gHC_PARR pkg) (fsLit "enumFromThenToP") enumFromThenToPIdKey
+nullPName pkg = varQual (gHC_PARR pkg) (fsLit "nullP") nullPIdKey
+lengthPName pkg = varQual (gHC_PARR pkg) (fsLit "lengthP") lengthPIdKey
+singletonPName pkg = varQual (gHC_PARR pkg) (fsLit "singletonP") singletonPIdKey
+replicatePName pkg = varQual (gHC_PARR pkg) (fsLit "replicateP") replicatePIdKey
+mapPName pkg = varQual (gHC_PARR pkg) (fsLit "mapP") mapPIdKey
+filterPName pkg = varQual (gHC_PARR pkg) (fsLit "filterP") filterPIdKey
+zipPName pkg = varQual (gHC_PARR pkg) (fsLit "zipP") zipPIdKey
+crossMapPName pkg = varQual (gHC_PARR pkg) (fsLit "crossMapP") crossMapPIdKey
+indexPName pkg = varQual (gHC_PARR pkg) (fsLit "!:") indexPIdKey
+toPName pkg = varQual (gHC_PARR pkg) (fsLit "toP") toPIdKey
+emptyPName pkg = varQual (gHC_PARR pkg) (fsLit "emptyP") emptyPIdKey
+appPName pkg = varQual (gHC_PARR pkg) (fsLit "+:+") appPIdKey
-- code type things
hetmet_brak_name, hetmet_esc_name, hetmet_csp_name :: Name
doubleDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "D#") doubleDataConKey doubleDataCon
parrTyConName, parrDataConName :: Name
-parrTyConName = mkWiredInTyConName BuiltInSyntax gHC_PARR (fsLit "[::]") parrTyConKey parrTyCon
-parrDataConName = mkWiredInDataConName UserSyntax gHC_PARR (fsLit "PArr") parrDataConKey parrDataCon
+parrTyConName = mkWiredInTyConName BuiltInSyntax
+ gHC_PARR' (fsLit "[::]") parrTyConKey parrTyCon
+parrDataConName = mkWiredInDataConName UserSyntax
+ gHC_PARR' (fsLit "PArr") parrDataConKey parrDataCon
hetMetCodeTypeTyConName :: Name
hetMetCodeTypeTyConName = mkWiredInTyConName BuiltInSyntax gHC_HETMET_CODETYPES (fsLit "<[]>@") hetMetCodeTypeTyConKey hetMetCodeTypeTyCon
tyvar = head alphaTyVars
tyvarTys = replicate arity $ mkTyVarTy tyvar
nameStr = mkFastString ("MkPArr" ++ show arity)
- name = mkWiredInName gHC_PARR (mkDataOccFS nameStr) unique
+ name = mkWiredInName gHC_PARR' (mkDataOccFS nameStr) unique
(ADataCon data_con) UserSyntax
unique = mkPArrDataConUnique arity
with
has_side_effects = True
+primop SizeofArrayOp "sizeofArray#" GenPrimOp
+ Array# a -> Int#
+ {Return the number of elements in the array.}
+
+primop SizeofMutableArrayOp "sizeofMutableArray#" GenPrimOp
+ MutableArray# s a -> Int#
+ {Return the number of elements in the array.}
+
primop IndexArrayOp "indexArray#" GenPrimOp
Array# a -> Int# -> (# a #)
{Read from specified index of immutable array. Result is packaged into
has_side_effects = True
primop ThreadStatusOp "threadStatus#" GenPrimOp
- ThreadId# -> State# RealWorld -> (# State# RealWorld, Int# #)
+ ThreadId# -> State# RealWorld -> (# State# RealWorld, Int#, Int#, Int# #)
with
out_of_line = True
has_side_effects = True
hs_fords = foreign_decls,
hs_defds = default_decls,
hs_ruleds = rule_decls,
+ hs_vects = vect_decls,
hs_docs = docs })
= do {
-- (A) Process the fixity declarations, creating a mapping from
(rn_inst_decls, src_fvs2) <- rnList rnSrcInstDecl inst_decls ;
(rn_rule_decls, src_fvs3) <- setOptM Opt_ScopedTypeVariables $
- rnList rnHsRuleDecl rule_decls ;
- -- Inside RULES, scoped type variables are on
- (rn_foreign_decls, src_fvs4) <- rnList rnHsForeignDecl foreign_decls ;
- (rn_ann_decls, src_fvs5) <- rnList rnAnnDecl ann_decls ;
- (rn_default_decls, src_fvs6) <- rnList rnDefaultDecl default_decls ;
- (rn_deriv_decls, src_fvs7) <- rnList rnSrcDerivDecl deriv_decls ;
+ rnList rnHsRuleDecl rule_decls ;
+ -- Inside RULES, scoped type variables are on
+ (rn_vect_decls, src_fvs4) <- rnList rnHsVectDecl vect_decls ;
+ (rn_foreign_decls, src_fvs5) <- rnList rnHsForeignDecl foreign_decls ;
+ (rn_ann_decls, src_fvs6) <- rnList rnAnnDecl ann_decls ;
+ (rn_default_decls, src_fvs7) <- rnList rnDefaultDecl default_decls ;
+ (rn_deriv_decls, src_fvs8) <- rnList rnSrcDerivDecl deriv_decls ;
-- Haddock docs; no free vars
rn_docs <- mapM (wrapLocM rnDocDecl) docs ;
hs_annds = rn_ann_decls,
hs_defds = rn_default_decls,
hs_ruleds = rn_rule_decls,
+ hs_vects = rn_vect_decls,
hs_docs = rn_docs } ;
tycl_bndrs = hsTyClDeclsBinders rn_tycl_decls rn_inst_decls ;
ford_bndrs = hsForeignDeclsBinders rn_foreign_decls ;
other_def = (Just (mkNameSet tycl_bndrs `unionNameSets` mkNameSet ford_bndrs), emptyNameSet) ;
other_fvs = plusFVs [src_fvs1, src_fvs2, src_fvs3, src_fvs4,
- src_fvs5, src_fvs6, src_fvs7] ;
+ src_fvs5, src_fvs6, src_fvs7, src_fvs8] ;
-- It is tiresome to gather the binders from type and class decls
src_dus = [other_def] `plusDU` bind_dus `plusDU` usesOnly other_fvs ;
%*********************************************************
+%* *
+\subsection{Vectorisation declarations}
+%* *
+%*********************************************************
+
+\begin{code}
+rnHsVectDecl :: VectDecl RdrName -> RnM (VectDecl Name, FreeVars)
+rnHsVectDecl (HsVect var Nothing)
+ = do { var' <- wrapLocM lookupTopBndrRn var
+ ; return (HsVect var' Nothing, unitFV (unLoc var'))
+ }
+rnHsVectDecl (HsVect var (Just rhs))
+ = do { var' <- wrapLocM lookupTopBndrRn var
+ ; (rhs', fv_rhs) <- rnLExpr rhs
+ ; return (HsVect var' (Just rhs'), fv_rhs `addOneFV` unLoc var')
+ }
+\end{code}
+
+%*********************************************************
%* *
\subsection{Type, class and iface sig declarations}
%* *
= addl (gp { hs_annds = L l d : ts }) ds
add gp@(HsGroup {hs_ruleds = ts}) l (RuleD d) ds
= addl (gp { hs_ruleds = L l d : ts }) ds
+add gp@(HsGroup {hs_vects = ts}) l (VectD d) ds
+ = addl (gp { hs_vects = L l d : ts }) ds
add gp l (DocD d) ds
= addl (gp { hs_docs = (L l d) : (hs_docs gp) }) ds
import CoreLint ( lintCoreBindings )
import PrelNames ( iNTERACTIVE )
import HscTypes
-import Module ( PackageId, Module )
+import Module ( Module )
import DynFlags
import StaticFlags
import Rules ( RuleBase )
| CoreCSE
| CoreDoRuleCheck CompilerPhase String -- Check for non-application of rules
-- matching this string
- | CoreDoVectorisation PackageId
+ | CoreDoVectorisation
| CoreDoNothing -- Useful when building up
| CoreDoPasses [CoreToDo] -- lists of these things
coreDumpFlag CoreDoSpecialising = Just Opt_D_dump_spec
coreDumpFlag CoreDoSpecConstr = Just Opt_D_dump_spec
coreDumpFlag CoreCSE = Just Opt_D_dump_cse
-coreDumpFlag (CoreDoVectorisation {}) = Just Opt_D_dump_vect
-coreDumpFlag CoreDesugar = Just Opt_D_dump_ds
-coreDumpFlag CoreTidy = Just Opt_D_dump_simpl
-coreDumpFlag CorePrep = Just Opt_D_dump_prep
+coreDumpFlag CoreDoVectorisation = Just Opt_D_dump_vect
+coreDumpFlag CoreDesugar = Just Opt_D_dump_ds
+coreDumpFlag CoreTidy = Just Opt_D_dump_simpl
+coreDumpFlag CorePrep = Just Opt_D_dump_prep
coreDumpFlag CoreDoPrintCore = Nothing
coreDumpFlag (CoreDoRuleCheck {}) = Nothing
ppr CoreDoSpecialising = ptext (sLit "Specialise")
ppr CoreDoSpecConstr = ptext (sLit "SpecConstr")
ppr CoreCSE = ptext (sLit "Common sub-expression")
- ppr (CoreDoVectorisation {}) = ptext (sLit "Vectorisation")
- ppr CoreDesugar = ptext (sLit "Desugar")
- ppr CoreTidy = ptext (sLit "Tidy Core")
+ ppr CoreDoVectorisation = ptext (sLit "Vectorisation")
+ ppr CoreDesugar = ptext (sLit "Desugar")
+ ppr CoreTidy = ptext (sLit "Tidy Core")
ppr CorePrep = ptext (sLit "CorePrep")
ppr CoreDoPrintCore = ptext (sLit "Print core")
ppr (CoreDoRuleCheck {}) = ptext (sLit "Rule check")
]
vectorisation
- = runWhen (dopt Opt_Vectorise dflags)
- $ CoreDoPasses [ simpl_gently, CoreDoVectorisation (dphPackage dflags) ]
-
+ = runWhen (dopt Opt_Vectorise dflags) $
+ CoreDoPasses [ simpl_gently, CoreDoVectorisation ]
-- By default, we have 2 phases before phase 0.
doCorePass CoreDoSpecConstr = {-# SCC "SpecConstr" #-}
specConstrProgram
-doCorePass (CoreDoVectorisation be) = {-# SCC "Vectorise" #-}
- vectorise be
+doCorePass CoreDoVectorisation = {-# SCC "Vectorise" #-}
+ vectorise
doCorePass CoreDoGlomBinds = doPassDM glomBinds
doCorePass CoreDoPrintCore = observe printCore
-- let ji = \xij -> ei
-- in case [...hole...] of { pi -> ji xij }
do { tick (CaseOfCase case_bndr)
- ; (env', dup_cont, nodup_cont) <- mkDupableCont env cont
- -- NB: call mkDupableCont here, *not* prepareCaseCont
- -- We must make a duplicable continuation, whereas prepareCaseCont
- -- doesn't when there is a single case branch
+ ; (env', dup_cont, nodup_cont) <- prepareCaseCont env alts cont
+ -- NB: We call prepareCaseCont here. If there is only one
+ -- alternative, then dup_cont may be big, but that's ok
+ -- because we push it into the single alternative, and then
+ -- use mkDupableAlt to turn that simplified alternative into
+ -- a join point if it's too big to duplicate.
+ -- And this is important: see Note [Fusing case continuations]
; let alt_env = se `setInScope` env'
; (alt_env', case_bndr') <- simplBinder alt_env case_bndr
-- See Note [Duplicated env]
\end{code}
+Note [Fusing case continuations]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+It's important to fuse two successive case continuations when the
+first has one alternative. That's why we call prepareCaseCont here.
+Consider this, which arises from thunk splitting (see Note [Thunk
+splitting] in WorkWrap):
+
+ let
+ x* = case (case v of {pn -> rn}) of
+ I# a -> I# a
+ in body
+
+The simplifier will find
+ (Var v) with continuation
+ Select (pn -> rn) (
+ Select [I# a -> I# a] (
+ StrictBind body Stop
+
+So we'll call mkDupableCont on
+ Select [I# a -> I# a] (StrictBind body Stop)
+There is just one alternative in the first Select, so we want to
+simplify the rhs (I# a) with continuation (StrictBind body Stop)
+Supposing that body is big, we end up with
+ let $j a = <let x = I# a in body>
+ in case v of { pn -> case rn of
+ I# a -> $j a }
+This is just what we want because the rn produces a box that
+the case rn cancels with.
+
+See Trac #4957 for a fuller example.
+
Note [Case binders and join points]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider this
Unlike StrictArg, there doesn't seem anything to gain from
duplicating a StrictBind continuation, so we don't.
-The desire not to duplicate is the entire reason that
-mkDupableCont returns a pair of continuations.
-
Note [Single-alternative cases]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Here's another single-alternative where we really want to do case-of-case:
-data Mk1 = Mk1 Int#
-data Mk1 = Mk2 Int#
+data Mk1 = Mk1 Int# | Mk2 Int#
M1.f =
\r [x_s74 y_s6X]
So the outer case is doing *nothing at all*, other than serving as a
join-point. In this case we really want to do case-of-case and decide
-whether to use a real join point or just duplicate the continuation.
+whether to use a real join point or just duplicate the continuation:
+
+ let $j s7c = case x of
+ Mk1 ipv77 -> (==) s7c ipv77
+ Mk2 ipv79 -> (==) s7c ipv79
+ in
+ case y of
+ Mk1 ipv70 -> $j ipv70
+ Mk2 ipv72 -> $j ipv72
Hence: check whether the case binder's type is unlifted, because then
the outer case is *not* a seq.
-- in case x of
-- I# y -> let x = I# y in x }
-- See comments above. Is it not beautifully short?
+-- Moreover, it works just as well when there are
+-- several binders, and if the binders are lifted
+-- E.g. x = e
+-- --> x = let x = e in
+-- case x of (a,b) -> let x = (a,b) in x
splitThunk :: Var -> Expr Var -> UniqSM [(Var, Expr Var)]
splitThunk fn_id rhs = do
\begin{code}
module TcBinds ( tcLocalBinds, tcTopBinds,
tcHsBootSigs, tcPolyBinds,
- PragFun, tcSpecPrags, mkPragFun,
+ PragFun, tcSpecPrags, tcVectDecls, mkPragFun,
TcSigInfo(..), SigFun, mkSigFun,
badBootDeclErr ) where
import NameEnv
import SrcLoc
import Bag
+import ListSetOps
import ErrUtils
import Digraph
import Maybes
impSpecErr name
= hang (ptext (sLit "You cannot SPECIALISE") <+> quotes (ppr name))
2 (vcat [ ptext (sLit "because its definition has no INLINE/INLINABLE pragma")
- , ptext (sLit "(or you compiled its definining module without -O)")])
+ , ptext (sLit "(or you compiled its defining module without -O)")])
+
+--------------
+tcVectDecls :: [LVectDecl Name] -> TcM [LVectDecl TcId]
+tcVectDecls decls
+ = do { decls' <- mapM (wrapLocM tcVect) decls
+ ; let ids = [unLoc id | L _ (HsVect id _) <- decls']
+ dups = findDupsEq (==) ids
+ ; mapM_ reportVectDups dups
+ ; return decls'
+ }
+ where
+ reportVectDups (first:_second:_more)
+ = addErrAt (getSrcSpan first) $
+ ptext (sLit "Duplicate vectorisation declarations for") <+> ppr first
+ reportVectDups _ = return ()
+
+--------------
+tcVect :: VectDecl Name -> TcM (VectDecl TcId)
+-- We can't typecheck the expression of a vectorisation declaration against the vectorised type
+-- of the original definition as this requires internals of the vectoriser not available during
+-- type checking. Instead, we infer the type of the expression and leave it to the vectoriser
+-- to check the compatibility of the Core types.
+tcVect (HsVect name Nothing)
+ = addErrCtxt (vectCtxt name) $
+ do { id <- wrapLocM tcLookupId name
+ ; return (HsVect id Nothing)
+ }
+tcVect (HsVect name@(L loc _) (Just rhs))
+ = addErrCtxt (vectCtxt name) $
+ do { _id <- wrapLocM tcLookupId name -- need to ensure that the name is already defined
+
+ -- turn the vectorisation declaration into a single non-recursive binding
+ ; let bind = L loc $ mkFunBind name [mkSimpleMatch [] rhs]
+ sigFun = const Nothing
+ pragFun = mkPragFun [] (unitBag bind)
+
+ -- perform type inference (including generalisation)
+ ; (binds, [id']) <- tcPolyInfer TopLevel False sigFun pragFun NonRecursive [bind]
+
+ ; traceTc "tcVect inferred type" $ ppr (varType id')
+
+ -- add the type variable and dictionary bindings produced by type generalisation to the
+ -- right-hand side of the vectorisation declaration
+ ; let [AbsBinds tvs evs _ evBinds actualBinds] = (map unLoc . bagToList) binds
+ ; let [bind'] = bagToList actualBinds
+ MatchGroup
+ [L _ (Match _ _ (GRHSs [L _ (GRHS _ rhs')] _))]
+ _ = (fun_matches . unLoc) bind'
+ rhsWrapped = mkHsLams tvs evs (mkHsDictLet evBinds rhs')
+
+ -- We return the type-checked 'Id', to propagate the inferred signature
+ -- to the vectoriser - see "Note [Typechecked vectorisation pragmas]" in HsDecls
+ ; return $ HsVect (L loc id') (Just rhsWrapped)
+ }
+
+vectCtxt :: Located Name -> SDoc
+vectCtxt name = ptext (sLit "When checking the vectorisation declaration for") <+> ppr name
+
--------------
-- If typechecking the binds fails, then return with each
-- signature-less binder given type (forall a.a), to minimise
\begin{code}
module TcCanonical(
mkCanonical, mkCanonicals, mkCanonicalFEV, canWanteds, canGivens,
- canOccursCheck, canEq
+ canOccursCheck, canEq,
+ rewriteWithFunDeps
) where
#include "HsVersions.h"
import BasicTypes
import Type
import TcRnTypes
-
+import FunDeps
+import qualified TcMType as TcM
import TcType
import TcErrors
import Coercion
import TypeRep
import Name
import Var
+import VarEnv ( TidyEnv )
import Outputable
import Control.Monad ( unless, when, zipWithM, zipWithM_ )
import MonadUtils
import HsBinds
import TcSMonad
+import FastString
\end{code}
Note [Canonicalisation]
; return $ (mkCoVarCoercion cv, rhs_var, ct) }
else -- Derived or Wanted: make a new *unification* flatten variable
do { rhs_var <- newFlexiTcSTy (typeKind fam_ty)
- ; cv <- newWantedCoVar fam_ty rhs_var
+ ; cv <- newCoVar fam_ty rhs_var
; let ct = CFunEqCan { cc_id = cv
, cc_flavor = mkWantedFlavor fl
-- Always Wanted, not Derived
canEq fl cv ty1 ty2
| tcEqType ty1 ty2 -- Dealing with equality here avoids
-- later spurious occurs checks for a~a
- = do { when (isWanted fl) (setWantedCoBind cv ty1)
+ = do { when (isWanted fl) (setCoBind cv ty1)
; return emptyCCan }
-- If one side is a variable, orient and flatten,
Just (t2a,t2b,t2c) <- splitCoPredTy_maybe s2
= do { (v1,v2,v3)
<- if isWanted fl then -- Wanted
- do { v1 <- newWantedCoVar t1a t2a
- ; v2 <- newWantedCoVar t1b t2b
- ; v3 <- newWantedCoVar t1c t2c
+ do { v1 <- newCoVar t1a t2a
+ ; v2 <- newCoVar t1b t2b
+ ; v3 <- newCoVar t1c t2c
; let res_co = mkCoPredCo (mkCoVarCoercion v1)
(mkCoVarCoercion v2) (mkCoVarCoercion v3)
- ; setWantedCoBind cv res_co
+ ; setCoBind cv res_co
; return (v1,v2,v3) }
else if isGiven fl then -- Given
let co_orig = mkCoVarCoercion cv
canEq fl cv (FunTy s1 t1) (FunTy s2 t2)
= do { (argv, resv) <-
if isWanted fl then
- do { argv <- newWantedCoVar s1 s2
- ; resv <- newWantedCoVar t1 t2
- ; setWantedCoBind cv $
+ do { argv <- newCoVar s1 s2
+ ; resv <- newCoVar t1 t2
+ ; setCoBind cv $
mkFunCoercion (mkCoVarCoercion argv) (mkCoVarCoercion resv)
; return (argv,resv) }
canEq fl cv (PredTy (IParam n1 t1)) (PredTy (IParam n2 t2))
| n1 == n2
= if isWanted fl then
- do { v <- newWantedCoVar t1 t2
- ; setWantedCoBind cv $ mkIParamPredCo n1 (mkCoVarCoercion cv)
+ do { v <- newCoVar t1 t2
+ ; setCoBind cv $ mkIParamPredCo n1 (mkCoVarCoercion cv)
; canEq fl v t1 t2 }
else return emptyCCan -- DV: How to decompose given IP coercions?
canEq fl cv (PredTy (ClassP c1 tys1)) (PredTy (ClassP c2 tys2))
| c1 == c2
= if isWanted fl then
- do { vs <- zipWithM newWantedCoVar tys1 tys2
- ; setWantedCoBind cv $ mkClassPPredCo c1 (map mkCoVarCoercion vs)
+ do { vs <- zipWithM newCoVar tys1 tys2
+ ; setCoBind cv $ mkClassPPredCo c1 (map mkCoVarCoercion vs)
; andCCans <$> zipWith3M (canEq fl) vs tys1 tys2
}
else return emptyCCan
= -- Generate equalities for each of the corresponding arguments
do { argsv
<- if isWanted fl then
- do { argsv <- zipWithM newWantedCoVar tys1 tys2
- ; setWantedCoBind cv $
+ do { argsv <- zipWithM newCoVar tys1 tys2
+ ; setCoBind cv $
mkTyConCoercion tc1 (map mkCoVarCoercion argsv)
; return argsv }
, Just (s2,t2) <- tcSplitAppTy_maybe ty2
= do { (cv1,cv2) <-
if isWanted fl
- then do { cv1 <- newWantedCoVar s1 s2
- ; cv2 <- newWantedCoVar t1 t2
- ; setWantedCoBind cv $
+ then do { cv1 <- newCoVar s1 s2
+ ; cv2 <- newCoVar t1 t2
+ ; setCoBind cv $
mkAppCoercion (mkCoVarCoercion cv1) (mkCoVarCoercion cv2)
; return (cv1,cv2) }
= OtherCls ty
-- See note [Canonical ordering for equality constraints].
-reOrient :: TcsUntouchables -> TypeClassifier -> TypeClassifier -> Bool
+reOrient :: CtFlavor -> TypeClassifier -> TypeClassifier -> Bool
-- (t1 `reOrient` t2) responds True
-- iff we should flip to (t2~t1)
-- We try to say False if possible, to minimise evidence generation
--
-- Postcondition: After re-orienting, first arg is not OTherCls
-reOrient _untch (OtherCls {}) (FunCls {}) = True
-reOrient _untch (OtherCls {}) (FskCls {}) = True
-reOrient _untch (OtherCls {}) (VarCls {}) = True
-reOrient _untch (OtherCls {}) (OtherCls {}) = panic "reOrient" -- One must be Var/Fun
+reOrient _fl (OtherCls {}) (FunCls {}) = True
+reOrient _fl (OtherCls {}) (FskCls {}) = True
+reOrient _fl (OtherCls {}) (VarCls {}) = True
+reOrient _fl (OtherCls {}) (OtherCls {}) = panic "reOrient" -- One must be Var/Fun
+
+reOrient _fl (FunCls {}) (VarCls _tv) = False
+ -- But consider the following variation: isGiven fl && isMetaTyVar tv
-reOrient _untch (FunCls {}) (VarCls {}) = False
-- See Note [No touchables as FunEq RHS] in TcSMonad
-reOrient _untch (FunCls {}) _ = False -- Fun/Other on rhs
+reOrient _fl (FunCls {}) _ = False -- Fun/Other on rhs
-reOrient _untch (VarCls {}) (FunCls {}) = True
+reOrient _fl (VarCls {}) (FunCls {}) = True
-reOrient _untch (VarCls {}) (FskCls {}) = False
+reOrient _fl (VarCls {}) (FskCls {}) = False
-reOrient _untch (VarCls {}) (OtherCls {}) = False
-reOrient _untch (VarCls tv1) (VarCls tv2)
+reOrient _fl (VarCls {}) (OtherCls {}) = False
+reOrient _fl (VarCls tv1) (VarCls tv2)
| isMetaTyVar tv2 && not (isMetaTyVar tv1) = True
| otherwise = False
-- Just for efficiency, see CTyEqCan invariants
-reOrient _untch (FskCls {}) (VarCls tv2) = isMetaTyVar tv2
+reOrient _fl (FskCls {}) (VarCls tv2) = isMetaTyVar tv2
-- Just for efficiency, see CTyEqCan invariants
-reOrient _untch (FskCls {}) (FskCls {}) = False
-reOrient _untch (FskCls {}) (FunCls {}) = True
-reOrient _untch (FskCls {}) (OtherCls {}) = False
+reOrient _fl (FskCls {}) (FskCls {}) = False
+reOrient _fl (FskCls {}) (FunCls {}) = True
+reOrient _fl (FskCls {}) (OtherCls {}) = False
------------------
canEqLeaf :: TcsUntouchables
-- Preconditions:
-- * one of the two arguments is not OtherCls
-- * the two types are not equal (looking through synonyms)
-canEqLeaf untch fl cv cls1 cls2
+canEqLeaf _untch fl cv cls1 cls2
| cls1 `re_orient` cls2
= do { cv' <- if isWanted fl
- then do { cv' <- newWantedCoVar s2 s1
- ; setWantedCoBind cv $ mkSymCoercion (mkCoVarCoercion cv')
+ then do { cv' <- newCoVar s2 s1
+ ; setCoBind cv $ mkSymCoercion (mkCoVarCoercion cv')
; return cv' }
else if isGiven fl then
newGivenCoVar s2 s1 (mkSymCoercion (mkCoVarCoercion cv))
= do { traceTcS "canEqLeaf" (ppr (unClassify cls1) $$ ppr (unClassify cls2))
; canEqLeafOriented fl cv cls1 s2 }
where
- re_orient = reOrient untch
+ re_orient = reOrient fl
s1 = unClassify cls1
s2 = unClassify cls2
; cv_new <- if no_flattening_happened then return cv
else if isGiven fl then return cv
else if isWanted fl then
- do { cv' <- newWantedCoVar (unClassify (FunCls fn xis1)) xi2
+ do { cv' <- newCoVar (unClassify (FunCls fn xis1)) xi2
-- cv' : F xis ~ xi2
; let -- fun_co :: F xis1 ~ F tys1
fun_co = mkTyConCoercion fn cos1
want_co = mkSymCoercion fun_co
`mkTransCoercion` mkCoVarCoercion cv'
`mkTransCoercion` co2
- ; setWantedCoBind cv want_co
+ ; setCoBind cv want_co
; return cv' }
else -- Derived
newDerivedId (EqPred (unClassify (FunCls fn xis1)) xi2)
; cv_new <- if no_flattening_happened then return cv
else if isGiven fl then return cv
else if isWanted fl then
- do { cv' <- newWantedCoVar (mkTyVarTy tv) xi2' -- cv' : tv ~ xi2
- ; setWantedCoBind cv (mkCoVarCoercion cv' `mkTransCoercion` co)
+ do { cv' <- newCoVar (mkTyVarTy tv) xi2' -- cv' : tv ~ xi2
+ ; setCoBind cv (mkCoVarCoercion cv' `mkTransCoercion` co)
; return cv' }
else -- Derived
newDerivedId (EqPred (mkTyVarTy tv) xi2')
itself, and so on.
+%************************************************************************
+%* *
+%* Functional dependencies, instantiation of equations
+%* *
+%************************************************************************
+
+When we spot an equality arising from a functional dependency,
+we now use that equality (a "wanted") to rewrite the work-item
+constraint right away. This avoids two dangers
+
+ Danger 1: If we send the original constraint on down the pipeline
+ it may react with an instance declaration, and in delicate
+ situations (when a Given overlaps with an instance) that
+ may produce new insoluble goals: see Trac #4952
+
+ Danger 2: If we don't rewrite the constraint, it may re-react
+ with the same thing later, and produce the same equality
+ again --> termination worries.
+To achieve this required some refactoring of FunDeps.lhs (nicer
+now!).
+
+\begin{code}
+rewriteWithFunDeps :: [Equation]
+ -> [Xi] -> CtFlavor
+ -> TcS (Maybe ([Xi], [Coercion], CanonicalCts))
+rewriteWithFunDeps eqn_pred_locs xis fl
+ = do { fd_ev_poss <- mapM (instFunDepEqn fl) eqn_pred_locs
+ ; let fd_ev_pos :: [(Int,FlavoredEvVar)]
+ fd_ev_pos = concat fd_ev_poss
+ (rewritten_xis, cos) = unzip (rewriteDictParams fd_ev_pos xis)
+ ; fds <- mapM (\(_,fev) -> mkCanonicalFEV fev) fd_ev_pos
+ ; let fd_work = unionManyBags fds
+ ; if isEmptyBag fd_work
+ then return Nothing
+ else return (Just (rewritten_xis, cos, fd_work)) }
+
+instFunDepEqn :: CtFlavor -- Precondition: Only Wanted or Derived
+ -> Equation
+ -> TcS [(Int, FlavoredEvVar)]
+-- Post: Returns the position index as well as the corresponding FunDep equality
+instFunDepEqn fl (FDEqn { fd_qtvs = qtvs, fd_eqs = eqs
+ , fd_pred1 = d1, fd_pred2 = d2 })
+ = do { let tvs = varSetElems qtvs
+ ; tvs' <- mapM instFlexiTcS tvs
+ ; let subst = zipTopTvSubst tvs (mkTyVarTys tvs')
+ ; mapM (do_one subst) eqs }
+ where
+ fl' = case fl of
+ Given _ -> panic "mkFunDepEqns"
+ Wanted loc -> Wanted (push_ctx loc)
+ Derived loc -> Derived (push_ctx loc)
+
+ push_ctx loc = pushErrCtxt FunDepOrigin (False, mkEqnMsg d1 d2) loc
+
+ do_one subst (FDEq { fd_pos = i, fd_ty_left = ty1, fd_ty_right = ty2 })
+ = do { let sty1 = substTy subst ty1
+ sty2 = substTy subst ty2
+ ; ev <- newCoVar sty1 sty2
+ ; return (i, mkEvVarX ev fl') }
+
+rewriteDictParams :: [(Int,FlavoredEvVar)] -- A set of coercions : (pos, ty' ~ ty)
+ -> [Type] -- A sequence of types: tys
+ -> [(Type,Coercion)] -- Returns : [(ty', co : ty' ~ ty)]
+rewriteDictParams param_eqs tys
+ = zipWith do_one tys [0..]
+ where
+ do_one :: Type -> Int -> (Type,Coercion)
+ do_one ty n = case lookup n param_eqs of
+ Just wev -> (get_fst_ty wev, mkCoVarCoercion (evVarOf wev))
+ Nothing -> (ty,ty) -- Identity
+
+ get_fst_ty wev = case evVarOfPred wev of
+ EqPred ty1 _ -> ty1
+ _ -> panic "rewriteDictParams: non equality fundep"
+
+mkEqnMsg :: (TcPredType, SDoc) -> (TcPredType, SDoc) -> TidyEnv
+ -> TcM (TidyEnv, SDoc)
+mkEqnMsg (pred1,from1) (pred2,from2) tidy_env
+ = do { zpred1 <- TcM.zonkTcPredType pred1
+ ; zpred2 <- TcM.zonkTcPredType pred2
+ ; let { tpred1 = tidyPred tidy_env zpred1
+ ; tpred2 = tidyPred tidy_env zpred2 }
+ ; let msg = vcat [ptext (sLit "When using functional dependencies to combine"),
+ nest 2 (sep [ppr tpred1 <> comma, nest 2 from1]),
+ nest 2 (sep [ppr tpred2 <> comma, nest 2 from2])]
+ ; return (tidy_env, msg) }
+\end{code}
\ No newline at end of file
import TysPrim( intPrimTy )
import PrimOp( tagToEnumKey )
import PrelNames
+import Module
import DynFlags
import SrcLoc
import Util
; expr1' <- tcPolyExpr expr1 elt_ty
; expr2' <- tcPolyExpr expr2 elt_ty
; enum_from_to <- newMethodFromName (PArrSeqOrigin seq)
- enumFromToPName elt_ty
+ (enumFromToPName basePackageId) elt_ty -- !!!FIXME: chak
; return $ mkHsWrapCoI coi
(PArrSeq enum_from_to (FromTo expr1' expr2')) }
; expr2' <- tcPolyExpr expr2 elt_ty
; expr3' <- tcPolyExpr expr3 elt_ty
; eft <- newMethodFromName (PArrSeqOrigin seq)
- enumFromThenToPName elt_ty
+ (enumFromThenToPName basePackageId) elt_ty -- !!!FIXME: chak
; return $ mkHsWrapCoI coi
(PArrSeq eft (FromThenTo expr1' expr2' expr3')) }
zonkTopDecls :: Bag EvBind
-> LHsBinds TcId -> NameSet
- -> [LRuleDecl TcId] -> [LTcSpecPrag] -> [LForeignDecl TcId]
- -> TcM ([Id],
- Bag EvBind,
- Bag (LHsBind Id),
- [LForeignDecl Id],
- [LTcSpecPrag],
- [LRuleDecl Id])
-zonkTopDecls ev_binds binds sig_ns rules imp_specs fords
- = do { (env1, ev_binds') <- zonkEvBinds emptyZonkEnv ev_binds
+ -> [LRuleDecl TcId] -> [LVectDecl TcId] -> [LTcSpecPrag] -> [LForeignDecl TcId]
+ -> TcM ([Id],
+ Bag EvBind,
+ Bag (LHsBind Id),
+ [LForeignDecl Id],
+ [LTcSpecPrag],
+ [LRuleDecl Id],
+ [LVectDecl Id])
+zonkTopDecls ev_binds binds sig_ns rules vects imp_specs fords
+ = do { (env1, ev_binds') <- zonkEvBinds emptyZonkEnv ev_binds
-- Warn about missing signatures
-- Do this only when we we have a type to offer
| otherwise = noSigWarn
; (env2, binds') <- zonkRecMonoBinds env1 sig_warn binds
- -- Top level is implicitly recursive
- ; rules' <- zonkRules env2 rules
+ -- Top level is implicitly recursive
+ ; rules' <- zonkRules env2 rules
+ ; vects' <- zonkVects env2 vects
; specs' <- zonkLTcSpecPrags env2 imp_specs
- ; fords' <- zonkForeignExports env2 fords
- ; return (zonkEnvIds env2, ev_binds', binds', fords', specs', rules') }
+ ; fords' <- zonkForeignExports env2 fords
+ ; return (zonkEnvIds env2, ev_binds', binds', fords', specs', rules', vects') }
---------------------------------------------
zonkLocalBinds :: ZonkEnv -> HsLocalBinds TcId -> TcM (ZonkEnv, HsLocalBinds Id)
| otherwise = ASSERT( isImmutableTyVar v) return (env, v)
\end{code}
+\begin{code}
+zonkVects :: ZonkEnv -> [LVectDecl TcId] -> TcM [LVectDecl Id]
+zonkVects env = mappM (wrapLocM (zonkVect env))
+
+zonkVect :: ZonkEnv -> VectDecl TcId -> TcM (VectDecl Id)
+zonkVect env (HsVect v Nothing)
+ = do { v' <- wrapLocM (zonkIdBndr env) v
+ ; return $ HsVect v' Nothing
+ }
+zonkVect env (HsVect v (Just e))
+ = do { v' <- wrapLocM (zonkIdBndr env) v
+ ; e' <- zonkLExpr env e
+ ; return $ HsVect v' (Just e')
+ }
+\end{code}
%************************************************************************
%* *
-- Deal with 'SPECIALISE instance' pragmas
-- See Note [SPECIALISE instance pragmas]
- ; spec_info <- tcSpecInstPrags dfun_id ibinds
+ ; spec_info@(spec_inst_prags,_) <- tcSpecInstPrags dfun_id ibinds
-- Typecheck the methods
; (meth_ids, meth_binds)
main_bind = AbsBinds { abs_tvs = inst_tyvars
, abs_ev_vars = dfun_ev_vars
, abs_exports = [(inst_tyvars, dfun_id_w_fun, self_dict,
- SpecPrags [] {- spec_inst_prags -})]
+ SpecPrags spec_inst_prags)]
, abs_ev_binds = emptyTcEvBinds
, abs_binds = unitBag dict_bind }
import HsBinds
import Inst( tyVarsOfEvVar )
-import InstEnv
import Class
import TyCon
import Name
, ptext (sLit "new work =") <+> ppr work <> comma
, ptext (sLit "stop =") <+> ppr stop])
-type SimplifierStage = WorkItem -> InertSet -> TcS StageResult
+type SubGoalDepth = Int -- Starts at zero; used to limit infinite
+ -- recursion of sub-goals
+type SimplifierStage = SubGoalDepth -> WorkItem -> InertSet -> TcS StageResult
-- Combine a sequence of simplifier 'stages' to create a pipeline
-runSolverPipeline :: [(String, SimplifierStage)]
- -> InertSet -> WorkItem
+runSolverPipeline :: SubGoalDepth
+ -> [(String, SimplifierStage)]
+ -> InertSet -> WorkItem
-> TcS (InertSet, WorkList)
-- Precondition: non-empty list of stages
-runSolverPipeline pipeline inerts workItem
+runSolverPipeline depth pipeline inerts workItem
= do { traceTcS "Start solver pipeline" $
vcat [ ptext (sLit "work item =") <+> ppr workItem
, ptext (sLit "inerts =") <+> ppr inerts]
; let itr_in = SR { sr_inerts = inerts
- , sr_new_work = emptyWorkList
- , sr_stop = ContinueWith workItem }
+ , sr_new_work = emptyWorkList
+ , sr_stop = ContinueWith workItem }
; itr_out <- run_pipeline pipeline itr_in
; let new_inert
= case sr_stop itr_out of
(SR { sr_new_work = accum_work
, sr_inerts = inerts
, sr_stop = ContinueWith work_item })
- = do { itr <- stage work_item inerts
+ = do { itr <- stage depth work_item inerts
; traceTcS ("Stage result (" ++ name ++ ")") (ppr itr)
; let itr' = itr { sr_new_work = accum_work `unionWorkLists` sr_new_work itr }
; run_pipeline stages itr' }
-> (ct,evVarPred ev)) ws)
, text "inert = " <+> ppr inert ]
- ; (flag, inert_ret) <- foldlBagM (tryPreSolveAndInteract sctx dyn_flags) (True,inert) ws
+ ; (flag, inert_ret) <- foldrBagM (tryPreSolveAndInteract sctx dyn_flags) (True,inert) ws
+ -- use foldr to preserve the order
; traceTcS "solveInteract, after clever canonicalization (and interaction):" $
vcat [ text "No interaction happened = " <+> ppr flag
tryPreSolveAndInteract :: SimplContext
-> DynFlags
- -> (Bool, InertSet)
-> FlavoredEvVar
+ -> (Bool, InertSet)
-> TcS (Bool, InertSet)
-- Returns: True if it was able to discharge this constraint AND all previous ones
-tryPreSolveAndInteract sctx dyn_flags (all_previous_discharged, inert)
- flavev@(EvVarX ev_var fl)
+tryPreSolveAndInteract sctx dyn_flags flavev@(EvVarX ev_var fl) (all_previous_discharged, inert)
= do { let inert_cts = get_inert_cts (evVarPred ev_var)
; this_one_discharged <- dischargeFromCCans inert_cts flavev
else do
{ extra_cts <- mkCanonical fl ev_var
- ; inert_ret <- solveInteractWithDepth (ctxtStkDepth dyn_flags,0,[])
- inert extra_cts
+ ; inert_ret <- solveInteractWithDepth (ctxtStkDepth dyn_flags,0,[]) extra_cts inert
; return (False, inert_ret) } }
where
= inert_eqs inert `unionBags` cCanMapToBag (inert_funeqs inert)
dischargeFromCCans :: CanonicalCts -> FlavoredEvVar -> TcS Bool
+-- See if this (pre-canonicalised) work-item is identical to a
+-- one already in the inert set. Reasons:
+-- a) Avoid creating superclass constraints for millions of incoming (Num a) constraints
+-- b) Termination for improve_eqs in TcSimplify.simpl_loop
dischargeFromCCans cans (EvVarX ev fl)
- = Bag.foldlBagM discharge_ct False cans
- where discharge_ct :: Bool -> CanonicalCt -> TcS Bool
- discharge_ct True _ct = return True
- discharge_ct False ct
- | evVarPred (cc_id ct) `tcEqPred` evVarPred ev
- , cc_flavor ct `canSolve` fl
- = do { when (isWanted fl) $ set_ev_bind ev (cc_id ct)
- ; return True }
- where set_ev_bind x y
- | EqPred {} <- evVarPred y
- = setEvBind x (EvCoercion (mkCoVarCoercion y))
- | otherwise = setEvBind x (EvId y)
- discharge_ct False _ct = return False
+ = Bag.foldrBag discharge_ct (return False) cans
+ where
+ the_pred = evVarPred ev
+
+ discharge_ct :: CanonicalCt -> TcS Bool -> TcS Bool
+ discharge_ct ct _rest
+ | evVarPred (cc_id ct) `tcEqPred` the_pred
+ , cc_flavor ct `canSolve` fl
+ = do { when (isWanted fl) $ set_ev_bind ev (cc_id ct)
+ -- Deriveds need no evidence
+ -- For Givens, we already have evidence, and we don't need it twice
+ ; return True }
+ where
+ set_ev_bind x y
+ | EqPred {} <- evVarPred y = setEvBind x (EvCoercion (mkCoVarCoercion y))
+ | otherwise = setEvBind x (EvId y)
+
+ discharge_ct _ct rest = rest
\end{code}
Note [Avoiding the superclass explosion]
constraints.
\begin{code}
-solveOne :: InertSet -> WorkItem -> TcS InertSet
-solveOne inerts workItem
+solveOne :: WorkItem -> InertSet -> TcS InertSet
+solveOne workItem inerts
= do { dyn_flags <- getDynFlags
- ; solveOneWithDepth (ctxtStkDepth dyn_flags,0,[]) inerts workItem
+ ; solveOneWithDepth (ctxtStkDepth dyn_flags,0,[]) workItem inerts
}
-----------------
solveInteractWithDepth :: (Int, Int, [WorkItem])
- -> InertSet -> WorkList -> TcS InertSet
-solveInteractWithDepth ctxt@(max_depth,n,stack) inert ws
+ -> WorkList -> InertSet -> TcS InertSet
+solveInteractWithDepth ctxt@(max_depth,n,stack) ws inert
| isEmptyWorkList ws
= return inert
| otherwise
= do { traceTcS "solveInteractWithDepth" $
vcat [ text "Current depth =" <+> ppr n
- , text "Max depth =" <+> ppr max_depth ]
+ , text "Max depth =" <+> ppr max_depth
+ , text "ws =" <+> ppr ws ]
-- Solve equalities first
; let (eqs, non_eqs) = Bag.partitionBag isCTyEqCan ws
- ; is_from_eqs <- Bag.foldlBagM (solveOneWithDepth ctxt) inert eqs
- ; Bag.foldlBagM (solveOneWithDepth ctxt) is_from_eqs non_eqs }
+ ; is_from_eqs <- Bag.foldrBagM (solveOneWithDepth ctxt) inert eqs
+ ; Bag.foldrBagM (solveOneWithDepth ctxt) is_from_eqs non_eqs }
+ -- use foldr to preserve the order
------------------
-- Fully interact the given work item with an inert set, and return a
-- new inert set which has assimilated the new information.
solveOneWithDepth :: (Int, Int, [WorkItem])
- -> InertSet -> WorkItem -> TcS InertSet
-solveOneWithDepth (max_depth, n, stack) inert work
- = do { traceTcS0 (indent ++ "Solving {") (ppr work)
- ; (new_inert, new_work) <- runSolverPipeline thePipeline inert work
+ -> WorkItem -> InertSet -> TcS InertSet
+solveOneWithDepth (max_depth, depth, stack) work inert
+ = do { traceFireTcS depth (text "Solving {" <+> ppr work)
+ ; (new_inert, new_work) <- runSolverPipeline depth thePipeline inert work
- ; traceTcS0 (indent ++ "Subgoals:") (ppr new_work)
-
-- Recursively solve the new work generated
-- from workItem, with a greater depth
- ; res_inert <- solveInteractWithDepth (max_depth, n+1, work:stack)
- new_inert new_work
+ ; res_inert <- solveInteractWithDepth (max_depth, depth+1, work:stack) new_work new_inert
+
+ ; traceFireTcS depth (text "Done }" <+> ppr work)
- ; traceTcS0 (indent ++ "Done }") (ppr work)
; return res_inert }
- where
- indent = replicate (2*n) ' '
thePipeline :: [(String,SimplifierStage)]
thePipeline = [ ("interact with inert eqs", interactWithInertEqsStage)
\begin{code}
spontaneousSolveStage :: SimplifierStage
-spontaneousSolveStage workItem inerts
+spontaneousSolveStage depth workItem inerts
= do { mSolve <- trySpontaneousSolve workItem
; case mSolve of
-- its status change. This in turn may produce more work.
-- We do this *right now* (rather than just putting workItem'
-- back into the work-list) because we've solved
- -> do { (new_inert, new_work) <- runSolverPipeline
+ -> do { bumpStepCountTcS
+ ; traceFireTcS depth (ptext (sLit "Spontaneous (w/d)") <+> ppr workItem)
+ ; (new_inert, new_work) <- runSolverPipeline depth
[ ("recursive interact with inert eqs", interactWithInertEqsStage)
, ("recursive interact with inerts", interactWithInertsStage)
] inerts workItem'
| otherwise
-> -- Original was given; he must then be inert all right, and
-- workList' are all givens from flattening
- return $ SR { sr_new_work = emptyWorkList
- , sr_inerts = inerts `updInertSet` workItem'
- , sr_stop = Stop }
+ do { bumpStepCountTcS
+ ; traceFireTcS depth (ptext (sLit "Spontaneous (g)") <+> ppr workItem)
+ ; return $ SR { sr_new_work = emptyWorkList
+ , sr_inerts = inerts `updInertSet` workItem'
+ , sr_stop = Stop } }
SPError -> -- Return with no new work
return $ SR { sr_new_work = emptyWorkList
, sr_inerts = inerts
| otherwise
= do { tch1 <- isTouchableMetaTyVar tv1
; if tch1 then trySpontaneousEqOneWay cv gw tv1 xi
- else do { traceTcS "Untouchable LHS, can't spontaneously solve workitem:" (ppr workItem)
+ else do { traceTcS "Untouchable LHS, can't spontaneously solve workitem:"
+ (ppr workItem)
; return SPCantSolve }
}
; setWantedTyBind tv xi
; cv_given <- newGivenCoVar (mkTyVarTy tv) xi xi
- ; when (isWanted wd) (setWantedCoBind cv xi)
+ ; when (isWanted wd) (setCoBind cv xi)
-- We don't want to do this for Derived, that's why we use 'when (isWanted wd)'
; return $ SPSolved (CTyEqCan { cc_id = cv_given
\end{code}
-
-
*********************************************************************************
* *
The interact-with-inert Stage
* *
*********************************************************************************
+Note [The Solver Invariant]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+We always add Givens first. So you might think that the solver has
+the invariant
+
+ If the work-item is Given,
+ then the inert item must be Given
+
+But this isn't quite true. Suppose we have,
+ c1: [W] beta ~ [alpha], c2 : [W] blah, c3 :[W] alpha ~ Int
+After processing the first two, we get
+ c1: [G] beta ~ [alpha], c2 : [W] blah
+Now, c3 does not interact with the given c1, so when we spontaneously
+solve c3, we must re-react it with the inert set. So we can attempt a
+reaction between inert c2 [W] and work-item c3 [G].
+
+It *is* true that [Solver Invariant]
+ If the work-item is Given,
+ AND there is a reaction
+ then the inert item must be Given
+or, equivalently,
+ If the work-item is Given,
+ and the inert item is Wanted/Derived
+ then there is no reaction
+
\begin{code}
-- Interaction result of WorkItem <~> AtomicInert
data InteractResult
, ir_new_work :: WorkList
-- new work items to add to the WorkList
+
+ , ir_fire :: Maybe String -- Tells whether a rule fired, and if so what
}
-- What to do with the inert reactant.
-data InertAction = KeepInert
- | DropInert
- | KeepTransformedInert CanonicalCt -- Keep a slightly transformed inert
+data InertAction = KeepInert | DropInert
-mkIRContinue :: Monad m => WorkItem -> InertAction -> WorkList -> m InteractResult
-mkIRContinue wi keep newWork = return $ IR (ContinueWith wi) keep newWork
+mkIRContinue :: String -> WorkItem -> InertAction -> WorkList -> TcS InteractResult
+mkIRContinue rule wi keep newWork
+ = return $ IR { ir_stop = ContinueWith wi, ir_inert_action = keep
+ , ir_new_work = newWork, ir_fire = Just rule }
-mkIRStop :: Monad m => InertAction -> WorkList -> m InteractResult
-mkIRStop keep newWork = return $ IR Stop keep newWork
+mkIRStopK :: String -> WorkList -> TcS InteractResult
+mkIRStopK rule newWork
+ = return $ IR { ir_stop = Stop, ir_inert_action = KeepInert
+ , ir_new_work = newWork, ir_fire = Just rule }
-dischargeWorkItem :: Monad m => m InteractResult
-dischargeWorkItem = mkIRStop KeepInert emptyWorkList
+mkIRStopD :: String -> WorkList -> TcS InteractResult
+mkIRStopD rule newWork
+ = return $ IR { ir_stop = Stop, ir_inert_action = DropInert
+ , ir_new_work = newWork, ir_fire = Just rule }
noInteraction :: Monad m => WorkItem -> m InteractResult
-noInteraction workItem = mkIRContinue workItem KeepInert emptyWorkList
+noInteraction wi
+ = return $ IR { ir_stop = ContinueWith wi, ir_inert_action = KeepInert
+ , ir_new_work = emptyWorkList, ir_fire = Nothing }
data WhichComesFromInert = LeftComesFromInert | RightComesFromInert
-- See Note [Efficient Orientation]
-- interact the WorkItem with the entire equalities of the InertSet
interactWithInertEqsStage :: SimplifierStage
-interactWithInertEqsStage workItem inert
- = Bag.foldlBagM interactNext initITR (inert_eqs inert)
+interactWithInertEqsStage depth workItem inert
+ = Bag.foldrBagM (interactNext depth) initITR (inert_eqs inert)
+ -- use foldr to preserve the order
where
initITR = SR { sr_inerts = inert { inert_eqs = emptyCCan }
, sr_new_work = emptyWorkList
-- "Other" constraints it contains!
interactWithInertsStage :: SimplifierStage
-interactWithInertsStage workItem inert
+interactWithInertsStage depth workItem inert
= let (relevant, inert_residual) = getISRelevant workItem inert
initITR = SR { sr_inerts = inert_residual
, sr_new_work = emptyWorkList
, sr_stop = ContinueWith workItem }
- in Bag.foldlBagM interactNext initITR relevant
+ in Bag.foldrBagM (interactNext depth) initITR relevant
+ -- use foldr to preserve the order
where
getISRelevant :: CanonicalCt -> InertSet -> (CanonicalCts, InertSet)
getISRelevant (CFrozenErr {}) is = (emptyCCan, is)
, inert_ips = emptyCCanMap
, inert_funeqs = emptyCCanMap })
-interactNext :: StageResult -> AtomicInert -> TcS StageResult
-interactNext it inert
- | ContinueWith workItem <- sr_stop it
- = do { let inerts = sr_inerts it
-
- ; ir <- interactWithInert inert workItem
-
- -- New inerts depend on whether we KeepInert or not and must
- -- be updated with FD improvement information from the interaction result (ir)
- ; let inerts_new = case ir_inert_action ir of
- KeepInert -> inerts `updInertSet` inert
- DropInert -> inerts
- KeepTransformedInert inert' -> inerts `updInertSet` inert'
+interactNext :: SubGoalDepth -> AtomicInert -> StageResult -> TcS StageResult
+interactNext depth inert it
+ | ContinueWith work_item <- sr_stop it
+ = do { let inerts = sr_inerts it
+
+ ; IR { ir_new_work = new_work, ir_inert_action = inert_action
+ , ir_fire = fire_info, ir_stop = stop }
+ <- interactWithInert inert work_item
+
+ ; let mk_msg rule
+ = text rule <+> keep_doc
+ <+> vcat [ ptext (sLit "Inert =") <+> ppr inert
+ , ptext (sLit "Work =") <+> ppr work_item
+ , ppUnless (isEmptyBag new_work) $
+ ptext (sLit "New =") <+> ppr new_work ]
+ keep_doc = case inert_action of
+ KeepInert -> ptext (sLit "[keep]")
+ DropInert -> ptext (sLit "[drop]")
+ ; case fire_info of
+ Just rule -> do { bumpStepCountTcS
+ ; traceFireTcS depth (mk_msg rule) }
+ Nothing -> return ()
+
+ -- New inerts depend on whether we KeepInert or not
+ ; let inerts_new = case inert_action of
+ KeepInert -> inerts `updInertSet` inert
+ DropInert -> inerts
; return $ SR { sr_inerts = inerts_new
- , sr_new_work = sr_new_work it `unionWorkLists` ir_new_work ir
- , sr_stop = ir_stop ir } }
+ , sr_new_work = sr_new_work it `unionWorkLists` new_work
+ , sr_stop = stop } }
| otherwise
= return $ it { sr_inerts = (sr_inerts it) `updInertSet` inert }
-- Do a single interaction of two constraints.
interactWithInert :: AtomicInert -> WorkItem -> TcS InteractResult
-interactWithInert inert workitem
- = do { ctxt <- getTcSContext
- ; let is_allowed = allowedInteraction (simplEqsOnly ctxt) inert workitem
+interactWithInert inert workItem
+ = do { ctxt <- getTcSContext
+ ; let is_allowed = allowedInteraction (simplEqsOnly ctxt) inert workItem
- ; if is_allowed then
- doInteractWithInert inert workitem
+ ; if is_allowed then
+ doInteractWithInert inert workItem
else
- noInteraction workitem
- }
+ noInteraction workItem
+ }
allowedInteraction :: Bool -> AtomicInert -> WorkItem -> Bool
-- Allowed interactions
-- Identical class constraints.
doInteractWithInert
- (CDictCan { cc_id = d1, cc_flavor = fl1, cc_class = cls1, cc_tyargs = tys1 })
- workItem@(CDictCan { cc_flavor = fl2, cc_class = cls2, cc_tyargs = tys2 })
+ inertItem@(CDictCan { cc_id = d1, cc_flavor = fl1, cc_class = cls1, cc_tyargs = tys1 })
+ workItem@(CDictCan { cc_id = d2, cc_flavor = fl2, cc_class = cls2, cc_tyargs = tys2 })
| cls1 == cls2 && (and $ zipWith tcEqType tys1 tys2)
- = solveOneFromTheOther (d1,fl1) workItem
+ = solveOneFromTheOther "Cls/Cls" (EvId d1,fl1) workItem
| cls1 == cls2 && (not (isGiven fl1 && isGiven fl2))
= -- See Note [When improvement happens]
do { let pty1 = ClassP cls1 tys1
pty2 = ClassP cls2 tys2
- work_item_pred_loc = (pty2, pprFlavorArising fl2)
inert_pred_loc = (pty1, pprFlavorArising fl1)
- loc = combineCtLoc fl1 fl2
- eqn_pred_locs = improveFromAnother work_item_pred_loc inert_pred_loc
- -- See Note [Efficient Orientation]
-
- ; derived_evs <- mkDerivedFunDepEqns loc eqn_pred_locs
- ; fd_work <- mapM mkCanonicalFEV derived_evs
- -- See Note [Generating extra equalities]
-
- ; mkIRContinue workItem KeepInert (unionManyBags fd_work)
- }
+ work_item_pred_loc = (pty2, pprFlavorArising fl2)
+ fd_eqns = improveFromAnother
+ inert_pred_loc -- the template
+ work_item_pred_loc -- the one we aim to rewrite
+ -- See Note [Efficient Orientation]
+
+ ; m <- rewriteWithFunDeps fd_eqns tys2 fl2
+ ; case m of
+ Nothing -> noInteraction workItem
+ Just (rewritten_tys2, cos2, fd_work)
+ | tcEqTypes tys1 rewritten_tys2
+ -> -- Solve him on the spot in this case
+ case fl2 of
+ Given {} -> pprPanic "Unexpected given" (ppr inertItem $$ ppr workItem)
+ Derived {} -> mkIRStopK "Cls/Cls fundep (solved)" fd_work
+ Wanted {}
+ | isDerived fl1
+ -> do { setDictBind d2 (EvCast d1 dict_co)
+ ; let inert_w = inertItem { cc_flavor = fl2 }
+ -- A bit naughty: we take the inert Derived,
+ -- turn it into a Wanted, use it to solve the work-item
+ -- and put it back into the work-list
+ -- Maybe rather than starting again, we could *replace* the
+ -- inert item, but it's safe and simple to restart
+ ; mkIRStopD "Cls/Cls fundep (solved)" (inert_w `consBag` fd_work) }
+
+ | otherwise
+ -> do { setDictBind d2 (EvCast d1 dict_co)
+ ; mkIRStopK "Cls/Cls fundep (solved)" fd_work }
+
+ | otherwise
+ -> -- We could not quite solve him, but we still rewrite him
+ -- Example: class C a b c | a -> b
+ -- Given: C Int Bool x, Wanted: C Int beta y
+ -- Then rewrite the wanted to C Int Bool y
+ -- but note that is still not identical to the given
+ -- The important thing is that the rewritten constraint is
+ -- inert wrt the given.
+ -- However it is not necessarily inert wrt previous inert-set items.
+ -- class C a b c d | a -> b, b c -> d
+ -- Inert: c1: C b Q R S, c2: C P Q a b
+ -- Work: C P alpha R beta
+ -- Does not react with c1; reacts with c2, with alpha:=Q
+ -- NOW it reacts with c1!
+ -- So we must stop, and put the rewritten constraint back in the work list
+ do { d2' <- newDictVar cls1 rewritten_tys2
+ ; case fl2 of
+ Given {} -> pprPanic "Unexpected given" (ppr inertItem $$ ppr workItem)
+ Wanted {} -> setDictBind d2 (EvCast d2' dict_co)
+ Derived {} -> return ()
+ ; let workItem' = workItem { cc_id = d2', cc_tyargs = rewritten_tys2 }
+ ; mkIRStopK "Cls/Cls fundep (partial)" (workItem' `consBag` fd_work) }
+
+ where
+ dict_co = mkTyConCoercion (classTyCon cls1) cos2
+ }
-- Class constraint and given equality: use the equality to rewrite
-- the class constraint.
= do { rewritten_dict <- rewriteDict (cv,tv,xi) (dv,wfl,cl,xis)
-- Continue with rewritten Dictionary because we can only be in the
-- interactWithEqsStage, so the dictionary is inert.
- ; mkIRContinue rewritten_dict KeepInert emptyWorkList }
+ ; mkIRContinue "Eq/Cls" rewritten_dict KeepInert emptyWorkList }
doInteractWithInert (CDictCan { cc_id = dv, cc_flavor = ifl, cc_class = cl, cc_tyargs = xis })
workItem@(CTyEqCan { cc_id = cv, cc_flavor = wfl, cc_tyvar = tv, cc_rhs = xi })
| wfl `canRewrite` ifl
, tv `elemVarSet` tyVarsOfTypes xis
= do { rewritten_dict <- rewriteDict (cv,tv,xi) (dv,ifl,cl,xis)
- ; mkIRContinue workItem DropInert (workListFromCCan rewritten_dict) }
+ ; mkIRContinue "Cls/Eq" workItem DropInert (workListFromCCan rewritten_dict) }
-- Class constraint and given equality: use the equality to rewrite
-- the class constraint.
| ifl `canRewrite` wfl
, tv `elemVarSet` tyVarsOfType ty
= do { rewritten_ip <- rewriteIP (cv,tv,xi) (ipid,wfl,nm,ty)
- ; mkIRContinue rewritten_ip KeepInert emptyWorkList }
+ ; mkIRContinue "Eq/IP" rewritten_ip KeepInert emptyWorkList }
doInteractWithInert (CIPCan { cc_id = ipid, cc_flavor = ifl, cc_ip_nm = nm, cc_ip_ty = ty })
workItem@(CTyEqCan { cc_id = cv, cc_flavor = wfl, cc_tyvar = tv, cc_rhs = xi })
| wfl `canRewrite` ifl
, tv `elemVarSet` tyVarsOfType ty
= do { rewritten_ip <- rewriteIP (cv,tv,xi) (ipid,ifl,nm,ty)
- ; mkIRContinue workItem DropInert (workListFromCCan rewritten_ip) }
+ ; mkIRContinue "IP/Eq" workItem DropInert (workListFromCCan rewritten_ip) }
-- Two implicit parameter constraints. If the names are the same,
-- but their types are not, we generate a wanted type equality
= -- See Note [Overriding implicit parameters]
-- Dump the inert item, override totally with the new one
-- Do not require type equality
- mkIRContinue workItem DropInert emptyWorkList
+ -- For example, given let ?x::Int = 3 in let ?x::Bool = True in ...
+ -- we must *override* the outer one with the inner one
+ mkIRContinue "IP/IP override" workItem DropInert emptyWorkList
| nm1 == nm2 && ty1 `tcEqType` ty2
- = solveOneFromTheOther (id1,ifl) workItem
+ = solveOneFromTheOther "IP/IP" (EvId id1,ifl) workItem
| nm1 == nm2
= -- See Note [When improvement happens]
- do { co_var <- newWantedCoVar ty2 ty1 -- See Note [Efficient Orientation]
+ do { co_var <- newCoVar ty2 ty1 -- See Note [Efficient Orientation]
; let flav = Wanted (combineCtLoc ifl wfl)
; cans <- mkCanonical flav co_var
- ; mkIRContinue workItem KeepInert cans }
-
-
+ ; mkIRContinue "IP/IP fundep" workItem KeepInert cans }
-- Never rewrite a given with a wanted equality, and a type function
-- equality can never rewrite an equality. We rewrite LHS *and* RHS
| ifl `canRewrite` wfl
, tv `elemVarSet` tyVarsOfTypes (xi2:args) -- Rewrite RHS as well
= do { rewritten_funeq <- rewriteFunEq (cv1,tv,xi1) (cv2,wfl,tc,args,xi2)
- ; mkIRStop KeepInert (workListFromCCan rewritten_funeq) }
+ ; mkIRStopK "Eq/FunEq" (workListFromCCan rewritten_funeq) }
 -- Must Stop here, because we may no longer be inert after the rewriting.
-- Inert: function equality, work item: equality
| wfl `canRewrite` ifl
, tv `elemVarSet` tyVarsOfTypes (xi1:args) -- Rewrite RHS as well
= do { rewritten_funeq <- rewriteFunEq (cv2,tv,xi2) (cv1,ifl,tc,args,xi1)
- ; mkIRContinue workItem DropInert (workListFromCCan rewritten_funeq) }
+ ; mkIRContinue "FunEq/Eq" workItem DropInert (workListFromCCan rewritten_funeq) }
-- One may think that we could (KeepTransformedInert rewritten_funeq)
-- but that is wrong, because it may end up not being inert with respect
-- to future inerts. Example:
, cc_tyargs = args2, cc_rhs = xi2 })
| fl1 `canSolve` fl2 && lhss_match
= do { cans <- rewriteEqLHS LeftComesFromInert (mkCoVarCoercion cv1,xi1) (cv2,fl2,xi2)
- ; mkIRStop KeepInert cans }
+ ; mkIRStopK "FunEq/FunEq" cans }
| fl2 `canSolve` fl1 && lhss_match
= do { cans <- rewriteEqLHS RightComesFromInert (mkCoVarCoercion cv2,xi2) (cv1,fl1,xi1)
- ; mkIRContinue workItem DropInert cans }
+ ; mkIRContinue "FunEq/FunEq" workItem DropInert cans }
where
lhss_match = tc1 == tc2 && and (zipWith tcEqType args1 args2)
-- Check for matching LHS
| fl1 `canSolve` fl2 && tv1 == tv2
= do { cans <- rewriteEqLHS LeftComesFromInert (mkCoVarCoercion cv1,xi1) (cv2,fl2,xi2)
- ; mkIRStop KeepInert cans }
+ ; mkIRStopK "Eq/Eq lhs" cans }
| fl2 `canSolve` fl1 && tv1 == tv2
= do { cans <- rewriteEqLHS RightComesFromInert (mkCoVarCoercion cv2,xi2) (cv1,fl1,xi1)
- ; mkIRContinue workItem DropInert cans }
+ ; mkIRContinue "Eq/Eq lhs" workItem DropInert cans }
+
-- Check for rewriting RHS
| fl1 `canRewrite` fl2 && tv1 `elemVarSet` tyVarsOfType xi2
= do { rewritten_eq <- rewriteEqRHS (cv1,tv1,xi1) (cv2,fl2,tv2,xi2)
- ; mkIRStop KeepInert rewritten_eq }
+ ; mkIRStopK "Eq/Eq rhs" rewritten_eq }
+
| fl2 `canRewrite` fl1 && tv2 `elemVarSet` tyVarsOfType xi1
= do { rewritten_eq <- rewriteEqRHS (cv2,tv2,xi2) (cv1,fl1,tv1,xi1)
- ; mkIRContinue workItem DropInert rewritten_eq }
+ ; mkIRContinue "Eq/Eq rhs" workItem DropInert rewritten_eq }
doInteractWithInert (CTyEqCan { cc_id = cv1, cc_flavor = fl1, cc_tyvar = tv1, cc_rhs = xi1 })
(CFrozenErr { cc_id = cv2, cc_flavor = fl2 })
| fl1 `canRewrite` fl2 && tv1 `elemVarSet` tyVarsOfEvVar cv2
= do { rewritten_frozen <- rewriteFrozen (cv1, tv1, xi1) (cv2, fl2)
- ; mkIRStop KeepInert rewritten_frozen }
+ ; mkIRStopK "Frozen/Eq" rewritten_frozen }
doInteractWithInert (CFrozenErr { cc_id = cv2, cc_flavor = fl2 })
workItem@(CTyEqCan { cc_id = cv1, cc_flavor = fl1, cc_tyvar = tv1, cc_rhs = xi1 })
| fl1 `canRewrite` fl2 && tv1 `elemVarSet` tyVarsOfEvVar cv2
= do { rewritten_frozen <- rewriteFrozen (cv1, tv1, xi1) (cv2, fl2)
- ; mkIRContinue workItem DropInert rewritten_frozen }
+ ; mkIRContinue "Frozen/Eq" workItem DropInert rewritten_frozen }
-- Fall-through case for all other situations
doInteractWithInert _ workItem = noInteraction workItem
xi2' = substTyWith [tv] [xi1] xi2
xi2_co = substTyWith [tv] [mkCoVarCoercion cv1] xi2 -- xi2_co :: xi2 ~ xi2'
- ; cv2' <- case gw of
- Wanted {} -> do { cv2' <- newWantedCoVar (mkTyConApp tc args') xi2'
- ; setWantedCoBind cv2 $
- fun_co `mkTransCoercion`
- mkCoVarCoercion cv2' `mkTransCoercion` mkSymCoercion xi2_co
- ; return cv2' }
- Given {} -> newGivenCoVar (mkTyConApp tc args') xi2' $
- mkSymCoercion fun_co `mkTransCoercion`
- mkCoVarCoercion cv2 `mkTransCoercion` xi2_co
- Derived {} -> newDerivedId (EqPred (mkTyConApp tc args') xi2')
+
+ ; cv2' <- newCoVar (mkTyConApp tc args') xi2'
+ ; case gw of
+ Wanted {} -> setCoBind cv2 (fun_co `mkTransCoercion`
+ mkCoVarCoercion cv2' `mkTransCoercion`
+ mkSymCoercion xi2_co)
+ Given {} -> setCoBind cv2' (mkSymCoercion fun_co `mkTransCoercion`
+ mkCoVarCoercion cv2 `mkTransCoercion`
+ xi2_co)
+ Derived {} -> return ()
; return (CFunEqCan { cc_id = cv2'
, cc_flavor = gw
rewriteEqRHS (cv1,tv1,xi1) (cv2,gw,tv2,xi2)
| Just tv2' <- tcGetTyVar_maybe xi2'
, tv2 == tv2' -- In this case xi2[xi1/tv1] = tv2, so we have tv2~tv2
- = do { when (isWanted gw) (setWantedCoBind cv2 (mkSymCoercion co2'))
+ = do { when (isWanted gw) (setCoBind cv2 (mkSymCoercion co2'))
; return emptyCCan }
| otherwise
- = do { cv2' <-
- case gw of
- Wanted {}
- -> do { cv2' <- newWantedCoVar (mkTyVarTy tv2) xi2'
- ; setWantedCoBind cv2 $
- mkCoVarCoercion cv2' `mkTransCoercion` mkSymCoercion co2'
- ; return cv2' }
- Given {}
- -> newGivenCoVar (mkTyVarTy tv2) xi2' $
- mkCoVarCoercion cv2 `mkTransCoercion` co2'
- Derived {}
- -> newDerivedId (EqPred (mkTyVarTy tv2) xi2')
-
- ; canEq gw cv2' (mkTyVarTy tv2) xi2'
- }
+ = do { cv2' <- newCoVar (mkTyVarTy tv2) xi2'
+ ; case gw of
+ Wanted {} -> setCoBind cv2 $ mkCoVarCoercion cv2' `mkTransCoercion`
+ mkSymCoercion co2'
+ Given {} -> setCoBind cv2' $ mkCoVarCoercion cv2 `mkTransCoercion`
+ co2'
+ Derived {} -> return ()
+ ; canEq gw cv2' (mkTyVarTy tv2) xi2' }
where
xi2' = substTyWith [tv1] [xi1] xi2
co2' = substTyWith [tv1] [mkCoVarCoercion cv1] xi2 -- xi2 ~ xi2[xi1/tv1]
-
rewriteEqLHS :: WhichComesFromInert -> (Coercion,Xi) -> (CoVar,CtFlavor,Xi) -> TcS WorkList
 -- Used to interact two equalities of the following form:
-- First Equality: co1: (XXX ~ xi1)
-- Second Equality: cv2: (XXX ~ xi2)
--- Where the cv1 `canSolve` cv2 equality
+-- Where the cv1 `canRewrite` cv2 equality
-- We have an option of creating new work (xi1 ~ xi2) OR (xi2 ~ xi1),
-- See Note [Efficient Orientation] for that
-rewriteEqLHS which (co1,xi1) (cv2,gw,xi2)
- = do { cv2' <- case (isWanted gw, which) of
- (True,LeftComesFromInert) ->
- do { cv2' <- newWantedCoVar xi2 xi1
- ; setWantedCoBind cv2 $
- co1 `mkTransCoercion` mkSymCoercion (mkCoVarCoercion cv2')
- ; return cv2' }
- (True,RightComesFromInert) ->
- do { cv2' <- newWantedCoVar xi1 xi2
- ; setWantedCoBind cv2 $
- co1 `mkTransCoercion` mkCoVarCoercion cv2'
- ; return cv2' }
- (False,LeftComesFromInert) ->
- if isGiven gw then
- newGivenCoVar xi2 xi1 $
- mkSymCoercion (mkCoVarCoercion cv2) `mkTransCoercion` co1
- else newDerivedId (EqPred xi2 xi1)
- (False,RightComesFromInert) ->
- if isGiven gw then
- newGivenCoVar xi1 xi2 $
- mkSymCoercion co1 `mkTransCoercion` mkCoVarCoercion cv2
- else newDerivedId (EqPred xi1 xi2)
+rewriteEqLHS LeftComesFromInert (co1,xi1) (cv2,gw,xi2)
+ = do { cv2' <- newCoVar xi2 xi1
+ ; case gw of
+ Wanted {} -> setCoBind cv2 $
+ co1 `mkTransCoercion` mkSymCoercion (mkCoVarCoercion cv2')
+ Given {} -> setCoBind cv2' $
+ mkSymCoercion (mkCoVarCoercion cv2) `mkTransCoercion` co1
+ Derived {} -> return ()
+ ; mkCanonical gw cv2' }
+
+rewriteEqLHS RightComesFromInert (co1,xi1) (cv2,gw,xi2)
+ = do { cv2' <- newCoVar xi1 xi2
+ ; case gw of
+ Wanted {} -> setCoBind cv2 $
+ co1 `mkTransCoercion` mkCoVarCoercion cv2'
+ Given {} -> setCoBind cv2' $
+ mkSymCoercion co1 `mkTransCoercion` mkCoVarCoercion cv2
+ Derived {} -> return ()
; mkCanonical gw cv2' }
-
+
rewriteFrozen :: (CoVar,TcTyVar,Xi) -> (CoVar,CtFlavor) -> TcS WorkList
rewriteFrozen (cv1, tv1, xi1) (cv2, fl2)
- = do { cv2' <-
- case fl2 of
- Wanted {} -> do { cv2' <- newWantedCoVar ty2a' ty2b'
- -- ty2a[xi1/tv1] ~ ty2b[xi1/tv1]
- ; setWantedCoBind cv2 $
- co2a' `mkTransCoercion`
- mkCoVarCoercion cv2' `mkTransCoercion`
- mkSymCoercion co2b'
- ; return cv2' }
-
- Given {} -> newGivenCoVar ty2a' ty2b' $
- mkSymCoercion co2a' `mkTransCoercion`
- mkCoVarCoercion cv2 `mkTransCoercion`
- co2b'
-
- Derived {} -> newDerivedId (EqPred ty2a' ty2b')
+ = do { cv2' <- newCoVar ty2a' ty2b' -- ty2a[xi1/tv1] ~ ty2b[xi1/tv1]
+ ; case fl2 of
+ Wanted {} -> setCoBind cv2 $ co2a' `mkTransCoercion`
+ mkCoVarCoercion cv2' `mkTransCoercion`
+ mkSymCoercion co2b'
+
+ Given {} -> setCoBind cv2' $ mkSymCoercion co2a' `mkTransCoercion`
+ mkCoVarCoercion cv2 `mkTransCoercion`
+ co2b'
+
+ Derived {} -> return ()
+
; return (singleCCan $ CFrozenErr { cc_id = cv2', cc_flavor = fl2 }) }
where
(ty2a, ty2b) = coVarKind cv2 -- cv2 : ty2a ~ ty2b
co2a' = substTyWith [tv1] [mkCoVarCoercion cv1] ty2a -- ty2a ~ ty2a[xi1/tv1]
co2b' = substTyWith [tv1] [mkCoVarCoercion cv1] ty2b -- ty2b ~ ty2b[xi1/tv1]
-solveOneFromTheOther :: (EvVar, CtFlavor) -> CanonicalCt -> TcS InteractResult
--- First argument inert, second argument workitem. They both represent
--- wanted/given/derived evidence for the *same* predicate so we try here to
--- discharge one directly from the other.
+solveOneFromTheOther :: String -> (EvTerm, CtFlavor) -> CanonicalCt -> TcS InteractResult
+-- First argument inert, second argument work-item. They both represent
+-- wanted/given/derived evidence for the *same* predicate so
+-- we can discharge one directly from the other.
--
-- Precondition: value evidence only (implicit parameters, classes)
-- not coercion
-solveOneFromTheOther (iid,ifl) workItem
- | ifl `canSolve` wfl
- = do { when (isWanted wfl) $ setEvBind wid (EvId iid)
- -- Overwrite the binding, if one exists
- -- For Givens, which are lambda-bound, nothing to overwrite,
- ; dischargeWorkItem }
- | wfl `canSolve` ifl
- = do { when (isWanted ifl) $ setEvBind iid (EvId wid)
- ; mkIRContinue workItem DropInert emptyWorkList }
-
- | otherwise -- One of the two is Derived, we can just throw it away,
- -- preferrably the work item.
- = if isDerived wfl then dischargeWorkItem
- else mkIRContinue workItem DropInert emptyWorkList
+solveOneFromTheOther info (ev_term,ifl) workItem
+ | isDerived wfl
+ = mkIRStopK ("Solved[DW] " ++ info) emptyWorkList
+
+ | isDerived ifl -- The inert item is Derived, we can just throw it away,
+ -- The workItem is inert wrt earlier inert-set items,
+ -- so it's safe to continue on from this point
+ = mkIRContinue ("Solved[DI] " ++ info) workItem DropInert emptyWorkList
+ | otherwise
+ = ASSERT( ifl `canSolve` wfl )
+ -- Because of Note [The Solver Invariant], plus Derived dealt with
+ do { when (isWanted wfl) $ setEvBind wid ev_term
+ -- Overwrite the binding, if one exists
+ -- If both are Given, we already have evidence; no need to duplicate
+ ; mkIRStopK ("Solved " ++ info) emptyWorkList }
where
wfl = cc_flavor workItem
wid = cc_id workItem
-- arising from top-level instances.
topReactionsStage :: SimplifierStage
-topReactionsStage workItem inerts
+topReactionsStage depth workItem inerts
= do { tir <- tryTopReact workItem
; case tir of
NoTopInt ->
, sr_new_work = emptyWorkList
, sr_stop = ContinueWith workItem }
SomeTopInt tir_new_work tir_new_inert ->
- return $ SR { sr_inerts = inerts
- , sr_new_work = tir_new_work
- , sr_stop = tir_new_inert
- }
+ do { bumpStepCountTcS
+ ; traceFireTcS depth (ptext (sLit "Top react")
+ <+> vcat [ ptext (sLit "Work =") <+> ppr workItem
+ , ptext (sLit "New =") <+> ppr tir_new_work ])
+ ; return $ SR { sr_inerts = inerts
+ , sr_new_work = tir_new_work
+ , sr_stop = tir_new_inert
+ } }
}
tryTopReact :: WorkItem -> TcS TopInteractResult
= return NoTopInt -- NB: Superclasses already added since it's canonical
-- Derived dictionary: just look for functional dependencies
-doTopReact workItem@(CDictCan { cc_flavor = Derived loc
+doTopReact workItem@(CDictCan { cc_flavor = fl@(Derived loc)
, cc_class = cls, cc_tyargs = xis })
- = do { fd_work <- findClassFunDeps cls xis loc
- ; if isEmptyWorkList fd_work then
- return NoTopInt
- else return $ SomeTopInt { tir_new_work = fd_work
- , tir_new_inert = ContinueWith workItem } }
+ = do { instEnvs <- getInstEnvs
+ ; let fd_eqns = improveFromInstEnv instEnvs
+ (ClassP cls xis, pprArisingAt loc)
+ ; m <- rewriteWithFunDeps fd_eqns xis fl
+ ; case m of
+ Nothing -> return NoTopInt
+ Just (xis',_,fd_work) ->
+ let workItem' = workItem { cc_tyargs = xis' }
+ -- Deriveds are not supposed to have identity (cc_id is unused!)
+ in return $ SomeTopInt { tir_new_work = fd_work
+ , tir_new_inert = ContinueWith workItem' } }
+
-- Wanted dictionary
-doTopReact workItem@(CDictCan { cc_id = dv, cc_flavor = Wanted loc
- , cc_class = cls, cc_tyargs = xis })
+doTopReact workItem@(CDictCan { cc_id = dv, cc_flavor = fl@(Wanted loc)
+ , cc_class = cls, cc_tyargs = xis })
= do { -- See Note [MATCHING-SYNONYMS]
; lkp_inst_res <- matchClassInst cls xis loc
- ; case lkp_inst_res of
- NoInstance ->
- do { traceTcS "doTopReact/ no class instance for" (ppr dv)
- ; fd_work <- findClassFunDeps cls xis loc
- ; return $ SomeTopInt
- { tir_new_work = fd_work
- , tir_new_inert = ContinueWith workItem } }
-
- GenInst wtvs ev_term -> -- Solved
+ ; case lkp_inst_res of
+ NoInstance ->
+ do { traceTcS "doTopReact/ no class instance for" (ppr dv)
+
+ ; instEnvs <- getInstEnvs
+ ; let fd_eqns = improveFromInstEnv instEnvs
+ (ClassP cls xis, pprArisingAt loc)
+ ; m <- rewriteWithFunDeps fd_eqns xis fl
+ ; case m of
+ Nothing -> return NoTopInt
+ Just (xis',cos,fd_work) ->
+ do { let dict_co = mkTyConCoercion (classTyCon cls) cos
+ ; dv'<- newDictVar cls xis'
+ ; setDictBind dv (EvCast dv' dict_co)
+ ; let workItem' = CDictCan { cc_id = dv', cc_flavor = fl,
+ cc_class = cls, cc_tyargs = xis' }
+ ; return $
+ SomeTopInt { tir_new_work = singleCCan workItem' `andCCan` fd_work
+ , tir_new_inert = Stop } } }
+
+ GenInst wtvs ev_term -- Solved
-- No need to do fundeps stuff here; the instance
-- matches already so we won't get any more info
-- from functional dependencies
- do { traceTcS "doTopReact/ found class instance for" (ppr dv)
- ; setDictBind dv ev_term
- ; inst_work <- canWanteds wtvs
- ; if null wtvs
+ | null wtvs
+ -> do { traceTcS "doTopReact/ found nullary class instance for" (ppr dv)
+ ; setDictBind dv ev_term
-- Solved in one step and no new wanted work produced.
-- i.e we directly matched a top-level instance
-- No point in caching this in 'inert'; hence Stop
- then return $ SomeTopInt { tir_new_work = emptyWorkList
- , tir_new_inert = Stop }
-
- -- Solved and new wanted work produced, you may cache the
- -- (tentatively solved) dictionary as Given! (used to be: Derived)
- else do { let solved = makeSolvedByInst workItem
- ; return $ SomeTopInt
- { tir_new_work = inst_work
- , tir_new_inert = ContinueWith solved } }
- } }
+ ; return $ SomeTopInt { tir_new_work = emptyWorkList
+ , tir_new_inert = Stop } }
+
+ | otherwise
+ -> do { traceTcS "doTopReact/ found nullary class instance for" (ppr dv)
+ ; setDictBind dv ev_term
+ -- Solved and new wanted work produced, you may cache the
+ -- (tentatively solved) dictionary as Given! (used to be: Derived)
+ ; let solved = workItem { cc_flavor = given_fl }
+ given_fl = Given (setCtLocOrigin loc UnkSkol)
+ ; inst_work <- canWanteds wtvs
+ ; return $ SomeTopInt { tir_new_work = inst_work
+ , tir_new_inert = ContinueWith solved } }
+ }
-- Type functions
doTopReact (CFunEqCan { cc_id = cv, cc_flavor = fl
-- See Note [Type synonym families] in TyCon
coe = mkTyConApp coe_tc rep_tys
; cv' <- case fl of
- Wanted {} -> do { cv' <- newWantedCoVar rhs_ty xi
- ; setWantedCoBind cv $
+ Wanted {} -> do { cv' <- newCoVar rhs_ty xi
+ ; setCoBind cv $
coe `mkTransCoercion`
mkCoVarCoercion cv'
; return cv' }
-- Any other work item does not react with any top-level equations
doTopReact _workItem = return NoTopInt
-
-----------------------
-findClassFunDeps :: Class -> [Xi] -> WantedLoc -> TcS WorkList
--- Look for a fundep reaction beween the wanted item
--- and a top-level instance declaration
-findClassFunDeps cls xis loc
- = do { instEnvs <- getInstEnvs
- ; let eqn_pred_locs = improveFromInstEnv (classInstances instEnvs)
- (ClassP cls xis, pprArisingAt loc)
- ; derived_evs <- mkDerivedFunDepEqns loc eqn_pred_locs
- -- NB: fundeps generate some wanted equalities, but
- -- we don't use their evidence for anything
- ; cts <- mapM mkCanonicalFEV derived_evs
- ; return $ unionManyBags cts }
\end{code}
--------------------------------
-- Creating new evidence variables
newEvVar, newCoVar, newEvVars,
- newWantedCoVar, writeWantedCoVar, readWantedCoVar,
+ writeWantedCoVar, readWantedCoVar,
newIP, newDict, newSilentGiven, isSilentEvVar,
newWantedEvVar, newWantedEvVars,
newEvVars theta = mapM newEvVar theta
newWantedEvVar :: TcPredType -> TcM EvVar
-newWantedEvVar (EqPred ty1 ty2) = newWantedCoVar ty1 ty2
+newWantedEvVar (EqPred ty1 ty2) = newCoVar ty1 ty2
newWantedEvVar (ClassP cls tys) = newDict cls tys
newWantedEvVar (IParam ip ty) = newIP ip ty
newWantedEvVars :: TcThetaType -> TcM [EvVar]
newWantedEvVars theta = mapM newWantedEvVar theta
-newWantedCoVar :: TcType -> TcType -> TcM CoVar
-newWantedCoVar ty1 ty2 = newCoVar ty1 ty2
-
--------------
newEvVar :: TcPredType -> TcM EvVar
-- Creates new *rigid* variables for predicates
% (c) The University of Glasgow 2006
% (c) The GRASP/AQUA Project, Glasgow University, 1992-1998
%
-\section[TcModule]{Typechecking a whole module}
+\section[TcModule]{Typechecking a whole module}
\begin{code}
module TcRnDriver (
mg_inst_env = tcg_inst_env tcg_env,
mg_fam_inst_env = tcg_fam_inst_env tcg_env,
mg_rules = [],
+ mg_vect_decls = [],
mg_anns = [],
mg_binds = core_binds,
-- It's a waste of time; and we may get debug warnings
-- about strangely-typed TyCons!
- -- Zonk the final code. This must be done last.
- -- Even simplifyTop may do some unification.
+ -- Zonk the final code. This must be done last.
+ -- Even simplifyTop may do some unification.
-- This pass also warns about missing type signatures
- let { (tcg_env, _) = tc_envs
- ; TcGblEnv { tcg_type_env = type_env,
- tcg_binds = binds,
- tcg_sigs = sig_ns,
- tcg_ev_binds = cur_ev_binds,
- tcg_imp_specs = imp_specs,
- tcg_rules = rules,
- tcg_fords = fords } = tcg_env
+ let { (tcg_env, _) = tc_envs
+ ; TcGblEnv { tcg_type_env = type_env,
+ tcg_binds = binds,
+ tcg_sigs = sig_ns,
+ tcg_ev_binds = cur_ev_binds,
+ tcg_imp_specs = imp_specs,
+ tcg_rules = rules,
+ tcg_vects = vects,
+ tcg_fords = fords } = tcg_env
; all_ev_binds = cur_ev_binds `unionBags` new_ev_binds } ;
- (bind_ids, ev_binds', binds', fords', imp_specs', rules')
- <- zonkTopDecls all_ev_binds binds sig_ns rules imp_specs fords ;
-
- let { final_type_env = extendTypeEnvWithIds type_env bind_ids
- ; tcg_env' = tcg_env { tcg_binds = binds',
- tcg_ev_binds = ev_binds',
- tcg_imp_specs = imp_specs',
- tcg_rules = rules',
- tcg_fords = fords' } } ;
-
- setGlobalTypeEnv tcg_env' final_type_env
+ (bind_ids, ev_binds', binds', fords', imp_specs', rules', vects')
+ <- zonkTopDecls all_ev_binds binds sig_ns rules vects imp_specs fords ;
+
+ let { final_type_env = extendTypeEnvWithIds type_env bind_ids
+ ; tcg_env' = tcg_env { tcg_binds = binds',
+ tcg_ev_binds = ev_binds',
+ tcg_imp_specs = imp_specs',
+ tcg_rules = rules',
+ tcg_vects = vects',
+ tcg_fords = fords' } } ;
+
+ setGlobalTypeEnv tcg_env' final_type_env
} }
tc_rn_src_decls :: ModDetails -> [LHsDecl RdrName] -> TcM (TcGblEnv, TcLclEnv)
hs_fords = for_decls,
hs_defds = def_decls,
hs_ruleds = rule_decls,
+ hs_vects = vect_decls,
hs_annds = _,
hs_valds = val_binds }) <- rnTopSrcDecls first_group
; (gbl_env, lie) <- captureConstraints $ setGblEnv tcg_env $ do {
; mapM_ (badBootDecl "foreign") for_decls
; mapM_ (badBootDecl "default") def_decls
; mapM_ (badBootDecl "rule") rule_decls
+ ; mapM_ (badBootDecl "vect") vect_decls
-- Typecheck type/class decls
; traceTc "Tc2" empty
hs_defds = default_decls,
hs_annds = annotation_decls,
hs_ruleds = rule_decls,
+ hs_vects = vect_decls,
hs_valds = val_binds })
= do { -- Type-check the type and class decls, and all imported decls
-- The latter come in via tycl_decls
setLclTypeEnv tcl_env $ do { -- Environment doesn't change now
- -- Second pass over class and instance declarations,
+ -- Second pass over class and instance declarations,
traceTc "Tc6" empty ;
- inst_binds <- tcInstDecls2 (concat tycl_decls) inst_infos ;
+ inst_binds <- tcInstDecls2 (concat tycl_decls) inst_infos ;
- -- Foreign exports
+ -- Foreign exports
traceTc "Tc7" empty ;
- (foe_binds, foe_decls) <- tcForeignExports foreign_decls ;
+ (foe_binds, foe_decls) <- tcForeignExports foreign_decls ;
-- Annotations
- annotations <- tcAnnotations annotation_decls ;
+ annotations <- tcAnnotations annotation_decls ;
- -- Rules
- rules <- tcRules rule_decls ;
+ -- Rules
+ rules <- tcRules rule_decls ;
- -- Wrap up
+ -- Vectorisation declarations
+ vects <- tcVectDecls vect_decls ;
+
+ -- Wrap up
traceTc "Tc7a" empty ;
tcg_env <- getGblEnv ;
let { all_binds = tc_val_binds `unionBags`
; sig_names = mkNameSet (collectHsValBinders val_binds)
`minusNameSet` getTypeSigNames val_binds
- -- Extend the GblEnv with the (as yet un-zonked)
- -- bindings, rules, foreign decls
- ; tcg_env' = tcg_env { tcg_binds = tcg_binds tcg_env `unionBags` all_binds
- , tcg_imp_specs = tcg_imp_specs tcg_env ++ specs1 ++ specs2 ++ specs3
+ -- Extend the GblEnv with the (as yet un-zonked)
+ -- bindings, rules, foreign decls
+ ; tcg_env' = tcg_env { tcg_binds = tcg_binds tcg_env `unionBags` all_binds
+ , tcg_imp_specs = tcg_imp_specs tcg_env ++ specs1 ++ specs2 ++
+ specs3
, tcg_sigs = tcg_sigs tcg_env `unionNameSets` sig_names
- , tcg_rules = tcg_rules tcg_env ++ rules
- , tcg_anns = tcg_anns tcg_env ++ annotations
- , tcg_fords = tcg_fords tcg_env ++ foe_decls ++ fi_decls } } ;
- return (tcg_env', tcl_env)
+ , tcg_rules = tcg_rules tcg_env ++ rules
+ , tcg_vects = tcg_vects tcg_env ++ vects
+ , tcg_anns = tcg_anns tcg_env ++ annotations
+ , tcg_fords = tcg_fords tcg_env ++ foe_decls ++ fi_decls } } ;
+ return (tcg_env', tcl_env)
}}}}}}
\end{code}
-- It's unpleasant having both pprModGuts and pprModDetails here
pprTcGblEnv :: TcGblEnv -> SDoc
pprTcGblEnv (TcGblEnv { tcg_type_env = type_env,
- tcg_insts = insts,
- tcg_fam_insts = fam_insts,
- tcg_rules = rules,
- tcg_imports = imports })
+ tcg_insts = insts,
+ tcg_fam_insts = fam_insts,
+ tcg_rules = rules,
+ tcg_vects = vects,
+ tcg_imports = imports })
= vcat [ ppr_types insts type_env
, ppr_tycons fam_insts type_env
- , ppr_insts insts
- , ppr_fam_insts fam_insts
- , vcat (map ppr rules)
- , ppr_gen_tycons (typeEnvTyCons type_env)
- , ptext (sLit "Dependent modules:") <+>
- ppr (sortBy cmp_mp $ eltsUFM (imp_dep_mods imports))
+ , ppr_insts insts
+ , ppr_fam_insts fam_insts
+ , vcat (map ppr rules)
+ , vcat (map ppr vects)
+ , ppr_gen_tycons (typeEnvTyCons type_env)
+ , ptext (sLit "Dependent modules:") <+>
+ ppr (sortBy cmp_mp $ eltsUFM (imp_dep_mods imports))
, ptext (sLit "Dependent packages:") <+>
ppr (sortBy stablePackageIdCmp $ imp_dep_pkgs imports)]
where -- The two uses of sortBy are just to reduce unnecessary
tcg_warns = NoWarnings,
tcg_anns = [],
tcg_insts = [],
- tcg_fam_insts = [],
- tcg_rules = [],
- tcg_fords = [],
- tcg_dfun_n = dfun_n_var,
- tcg_keep = keep_var,
+ tcg_fam_insts = [],
+ tcg_rules = [],
+ tcg_fords = [],
+ tcg_vects = [],
+ tcg_dfun_n = dfun_n_var,
+ tcg_keep = keep_var,
tcg_doc_hdr = Nothing,
tcg_hpc = False,
tcg_main = Nothing
tcg_warns :: Warnings, -- ...Warnings and deprecations
tcg_anns :: [Annotation], -- ...Annotations
tcg_insts :: [Instance], -- ...Instances
- tcg_fam_insts :: [FamInst], -- ...Family instances
- tcg_rules :: [LRuleDecl Id], -- ...Rules
- tcg_fords :: [LForeignDecl Id], -- ...Foreign import & exports
+ tcg_fam_insts :: [FamInst], -- ...Family instances
+ tcg_rules :: [LRuleDecl Id], -- ...Rules
+ tcg_fords :: [LForeignDecl Id], -- ...Foreign import & exports
+ tcg_vects :: [LVectDecl Id], -- ...Vectorisation declarations
tcg_doc_hdr :: Maybe LHsDocString, -- ^ Maybe Haddock header docs
tcg_hpc :: AnyHpcUsage, -- ^ @True@ if any part of the
, wc_insol = n1 `unionBags` n2 }
addFlats :: WantedConstraints -> Bag WantedEvVar -> WantedConstraints
-addFlats wc wevs = wc { wc_flat = wevs `unionBags` wc_flat wc }
+addFlats wc wevs = wc { wc_flat = wc_flat wc `unionBags` wevs }
addImplics :: WantedConstraints -> Bag Implication -> WantedConstraints
-addImplics wc implic = wc { wc_impl = implic `unionBags` wc_impl wc }
+addImplics wc implic = wc { wc_impl = wc_impl wc `unionBags` implic }
instance Outputable WantedConstraints where
ppr (WC {wc_flat = f, wc_impl = i, wc_insol = n})
keepWanted :: Bag FlavoredEvVar -> Bag WantedEvVar
keepWanted flevs
- = foldlBag keep_wanted emptyBag flevs
+ = foldrBag keep_wanted emptyBag flevs
+ -- Important: use fold*r*Bag to preserve the order of the evidence variables.
where
- keep_wanted :: Bag WantedEvVar -> FlavoredEvVar -> Bag WantedEvVar
- keep_wanted r (EvVarX ev (Wanted wloc)) = consBag (EvVarX ev wloc) r
- keep_wanted r _ = r
+ keep_wanted :: FlavoredEvVar -> Bag WantedEvVar -> Bag WantedEvVar
+ keep_wanted (EvVarX ev (Wanted wloc)) r = consBag (EvVarX ev wloc) r
+ keep_wanted _ r = r
\end{code}
-- superclasses.
instance Outputable CtFlavor where
- ppr (Given _) = ptext (sLit "[Given]")
- ppr (Wanted _) = ptext (sLit "[Wanted]")
- ppr (Derived {}) = ptext (sLit "[Derived]")
-
+ ppr (Given {}) = ptext (sLit "[G]")
+ ppr (Wanted {}) = ptext (sLit "[W]")
+ ppr (Derived {}) = ptext (sLit "[D]")
pprFlavorArising :: CtFlavor -> SDoc
pprFlavorArising (Derived wl ) = pprArisingAt wl
pprFlavorArising (Wanted wl) = pprArisingAt wl
CanonicalCt(..), Xi, tyVarsOfCanonical, tyVarsOfCanonicals, tyVarsOfCDicts,
deCanonicalise, mkFrozenError,
- makeSolvedByInst,
isWanted, isGiven, isDerived,
isGivenCt, isWantedCt, isDerivedCt, pprFlavorArising,
combineCtLoc, mkGivenFlavor, mkWantedFlavor,
getWantedLoc,
- TcS, runTcS, failTcS, panicTcS, traceTcS, traceTcS0, -- Basic functionality
+ TcS, runTcS, failTcS, panicTcS, traceTcS, -- Basic functionality
+ traceFireTcS, bumpStepCountTcS,
tryTcS, nestImplicTcS, recoverTcS, wrapErrTcS, wrapWarnTcS,
SimplContext(..), isInteractive, simplEqsOnly, performDefaulting,
-- Creation of evidence variables
- newEvVar, newCoVar, newWantedCoVar, newGivenCoVar,
+ newEvVar, newCoVar, newGivenCoVar,
newDerivedId,
newIPVar, newDictVar, newKindConstraint,
-- Setting evidence variables
- setWantedCoBind,
- setIPBind, setDictBind, setEvBind,
+ setCoBind, setIPBind, setDictBind, setEvBind,
setWantedTyBind,
instDFunTypes, -- Instantiation
instDFunConstraints,
- newFlexiTcSTy,
+ newFlexiTcSTy, instFlexiTcS,
compatKind,
matchClass, matchFam, MatchInstResult (..),
checkWellStagedDFun,
warnTcS,
- pprEq, -- Smaller utils, re-exported from TcM
+ pprEq -- Smaller utils, re-exported from TcM
-- TODO (DV): these are only really used in the
-- instance matcher in TcSimplify. I am wondering
-- if the whole instance matcher simply belongs
-- here
-
-
- mkDerivedFunDepEqns -- Instantiation of 'Equations' from FunDeps
-
) where
#include "HsVersions.h"
import HsBinds -- for TcEvBinds stuff
import Id
-import FunDeps
import TcRnTypes
-import Control.Monad
import Data.IORef
\end{code}
compatKind :: Kind -> Kind -> Bool
compatKind k1 k2 = k1 `isSubKind` k2 || k2 `isSubKind` k1
-makeSolvedByInst :: CanonicalCt -> CanonicalCt
--- Record that a constraint is now solved
--- Wanted -> Given
--- Given, Derived -> no-op
-makeSolvedByInst ct
- | Wanted loc <- cc_flavor ct = ct { cc_flavor = mkGivenFlavor (Wanted loc) UnkSkol }
- | otherwise = ct
-
deCanonicalise :: CanonicalCt -> FlavoredEvVar
deCanonicalise ct = mkEvVarX (cc_id ct) (cc_flavor ct)
-- active(tv ~ xi) = tv
-- active(D xis) = D xis
-- active(IP nm ty) = nm
+--
+-- NB: either (a `canSolve` b) or (b `canSolve` a) must hold
-----------------------------------------
canSolve (Given {}) _ = True
-canSolve (Derived {}) (Wanted {}) = False -- DV: changing the semantics
-canSolve (Derived {}) (Derived {}) = True -- DV: changing the semantics of derived
+canSolve (Wanted {}) (Derived {}) = True
canSolve (Wanted {}) (Wanted {}) = True
-canSolve _ _ = False
+canSolve (Derived {}) (Derived {}) = True -- Important: derived can't solve wanted/given
+canSolve _ _ = False -- (There is no *evidence* for a derived.)
canRewrite :: CtFlavor -> CtFlavor -> Bool
-- canRewrite ctid1 ctid2
combineCtLoc _ _ = panic "combineCtLoc: both given"
mkGivenFlavor :: CtFlavor -> SkolemInfo -> CtFlavor
-mkGivenFlavor (Wanted loc) sk = Given (setCtLocOrigin loc sk)
-mkGivenFlavor (Derived loc) sk = Given (setCtLocOrigin loc sk)
-mkGivenFlavor (Given loc) sk = Given (setCtLocOrigin loc sk)
+mkGivenFlavor (Wanted loc) sk = Given (setCtLocOrigin loc sk)
+mkGivenFlavor (Derived loc) sk = Given (setCtLocOrigin loc sk)
+mkGivenFlavor (Given loc) sk = Given (setCtLocOrigin loc sk)
+
mkWantedFlavor :: CtFlavor -> CtFlavor
mkWantedFlavor (Wanted loc) = Wanted loc
tcs_context :: SimplContext,
- tcs_untch :: TcsUntouchables
+ tcs_untch :: TcsUntouchables,
+
+ tcs_ic_depth :: Int, -- Implication nesting depth
+ tcs_count :: IORef Int -- Global step count
}
type TcsUntouchables = (Untouchables,TcTyVarSet)
traceTcS :: String -> SDoc -> TcS ()
traceTcS herald doc = TcS $ \_env -> TcM.traceTc herald doc
-traceTcS0 :: String -> SDoc -> TcS ()
-traceTcS0 herald doc = TcS $ \_env -> TcM.traceTcN 0 herald doc
+bumpStepCountTcS :: TcS ()
+bumpStepCountTcS = TcS $ \env -> do { let ref = tcs_count env
+ ; n <- TcM.readTcRef ref
+ ; TcM.writeTcRef ref (n+1) }
+
+traceFireTcS :: Int -> SDoc -> TcS ()
+-- Dump a rule-firing trace
+traceFireTcS depth doc
+ = TcS $ \env ->
+ TcM.ifDOptM Opt_D_dump_cs_trace $
+ do { n <- TcM.readTcRef (tcs_count env)
+ ; let msg = int n
+ <> text (replicate (tcs_ic_depth env) '>')
+ <> brackets (int depth) <+> doc
+ ; TcM.dumpTcRn msg }
runTcS :: SimplContext
-> Untouchables -- Untouchables
runTcS context untouch tcs
= do { ty_binds_var <- TcM.newTcRef emptyVarEnv
; ev_binds_var@(EvBindsVar evb_ref _) <- TcM.newTcEvBinds
+ ; step_count <- TcM.newTcRef 0
; let env = TcSEnv { tcs_ev_binds = ev_binds_var
, tcs_ty_binds = ty_binds_var
, tcs_context = context
, tcs_untch = (untouch, emptyVarSet) -- No Tcs untouchables yet
+ , tcs_count = step_count
+ , tcs_ic_depth = 0
}
-- Run the computation
; ty_binds <- TcM.readTcRef ty_binds_var
; mapM_ do_unification (varEnvElts ty_binds)
+#ifdef DEBUG
+ ; count <- TcM.readTcRef step_count
+ ; TcM.dumpTcRn (ptext (sLit "Constraint solver steps =") <+> int count)
+#endif
-- And return
; ev_binds <- TcM.readTcRef evb_ref
; return (res, evBindMapBinds ev_binds) }
do_unification (tv,ty) = TcM.writeMetaTyVar tv ty
nestImplicTcS :: EvBindsVar -> TcsUntouchables -> TcS a -> TcS a
-nestImplicTcS ref untch (TcS thing_inside)
- = TcS $ \ TcSEnv { tcs_ty_binds = ty_binds,
- tcs_context = ctxt } ->
+nestImplicTcS ref (inner_range, inner_tcs) (TcS thing_inside)
+ = TcS $ \ TcSEnv { tcs_ty_binds = ty_binds
+ , tcs_untch = (_outer_range, outer_tcs)
+ , tcs_count = count
+ , tcs_ic_depth = idepth
+ , tcs_context = ctxt } ->
let
+ inner_untch = (inner_range, outer_tcs `unionVarSet` inner_tcs)
+ -- The inner_range should be narrower than the outer one
+ -- (thus increasing the set of untouchables) but
+ -- the inner Tcs-untouchables must be unioned with the
+ -- outer ones!
nest_env = TcSEnv { tcs_ev_binds = ref
, tcs_ty_binds = ty_binds
- , tcs_untch = untch
+ , tcs_untch = inner_untch
+ , tcs_count = count
+ , tcs_ic_depth = idepth+1
, tcs_context = ctxtUnderImplic ctxt }
in
thing_inside nest_env
= do { EvBindsVar ev_ref _ <- getTcEvBinds
; wrapTcS $ TcM.readTcRef ev_ref }
-setWantedCoBind :: CoVar -> Coercion -> TcS ()
-setWantedCoBind cv co
- = setEvBind cv (EvCoercion co)
- -- Was: wrapTcS $ TcM.writeWantedCoVar cv co
+setCoBind :: CoVar -> Coercion -> TcS ()
+setCoBind cv co = setEvBind cv (EvCoercion co)
setWantedTyBind :: TcTyVar -> TcType -> TcS ()
-- Add a type binding
newKindConstraint tv knd
= do { tv_k <- instFlexiTcSHelper (tyVarName tv) knd
; let ty_k = mkTyVarTy tv_k
- ; co_var <- newWantedCoVar (mkTyVarTy tv) ty_k
+ ; co_var <- newCoVar (mkTyVarTy tv) ty_k
; return co_var }
instFlexiTcSHelper :: Name -> Kind -> TcS TcTyVar
; setEvBind cv (EvCoercion co)
; return cv }
-newWantedCoVar :: TcType -> TcType -> TcS EvVar
-newWantedCoVar ty1 ty2 = wrapTcS $ TcM.newWantedCoVar ty1 ty2
-
newCoVar :: TcType -> TcType -> TcS EvVar
newCoVar ty1 ty2 = wrapTcS $ TcM.newCoVar ty1 ty2
-- DV: We never return MatchInstMany, since tcLookupFamInst never returns
-- multiple matches. Check.
}
-
-
--- Functional dependencies, instantiation of equations
--------------------------------------------------------
-
-mkDerivedFunDepEqns :: WantedLoc
- -> [(Equation, (PredType, SDoc), (PredType, SDoc))]
- -> TcS [FlavoredEvVar] -- All Derived
-mkDerivedFunDepEqns _ [] = return []
-mkDerivedFunDepEqns loc eqns
- = do { traceTcS "Improve:" (vcat (map pprEquationDoc eqns))
- ; evvars <- mapM to_work_item eqns
- ; return $ concat evvars }
- where
- to_work_item :: (Equation, (PredType,SDoc), (PredType,SDoc)) -> TcS [FlavoredEvVar]
- to_work_item ((qtvs, pairs), d1, d2)
- = do { let tvs = varSetElems qtvs
- ; tvs' <- mapM instFlexiTcS tvs
- ; let subst = zipTopTvSubst tvs (mkTyVarTys tvs')
- loc' = pushErrCtxt FunDepOrigin (False, mkEqnMsg d1 d2) loc
- flav = Derived loc'
- ; mapM (do_one subst flav) pairs }
-
- do_one subst flav (ty1, ty2)
- = do { let sty1 = substTy subst ty1
- sty2 = substTy subst ty2
- ; ev <- newCoVar sty1 sty2
- ; return (mkEvVarX ev flav) }
-
-pprEquationDoc :: (Equation, (PredType, SDoc), (PredType, SDoc)) -> SDoc
-pprEquationDoc (eqn, (p1, _), (p2, _))
- = vcat [pprEquation eqn, nest 2 (ppr p1), nest 2 (ppr p2)]
-
-mkEqnMsg :: (TcPredType, SDoc) -> (TcPredType, SDoc) -> TidyEnv
- -> TcM (TidyEnv, SDoc)
-mkEqnMsg (pred1,from1) (pred2,from2) tidy_env
- = do { pred1' <- TcM.zonkTcPredType pred1
- ; pred2' <- TcM.zonkTcPredType pred2
- ; let { pred1'' = tidyPred tidy_env pred1'
- ; pred2'' = tidyPred tidy_env pred2' }
- ; let msg = vcat [ptext (sLit "When using functional dependencies to combine"),
- nest 2 (sep [ppr pred1'' <> comma, nest 2 from1]),
- nest 2 (sep [ppr pred2'' <> comma, nest 2 from2])]
- ; return (tidy_env, msg) }
\end{code}
-- Simplify 'wanted' as much as possibles
-- Fail if not possible
simplifyDeriv orig tvs theta
- = do { tvs_skols <- tcInstSuperSkolTyVars tvs -- Skolemize
- -- One reason is that the constraint solving machinery
- -- expects *TcTyVars* not TyVars. Another is that
- -- when looking up instances we don't want overlap
- -- of type variables
+ = do { tvs_skols <- tcInstSkolTyVars tvs -- Skolemize
+ -- The constraint solving machinery
+ -- expects *TcTyVars* not TyVars.
+ -- We use *non-overlappable* (vanilla) skolems
+ -- See Note [Overlap and deriving]
; let skol_subst = zipTopTvSubst tvs $ map mkTyVarTy tvs_skols
subst_skol = zipTopTvSubst tvs_skols $ map mkTyVarTy tvs
; return (substTheta subst_skol min_theta) }
\end{code}
+Note [Overlap and deriving]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Consider some overlapping instances:
+ instance Show a => Show [a] where ..
+ instance Show [Char] where ...
+
+Now a data type with deriving:
+ data T a = MkT [a] deriving( Show )
+
+We want to get the derived instance
+ instance Show [a] => Show (T a) where...
+and NOT
+ instance Show a => Show (T a) where...
+so that the (Show (T Char)) instance does the Right Thing
+
+It's very like the situation when we're inferring the type
+of a function
+ f x = show [x]
+and we want to infer
+ f :: Show [a] => a -> String
+
+BOTTOM LINE: use vanilla, non-overlappable skolems when inferring
+ the context for the derived instance.
+ Hence tcInstSkolTyVars not tcInstSuperSkolTyVars
+
Note [Exotic derived instance contexts]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In a 'derived' instance declaration, we *infer* the context. It's a
; (lhs_results, lhs_binds)
<- runTcS SimplRuleLhs untch $
- solveWanteds emptyInert lhs_wanted
+ solveWanteds emptyInert zonked_lhs
; traceTc "simplifyRule" $
vcat [ text "zonked_lhs" <+> ppr zonked_lhs
, text "inert =" <+> ppr inert ]
; let (just_given_inert, unsolved_cans) = extractUnsolved inert
- -- unsolved_ccans contains either Wanted or Derived!
+ -- unsolved_cans contains either Wanted or Derived!
- -- Go inside each implication
; (implic_eqs, unsolved_implics)
- <- solveNestedImplications just_given_inert implics
+ <- solveNestedImplications just_given_inert unsolved_cans implics
-- Apply defaulting rules if and only if there
-- no floated equalities. If there are, they may
unsolved_implics
}
-solveNestedImplications :: InertSet -> Bag Implication
+givensFromWanteds :: CanonicalCts -> Bag FlavoredEvVar
+-- Extract the *wanted* ones from CanonicalCts
+-- and make them into *givens*
+givensFromWanteds = foldrBag getWanted emptyBag
+ where
+ getWanted :: CanonicalCt -> Bag FlavoredEvVar -> Bag FlavoredEvVar
+ getWanted cc givens
+ | not (isCFrozenErr cc)
+ , Wanted loc <- cc_flavor cc
+ , let given = mkEvVarX (cc_id cc) (Given (setCtLocOrigin loc UnkSkol))
+ = given `consBag` givens
+ | otherwise
+ = givens -- We are not helping anyone by pushing a Derived in!
+ -- Because if we could not solve it to start with
+ -- we are not going to do either inside the impl constraint
+
+solveNestedImplications :: InertSet -> CanonicalCts
+ -> Bag Implication
-> TcS (Bag FlavoredEvVar, Bag Implication)
-solveNestedImplications inerts implics
+solveNestedImplications just_given_inert unsolved_cans implics
| isEmptyBag implics
= return (emptyBag, emptyBag)
| otherwise
- = do { -- See Note [Preparing inert set for implications]
- traceTcS "solveWanteds: preparing inerts for implications {" empty
- ; let inert_for_implics = inerts
- -- DV: Used to be:
- -- inert_for_implics <- solveInteract inerts (makeGivens unsolved).
- -- But now the top-level simplifyInfer effectively converts the
- -- quantifiable wanteds to givens, and hence we don't need to add
- -- those unsolved as givens here; they will already be in the inert set.
-
- ; traceTcS "}" empty
-
- ; traceTcS "solveWanteds: doing nested implications {" $
+ = do { -- See Note [Preparing inert set for implications]
+ -- Push the unsolved wanteds inwards, but as givens
+ let pushed_givens = givensFromWanteds unsolved_cans
+ tcs_untouchables = filterVarSet isFlexiTcsTv $
+ tyVarsOfEvVarXs pushed_givens
+ -- See Note [Extra TcsTv untouchables]
+
+ ; traceTcS "solveWanteds: preparing inerts for implications {"
+ (vcat [ppr tcs_untouchables, ppr pushed_givens])
+
+ ; (_, inert_for_implics) <- solveInteract just_given_inert pushed_givens
+
+ ; traceTcS "solveWanteds: } now doing nested implications {" $
vcat [ text "inerts_for_implics =" <+> ppr inert_for_implics
, text "implics =" <+> ppr implics ]
- ; let tcs_untouchables = filterVarSet isFlexiTcsTv $
- tyVarsOfInert inert_for_implics
- -- See Note [Extra TcsTv untouchables]
-
; (implic_eqs, unsolved_implics)
<- flatMapBagPairM (solveImplication tcs_untouchables inert_for_implics) implics
predTvs_under_fsks (EqPred ty1 ty2) = tvs_under_fsks ty1 `unionVarSet` tvs_under_fsks ty2
\end{code}
-Note [Float Equalities out of Implications]
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-We want to float equalities out of vanilla existentials, but *not* out
-of GADT pattern matches.
-
Note [Preparing inert set for implications]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Before solving the nested implications, we convert any unsolved flat wanteds
a) In checking mode, suppresses unnecessary errors. We already have
on unsolved-wanted error; adding it to the givens prevents any
- consequential errors from showing uop
+ consequential errors from showing up
b) More importantly, in inference mode, we are going to quantify over this
constraint, and we *don't* want to quantify over any constraints that
are deducible from it.
+ c) Flattened type-family equalities must be exposed to the nested
+ constraints. Consider
+ F b ~ alpha, (forall c. F b ~ alpha)
+ Obviously this is soluble with [alpha := F b]. But the
+ unification is only done by solveCTyFunEqs, right at the end of
+ solveWanteds, and if we aren't careful we'll end up with an
+ unsolved goal inside the implication. We need to "push" the
+ as-yet-unsolved (F b ~ alpha) inwards, as a *given*, so that it
+ can be used to solve the inner (F b
+ ~ alpha). See Trac #4935.
+
+ d) There are other cases where interactions between wanteds that can help
+ to solve a constraint. For example
+
+ class C a b | a -> b
+
+ (C Int alpha), (forall d. C d blah => C Int a)
+
+ If we push the (C Int alpha) inwards, as a given, it can produce
+ a fundep (alpha~a) and this can float out again and be used to
+ fix alpha. (In general we can't float class constraints out just
+ in case (C d blah) might help to solve (C Int a).)
+
The unsolved wanteds are *canonical* but they may not be *inert*,
because when made into a given they might interact with other givens.
Hence the call to solveInteract. Example:
Note [Extra TcsTv untouchables]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Furthemore, we record the inert set simplifier-generated unification variables of the TcsTv
-kind (such as variables from instance that have been applied, or unification flattens). These
-variables must be passed to the implications as extra untouchable variables. Otherwise
-we have the danger of double unifications. Example (from trac ticket #4494):
+Furthermore, we record the inert set simplifier-generated unification
+variables of the TcsTv kind (such as variables from instances that have
+been applied, or unification flattens). These variables must be passed
+to the implications as extra untouchable variables. Otherwise we have
+the danger of double unifications. Example (from trac ticket #4494):
(F Int ~ uf) /\ (forall a. C a => F Int ~ beta)
-In this example, beta is touchable inside the implication. The first solveInteract step
-leaves 'uf' ununified. Then we move inside the implication where a new constraint
+In this example, beta is touchable inside the implication. The first
+solveInteract step leaves 'uf' ununified. Then we move inside the
+implication where a new constraint
uf ~ beta
-emerges. We may spontaneously solve it to get uf := beta, so the whole implication disappears
-but when we pop out again we are left with (F Int ~ uf) which will be unified by our final
-solveCTyFunEqs stage and uf will get unified *once more* to (F Int).
-
-The solution is to record the TcsTvs (i.e. the simplifier-generated unification variables)
-that are generated when solving the flats, and make them untouchables for the nested
-implication. In the example above uf would become untouchable, so beta would be forced to
-be unified as beta := uf.
+emerges. We may spontaneously solve it to get uf := beta, so the whole
+implication disappears but when we pop out again we are left with (F
+Int ~ uf) which will be unified by our final solveCTyFunEqs stage and
+uf will get unified *once more* to (F Int).
+
+The solution is to record the TcsTvs (i.e. the simplifier-generated
+unification variables) that are generated when solving the flats, and
+make them untouchables for the nested implication. In the example
+above uf would become untouchable, so beta would be forced to be
+unified as beta := uf.
+
+NB: A consequence is that every simplifier-generated TcsTv variable
+ that gets floated out of an implication becomes now untouchable
+ next time we go inside that implication to solve any residual
+ constraints. In effect, by floating an equality out of the
+ implication we are committing to have it solved in the outside.
-NB: A consequence is that every simplifier-generated TcsTv variable that gets floated out
- of an implication becomes now untouchable next time we go inside that implication to
- solve any residual constraints. In effect, by floating an equality out of the implication
- we are committing to have it solved in the outside.
+Note [Float Equalities out of Implications]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+We want to float equalities out of vanilla existentials, but *not* out
+of GADT pattern matches.
\begin{code}
; return (niFixTvSubst ni_subst, unsolved_can_cts) }
where
- solve_one (cv,tv,ty) = setWantedTyBind tv ty >> setWantedCoBind cv ty
+ solve_one (cv,tv,ty) = setWantedTyBind tv ty >> setCoBind cv ty
------------
type FunEqBinds = (TvSubstEnv, [(CoVar, TcTyVar, TcType)])
-- See Note [Deferred unification]
uType_defer (item : origin) ty1 ty2
= wrapEqCtxt origin $
- do { co_var <- newWantedCoVar ty1 ty2
+ do { co_var <- newCoVar ty1 ty2
; loc <- getCtLoc (TypeEqOrigin item)
; emitFlat (mkEvVarX co_var loc)
\begin{code}
module FunDeps (
- Equation, pprEquation,
+ FDEq (..),
+ Equation(..), pprEquation,
oclose, improveFromInstEnv, improveFromAnother,
checkInstCoverage, checkFunDeps,
pprFundeps
%************************************************************************
+Each functional dependency with one variable in the RHS is responsible
+for generating a single equality. For instance:
+ class C a b | a -> b
+The constraints ([Wanted] C Int Bool) and [Wanted] C Int alpha
+ FDEq { fd_pos = 1
+ , fd_ty_left = Bool
+ , fd_ty_right = alpha }
+However notice that a functional dependency may have more than one variable
+in the RHS which will create more than one FDEq. Example:
+ class C a b c | a -> b c
+ [Wanted] C Int alpha alpha
+ [Wanted] C Int Bool beta
+Will generate:
+ fd1 = FDEq { fd_pos = 1, fd_ty_left = alpha, fd_ty_right = Bool } and
+ fd2 = FDEq { fd_pos = 2, fd_ty_left = alpha, fd_ty_right = beta }
+
+We record the parameter position so that we can immediately rewrite a constraint
+using the produced FDEqs and remove it from our worklist.
+
+
+INVARIANT: Corresponding types aren't already equal
+That is, there exists at least one non-identity equality in FDEqs.
+
+Assume:
+ class C a b c | a -> b c
+ instance C Int x x
+And: [Wanted] C Int Bool alpha
+We will /match/ the LHS of fundep equations, producing a matching substitution
+and create equations for the RHS sides. In our last example we'd have generated:
+ ({x}, [fd1,fd2])
+where
+ fd1 = FDEq 1 Bool x
+ fd2 = FDEq 2 alpha x
+To ``execute'' the equation, make fresh type variable for each tyvar in the set,
+instantiate the two types with these fresh variables, and then unify or generate
+a new constraint. In the above example we would generate a new unification
+variable 'beta' for x and produce the following constraints:
+ [Wanted] (Bool ~ beta)
+ [Wanted] (alpha ~ beta)
+
+Notice the subtle difference between the above class declaration and:
+ class C a b c | a -> b, a -> c
+where we would generate:
+ ({x},[fd1]),({x},[fd2])
+This means that the template variable would be instantiated to different
+unification variables when producing the FD constraints.
+
+Finally, the position parameters will help us rewrite the wanted constraint ``on the spot''
+
\begin{code}
-type Equation = (TyVarSet, [(Type, Type)])
--- These pairs of types should be equal, for some
--- substitution of the tyvars in the tyvar set
--- INVARIANT: corresponding types aren't already equal
-
--- It's important that we have a *list* of pairs of types. Consider
--- class C a b c | a -> b c where ...
--- instance C Int x x where ...
--- Then, given the constraint (C Int Bool v) we should improve v to Bool,
--- via the equation ({x}, [(Bool,x), (v,x)])
--- This would not happen if the class had looked like
--- class C a b c | a -> b, a -> c
-
--- To "execute" the equation, make fresh type variable for each tyvar in the set,
--- instantiate the two types with these fresh variables, and then unify.
---
--- For example, ({a,b}, (a,Int,b), (Int,z,Bool))
--- We unify z with Int, but since a and b are quantified we do nothing to them
--- We usually act on an equation by instantiating the quantified type varaibles
--- to fresh type variables, and then calling the standard unifier.
+type Pred_Loc = (PredType, SDoc) -- SDoc says where the Pred comes from
-pprEquation :: Equation -> SDoc
-pprEquation (qtvs, pairs)
- = vcat [ptext (sLit "forall") <+> braces (pprWithCommas ppr (varSetElems qtvs)),
- nest 2 (vcat [ ppr t1 <+> ptext (sLit "~") <+> ppr t2 | (t1,t2) <- pairs])]
+data Equation
+ = FDEqn { fd_qtvs :: TyVarSet -- Instantiate these to fresh unification vars
+ , fd_eqs :: [FDEq] -- and then make these equal
+ , fd_pred1, fd_pred2 :: Pred_Loc } -- The Equation arose from
+ -- combining these two constraints
+
+data FDEq = FDEq { fd_pos :: Int -- We use '0' for the first position
+ , fd_ty_left :: Type
+ , fd_ty_right :: Type }
\end{code}
Given a bunch of predicates that must hold, such as
\begin{code}
-type Pred_Loc = (PredType, SDoc) -- SDoc says where the Pred comes from
+instFD_WithPos :: FunDep TyVar -> [TyVar] -> [Type] -> ([Type], [(Int,Type)])
+-- Returns a FunDep between the types accompanied along with their
+-- position (>= 0) in the types argument list.
+instFD_WithPos (ls,rs) tvs tys
+ = (map (snd . lookup) ls, map lookup rs)
+ where
+ ind_tys = zip [0..] tys
+ env = zipVarEnv tvs ind_tys
+ lookup tv = lookupVarEnv_NF env tv
-improveFromInstEnv :: (Class -> [Instance])
- -> Pred_Loc
- -> [(Equation,Pred_Loc,Pred_Loc)]
--- Improvement from top-level instances
-improveFromInstEnv _inst_env pred
- = improveOne _inst_env pred [] -- TODO: Refactor to directly use instance_eqnd?
+zipAndComputeFDEqs :: (Type -> Type -> Bool) -- Discard this FDEq if true
+ -> [Type]
+ -> [(Int,Type)]
+ -> [FDEq]
+-- Create a list of FDEqs from two lists of types, making sure
+-- that the types are not equal.
+zipAndComputeFDEqs discard (ty1:tys1) ((i2,ty2):tys2)
+ | discard ty1 ty2 = zipAndComputeFDEqs discard tys1 tys2
+ | otherwise = FDEq { fd_pos = i2
+ , fd_ty_left = ty1
+ , fd_ty_right = ty2 } : zipAndComputeFDEqs discard tys1 tys2
+zipAndComputeFDEqs _ _ _ = []
+
+-- Improve a class constraint from another class constraint
+-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+improveFromAnother :: Pred_Loc -- Template item (usually given, or inert)
+ -> Pred_Loc -- Workitem [that can be improved]
+ -> [Equation]
+-- Post: FDEqs always oriented from the other to the workitem
+-- Equations have empty quantified variables
+improveFromAnother pred1@(ClassP cls1 tys1, _) pred2@(ClassP cls2 tys2, _)
+ | tys1 `lengthAtLeast` 2 && cls1 == cls2
+ = [ FDEqn { fd_qtvs = emptyVarSet, fd_eqs = eqs, fd_pred1 = pred1, fd_pred2 = pred2 }
+ | let (cls_tvs, cls_fds) = classTvsFds cls1
+ , fd <- cls_fds
+ , let (ltys1, rs1) = instFD fd cls_tvs tys1
+ (ltys2, irs2) = instFD_WithPos fd cls_tvs tys2
+ , tcEqTypes ltys1 ltys2 -- The LHSs match
+ , let eqs = zipAndComputeFDEqs tcEqType rs1 irs2
+ , not (null eqs) ]
+
+improveFromAnother _ _ = []
+
+
+-- Improve a class constraint from instance declarations
+-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+pprEquation :: Equation -> SDoc
+pprEquation (FDEqn { fd_qtvs = qtvs, fd_eqs = pairs })
+ = vcat [ptext (sLit "forall") <+> braces (pprWithCommas ppr (varSetElems qtvs)),
+ nest 2 (vcat [ ppr t1 <+> ptext (sLit "~") <+> ppr t2 | (FDEq _ t1 t2) <- pairs])]
-improveFromAnother :: Pred_Loc
+improveFromInstEnv :: (InstEnv,InstEnv)
-> Pred_Loc
- -> [(Equation, Pred_Loc, Pred_Loc)]
--- Improvement from another local (given or wanted) constraint
-improveFromAnother pred1 pred2
- = improveOne (\_ -> []) pred1 [pred2] -- TODO: Refactor to directly use pairwise_eqns?
-
-
-improveOne :: (Class -> [Instance]) -- Gives instances for given class
- -> Pred_Loc -- Do improvement triggered by this
- -> [Pred_Loc] -- Current constraints
- -> [(Equation,Pred_Loc,Pred_Loc)] -- Derived equalities that must also hold
- -- (NB the above INVARIANT for type Equation)
- -- The Pred_Locs explain which two predicates were
- -- combined (for error messages)
--- Just do improvement triggered by a single, distinguised predicate
-
-improveOne _inst_env pred@(IParam ip ty, _) preds
- = [ ((emptyVarSet, [(ty,ty2)]), pred, p2)
- | p2@(IParam ip2 ty2, _) <- preds
- , ip==ip2
- , not (ty `tcEqType` ty2)]
-
-improveOne inst_env pred@(ClassP cls tys, _) preds
+ -> [Equation] -- Needs to be an Equation because
+ -- of quantified variables
+-- Post: Equations oriented from the template (matching instance) to the workitem!
+improveFromInstEnv _inst_env (pred,_loc)
+ | not (isClassPred pred)
+ = panic "improveFromInstEnv: not a class predicate"
+improveFromInstEnv inst_env pred@(ClassP cls tys, _)
| tys `lengthAtLeast` 2
- = instance_eqns ++ pairwise_eqns
- -- NB: we put the instance equations first. This biases the
- -- order so that we first improve individual constraints against the
- -- instances (which are perhaps in a library and less likely to be
- -- wrong; and THEN perform the pairwise checks.
- -- The other way round, it's possible for the pairwise check to succeed
- -- and cause a subsequent, misleading failure of one of the pair with an
- -- instance declaration. See tcfail143.hs for an example
- where
- (cls_tvs, cls_fds) = classTvsFds cls
- instances = inst_env cls
- rough_tcs = roughMatchTcs tys
-
- -- NOTE that we iterate over the fds first; they are typically
- -- empty, which aborts the rest of the loop.
- pairwise_eqns :: [(Equation,Pred_Loc,Pred_Loc)]
- pairwise_eqns -- This group comes from pairwise comparison
- = [ (eqn, pred, p2)
- | fd <- cls_fds
- , p2@(ClassP cls2 tys2, _) <- preds
- , cls == cls2
- , eqn <- checkClsFD emptyVarSet fd cls_tvs tys tys2
- ]
-
- instance_eqns :: [(Equation,Pred_Loc,Pred_Loc)]
- instance_eqns -- This group comes from comparing with instance decls
- = [ (eqn, p_inst, pred)
- | fd <- cls_fds -- Iterate through the fundeps first,
+ = [ FDEqn { fd_qtvs = qtvs, fd_eqs = eqs, fd_pred1 = p_inst, fd_pred2=pred }
+ | fd <- cls_fds -- Iterate through the fundeps first,
-- because there often are none!
- , let trimmed_tcs = trimRoughMatchTcs cls_tvs fd rough_tcs
+ , let trimmed_tcs = trimRoughMatchTcs cls_tvs fd rough_tcs
-- Trim the rough_tcs based on the head of the fundep.
            -- Remember that instanceCantMatch treats both arguments
-- symmetrically, so it's ok to trim the rough_tcs,
-- rather than trimming each inst_tcs in turn
- , ispec@(Instance { is_tvs = qtvs, is_tys = tys_inst,
- is_tcs = inst_tcs }) <- instances
- , not (instanceCantMatch inst_tcs trimmed_tcs)
- , eqn <- checkClsFD qtvs fd cls_tvs tys_inst tys
- , let p_inst = (mkClassPred cls tys_inst,
- sep [ ptext (sLit "arising from the dependency") <+> quotes (pprFunDep fd)
- , ptext (sLit "in the instance declaration at")
- <+> ppr (getSrcLoc ispec)])
- ]
-
-improveOne _ _ _
- = []
+ , ispec@(Instance { is_tvs = qtvs, is_tys = tys_inst,
+ is_tcs = inst_tcs }) <- instances
+ , not (instanceCantMatch inst_tcs trimmed_tcs)
+ , let p_inst = (mkClassPred cls tys_inst,
+ sep [ ptext (sLit "arising from the dependency") <+> quotes (pprFunDep fd)
+ , ptext (sLit "in the instance declaration at")
+ <+> ppr (getSrcLoc ispec)])
+ , (qtvs, eqs) <- checkClsFD qtvs fd cls_tvs tys_inst tys -- NB: orientation
+ , not (null eqs)
+ ]
+ where
+ (cls_tvs, cls_fds) = classTvsFds cls
+ instances = classInstances inst_env cls
+ rough_tcs = roughMatchTcs tys
+improveFromInstEnv _ _ = []
checkClsFD :: TyVarSet -- Quantified type variables; see note below
-> FunDep TyVar -> [TyVar] -- One functional dependency from the class
-> [Type] -> [Type]
- -> [Equation]
+ -> [(TyVarSet, [FDEq])]
checkClsFD qtvs fd clas_tvs tys1 tys2
-- 'qtvs' are the quantified type variables, the ones which can be instantiated
length tys1 == length clas_tvs
, ppr tys1 <+> ppr tys2 )
- case tcUnifyTys bind_fn ls1 ls2 of
+ case tcUnifyTys bind_fn ltys1 ltys2 of
Nothing -> []
- Just subst | isJust (tcUnifyTys bind_fn rs1' rs2')
- -- Don't include any equations that already hold.
+ Just subst | isJust (tcUnifyTys bind_fn rtys1' rtys2')
+ -- Don't include any equations that already hold.
-- Reason: then we know if any actual improvement has happened,
-- in which case we need to iterate the solver
- -- In making this check we must taking account of the fact that any
- -- qtvs that aren't already instantiated can be instantiated to anything
+ -- In making this check we must taking account of the fact that any
+ -- qtvs that aren't already instantiated can be instantiated to anything
-- at all
- -> []
-
- | otherwise -- Aha! A useful equation
- -> [ (qtvs', zip rs1' rs2')]
+ -- NB: We can't do this 'is-useful-equation' check element-wise
+ -- because of:
+ -- class C a b c | a -> b c
+ -- instance C Int x x
+ -- [Wanted] C Int alpha Int
+ -- We would get that x -> alpha (isJust) and x -> Int (isJust)
+ -- so we would produce no FDs, which is clearly wrong.
+ -> []
+
+ | otherwise
+ -> [(qtvs', fdeqs)]
-- We could avoid this substTy stuff by producing the eqn
-- (qtvs, ls1++rs1, ls2++rs2)
-- which will re-do the ls1/ls2 unification when the equation is
-- executed. What we're doing instead is recording the partial
-- work of the ls1/ls2 unification leaving a smaller unification problem
- where
- rs1' = substTys subst rs1
- rs2' = substTys subst rs2
+ where
+ rtys1' = map (substTy subst) rtys1
+ irs2' = map (\(i,x) -> (i,substTy subst x)) irs2
+ rtys2' = map snd irs2'
+
+ fdeqs = zipAndComputeFDEqs (\_ _ -> False) rtys1' irs2'
+ -- Don't discard anything!
+ -- We could discard equal types but it's an overkill to call
+ -- tcEqType again, since we know for sure that /at least one/
+ -- equation in there is useful)
+
qtvs' = filterVarSet (`notElemTvSubst` subst) qtvs
- -- qtvs' are the quantified type variables
- -- that have not been substituted out
- --
- -- Eg. class C a b | a -> b
- -- instance C Int [y]
- -- Given constraint C Int z
- -- we generate the equation
- -- ({y}, [y], z)
+ -- qtvs' are the quantified type variables
+ -- that have not been substituted out
+ --
+ -- Eg. class C a b | a -> b
+ -- instance C Int [y]
+ -- Given constraint C Int z
+ -- we generate the equation
+ -- ({y}, [y], z)
where
bind_fn tv | tv `elemVarSet` qtvs = BindMe
| otherwise = Skolem
- (ls1, rs1) = instFD fd clas_tvs tys1
- (ls2, rs2) = instFD fd clas_tvs tys2
+ (ltys1, rtys1) = instFD fd clas_tvs tys1
+ (ltys2, irs2) = instFD_WithPos fd clas_tvs tys2
+\end{code}
+
+\begin{code}
instFD :: FunDep TyVar -> [TyVar] -> [Type] -> FunDep Type
+-- A simpler version of instFD_WithPos to be used in checking instance coverage etc.
instFD (ls,rs) tvs tys
= (map lookup ls, map lookup rs)
where
env = zipVarEnv tvs tys
lookup tv = lookupVarEnv_NF env tv
-\end{code}
-\begin{code}
checkInstCoverage :: Class -> [Type] -> Bool
-- Check that the Coverage Condition is obeyed in an instance decl
-- For example, if we have
concatBag, foldBag, foldrBag, foldlBag,
isEmptyBag, isSingletonBag, consBag, snocBag, anyBag,
listToBag, bagToList,
- foldlBagM, mapBagM, mapBagM_,
+ foldrBagM, foldlBagM, mapBagM, mapBagM_,
flatMapBagM, flatMapBagPairM,
mapAndUnzipBagM, mapAccumBagLM
) where
foldlBag k z (TwoBags b1 b2) = foldlBag k (foldlBag k z b1) b2
foldlBag k z (ListBag xs) = foldl k z xs
+foldrBagM :: (Monad m) => (a -> b -> m b) -> b -> Bag a -> m b
+foldrBagM _ z EmptyBag = return z
+foldrBagM k z (UnitBag x) = k x z
+foldrBagM k z (TwoBags b1 b2) = do { z' <- foldrBagM k z b2; foldrBagM k z' b1 }
+foldrBagM k z (ListBag xs) = foldrM k z xs
+
foldlBagM :: (Monad m) => (b -> a -> m b) -> b -> Bag a -> m b
foldlBagM _ z EmptyBag = return z
foldlBagM k z (UnitBag x) = k z x
{-# OPTIONS -fno-warn-missing-signatures #-}
-module Vectorise( vectorise )
+module Vectorise ( vectorise )
where
import Vectorise.Type.Env
import Vectorise.Monad
import HscTypes hiding ( MonadThings(..) )
-import Module ( PackageId )
-import CoreSyn
import CoreUnfold ( mkInlineUnfolding )
import CoreFVs
+import PprCore
+import CoreSyn
import CoreMonad ( CoreM, getHscEnv )
+import Type
import Var
import Id
import OccName
+import DynFlags
import BasicTypes ( isLoopBreaker )
import Outputable
import Util ( zipLazy )
import Control.Monad
-debug = False
-dtrace s x = if debug then pprTrace "Vectorise" s x else x
-- | Vectorise a single module.
--- Takes the package containing the DPH backend we're using. Eg either dph-par or dph-seq.
-vectorise :: PackageId -> ModGuts -> CoreM ModGuts
-vectorise backend guts
- = do hsc_env <- getHscEnv
- liftIO $ vectoriseIO backend hsc_env guts
-
-
--- | Vectorise a single monad, given its HscEnv (code gen environment).
-vectoriseIO :: PackageId -> HscEnv -> ModGuts -> IO ModGuts
-vectoriseIO backend hsc_env guts
- = do -- Get information about currently loaded external packages.
- eps <- hscEPS hsc_env
+--
+vectorise :: ModGuts -> CoreM ModGuts
+vectorise guts
+ = do { hsc_env <- getHscEnv
+ ; liftIO $ vectoriseIO hsc_env guts
+ }
- -- Combine vectorisation info from the current module, and external ones.
- let info = hptVectInfo hsc_env `plusVectInfo` eps_vect_info eps
+-- | Vectorise a single monad, given the dynamic compiler flags and HscEnv.
+--
+vectoriseIO :: HscEnv -> ModGuts -> IO ModGuts
+vectoriseIO hsc_env guts
+ = do { -- Get information about currently loaded external packages.
+ ; eps <- hscEPS hsc_env
- -- Run the main VM computation.
- Just (info', guts') <- initV backend hsc_env guts info (vectModule guts)
- return (guts' { mg_vect_info = info' })
+ -- Combine vectorisation info from the current module, and external ones.
+ ; let info = hptVectInfo hsc_env `plusVectInfo` eps_vect_info eps
+ -- Run the main VM computation.
+ ; Just (info', guts') <- initV hsc_env guts info (vectModule guts)
+ ; return (guts' { mg_vect_info = info' })
+ }
-- | Vectorise a single module, in the VM monad.
+--
vectModule :: ModGuts -> VM ModGuts
-vectModule guts
- = do -- Vectorise the type environment.
- -- This may add new TyCons and DataCons.
- -- TODO: What new binds do we get back here?
- (types', fam_insts, tc_binds) <- vectTypeEnv (mg_types guts)
-
- (_, fam_inst_env) <- readGEnv global_fam_inst_env
+vectModule guts@(ModGuts { mg_types = types
+ , mg_binds = binds
+ , mg_fam_insts = fam_insts
+ })
+ = do { dumpOptVt Opt_D_dump_vt_trace "Before vectorisation" $
+ pprCoreBindings binds
+
+ -- Vectorise the type environment.
+ -- This may add new TyCons and DataCons.
+ ; (types', new_fam_insts, tc_binds) <- vectTypeEnv types
+
+ ; (_, fam_inst_env) <- readGEnv global_fam_inst_env
-- dicts <- mapM buildPADict pa_insts
-- workers <- mapM vectDataConWorkers pa_insts
- -- Vectorise all the top level bindings.
- binds' <- mapM vectTopBind (mg_binds guts)
-
- return $ guts { mg_types = types'
- , mg_binds = Rec tc_binds : binds'
- , mg_fam_inst_env = fam_inst_env
- , mg_fam_insts = mg_fam_insts guts ++ fam_insts
- }
+ -- Vectorise all the top level bindings.
+ ; binds' <- mapM vectTopBind binds
+ ; return $ guts { mg_types = types'
+ , mg_binds = Rec tc_binds : binds'
+ , mg_fam_inst_env = fam_inst_env
+ , mg_fam_insts = fam_insts ++ new_fam_insts
+ }
+ }
-- | Try to vectorise a top-level binding.
-- If it doesn't vectorise then return it unharmed.
vectTopBind :: CoreBind -> VM CoreBind
vectTopBind b@(NonRec var expr)
= do
- (inline, expr') <- vectTopRhs var expr
- var' <- vectTopBinder var inline expr'
+ (inline, _, expr') <- vectTopRhs [] var expr
+ var' <- vectTopBinder var inline expr'
-- Vectorising the body may create other top-level bindings.
- hs <- takeHoisted
+ hs <- takeHoisted
-- To get the same functionality as the original body we project
-- out its vectorised version from the closure.
- cexpr <- tryConvert var var' expr
+ cexpr <- tryConvert var var' expr
return . Rec $ (var, cexpr) : (var', expr') : hs
`orElseV`
vectTopBind b@(Rec bs)
= do
(vars', _, exprs')
- <- fixV $ \ ~(_, inlines, rhss) ->
+ <- fixV $ \ ~(_, inlines, rhss) ->
do vars' <- sequence [vectTopBinder var inline rhs
| (var, ~(inline, rhs)) <- zipLazy vars (zip inlines rhss)]
- (inlines', exprs')
- <- mapAndUnzipM (uncurry vectTopRhs) bs
-
- return (vars', inlines', exprs')
-
+ (inlines', areScalars', exprs')
+ <- mapAndUnzip3M (uncurry $ vectTopRhs vars) bs
+ if (and areScalars') || (length bs <= 1)
+ then do
+ return (vars', inlines', exprs')
+ else do
+ _ <- mapM deleteGlobalScalar vars
+ (inlines'', _, exprs'') <- mapAndUnzip3M (uncurry $ vectTopRhs []) bs
+ return (vars', inlines'', exprs'')
+
hs <- takeHoisted
cexprs <- sequence $ zipWith3 tryConvert vars vars' exprs
return . Rec $ zip vars cexprs ++ zip vars' exprs' ++ hs
return b
where
(vars, exprs) = unzip bs
-
-
+
-- | Make the vectorised version of this top level binder, and add the mapping
-- between it and the original to the state. For some binder @foo@ the vectorised
-- version is @$v_foo@
--
-- NOTE: vectTopBinder *MUST* be lazy in inline and expr because of how it is
-- used inside of fixV in vectTopBind
-vectTopBinder
- :: Var -- ^ Name of the binding.
- -> Inline -- ^ Whether it should be inlined, used to annotate it.
- -> CoreExpr -- ^ RHS of the binding, used to set the `Unfolding` of the returned `Var`.
- -> VM Var -- ^ Name of the vectorised binding.
-
+--
+vectTopBinder :: Var -- ^ Name of the binding.
+ -> Inline -- ^ Whether it should be inlined, used to annotate it.
+ -> CoreExpr -- ^ RHS of binding, used to set the 'Unfolding' of the returned 'Var'.
+ -> VM Var -- ^ Name of the vectorised binding.
vectTopBinder var inline expr
- = do
- -- Vectorise the type attached to the var.
- vty <- vectType (idType var)
-
- -- Make the vectorised version of binding's name, and set the unfolding used for inlining.
- var' <- liftM (`setIdUnfoldingLazily` unfolding)
- $ cloneId mkVectOcc var vty
-
- -- Add the mapping between the plain and vectorised name to the state.
- defGlobalVar var var'
-
- return var'
+ = do { -- Vectorise the type attached to the var.
+ ; vty <- vectType (idType var)
+
+         -- If there is a vectorisation declaration for this binding, make sure that its type
+ -- matches
+ ; vectDecl <- lookupVectDecl var
+ ; case vectDecl of
+ Nothing -> return ()
+ Just (vdty, _)
+ | coreEqType vty vdty -> return ()
+ | otherwise ->
+ cantVectorise ("Type mismatch in vectorisation pragma for " ++ show var) $
+ (text "Expected type" <+> ppr vty)
+ $$
+ (text "Inferred type" <+> ppr vdty)
+
+ -- Make the vectorised version of binding's name, and set the unfolding used for inlining
+ ; var' <- liftM (`setIdUnfoldingLazily` unfolding)
+ $ cloneId mkVectOcc var vty
+
+ -- Add the mapping between the plain and vectorised name to the state.
+ ; defGlobalVar var var'
+
+ ; return var'
+ }
where
unfolding = case inline of
Inline arity -> mkInlineUnfolding (Just arity) expr
DontInline -> noUnfolding
-
-- | Vectorise the RHS of a top-level binding, in an empty local environment.
-vectTopRhs
- :: Var -- ^ Name of the binding.
- -> CoreExpr -- ^ Body of the binding.
- -> VM (Inline, CoreExpr)
-
-vectTopRhs var expr
- = dtrace (vcat [text "vectTopRhs", ppr expr])
- $ closedV
- $ do (inline, vexpr) <- inBind var
- $ vectPolyExpr (isLoopBreaker $ idOccInfo var)
- (freeVars expr)
- return (inline, vectorised vexpr)
-
+--
+-- We need to distinguish three cases:
+--
+-- (1) We have a (non-scalar) vectorisation declaration for the variable (which explicitly provides
+-- vectorised code implemented by the user)
+-- => no automatic vectorisation & instead use the user-supplied code
+--
+-- (2) We have a scalar vectorisation declaration for the variable
+-- => generate vectorised code that uses a scalar 'map'/'zipWith' to lift the computation
+--
+-- (3) There is no vectorisation declaration for the variable
+-- => perform automatic vectorisation of the RHS
+--
+vectTopRhs :: [Var] -- ^ Names of all functions in the rec block
+ -> Var -- ^ Name of the binding.
+ -> CoreExpr -- ^ Body of the binding.
+ -> VM ( Inline -- (1) inline specification for the binding
+ , Bool -- (2) whether the right-hand side is a scalar computation
+ , CoreExpr) -- (3) the vectorised right-hand side
+vectTopRhs recFs var expr
+ = closedV
+ $ do { traceVt ("vectTopRhs of " ++ show var) $ ppr expr
+
+ ; globalScalar <- isGlobalScalar var
+ ; vectDecl <- lookupVectDecl var
+ ; rhs globalScalar vectDecl
+ }
+ where
+ rhs _globalScalar (Just (_, expr')) -- Case (1)
+ = return (inlineMe, False, expr')
+ rhs True _vectDecl -- Case (2)
+ = return (inlineMe, True, scalarRHS)
+ -- FIXME: that True is not enough to register scalarness
+ rhs False _vectDecl -- Case (3)
+ = do { let fvs = freeVars expr
+ ; (inline, isScalar, vexpr) <- inBind var $
+ vectPolyExpr (isLoopBreaker $ idOccInfo var) recFs fvs
+ ; if isScalar
+ then addGlobalScalar var
+ else deleteGlobalScalar var
+ ; return (inline, isScalar, vectorised vexpr)
+ }
+
+ -- For scalar right-hand sides, we know that the original binding will remain unaltered
+ -- (hence, we can refer to it without risk of cycles) - cf, 'tryConvert'.
+ scalarRHS = panic "Vectorise.scalarRHS: not implemented yet"
-- | Project out the vectorised version of a binding from some closure,
--- or return the original body if that doesn't work.
-tryConvert
- :: Var -- ^ Name of the original binding (eg @foo@)
- -> Var -- ^ Name of vectorised version of binding (eg @$vfoo@)
- -> CoreExpr -- ^ The original body of the binding.
- -> VM CoreExpr
-
+-- or return the original body if that doesn't work or the binding is scalar.
+--
+tryConvert :: Var -- ^ Name of the original binding (eg @foo@)
+ -> Var -- ^ Name of vectorised version of binding (eg @$vfoo@)
+ -> CoreExpr -- ^ The original body of the binding.
+ -> VM CoreExpr
tryConvert var vect_var rhs
- = fromVect (idType var) (Var vect_var) `orElseV` return rhs
-
+ = do { globalScalar <- isGlobalScalar var
+ ; if globalScalar
+ then
+ return rhs
+ else
+ fromVect (idType var) (Var vect_var) `orElseV` return rhs
+ }
-- | Builtin types and functions used by the vectoriser.
--- The source program uses functions from GHC.PArr, which the vectoriser rewrites
+-- The source program uses functions from Data.Array.Parallel, which the vectoriser rewrites
-- to use equivalent vectorised versions in the DPH backend packages.
--
-- The `Builtins` structure holds the name of all the things in the DPH packages
$ mkFastString ("elementsSel" ++ show i ++ "_" ++ show j ++ "#")
return ((i,j), Var v)
-
-- | Get the mapping of names in the Prelude to names in the DPH library.
-initBuiltinVars :: Builtins -> DsM [(Var, Var)]
-initBuiltinVars (Builtins { dphModules = mods })
+--
+initBuiltinVars :: Bool -- FIXME
+ -> Builtins -> DsM [(Var, Var)]
+initBuiltinVars compilingDPH (Builtins { dphModules = mods })
= do
uvars <- zipWithM externalVar umods ufs
vvars <- zipWithM externalVar vmods vfs
++ zip (map dataConWorkId cons) cvars
++ zip uvars vvars
where
- (umods, ufs, vmods, vfs) = unzip4 (preludeVars mods)
+ (umods, ufs, vmods, vfs) = if compilingDPH then ([], [], [], []) else unzip4 (preludeVars mods)
(cons, cmods, cfs) = unzip3 (preludeDataCons mods)
defaultDataConWorkers :: [DataCon]
builtinBoxedTyCons _
= [(tyConName intPrimTyCon, intTyCon)]
-
-- | Get a list of all scalar functions in the mock prelude.
-initBuiltinScalars :: Builtins -> DsM [Var]
-initBuiltinScalars bi
- = mapM (uncurry externalVar) (preludeScalars $ dphModules bi)
-
+--
+initBuiltinScalars :: Bool
+ -> Builtins -> DsM [Var]
+initBuiltinScalars True _bi = return []
+initBuiltinScalars False bi = mapM (uncurry externalVar) (preludeScalars $ dphModules bi)
-- | Lookup some variable given its name and the module that contains it.
externalVar :: Module -> FastString -> DsM Var
+-- WARNING: This module is a temporary kludge. It will soon go away entirely (once
+-- VECTORISE SCALAR pragmas are fully implemented.)
+
-- | Mapping of prelude functions to vectorised versions.
-- Functions like filterP currently have a working but naive version in GHC.PArr
-- During vectorisation we replace these by calls to filterPA, which are
import FastString
-preludeVars
- :: Modules -- ^ Modules containing the DPH backens
+preludeVars :: Modules
-> [( Module, FastString -- Maps the original variable to the one in the DPH
, Module, FastString)] -- packages that it should be rewritten to.
-
-preludeVars (Modules { dph_Combinators = dph_Combinators
- , dph_PArray = dph_PArray
+preludeVars (Modules { dph_Combinators = _dph_Combinators
+ , dph_PArray = _dph_PArray
, dph_Prelude_Int = dph_Prelude_Int
, dph_Prelude_Word8 = dph_Prelude_Word8
, dph_Prelude_Double = dph_Prelude_Double
, dph_Prelude_Bool = dph_Prelude_Bool
- , dph_Prelude_PArr = dph_Prelude_PArr
+ , dph_Prelude_PArr = _dph_Prelude_PArr
})
-- Functions that work on whole PArrays, defined in GHC.PArr
- = [ mk gHC_PARR (fsLit "mapP") dph_Combinators (fsLit "mapPA")
- , mk gHC_PARR (fsLit "zipWithP") dph_Combinators (fsLit "zipWithPA")
- , mk gHC_PARR (fsLit "zipP") dph_Combinators (fsLit "zipPA")
- , mk gHC_PARR (fsLit "unzipP") dph_Combinators (fsLit "unzipPA")
- , mk gHC_PARR (fsLit "filterP") dph_Combinators (fsLit "filterPA")
- , mk gHC_PARR (fsLit "lengthP") dph_Combinators (fsLit "lengthPA")
- , mk gHC_PARR (fsLit "replicateP") dph_Combinators (fsLit "replicatePA")
- , mk gHC_PARR (fsLit "!:") dph_Combinators (fsLit "indexPA")
- , mk gHC_PARR (fsLit "sliceP") dph_Combinators (fsLit "slicePA")
- , mk gHC_PARR (fsLit "crossMapP") dph_Combinators (fsLit "crossMapPA")
- , mk gHC_PARR (fsLit "singletonP") dph_Combinators (fsLit "singletonPA")
- , mk gHC_PARR (fsLit "concatP") dph_Combinators (fsLit "concatPA")
- , mk gHC_PARR (fsLit "+:+") dph_Combinators (fsLit "appPA")
- , mk gHC_PARR (fsLit "emptyP") dph_PArray (fsLit "emptyPA")
+ = [ {- mk gHC_PARR' (fsLit "mapP") dph_Combinators (fsLit "mapPA")
+ , mk gHC_PARR' (fsLit "zipWithP") dph_Combinators (fsLit "zipWithPA")
+ , mk gHC_PARR' (fsLit "zipP") dph_Combinators (fsLit "zipPA")
+ , mk gHC_PARR' (fsLit "unzipP") dph_Combinators (fsLit "unzipPA")
+ , mk gHC_PARR' (fsLit "filterP") dph_Combinators (fsLit "filterPA")
+ , mk gHC_PARR' (fsLit "lengthP") dph_Combinators (fsLit "lengthPA")
+ , mk gHC_PARR' (fsLit "replicateP") dph_Combinators (fsLit "replicatePA")
+ , mk gHC_PARR' (fsLit "!:") dph_Combinators (fsLit "indexPA")
+ , mk gHC_PARR' (fsLit "sliceP") dph_Combinators (fsLit "slicePA")
+ , mk gHC_PARR' (fsLit "crossMapP") dph_Combinators (fsLit "crossMapPA")
+ , mk gHC_PARR' (fsLit "singletonP") dph_Combinators (fsLit "singletonPA")
+ , mk gHC_PARR' (fsLit "concatP") dph_Combinators (fsLit "concatPA")
+ , mk gHC_PARR' (fsLit "+:+") dph_Combinators (fsLit "appPA")
+ , mk gHC_PARR' (fsLit "emptyP") dph_PArray (fsLit "emptyPA")
-- Map scalar functions to versions using closures.
- , mk' dph_Prelude_Int "div" "divV"
+ , -} mk' dph_Prelude_Int "div" "divV"
, mk' dph_Prelude_Int "mod" "modV"
, mk' dph_Prelude_Int "sqrt" "sqrtV"
, mk' dph_Prelude_Int "enumFromToP" "enumFromToPA"
, mk gHC_CLASSES (fsLit "&&") dph_Prelude_Bool (fsLit "andV")
, mk gHC_CLASSES (fsLit "||") dph_Prelude_Bool (fsLit "orV")
+{-
-- FIXME: temporary
, mk dph_Prelude_PArr (fsLit "fromPArrayP") dph_Prelude_PArr (fsLit "fromPArrayPA")
, mk dph_Prelude_PArr (fsLit "toPArrayP") dph_Prelude_PArr (fsLit "toPArrayPA")
, mk dph_Prelude_PArr (fsLit "updateP") dph_Combinators (fsLit "updatePA")
, mk dph_Prelude_PArr (fsLit "bpermuteP") dph_Combinators (fsLit "bpermutePA")
, mk dph_Prelude_PArr (fsLit "indexedP") dph_Combinators (fsLit "indexedPA")
- ]
+-} ]
where
mk = (,,,)
mk' mod v v' = mk mod (fsLit v) mod (fsLit v')
, mk' mod "floor" "floorV"
]
-
preludeScalars :: Modules -> [(Module, FastString)]
preludeScalars (Modules { dph_Prelude_Int = dph_Prelude_Int
, dph_Prelude_Word8 = dph_Prelude_Word8
setBoxedTyConsEnv,
updVectInfo
) where
+
import HscTypes
import InstEnv
import FamInstEnv
import CoreSyn
+import Type
import TyCon
import DataCon
import VarEnv
-- GlobalEnv ------------------------------------------------------------------
-- | The global environment.
--- These are things the exist at top-level.
+-- These are things that exist at top-level.
data GlobalEnv
- = GlobalEnv {
+ = GlobalEnv {
-- | Mapping from global variables to their vectorised versions.
- global_vars :: VarEnv Var
+ global_vars :: VarEnv Var
+
+ -- | Mapping from global variables that have a vectorisation declaration to the right-hand
+ -- side of that declaration and its type. This mapping only applies to non-scalar
+ -- vectorisation declarations. All variables with a scalar vectorisation declaration are
+ -- mentioned in 'global_scalars'.
+ , global_vect_decls :: VarEnv (Type, CoreExpr)
- -- | Purely scalar variables. Code which mentions only these
- -- variables doesn't have to be lifted.
- , global_scalars :: VarSet
+ -- | Purely scalar variables. Code which mentions only these variables doesn't have to be
+ -- lifted. This includes variables from the current module that have a scalar
+ -- vectorisation declaration and those that the vectoriser determines to be scalar.
+ , global_scalars :: VarSet
-- | Exported variables which have a vectorised version.
, global_exported_vars :: VarEnv (Var, Var)
, global_tycons :: NameEnv TyCon
-- | Mapping from DataCons to their vectorised versions.
- , global_datacons :: NameEnv DataCon
+ , global_datacons :: NameEnv DataCon
-- | Mapping from TyCons to their PA dfuns.
- , global_pa_funs :: NameEnv Var
+ , global_pa_funs :: NameEnv Var
-- | Mapping from TyCons to their PR dfuns.
, global_pr_funs :: NameEnv Var
, global_bindings :: [(Var, CoreExpr)]
}
-
-- | Create an initial global environment
-initGlobalEnv :: VectInfo -> (InstEnv, InstEnv) -> FamInstEnvs -> GlobalEnv
-initGlobalEnv info instEnvs famInstEnvs
- = GlobalEnv
- { global_vars = mapVarEnv snd $ vectInfoVar info
- , global_scalars = emptyVarSet
- , global_exported_vars = emptyVarEnv
- , global_tycons = mapNameEnv snd $ vectInfoTyCon info
- , global_datacons = mapNameEnv snd $ vectInfoDataCon info
- , global_pa_funs = mapNameEnv snd $ vectInfoPADFun info
- , global_pr_funs = emptyNameEnv
- , global_boxed_tycons = emptyNameEnv
- , global_inst_env = instEnvs
- , global_fam_inst_env = famInstEnvs
- , global_bindings = []
- }
-
+initGlobalEnv :: VectInfo -> [CoreVect] -> (InstEnv, InstEnv) -> FamInstEnvs -> GlobalEnv
+initGlobalEnv info vectDecls instEnvs famInstEnvs
+ = GlobalEnv
+ { global_vars = mapVarEnv snd $ vectInfoVar info
+ , global_vect_decls = mkVarEnv vects
+ , global_scalars = mkVarSet scalars
+ , global_exported_vars = emptyVarEnv
+ , global_tycons = mapNameEnv snd $ vectInfoTyCon info
+ , global_datacons = mapNameEnv snd $ vectInfoDataCon info
+ , global_pa_funs = mapNameEnv snd $ vectInfoPADFun info
+ , global_pr_funs = emptyNameEnv
+ , global_boxed_tycons = emptyNameEnv
+ , global_inst_env = instEnvs
+ , global_fam_inst_env = famInstEnvs
+ , global_bindings = []
+ }
+ where
+ vects = [(var, (varType var, exp)) | Vect var (Just exp) <- vectDecls]
+ scalars = [var | Vect var Nothing <- vectDecls]
-- Operators on Global Environments -------------------------------------------
extendImportedVarsEnv ps genv
= genv { global_vars = extendVarEnvList (global_vars genv) ps }
-
-- | Extend the set of scalar variables in an environment.
extendScalars :: [Var] -> GlobalEnv -> GlobalEnv
extendScalars vs genv
= genv { global_scalars = extendVarSetList (global_scalars genv) vs }
-
-- | Set the list of type family instances in an environment.
setFamEnv :: FamInstEnv -> GlobalEnv -> GlobalEnv
setFamEnv l_fam_inst genv
-- | Vectorise a polymorphic expression.
-vectPolyExpr
- :: Bool -- ^ When vectorising the RHS of a binding, whether that
- -- binding is a loop breaker.
- -> CoreExprWithFVs
- -> VM (Inline, VExpr)
-
-vectPolyExpr loop_breaker (_, AnnNote note expr)
- = do (inline, expr') <- vectPolyExpr loop_breaker expr
- return (inline, vNote note expr')
-
-vectPolyExpr loop_breaker expr
+--
+vectPolyExpr :: Bool -- ^ When vectorising the RHS of a binding, whether that
+ -- binding is a loop breaker.
+ -> [Var]
+ -> CoreExprWithFVs
+ -> VM (Inline, Bool, VExpr)
+vectPolyExpr loop_breaker recFns (_, AnnNote note expr)
+ = do (inline, isScalarFn, expr') <- vectPolyExpr loop_breaker recFns expr
+ return (inline, isScalarFn, vNote note expr')
+vectPolyExpr loop_breaker recFns expr
= do
arity <- polyArity tvs
polyAbstract tvs $ \args ->
do
- (inline, mono') <- vectFnExpr False loop_breaker mono
- return (addInlineArity inline arity,
+ (inline, isScalarFn, mono') <- vectFnExpr False loop_breaker recFns mono
+ return (addInlineArity inline arity, isScalarFn,
mapVect (mkLams $ tvs ++ args) mono')
where
(tvs, mono) = collectAnnTypeBinders expr
| Just (tycon, ty_args) <- splitTyConApp_maybe scrut_ty
, isAlgTyCon tycon
= vectAlgCase tycon ty_args scrut bndr ty alts
+ | otherwise = cantVectorise "Can't vectorise expression" (ppr scrut_ty)
where
scrut_ty = exprType (deAnnotate scrut)
vectExpr (_, AnnLet (AnnNonRec bndr rhs) body)
= do
- vrhs <- localV . inBind bndr . liftM snd $ vectPolyExpr False rhs
+ vrhs <- localV . inBind bndr . liftM (\(_,_,z)->z) $ vectPolyExpr False [] rhs
(vbndr, vbody) <- vectBndrIn bndr (vectExpr body)
return $ vLet (vNonRec vbndr vrhs) vbody
vect_rhs bndr rhs = localV
. inBind bndr
- . liftM snd
- $ vectPolyExpr (isLoopBreaker $ idOccInfo bndr) rhs
+ . liftM (\(_,_,z)->z)
+ $ vectPolyExpr (isLoopBreaker $ idOccInfo bndr) [] rhs
vectExpr e@(_, AnnLam bndr _)
- | isId bndr = liftM snd $ vectFnExpr True False e
+ | isId bndr = liftM (\(_,_,z) ->z) $ vectFnExpr True False [] e
{-
onlyIfV (isEmptyVarSet fvs) (vectScalarLam bs $ deAnnotate body)
`orElseV` vectLam True fvs bs body
(bs,body) = collectAnnValBinders e
-}
-vectExpr e = cantVectorise "Can't vectorise expression" (ppr $ deAnnotate e)
-
+vectExpr e = cantVectorise "Can't vectorise expression (vectExpr)" (ppr $ deAnnotate e)
-- | Vectorise an expression with an outer lambda abstraction.
-vectFnExpr
- :: Bool -- ^ When the RHS of a binding, whether that binding should be inlined.
- -> Bool -- ^ Whether the binding is a loop breaker.
- -> CoreExprWithFVs -- ^ Expression to vectorise. Must have an outer `AnnLam`.
- -> VM (Inline, VExpr)
-
-vectFnExpr inline loop_breaker e@(fvs, AnnLam bndr _)
- | isId bndr = onlyIfV (isEmptyVarSet fvs)
- (mark DontInline . vectScalarLam bs $ deAnnotate body)
- `orElseV` mark inlineMe (vectLam inline loop_breaker fvs bs body)
+--
+vectFnExpr :: Bool -- ^ When the RHS of a binding, whether that binding should be inlined.
+ -> Bool -- ^ Whether the binding is a loop breaker.
+ -> [Var]
+ -> CoreExprWithFVs -- ^ Expression to vectorise. Must have an outer `AnnLam`.
+ -> VM (Inline, Bool, VExpr)
+vectFnExpr inline loop_breaker recFns e@(fvs, AnnLam bndr _)
+ | isId bndr = onlyIfV True -- (isEmptyVarSet fvs) -- we check for free variables later. TODO: clean up
+ (mark DontInline True . vectScalarLam bs recFns $ deAnnotate body)
+ `orElseV` mark inlineMe False (vectLam inline loop_breaker fvs bs body)
where
(bs,body) = collectAnnValBinders e
+vectFnExpr _ _ _ e = mark DontInline False $ vectExpr e
-vectFnExpr _ _ e = mark DontInline $ vectExpr e
-
-mark :: Inline -> VM a -> VM (Inline, a)
-mark b p = do { x <- p; return (b,x) }
+mark :: Inline -> Bool -> VM a -> VM (Inline, Bool, a)
+mark b isScalarFn p = do { x <- p; return (b, isScalarFn, x) }
-- | Vectorise a function where are the args have scalar type,
-- that is Int, Float, Double etc.
vectScalarLam
- :: [Var] -- ^ Bound variables of function.
+ :: [Var] -- ^ Bound variables of function
+ -> [Var]
-> CoreExpr -- ^ Function body.
-> VM VExpr
-vectScalarLam args body
- = do scalars <- globalScalars
- onlyIfV (all is_scalar_ty arg_tys
- && is_scalar_ty res_ty
+vectScalarLam args recFns body
+ = do scalars' <- globalScalars
+ let scalars = unionVarSet (mkVarSet recFns) scalars'
+ onlyIfV (all is_prim_ty arg_tys
+ && is_prim_ty res_ty
&& is_scalar (extendVarSetList scalars args) body
&& uses scalars body)
$ do
arg_tys = map idType args
res_ty = exprType body
- is_scalar_ty ty
+ is_prim_ty ty
| Just (tycon, []) <- splitTyConApp_maybe ty
= tycon == intTyCon
|| tycon == floatTyCon
|| tycon == doubleTyCon
- || tycon == boolTyCon
| otherwise = False
-
- is_scalar vs (Var v) = v `elemVarSet` vs
- is_scalar _ e@(Lit _) = is_scalar_ty $ exprType e
- is_scalar _ (App (Var v) (Lit _))
- | Just con <- isDataConId_maybe v = con `elem` [intDataCon, floatDataCon, doubleDataCon]
-
- is_scalar vs (App e1 e2) = is_scalar vs e1 && is_scalar vs e2
- is_scalar vs (Let (NonRec b letExpr) body)
- = is_scalar vs letExpr && is_scalar (extendVarSet vs b) body
- is_scalar vs (Let (Rec bnds) body)
+ cantbe_parr_expr expr = not $ maybe_parr_ty $ exprType expr
+
+ maybe_parr_ty ty = maybe_parr_ty' [] ty
+
+ maybe_parr_ty' _ ty | Nothing <- splitTyConApp_maybe ty = False -- TODO: is this really what we want to do with polym. types?
+ maybe_parr_ty' alreadySeen ty
+ | isPArrTyCon tycon = True
+ | isPrimTyCon tycon = False
+ | isAbstractTyCon tycon = True
+ | isFunTyCon tycon || isProductTyCon tycon || isTupleTyCon tycon = any (maybe_parr_ty' alreadySeen) args
+ | isDataTyCon tycon = any (maybe_parr_ty' alreadySeen) args ||
+ hasParrDataCon alreadySeen tycon
+ | otherwise = True
+ where
+ Just (tycon, args) = splitTyConApp_maybe ty
+
+
+ hasParrDataCon alreadySeen tycon
+ | tycon `elem` alreadySeen = False
+ | otherwise =
+ any (maybe_parr_ty' $ tycon : alreadySeen) $ concat $ map dataConOrigArgTys $ tyConDataCons tycon
+
+ -- checks to make sure expression can't contain a non-scalar subexpression. Might err on the side of caution whenever
+ -- an external (non data constructor) variable is used, or anonymous data constructor
+ is_scalar vs e@(Var v)
+ | Just _ <- isDataConId_maybe v = cantbe_parr_expr e
+ | otherwise = cantbe_parr_expr e && (v `elemVarSet` vs)
+ is_scalar _ e@(Lit _) = cantbe_parr_expr e
+
+ is_scalar vs e@(App e1 e2) = cantbe_parr_expr e &&
+ is_scalar vs e1 && is_scalar vs e2
+ is_scalar vs e@(Let (NonRec b letExpr) body)
+ = cantbe_parr_expr e &&
+ is_scalar vs letExpr && is_scalar (extendVarSet vs b) body
+ is_scalar vs e@(Let (Rec bnds) body)
= let vs' = extendVarSetList vs (map fst bnds)
- in all (is_scalar vs') (map snd bnds) && is_scalar vs' body
- is_scalar vs (Case e eId ty alts)
+ in cantbe_parr_expr e &&
+ all (is_scalar vs') (map snd bnds) && is_scalar vs' body
+ is_scalar vs e@(Case eC eId ty alts)
= let vs' = extendVarSet vs eId
- in is_scalar_ty ty &&
- is_scalar vs' e &&
+ in cantbe_parr_expr e &&
+ is_prim_ty ty &&
+ is_scalar vs' eC &&
(all (is_scalar_alt vs') alts)
- is_scalar _ _ = False
+ is_scalar _ _ = False
is_scalar_alt vs (_, bs, e)
= is_scalar (extendVarSetList vs bs) e
vectTyAppExpr :: CoreExprWithFVs -> [Type] -> VM VExpr
vectTyAppExpr (_, AnnVar v) tys = vectPolyVar v tys
-vectTyAppExpr e tys = cantVectorise "Can't vectorise expression"
+vectTyAppExpr e tys = cantVectorise "Can't vectorise expression (vectTyExpr)"
(ppr $ deAnnotate e `mkTyApps` tys)
lookupVar,
maybeCantVectoriseVarM,
dumpVar,
-
+ addGlobalScalar,
+ deleteGlobalScalar,
+
-- * Primitives
lookupPrimPArray,
lookupPrimMethod
-)
-where
+) where
+
import Vectorise.Monad.Base
import Vectorise.Monad.Naming
import Vectorise.Monad.Local
import Vectorise.Builtins
import Vectorise.Env
-import HscTypes hiding ( MonadThings(..) )
+import HscTypes hiding ( MonadThings(..) )
+import DynFlags
import MonadUtils (liftIO)
-import Module
import TyCon
import Var
import VarEnv
import Id
import DsMonad
import Outputable
-import Control.Monad
+import FastString
+import Control.Monad
+import VarSet
-- | Run a vectorisation computation.
-initV :: PackageId
- -> HscEnv
- -> ModGuts
- -> VectInfo
- -> VM a
- -> IO (Maybe (VectInfo, a))
-
-initV pkg hsc_env guts info p
- = do
- -- XXX: ignores error messages and warnings, check that this is
- -- indeed ok (the use of "Just r" suggests so)
- (_,Just r) <- initDs hsc_env (mg_module guts)
- (mg_rdr_env guts)
- (mg_types guts)
- go
- return r
+--
+initV :: HscEnv
+ -> ModGuts
+ -> VectInfo
+ -> VM a
+ -> IO (Maybe (VectInfo, a))
+initV hsc_env guts info thing_inside
+ = do { (_, Just r) <- initDs hsc_env (mg_module guts) (mg_rdr_env guts) (mg_types guts) go
+ ; return r
+ }
where
go
- = do
- builtins <- initBuiltins pkg
- builtin_vars <- initBuiltinVars builtins
- builtin_tycons <- initBuiltinTyCons builtins
- let builtin_datacons = initBuiltinDataCons builtins
- builtin_boxed <- initBuiltinBoxedTyCons builtins
- builtin_scalars <- initBuiltinScalars builtins
-
- eps <- liftIO $ hscEPS hsc_env
- let famInstEnvs = (eps_fam_inst_env eps, mg_fam_inst_env guts)
- instEnvs = (eps_inst_env eps, mg_inst_env guts)
-
- builtin_prs <- initBuiltinPRs builtins instEnvs
- builtin_pas <- initBuiltinPAs builtins instEnvs
-
- let genv = extendImportedVarsEnv builtin_vars
- . extendScalars builtin_scalars
- . extendTyConsEnv builtin_tycons
- . extendDataConsEnv builtin_datacons
- . extendPAFunsEnv builtin_pas
- . setPRFunsEnv builtin_prs
- . setBoxedTyConsEnv builtin_boxed
- $ initGlobalEnv info instEnvs famInstEnvs
-
- r <- runVM p builtins genv emptyLocalEnv
- case r of
- Yes genv _ x -> return $ Just (new_info genv, x)
- No -> return Nothing
+ = do { -- pick a DPH backend
+ ; dflags <- getDOptsDs
+ ; case dphPackageMaybe dflags of
+ Nothing -> failWithDs $ ptext selectBackendErr
+ Just pkg -> do {
+
+ -- set up tables of builtin entities
+ ; let compilingDPH = dphBackend dflags == DPHThis -- FIXME: temporary kludge support
+ ; builtins <- initBuiltins pkg
+ ; builtin_vars <- initBuiltinVars compilingDPH builtins
+ ; builtin_tycons <- initBuiltinTyCons builtins
+ ; let builtin_datacons = initBuiltinDataCons builtins
+ ; builtin_boxed <- initBuiltinBoxedTyCons builtins
+ ; builtin_scalars <- initBuiltinScalars compilingDPH builtins
+
+          -- set up class and type family environments
+ ; eps <- liftIO $ hscEPS hsc_env
+ ; let famInstEnvs = (eps_fam_inst_env eps, mg_fam_inst_env guts)
+ instEnvs = (eps_inst_env eps, mg_inst_env guts)
+ ; builtin_prs <- initBuiltinPRs builtins instEnvs
+ ; builtin_pas <- initBuiltinPAs builtins instEnvs
+
+ -- construct the initial global environment
+ ; let genv = extendImportedVarsEnv builtin_vars
+ . extendScalars builtin_scalars
+ . extendTyConsEnv builtin_tycons
+ . extendDataConsEnv builtin_datacons
+ . extendPAFunsEnv builtin_pas
+ . setPRFunsEnv builtin_prs
+ . setBoxedTyConsEnv builtin_boxed
+ $ initGlobalEnv info (mg_vect_decls guts) instEnvs famInstEnvs
+
+ -- perform vectorisation
+ ; r <- runVM thing_inside builtins genv emptyLocalEnv
+ ; case r of
+ Yes genv _ x -> return $ Just (new_info genv, x)
+ No -> return Nothing
+ } }
new_info genv = updVectInfo genv (mg_types guts) info
+ selectBackendErr = sLit "To use -fvectorise select a DPH backend with -fdph-par or -fdph-seq"
-- Builtins -------------------------------------------------------------------
-- | Lift a desugaring computation using the `Builtins` into the vectorisation monad.
= cantVectorise "Variable not vectorised:" (ppr var)
+-- local scalars --------------------------------------------------------------
+
+addGlobalScalar :: Var -> VM ()
+addGlobalScalar var
+ = do { traceVt "addGlobalScalar" (ppr var)
+ ; updGEnv $ \env -> env{global_scalars = extendVarSet (global_scalars env) var}
+ }
+
+deleteGlobalScalar :: Var -> VM ()
+deleteGlobalScalar var
+ = do { traceVt "deleteGlobalScalar" (ppr var)
+ ; updGEnv $ \env -> env{global_scalars = delVarSet (global_scalars env) var}
+ }
+
+
-- Primitives -----------------------------------------------------------------
lookupPrimPArray :: TyCon -> VM (Maybe TyCon)
lookupPrimPArray = liftBuiltinDs . primPArray
maybeCantVectorise,
maybeCantVectoriseM,
+ -- * Debugging
+ traceVt, dumpOptVt, dumpVt,
+
-- * Control
noV, traceNoV,
ensureV, traceEnsureV,
orElseV,
fixV,
) where
+
import Vectorise.Builtins
import Vectorise.Env
import DsMonad
+import TcRnMonad
+import ErrUtils
import Outputable
-
+import DynFlags
+import StaticFlags
+
+import Control.Monad
+import System.IO (stderr)
+
-- The Vectorisation Monad ----------------------------------------------------
+
+-- | Vectorisation can either succeed with new environment and a value,
-- or return with failure.
data VResult a
Yes genv' lenv' x -> runVM (f x) bi genv' lenv'
No -> return No
+instance Functor VM where
+ fmap = liftM
+
+instance MonadIO VM where
+ liftIO = liftDs . liftIO
+
-- Lifting --------------------------------------------------------------------
-- | Lift a desugaring computation into the vectorisation monad.
Just x -> return x
Nothing -> cantVectorise s d
+
+-- Debugging ------------------------------------------------------------------
+
+-- |Output a trace message if -ddump-vt-trace is active.
+--
+traceVt :: String -> SDoc -> VM ()
+traceVt herald doc
+ | 1 <= opt_TraceLevel = liftDs $
+ traceOptIf Opt_D_dump_vt_trace $
+ hang (text herald) 2 doc
+ | otherwise = return ()
+
+-- |Dump the given program conditionally.
+--
+dumpOptVt :: DynFlag -> String -> SDoc -> VM ()
+dumpOptVt flag header doc
+ = do { b <- liftDs $ doptM flag
+ ; if b
+ then dumpVt header doc
+ else return ()
+ }
+
+-- |Dump the given program unconditionally.
+--
+dumpVt :: String -> SDoc -> VM ()
+dumpVt header doc
+ = do { unqual <- liftDs mkPrintUnqualifiedDs
+ ; liftIO $ printForUser stderr unqual (mkDumpDoc header doc)
+ }
+
-- Control --------------------------------------------------------------------
-- | Return some result saying we've failed.
noV :: VM a
setGEnv,
updGEnv,
- -- * Vars
- defGlobalVar,
-
- -- * Scalars
- globalScalars,
+ -- * Vars
+ defGlobalVar,
+
+ -- * Vectorisation declarations
+ lookupVectDecl,
+
+ -- * Scalars
+ globalScalars, isGlobalScalar,
-- * TyCons
lookupTyCon,
-- * PR Dictionaries
lookupTyConPR
) where
+
import Vectorise.Monad.Base
import Vectorise.Env
+
+import CoreSyn
+import Type
import TyCon
import DataCon
import NameEnv
| otherwise = env
+-- Vectorisation declarations -------------------------------------------------
+-- | Check whether a variable has a (non-scalar) vectorisation declaration.
+lookupVectDecl :: Var -> VM (Maybe (Type, CoreExpr))
+lookupVectDecl var = readGEnv $ \env -> lookupVarEnv (global_vect_decls env) var
+
+
-- Scalars --------------------------------------------------------------------
-- | Get the set of global scalar variables.
globalScalars :: VM VarSet
-globalScalars
- = readGEnv global_scalars
+globalScalars = readGEnv global_scalars
+
+-- | Check whether a given variable is in the set of global scalar variables.
+isGlobalScalar :: Var -> VM Bool
+isGlobalScalar var = readGEnv $ \env -> elemVarSet var (global_scalars env)
-- TyCons ---------------------------------------------------------------------
-{-# OPTIONS_GHC -fno-warn-missing-signatures #-}
{-# OPTIONS_GHC -XNoMonoLocalBinds #-}
--- Roman likes local bindings
--- If this module lives on I'd like to get rid of this flag in due course
module Vectorise.Type.Env (
vectTypeEnv,
-)
-where
+) where
+
import Vectorise.Env
import Vectorise.Vect
import Vectorise.Monad
import Control.Monad
import Data.List
-debug = False
-dtrace s x = if debug then pprTrace "VectType" s x else x
-- | Vectorise a type environment.
-- The type environment contains all the type things defined in a module.
-vectTypeEnv
- :: TypeEnv
- -> VM ( TypeEnv -- Vectorised type environment.
- , [FamInst] -- New type family instances.
- , [(Var, CoreExpr)]) -- New top level bindings.
-
+--
+vectTypeEnv :: TypeEnv
+ -> VM ( TypeEnv -- Vectorised type environment.
+ , [FamInst] -- New type family instances.
+ , [(Var, CoreExpr)]) -- New top level bindings.
vectTypeEnv env
- = dtrace (ppr env)
- $ do
+ = do
+ traceVt "** vectTypeEnv" $ ppr env
+
cs <- readGEnv $ mk_map . global_tycons
-- Split the list of TyCons into the ones we have to vectorise vs the
where
mk_map env = listToUFM_Directly [(u, getUnique n /= u) | (u,n) <- nameEnvUniqueElts env]
-
-
buildTyConBindings :: TyCon -> TyCon -> TyCon -> TyCon -> SumRepr -> VM Var
buildTyConBindings orig_tc vect_tc prepr_tc pdata_tc repr
= do vectDataConWorkers orig_tc vect_tc pdata_tc
buildPADict vect_tc prepr_tc pdata_tc repr
-
vectDataConWorkers :: TyCon -> TyCon -> TyCon -> VM ()
vectDataConWorkers orig_tc vect_tc arr_tc
= do bs <- sequence
vectAndLiftType ty | Just ty' <- coreView ty = vectAndLiftType ty'
vectAndLiftType ty
= do
- mdicts <- mapM paDictArgType tyvars
+ mdicts <- mapM paDictArgType (reverse tyvars)
let dicts = [dict | Just dict <- mdicts]
vmono_ty <- vectType mono_ty
lmono_ty <- mkPDataType vmono_ty
dictsPA <- liftM catMaybes $ mapM paDictArgType tyvars
-- pack it all back together.
- return $ abstractType tyvars (dictsVect ++ dictsPA) tyBody''
+ traceVt "vect ForAllTy: " $ ppr (abstractType tyvars (dictsPA ++ dictsVect) tyBody'')
+ return $ abstractType tyvars (dictsPA ++ dictsVect) tyBody''
vectType ty = cantVectorise "Can't vectorise type" (ppr ty)
AC_CANONICAL_HOST
AC_CANONICAL_TARGET
-# If no argument was given for a configuration variable, then discard
-# the guessed canonical system and use the configuration of the
-# bootstrapping ghc. If an argument was given, map it from gnu format
-# to ghc format.
-#
-# For why we do it this way, see: #3637, #1717, #2951
-
-if test "$build_alias" = ""
-then
- if test "${WithGhc}" != ""
- then
- build=$bootstrap_target
- echo "Build platform inferred as: $build"
- else
- echo "Can't work out build platform"
- exit 1
- fi
-
- BuildArch=`echo "$build" | sed 's/-.*//'`
- BuildVendor=`echo "$build" | sed -e 's/.*-\(.*\)-.*/\1/'`
- BuildOS=`echo "$build" | sed 's/.*-//'`
-else
- GHC_CONVERT_CPU([$build_cpu], [BuildArch])
- GHC_CONVERT_VENDOR([$build_vendor], [BuildVendor])
- GHC_CONVERT_OS([$build_os], [BuildOS])
-fi
-
-if test "$host_alias" = ""
-then
- if test "${WithGhc}" != ""
- then
- host=$bootstrap_target
- echo "Host platform inferred as: $host"
- else
- echo "Can't work out host platform"
- exit 1
- fi
-
- HostArch=`echo "$host" | sed 's/-.*//'`
- HostVendor=`echo "$host" | sed -e 's/.*-\(.*\)-.*/\1/'`
- HostOS=`echo "$host" | sed 's/.*-//'`
-else
- GHC_CONVERT_CPU([$host_cpu], [HostArch])
- GHC_CONVERT_VENDOR([$host_vendor], [HostVendor])
- GHC_CONVERT_OS([$host_os], [HostOS])
-fi
-
-if test "$target_alias" = ""
-then
- if test "${WithGhc}" != ""
- then
- target=$bootstrap_target
- echo "Target platform inferred as: $target"
- else
- echo "Can't work out target platform"
- exit 1
- fi
-
- TargetArch=`echo "$target" | sed 's/-.*//'`
- TargetVendor=`echo "$target" | sed -e 's/.*-\(.*\)-.*/\1/'`
- TargetOS=`echo "$target" | sed 's/.*-//'`
-else
- GHC_CONVERT_CPU([$target_cpu], [TargetArch])
- GHC_CONVERT_VENDOR([$target_vendor], [TargetVendor])
- GHC_CONVERT_OS([$target_os], [TargetOS])
-fi
+FPTOOLS_SET_PLATFORM_VARS
exeext=''
soext='.so'
NmCmd="$NM"
AC_SUBST([NmCmd])
+SplitObjsBroken=NO
+if test "$TargetOS_CPP" = "darwin"
+then
+ XCodeVersion=`xcodebuild -version | grep Xcode | sed "s/Xcode //"`
+ XCodeVersion1=`echo "$XCodeVersion" | sed 's/\..*//'`
+ XCodeVersion2=`echo "$XCodeVersion" | sed 's/.*\.//'`
+ # Old XCode versions don't actually give the XCode version
+ if test "$XCodeVersion" = ""
+ then
+ SplitObjsBroken=YES
+ fi
+ # Split objects is broken (#4013) with XCode < 3.2
+ if test "$XCodeVersion1" -lt 3
+ then
+ SplitObjsBroken=YES
+ fi
+ if test "$XCodeVersion1" -eq 3 && test "$XCodeVersion2" -lt 2
+ then
+ SplitObjsBroken=YES
+ fi
+fi
+AC_SUBST([SplitObjsBroken])
+
dnl ** Mac OS X: explicit deployment target
dnl --------------------------------------------------------------
AC_ARG_WITH([macosx-deployment-target],
HaveDtrace=NO
AC_PATH_PROG(DtraceCmd,dtrace)
if test -n "$DtraceCmd"; then
- if test "x$TargetOS_CPP-$TargetVendor_CPP" == "xdarwin-apple"; then
+ if test "x$TargetOS_CPP-$TargetVendor_CPP" == "xdarwin-apple" -o "x$TargetOS_CPP-$TargetVendor_CPP" == "xsolaris2-unknown"; then
HaveDtrace=YES
fi
fi
AC_MSG_ERROR([compiler/ghc.cabal.in contains tab characters; please remove them])
fi
-AC_CONFIG_FILES([mk/config.mk mk/install.mk mk/project.mk compiler/ghc.cabal ghc/ghc-bin.cabal utils/runghc/runghc.cabal ghc.spec extra-gcc-opts docs/users_guide/ug-book.xml docs/users_guide/ug-ent.xml docs/index.html libraries/prologue.txt distrib/ghc.iss distrib/configure.ac distrib/MacOS/installer-scripts/create-links distrib/MacOS/installer-scripts/Uninstaller distrib/MacOS/GHC-system.pmdoc/index.xml])
+AC_CONFIG_FILES([mk/config.mk mk/install.mk mk/project.mk compiler/ghc.cabal ghc/ghc-bin.cabal utils/runghc/runghc.cabal ghc.spec extra-gcc-opts docs/users_guide/ug-book.xml docs/users_guide/ug-ent.xml docs/index.html libraries/prologue.txt distrib/ghc.iss distrib/configure.ac])
AC_CONFIG_COMMANDS([mk/stamp-h],[echo timestamp > mk/stamp-h])
AC_OUTPUT
\
To uninstall, execute\
\
- /Library/Frameworks/GHC.framework/Versions/@ProjectVersion@-@TargetArch_CPP@/Tools/Uninstaller}]]></resource></locale></resources><flags/><item type="file">01ghc.xml</item><mod>properties.systemDomain</mod><mod>properties.title</mod><mod>properties.userDomain</mod><mod>properties.anywhereDomain</mod><mod>description</mod></pkmkdoc>
+ /Library/Frameworks/GHC.framework/Versions/@FRAMEWORK_VERSION@/Tools/Uninstaller}]]></resource></locale></resources><flags/><item type="file">01ghc.xml</item><mod>properties.systemDomain</mod><mod>properties.title</mod><mod>properties.userDomain</mod><mod>properties.anywhereDomain</mod><mod>description</mod></pkmkdoc>
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
- shellScript = "# We need to be at the top of the ghc tree\ncd ../..\n\nPREFIX=${INSTALL_PATH}/${CONTENTS_FOLDER_PATH}/usr\n\nif [ ${ACTION} = build ]; then\n # put explicit --prefix last, in case the extra configure args contain a prefix\n ./configure ${XCODE_EXTRA_CONFIGURE_ARGS} --prefix=${PREFIX} || exit 1\n MAKE_ACTION=\nelse\n MAKE_ACTION=${ACTION}\nfi\n\n# - We must explictly force the creation of a symbol table in .a files on BSD, and not all versions of Cabal\n# do that consistently; hence, the EXTRA_AR_ARGS.\nmake DESTDIR=${DSTROOT} HADDOCK_DOCS=YES EXTRA_AR_ARGS=-s PERL=/usr/bin/perl ${MAKE_ACTION} < /dev/null || exit 1\nmake DESTDIR=${DSTROOT} || exit 1";
+ shellScript = "case \"$ACTION\" in clean) rm -rf \"$GHC_UNPACKS_INTO\" ;; build) tar -jxf \"$BINDIST\" && cd \"$GHC_UNPACKS_INTO\" && ./configure --prefix=\"$INSTALL_PATH/$CONTENTS_FOLDER_PATH/usr\" ;; install) cd \"$GHC_UNPACKS_INTO\" && make install DESTDIR=\"$DSTROOT\" ;; *) echo \"Unknown action $ACTION\" >&2 ; exit 1 ;; esac ";
};
E76B00450D52DFDB00A05A2F /* ShellScript */ = {
isa = PBXShellScriptBuildPhase;
+++ /dev/null
-############################################################################
-#
-# This is the GHC Makefile for MacOS X-specific targets
-#
-# Targets:
-#
-# framework-pkg [MacOS only]
-# Builds /Library/Frameworks/GHC.framework wrapped into a Mac
-# installer package; must be executed in a ./configure'd tree
-# (--prefix doesn't matter as it will be overridden); other
-# ./configure arguments are passed through, unless overwritten
-# with XCODE_EXTRA_CONFIGURE_ARGS. A deployment target can
-# be specified by setting the corresponding ./configure
-# argument or by setting MACOSX_DEPLOYMENT_TARGET.
-#
-# framework-binary-dist [MacOS only]
-# Builds GHC.framework encapsulating a binary distribution
-# (to give a relocatable framework); must be used in a fully
-# built tree
-#
-############################################################################
-
-include ../../mk/config.mk
-
-ifeq "$(ProjectVersion)" ""
-$(error Please run ./configure first)
-endif
-
-include ../../mk/custom-settings.mk
-
-# The framework version is a string like
-# 7.0.1-i386
-# for an i386 build of GHC 7.0.1. It's used for the subdirectory of
-# /Library/Frameworks/GHC.framework/Versions/
-FRAMEWORK_VERSION = $(ProjectVersion)-$(TargetArch_CPP)
-
-# Xcode requires CURRENT_PROJECT_VERSION to be an int or float. We use this
-# only as the build version (aka CFBundleVersion).
-CURRENT_PROJECT_VERSION = $(ProjectVersionInt).$(ProjectPatchLevel)
-
-# The user-visible CFBundleShortVersionString
-SHORT_VERSION_STRING = $(FRAMEWORK_VERSION)
-
-# Name of the installer package
-PACKAGE_NAME = GHC-$(FRAMEWORK_VERSION).pkg
-
-# Determine arguments that should be passed to ./configure from within Xcode
-#
-# By default, we pass whatever was used when the present tree was configured.
-# The user can override this by setting XCODE_EXTRA_CONFIGURE_ARGS. If
-# MACOSX_DEPLOYMENT_TARGET is set, the target is added to whatever arguments
-# are passed.
-export XCODE_EXTRA_CONFIGURE_ARGS
-ifeq "$(XCODE_EXTRA_CONFIGURE_ARGS)" ""
-XCODE_EXTRA_CONFIGURE_ARGS = $(shell echo $(CONFIGURE_ARGS))
-endif
-ifneq "$(MACOSX_DEPLOYMENT_TARGET)" ""
-XCODE_EXTRA_CONFIGURE_ARGS += --with-macosx-deployment-target=$(MACOSX_DEPLOYMENT_TARGET)
-endif
-
-# Determine whether we need to pass a "-target" option to packagemaker
-#
-# If a deployment target has been set, we use the same target for packagemaker.
-ifneq "$(MACOSX_DEPLOYMENT_TARGET)" ""
-PACKAGEMAKER_TARGET = -target $(MACOSX_DEPLOYMENT_TARGET)
-endif
-
-# Xcode's installation build product location (this is where the GHC.framework
-# is assembled)
-DSTROOT=/tmp/GHC.dst
-
-# Tools directory for a system volume install
-TOOLS_SYSTEM=$(DSTROOT)/Library/Frameworks/GHC.framework/Versions/$(FRAMEWORK_VERSION)/Tools
-
-PACKAGEMAKER=/Developer/usr/bin/packagemaker
-
-# Build and package GHC.framework for /Library/Frameworks
-#
-# * Be careful to clean out Xcode's build product location first, as Xcode
-# sets permissions such that a second install on top of an existing one
-# fails
-# * Make there are no contents component descriptions in the .pmdoc. These are
-# created by the PackageMaker GUI and have per default the wrong ownership
-# and permissions for all files (as well as prevent correct setting of those
-# for files that are not in the content list).
-framework-pkg:
- -chmod -fR u+w $(DSTROOT)
- -$(RM) -rf $(DSTROOT)
- mkdir -p $(TOOLS_SYSTEM)
- cp installer-scripts/Uninstaller $(TOOLS_SYSTEM)
- cp installer-scripts/create-links $(TOOLS_SYSTEM)
- xcodebuild -target GHC-systemwide clean build\
- CURRENT_PROJECT_VERSION=$(CURRENT_PROJECT_VERSION)\
- SHORT_VERSION_STRING=$(SHORT_VERSION_STRING)\
- FRAMEWORK_VERSION=$(FRAMEWORK_VERSION)\
- CURRENT_LIBRARY_VERSION=$(FRAMEWORK_VERSION)\
- COMMAND_MODE=unix2003
- xcodebuild -target GHC-systemwide install\
- CURRENT_PROJECT_VERSION=$(CURRENT_PROJECT_VERSION)\
- SHORT_VERSION_STRING=$(SHORT_VERSION_STRING)\
- FRAMEWORK_VERSION=$(FRAMEWORK_VERSION)\
- CURRENT_LIBRARY_VERSION=$(FRAMEWORK_VERSION)\
- COMMAND_MODE=unix2003
- -$(RM) -f GHC-system.pmdoc/*-contents.xml
- $(PACKAGEMAKER) -v --doc GHC-system.pmdoc\
- $(PACKAGEMAKER_TARGET) -o $(TOP)/$(PACKAGE_NAME)\
- -i org.haskell.ghc.$(FRAMEWORK_VERSION)
-
-# If we don't specify COMMAND_MODE=unix2003 then xcodebuild defaults
-# to setting it to legacy, which means that ar builds archives
-# without a table of contents. That makes the build fail later on.
-
-
-
-# Instead of making 'binary-dist' a Makefile dependency, we let xcodebuild call
-# 'make binary-dist'. This has the advantage that xcode knows the framework
-# path into which the distribution should be installed and can instruct
-# binary-dist to put it directly into the right place without copying the whole
-# tree yet another time.
-#
-framework-binary-dist:
- xcodebuild -target GHC-relocatable clean build\
- CURRENT_PROJECT_VERSION=$(CURRENT_PROJECT_VERSION)\
- SHORT_VERSION_STRING=$(SHORT_VERSION_STRING)\
- FRAMEWORK_VERSION=$(FRAMEWORK_VERSION)\
- CURRENT_LIBRARY_VERSION=$(FRAMEWORK_VERSION)
- @echo "FIXME: call the packager"; exit 1
- # FIXME: The approach with the binary-dist in the package has the
- # problem that the BOM is wrong and the installer relocation feature
- # isn't sufficient as the layout in the binary-dist and the installed
- # tree is different
FRAMEWORK_DIRECTORY="$INSTALL_DEST/GHC.framework"
VERSIONS_DIRECTORY="$FRAMEWORK_DIRECTORY/Versions"
-MY_VERSION=@ProjectVersion@-@TargetArch_CPP@
+MY_VERSION=@FRAMEWORK_VERSION@
MY_DIRECTORY="$VERSIONS_DIRECTORY/$MY_VERSION"
CURRENT_DIRECTORY="$VERSIONS_DIRECTORY/Current"
INSTALL_BASE=/usr
fi
-VERSION=@ProjectVersion@-@TargetArch_CPP@
+VERSION=@FRAMEWORK_VERSION@
GHC_BASE="$INSTALL_DEST/GHC.framework/Versions/$VERSION"
INSTALL_BIN="$INSTALL_BASE/bin"
--- /dev/null
+#!/bin/bash
+
+set -e
+
+die () {
+ echo "Error: $1" >&2
+ exit 1
+}
+
+if ! [ -d installer-scripts ]
+then
+ die "Doesn't look like you are running this script from the right location"
+fi
+
+if [ "$#" -ne 1 ]
+then
+    die "Must be given one argument (the bindist)"
+fi
+
+BINDIST="$1"
+
+GHC_UNPACKS_INTO=`echo "$BINDIST" | sed 's/^\(.*\/\)\{0,1\}\(ghc-\([0-9]\{1,\}\)\.\([0-9]\{1,\}\)\.\([0-9]\{1,\}\)\(\.\([0-9]\{1,\}\)\)\{0,1\}\)-\([a-z0-9_]\{1,\}\)-apple-darwin\.tar\.bz2$/\2/'`
+GHC_VERSION_DIG1=`echo "$BINDIST" | sed 's/^\(.*\/\)\{0,1\}\(ghc-\([0-9]\{1,\}\)\.\([0-9]\{1,\}\)\.\([0-9]\{1,\}\)\(\.\([0-9]\{1,\}\)\)\{0,1\}\)-\([a-z0-9_]\{1,\}\)-apple-darwin\.tar\.bz2$/\3/'`
+GHC_VERSION_DIG2=`echo "$BINDIST" | sed 's/^\(.*\/\)\{0,1\}\(ghc-\([0-9]\{1,\}\)\.\([0-9]\{1,\}\)\.\([0-9]\{1,\}\)\(\.\([0-9]\{1,\}\)\)\{0,1\}\)-\([a-z0-9_]\{1,\}\)-apple-darwin\.tar\.bz2$/\4/'`
+GHC_VERSION_DIG3=`echo "$BINDIST" | sed 's/^\(.*\/\)\{0,1\}\(ghc-\([0-9]\{1,\}\)\.\([0-9]\{1,\}\)\.\([0-9]\{1,\}\)\(\.\([0-9]\{1,\}\)\)\{0,1\}\)-\([a-z0-9_]\{1,\}\)-apple-darwin\.tar\.bz2$/\5/'`
+GHC_VERSION_DIG4=`echo "$BINDIST" | sed 's/^\(.*\/\)\{0,1\}\(ghc-\([0-9]\{1,\}\)\.\([0-9]\{1,\}\)\.\([0-9]\{1,\}\)\(\.\([0-9]\{1,\}\)\)\{0,1\}\)-\([a-z0-9_]\{1,\}\)-apple-darwin\.tar\.bz2$/\7/'`
+GHC_ARCHITECTURE=`echo "$BINDIST" | sed 's/^\(.*\/\)\{0,1\}\(ghc-\([0-9]\{1,\}\)\.\([0-9]\{1,\}\)\.\([0-9]\{1,\}\)\(\.\([0-9]\{1,\}\)\)\{0,1\}\)-\([a-z0-9_]\{1,\}\)-apple-darwin\.tar\.bz2$/\8/'`
+
+if [ "$GHC_ARCHITECTURE" == "$BINDIST" ]
+then
+ die "Bindist filename not in expected format"
+fi
+
+if [ "$(($GHC_VERSION_DIG2 % 2))" -eq 1 ]
+then
+ if [ "$GHC_VERSION_DIG4" == "" ]
+ then
+ FRAMEWORK_VERSION="$GHC_VERSION_DIG1.$GHC_VERSION_DIG2.$GHC_VERSION_DIG3-$GHC_ARCHITECTURE"
+ CURRENT_PROJECT_VERSION=`printf "%02d%02d00.%08d\n" "$GHC_VERSION_DIG1" "$GHC_VERSION_DIG2" "$GHC_VERSION_DIG3"`
+ else
+ die "Huh? 4 component HEAD version?"
+ fi
+else
+ if [ "$GHC_VERSION_DIG4" == "" ]
+ then
+ FRAMEWORK_VERSION="$GHC_VERSION_DIG1.$GHC_VERSION_DIG2.$GHC_VERSION_DIG3-$GHC_ARCHITECTURE"
+ CURRENT_PROJECT_VERSION=`printf "%02d%02d%02d\n" "$GHC_VERSION_DIG1" "$GHC_VERSION_DIG2" "$GHC_VERSION_DIG3"`
+ else
+ FRAMEWORK_VERSION="$GHC_VERSION_DIG1.$GHC_VERSION_DIG2.$GHC_VERSION_DIG3.$GHC_VERSION_DIG4-$GHC_ARCHITECTURE"
+ CURRENT_PROJECT_VERSION=`printf "%02d%02d%02d.%08d\n" "$GHC_VERSION_DIG1" "$GHC_VERSION_DIG2" "$GHC_VERSION_DIG3" "$GHC_VERSION_DIG4"`
+ fi
+fi
+
+# The user-visible CFBundleShortVersionString
+SHORT_VERSION_STRING="$FRAMEWORK_VERSION"
+
+# Name of the installer package
+PACKAGE_NAME="GHC-$FRAMEWORK_VERSION.pkg"
+
+# Xcode's installation build product location (this is where the GHC.framework
+# is assembled)
+DSTROOT="/tmp/GHC.dst"
+
+# Tools directory for a system volume install
+TOOLS_SYSTEM="$DSTROOT/Library/Frameworks/GHC.framework/Versions/$FRAMEWORK_VERSION/Tools"
+
+rm -rf "$DSTROOT"
+mkdir -p "$TOOLS_SYSTEM"
+sed "s/@FRAMEWORK_VERSION@/$FRAMEWORK_VERSION/" \
+ < installer-scripts/Uninstaller.in \
+ > installer-scripts/Uninstaller
+sed "s/@FRAMEWORK_VERSION@/$FRAMEWORK_VERSION/" \
+ < installer-scripts/create-links.in \
+ > installer-scripts/create-links
+sed "s/@FRAMEWORK_VERSION@/$FRAMEWORK_VERSION/" \
+ < GHC-system.pmdoc/index.xml.in \
+ > GHC-system.pmdoc/index.xml
+cp installer-scripts/Uninstaller installer-scripts/create-links "$TOOLS_SYSTEM"
+xcodebuild -target GHC-systemwide clean build install \
+ BINDIST="$BINDIST" \
+ GHC_UNPACKS_INTO="$GHC_UNPACKS_INTO" \
+ CURRENT_PROJECT_VERSION="$CURRENT_PROJECT_VERSION" \
+ SHORT_VERSION_STRING="$SHORT_VERSION_STRING" \
+ FRAMEWORK_VERSION="$FRAMEWORK_VERSION" \
+ CURRENT_LIBRARY_VERSION="$FRAMEWORK_VERSION" \
+ COMMAND_MODE=unix2003
+rm -f GHC-system.pmdoc/*-contents.xml
+
+/Developer/usr/bin/packagemaker \
+ -v --doc GHC-system.pmdoc \
+ -o $PACKAGE_NAME \
+ -i org.haskell.ghc."$FRAMEWORK_VERSION"
+
+# Clean up
+xcodebuild -target GHC-systemwide clean \
+ CURRENT_PROJECT_VERSION="$CURRENT_PROJECT_VERSION" \
+ SHORT_VERSION_STRING="$SHORT_VERSION_STRING" \
+ FRAMEWORK_VERSION="$FRAMEWORK_VERSION" \
+ CURRENT_LIBRARY_VERSION="$FRAMEWORK_VERSION" \
+ COMMAND_MODE=unix2003
+rm -r "$GHC_UNPACKS_INTO"
+rm -r "$DSTROOT"
+rm installer-scripts/Uninstaller installer-scripts/create-links
+rm GHC-system.pmdoc/index.xml
+
FP_GMP
+bootstrap_target=`ghc/stage2/build/tmp/ghc-stage2 +RTS --info | grep '^ ,("Target platform"' | sed -e 's/.*, "//' -e 's/")//' | tr -d '\r'`
+FPTOOLS_SET_PLATFORM_VARS
+
#
dnl ** Check Perl installation **
#
<varlistentry>
<term>
+ <option>-ddump-vect</option>:
+ <indexterm><primary><option>-ddump-vect</option></primary></indexterm>
+ </term>
+ <listitem>
+ <para>dumps the output of the vectoriser.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>
<option>-ddump-simpl</option>:
<indexterm><primary><option>-ddump-simpl</option></primary></indexterm>
</term>
</varlistentry>
<varlistentry>
- <term>
+ <term>
<option>-ddump-tc-trace</option>
<indexterm><primary><option>-ddump-tc-trace</option></primary></indexterm>
</term>
- <listitem>
- <para>Make the type checker be *real* chatty about what it is
- upto.</para>
- </listitem>
+ <listitem>
+ <para>Make the type checker be *real* chatty about what it is
+ upto.</para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>
+ <option>-ddump-vt-trace</option>
+      <indexterm><primary><option>-ddump-vt-trace</option></primary></indexterm>
+ </term>
+ <listitem>
+ <para>Make the vectoriser be *real* chatty about what it is
+ upto.</para>
+ </listitem>
</varlistentry>
<varlistentry>
<entry>dynamic</entry>
<entry>-</entry>
</row>
- <row>
- <entry><option>-ddump-rules</option></entry>
- <entry>Dump rules</entry>
- <entry>dynamic</entry>
- <entry>-</entry>
- </row>
+ <row>
+ <entry><option>-ddump-rules</option></entry>
+ <entry>Dump rules</entry>
+ <entry>dynamic</entry>
+ <entry>-</entry>
+ </row>
+ <row>
+ <entry><option>-ddump-vect</option></entry>
+ <entry>Dump vectoriser input and output</entry>
+ <entry>dynamic</entry>
+ <entry>-</entry>
+ </row>
<row>
<entry><option>-ddump-simpl</option></entry>
<entry>Dump final simplifier output</entry>
<entry>dynamic</entry>
<entry>-</entry>
</row>
- <row>
- <entry><option>-ddump-tc-trace</option></entry>
- <entry>Trace typechecker</entry>
- <entry>dynamic</entry>
- <entry>-</entry>
- </row>
+ <row>
+ <entry><option>-ddump-tc-trace</option></entry>
+ <entry>Trace typechecker</entry>
+ <entry>dynamic</entry>
+ <entry>-</entry>
+ </row>
+ <row>
+ <entry><option>-ddump-vt-trace</option></entry>
+ <entry>Trace vectoriser</entry>
+ <entry>dynamic</entry>
+ <entry>-</entry>
+ </row>
<row>
<entry><option>-ddump-rn-trace</option></entry>
<entry>Trace renamer</entry>
<varlistentry>
<term>
+ <literal>:script</literal> <optional><replaceable>n</replaceable></optional>
+ <literal>filename</literal>
+ <indexterm><primary><literal>:script</literal></primary></indexterm>
+ </term>
+ <listitem>
+ <para>Executes the lines of a file as a series of GHCi commands. This command
+ is compatible with multiline statements as set by <literal>:set +m</literal>
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>
<literal>:set</literal> <optional><replaceable>option</replaceable>...</optional>
<indexterm><primary><literal>:set</literal></primary></indexterm>
</term>
stop :: String,
options :: [GHCiOption],
prelude :: GHC.Module,
+ line_number :: !Int, -- input line
break_ctr :: !Int,
breaks :: ![(Int, BreakLocation)],
tickarrays :: ModuleEnv TickArray,
reflectGHCi x $ do
GHC.handleSourceError (\e -> do GHC.printException e
return GHC.RunFailed) $ do
- GHC.runStmt expr step
+ GHC.runStmtWithLocation (progname st) (line_number st) expr step
resume :: (SrcSpan -> Bool) -> GHC.SingleStep -> GHCi GHC.RunResult
resume canLogSpan step = do
("quit", quit, noCompletion),
("reload", keepGoing' reloadModule, noCompletion),
("run", keepGoing runRun, completeFilename),
+ ("script", keepGoing' scriptCmd, completeFilename),
("set", keepGoing setCmd, completeSetOptions),
("show", keepGoing showCmd, completeShowOptions),
("sprint", keepGoing sprintCmd, completeExpression),
" :quit exit GHCi\n" ++
" :reload reload the current module set\n" ++
" :run function [<arguments> ...] run the function with the given arguments\n" ++
+   " :script <filename> run the script <filename>\n" ++
" :type <expr> show the type of <expr>\n" ++
" :undef <cmd> undefine user-defined command :<cmd>\n" ++
" :!<command> run the shell command <command>\n" ++
-- session = session,
options = [],
prelude = prel_mod,
+ line_number = 1,
break_ctr = 0,
breaks = [],
tickarrays = emptyModuleEnv,
-- This would be a good place for runFileInputT.
Right hdl ->
do runInputTWithPrefs defaultPrefs defaultSettings $
- runCommands $ fileLoop hdl
+ runCommands False $ fileLoop hdl
liftIO (hClose hdl `catchIO` \_ -> return ())
where
getDirectory f = case takeDirectory f of "" -> "."; d -> d
Nothing ->
do
-- enter the interactive loop
- runGHCiInput $ runCommands $ nextInputLine show_prompt is_tty
+ runGHCiInput $ runCommands True $ nextInputLine show_prompt is_tty
Just exprs -> do
-- just evaluate the expression we were given
enqueueCommands exprs
-- this used to be topHandlerFastExit, see #2228
$ topHandler e
runInputTWithPrefs defaultPrefs defaultSettings $ do
- runCommands' handle (return Nothing)
+ runCommands' handle True (return Nothing)
-- and finally, exit
liftIO $ when (verbosity dflags > 0) $ putStrLn "Leaving GHCi."
else return True
#endif
-fileLoop :: MonadIO m => Handle -> InputT m (Maybe String)
+incrementLines :: InputT GHCi ()
+incrementLines = do
+ st <- lift $ getGHCiState
+ let ln = 1+(line_number st)
+ lift $ setGHCiState st{line_number=ln}
+
+fileLoop :: Handle -> InputT GHCi (Maybe String)
fileLoop hdl = do
l <- liftIO $ tryIO $ hGetLine hdl
case l of
-- this can happen if the user closed stdin, or
-- perhaps did getContents which closes stdin at
-- EOF.
- Right l -> return (Just l)
+ Right l -> do
+ incrementLines
+ return (Just l)
mkPrompt :: GHCi String
mkPrompt = do
c:cs -> do setGHCiState st{ cmdqueue = cs }
return (Just c)
-runCommands :: InputT GHCi (Maybe String) -> InputT GHCi ()
+runCommands :: Bool -> InputT GHCi (Maybe String) -> InputT GHCi ()
runCommands = runCommands' handler
runCommands' :: (SomeException -> GHCi Bool) -- Exception handler
+ -> Bool
-> InputT GHCi (Maybe String) -> InputT GHCi ()
-runCommands' eh getCmd = do
+runCommands' eh resetLineTo1 getCmd = do
+ when resetLineTo1 $ lift $ do st <- getGHCiState
+ setGHCiState $ st { line_number = 0 }
b <- ghandle (\e -> case fromException e of
Just UserInterrupt -> return $ Just False
_ -> case fromException e of
(runOneCommand eh getCmd)
case b of
Nothing -> return ()
- Just _ -> runCommands' eh getCmd
+ Just _ -> runCommands' eh resetLineTo1 getCmd
runOneCommand :: (SomeException -> GHCi Bool) -> InputT GHCi (Maybe String)
-> InputT GHCi (Maybe Bool)
ml <- lift $ isOptionSet Multiline
if ml
then do
- mb_stmt <- checkInputForLayout stmt 1 getCmd
+ mb_stmt <- checkInputForLayout stmt getCmd
case mb_stmt of
Nothing -> return $ Just True
Just ml_stmt -> do
-- #4316
-- lex the input. If there is an unclosed layout context, request input
-checkInputForLayout :: String -> Int -> InputT GHCi (Maybe String)
+checkInputForLayout :: String -> InputT GHCi (Maybe String)
-> InputT GHCi (Maybe String)
-checkInputForLayout stmt line_number getStmt = do
+checkInputForLayout stmt getStmt = do
dflags' <- lift $ getDynFlags
let dflags = xopt_set dflags' Opt_AlternativeLayoutRule
st <- lift $ getGHCiState
let buf = stringToStringBuffer stmt
- loc = mkSrcLoc (fsLit (progname st)) line_number 1
+ loc = mkSrcLoc (fsLit (progname st)) (line_number st) 1
pstate = Lexer.mkPState dflags buf loc
case Lexer.unP goToEnd pstate of
(Lexer.POk _ False) -> return $ Just stmt
Nothing -> return Nothing
Just str -> if str == ""
then return $ Just stmt
- else checkInputForLayout (stmt++"\n"++str) (line_number+1) getStmt
+ else do
+ checkInputForLayout (stmt++"\n"++str) getStmt
where goToEnd = do
eof <- Lexer.nextIsEOF
if eof
shellEscape str = liftIO (system str >> return False)
-----------------------------------------------------------------------------
+-- running a script file #1363
+
+scriptCmd :: String -> InputT GHCi ()
+scriptCmd s = do
+ case words s of
+ [s] -> runScript s
+ _ -> ghcError (CmdLineError "syntax: :script <filename>")
+
+runScript :: String -- ^ filename
+ -> InputT GHCi ()
+runScript filename = do
+ either_script <- liftIO $ tryIO (openFile filename ReadMode)
+ case either_script of
+ Left _err -> ghcError (CmdLineError $ "IO error: \""++filename++"\" "
+ ++(ioeGetErrorString _err))
+ Right script -> do
+ st <- lift $ getGHCiState
+ let prog = progname st
+ line = line_number st
+ lift $ setGHCiState st{progname=filename,line_number=0}
+ scriptLoop script
+ liftIO $ hClose script
+ new_st <- lift $ getGHCiState
+ lift $ setGHCiState new_st{progname=prog,line_number=line}
+ where scriptLoop script = do
+ res <- runOneCommand handler $ fileLoop script
+ case res of
+ Nothing -> return ()
+ Just succ -> if succ
+ then scriptLoop script
+ else return ()
+
+-----------------------------------------------------------------------------
-- Browsing a module's contents
browseCmd :: Bool -> String -> InputT GHCi ()
field_offset(Capability, r);
field_offset(Capability, lock);
+ struct_field(Capability, no);
struct_field(Capability, mut_lists);
struct_field(Capability, context_switch);
struct_field(Capability, sparks);
#ifndef EVENTLOG_CONSTANTS_ONLY
typedef StgWord16 EventTypeNum;
-typedef StgWord64 EventTimestamp; // in nanoseconds
+typedef StgWord64 EventTimestamp; /* in nanoseconds */
typedef StgWord32 EventThreadID;
typedef StgWord16 EventCapNo;
-typedef StgWord16 EventPayloadSize; // variable-size events
-typedef StgWord16 EventThreadStatus; // status for EVENT_STOP_THREAD
+typedef StgWord16 EventPayloadSize; /* variable-size events */
+typedef StgWord16 EventThreadStatus; /* status for EVENT_STOP_THREAD */
#endif
#else
#define ACQUIRE_LOCK(mutex) EnterCriticalSection(mutex)
-#define TRY_ACQUIRE_LOCK(mutex) (TryEnterCriticalSection(mutex) != 0)
+#define TRY_ACQUIRE_LOCK(mutex) (TryEnterCriticalSection(mutex) == 0)
#define RELEASE_LOCK(mutex) LeaveCriticalSection(mutex)
// I don't know how to do this. TryEnterCriticalSection() doesn't do
cat ghc-tarballs/libffi/libffi*.tar.gz | $(GZIP_CMD) -d | { cd libffi && $(TAR_CMD) -xf - ; }
mv libffi/libffi-* libffi/build
chmod +x libffi/ln
+	# don't report non-SELinux systems as SELinux
+ cd libffi/build && "$(PATCH_CMD)" -p0 < ../libffi.selinux-detection-3.0.8.patch
# Because -Werror may be in SRC_CC_OPTS/SRC_LD_OPTS, we need to turn
# warnings off or the compilation of libffi might fail due to warnings
--- /dev/null
+src/closures.c (selinux_enabled_check): Fix strncmp usage bug.
+
+http://github.com/atgreen/libffi/commit/eaf444eabc4c78703c0f98ac0197b1619c1b1bef
+
+--- src/closures.c
++++ src/closures.c
+@@ -146,7 +146,7 @@
+ p = strchr (p + 1, ' ');
+ if (p == NULL)
+ break;
+- if (strncmp (p + 1, "selinuxfs ", 10) != 0)
++ if (strncmp (p + 1, "selinuxfs ", 10) == 0)
+ {
+ free (buf);
+ fclose (f);
# doing object-file splitting
ArchSupportsSplitObjs=$(strip $(if $(filter $(TargetArch_CPP),i386 x86_64 powerpc sparc),YES,NO))
-# Object splitting is disabled on darwin due to #4013
-OsSupportsSplitObjs=$(strip $(if $(filter $(TargetOS_CPP),mingw32 cygwin32 linux solaris2 freebsd dragonfly netbsd openbsd),YES,NO))
+OsSupportsSplitObjs=$(strip $(if $(filter $(TargetOS_CPP),mingw32 cygwin32 linux darwin solaris2 freebsd dragonfly netbsd openbsd),YES,NO))
+SplitObjsBroken = @SplitObjsBroken@
# lazy test, so that $(GhcUnregisterised) can be set in build.mk
SupportsSplitObjs=$(strip \
$(if $(and $(filter YES,$(ArchSupportsSplitObjs)),\
$(filter YES,$(OsSupportsSplitObjs)),\
+ $(filter NO,$(SplitObjsBroken)),\
$(filter NO,$(BootingFromHc)),\
$(filter NO,$(GhcUnregisterised))),\
YES,NO))
SHELL = /bin/sh
HaveDtrace = @HaveDtrace@
-# There are problems with dtrace on 64bit 10.5. For now at least, we
-# just turn dtrace off unless you override USE_DTRACE
-USE_DTRACE = NO
+USE_DTRACE = $(HaveDtrace)
DTRACE = @DtraceCmd@
LD = @LdCmd@
}
else
{
- value = sections[reloc->r_symbolnum-1].offset
- - sections[reloc->r_symbolnum-1].addr
- + (uint64_t) image;
+ // If the relocation is not through the global offset table
+ // or external, then set the value to the baseValue. This
+ // will leave displacements into the __const section
+ // unchanged (as they ought to be).
+
+ value = baseValue;
}
IF_DEBUG(linker, debugBelch("relocateSection: value = %p\n", (void *)value));
/* -----------------------------------------------------------------------------
*
- * (c) The GHC Team, 1998-2004
+ * (c) The GHC Team, 1998-2011
*
* Out-of-line primitive operations
*
W_ tso;
W_ why_blocked;
W_ what_next;
- W_ ret;
+ W_ ret, cap, locked;
tso = R1;
ret = why_blocked;
}
}
- RET_N(ret);
+
+ cap = TO_W_(Capability_no(StgTSO_cap(tso)));
+
+ if ((TO_W_(StgTSO_flags(tso)) & TSO_LOCKED) != 0) {
+ locked = 1;
+ } else {
+ locked = 0;
+ }
+
+ RET_NNN(ret,cap,locked);
}
/* -----------------------------------------------------------------------------
// We should go through the macro HASKELLEVENT_USER_MSG_ENABLED from
// RtsProbes.h, but that header file includes unistd.h, which doesn't
// work in Cmm
+#if !defined(solaris2_TARGET_OS)
(enabled) = foreign "C" __dtrace_isenabled$HaskellEvent$user__msg$v1() [];
+#else
+    // Solaris' DTrace can't handle the
+    // __dtrace_isenabled$HaskellEvent$user__msg$v1
+    // call above. That call only tests whether the user__msg probe is
+    // enabled, and exists purely as a performance optimization. Since
+    // preparing the probe is not that complex, we skip that test on
+    // Solaris and unconditionally treat the probe as enabled here.
+    // Note that this does not mean the probe will actually be used at
+    // runtime! You still need to enable it by consuming it in your
+    // dtrace script, as you would with any other probe.
+ enabled = 1;
+#endif
if (enabled != 0) {
foreign "C" dtraceUserMsgWrapper(MyCapability() "ptr", msg "ptr") [];
}
static rtsBool ccs_to_ignore ( CostCentreStack *ccs );
static void count_ticks ( CostCentreStack *ccs );
static void inherit_costs ( CostCentreStack *ccs );
-static void reportCCS ( CostCentreStack *ccs, nat indent );
+static void findCCSMaxLens ( CostCentreStack *ccs, nat indent, nat *max_label_len, nat *max_module_len );
+static void logCCS ( CostCentreStack *ccs, nat indent, nat max_label_len, nat max_module_len );
+static void reportCCS ( CostCentreStack *ccs );
static void DecCCS ( CostCentreStack *ccs );
static void DecBackEdge ( CostCentreStack *ccs,
CostCentreStack *oldccs );
report_per_cc_costs( void )
{
CostCentre *cc, *next;
+ nat max_label_len, max_module_len;
aggregate_cc_costs(CCS_MAIN);
sorted_cc_list = NULL;
+ max_label_len = max_module_len = 0;
+
for (cc = CC_LIST; cc != NULL; cc = next) {
next = cc->link;
if (cc->time_ticks > total_prof_ticks/100
|| cc->mem_alloc > total_alloc/100
|| RtsFlags.CcFlags.doCostCentres >= COST_CENTRES_ALL) {
insert_cc_in_sorted_list(cc);
+
+ max_label_len = stg_max(strlen(cc->label), max_label_len);
+ max_module_len = stg_max(strlen(cc->module), max_module_len);
}
}
- fprintf(prof_file, "%-30s %-20s", "COST CENTRE", "MODULE");
+ fprintf(prof_file, "%-*s %-*s", max_label_len, "COST CENTRE", max_module_len, "MODULE");
fprintf(prof_file, "%6s %6s", "%time", "%alloc");
if (RtsFlags.CcFlags.doCostCentres >= COST_CENTRES_VERBOSE) {
fprintf(prof_file, " %5s %9s", "ticks", "bytes");
if (cc_to_ignore(cc)) {
continue;
}
- fprintf(prof_file, "%-30s %-20s", cc->label, cc->module);
+ fprintf(prof_file, "%-*s %-*s", max_label_len, cc->label, max_module_len, cc->module);
fprintf(prof_file, "%6.1f %6.1f",
total_prof_ticks == 0 ? 0.0 : (cc->time_ticks / (StgFloat) total_prof_ticks * 100),
total_alloc == 0 ? 0.0 : (cc->mem_alloc / (StgFloat)
-------------------------------------------------------------------------- */
static void
-fprint_header( void )
+fprint_header( nat max_label_len, nat max_module_len )
{
fprintf(prof_file, "%-24s %-10s individual inherited\n", "", "");
- fprintf(prof_file, "%-24s %-50s", "COST CENTRE", "MODULE");
+ fprintf(prof_file, "%-*s %-*s", max_label_len, "COST CENTRE", max_module_len, "MODULE");
fprintf(prof_file, "%6s %10s %5s %5s %5s %5s", "no.", "entries", "%time", "%alloc", "%time", "%alloc");
if (RtsFlags.CcFlags.doCostCentres >= COST_CENTRES_VERBOSE) {
{
nat count;
char temp[128]; /* sigh: magic constant */
-
+
stopProfTimer();
total_prof_ticks = 0;
inherit_costs(CCS_MAIN);
- fprint_header();
- reportCCS(pruneCCSTree(CCS_MAIN), 0);
+ reportCCS(pruneCCSTree(CCS_MAIN));
}
static void
-reportCCS(CostCentreStack *ccs, nat indent)
+findCCSMaxLens(CostCentreStack *ccs, nat indent, nat *max_label_len, nat *max_module_len) {
+ CostCentre *cc;
+ IndexTable *i;
+
+ cc = ccs->cc;
+
+ *max_label_len = stg_max(*max_label_len, indent + strlen(cc->label));
+ *max_module_len = stg_max(*max_module_len, strlen(cc->module));
+
+ for (i = ccs->indexTable; i != 0; i = i->next) {
+ if (!i->back_edge) {
+ findCCSMaxLens(i->ccs, indent+1, max_label_len, max_module_len);
+ }
+ }
+}
+
+static void
+logCCS(CostCentreStack *ccs, nat indent, nat max_label_len, nat max_module_len)
{
CostCentre *cc;
IndexTable *i;
/* force printing of *all* cost centres if -P -P */
{
- fprintf(prof_file, "%-*s%-*s %-50s",
- indent, "", 24-indent, cc->label, cc->module);
+ fprintf(prof_file, "%-*s%-*s %-*s",
+ indent, "", max_label_len-indent, cc->label, max_module_len, cc->module);
fprintf(prof_file, "%6ld %11.0f %5.1f %5.1f %5.1f %5.1f",
ccs->ccsID, (double) ccs->scc_count,
for (i = ccs->indexTable; i != 0; i = i->next) {
if (!i->back_edge) {
- reportCCS(i->ccs, indent+1);
+ logCCS(i->ccs, indent+1, max_label_len, max_module_len);
}
}
}
+static void
+reportCCS(CostCentreStack *ccs)
+{
+ nat max_label_len, max_module_len;
+ max_label_len = max_module_len = 0;
+
+ findCCSMaxLens(ccs, 0, &max_label_len, &max_module_len);
+
+ fprint_header(max_label_len, max_module_len);
+ logCCS(ccs, 0, max_label_len, max_module_len);
+}
+
/* Traverse the cost centre stack tree and accumulate
* ticks/allocations.
#include "rts/EventLogFormat.h"
-// -----------------------------------------------------------------------------
-// Payload datatypes for Haskell events
-// -----------------------------------------------------------------------------
-
-// We effectively have:
-//
-// typedef uint16_t EventTypeNum;
-// typedef uint64_t EventTimestamp; // in nanoseconds
-// typedef uint32_t EventThreadID;
-// typedef uint16_t EventCapNo;
-// typedef uint16_t EventPayloadSize; // variable-size events
-// typedef uint16_t EventThreadStatus;
-
-
-// -----------------------------------------------------------------------------
-// The HaskellEvent provider captures everything from eventlog for use with
-// dtrace
-// -----------------------------------------------------------------------------
-
-// These probes correspond to the events defined in EventLogFormat.h
-//
+/* -----------------------------------------------------------------------------
+ * Payload datatypes for Haskell events
+ * -----------------------------------------------------------------------------
+ */
+
+/* We effectively have:
+ *
+ * typedef uint16_t EventTypeNum;
+ * typedef uint64_t EventTimestamp; // in nanoseconds
+ * typedef uint32_t EventThreadID;
+ * typedef uint16_t EventCapNo;
+ * typedef uint16_t EventPayloadSize; // variable-size events
+ * typedef uint16_t EventThreadStatus;
+ */
+
+/* -----------------------------------------------------------------------------
+ * The HaskellEvent provider captures everything from eventlog for use with
+ * dtrace
+ * -----------------------------------------------------------------------------
+ */
+
+/* These probes correspond to the events defined in EventLogFormat.h
+ */
provider HaskellEvent {
- // scheduler events
+ /* scheduler events */
probe create__thread (EventCapNo, EventThreadID);
probe run__thread (EventCapNo, EventThreadID);
probe stop__thread (EventCapNo, EventThreadID, EventThreadStatus, EventThreadID);
probe request__par__gc (EventCapNo);
probe create__spark__thread (EventCapNo, EventThreadID);
- // other events
-//This one doesn't seem to be used at all at the moment:
-// probe log__msg (char *);
+ /* other events */
+/* This one doesn't seem to be used at all at the moment: */
+/* probe log__msg (char *); */
probe startup (EventCapNo);
- // we don't need EVENT_BLOCK_MARKER with dtrace
+ /* we don't need EVENT_BLOCK_MARKER with dtrace */
probe user__msg (EventCapNo, char *);
probe gc__idle (EventCapNo);
probe gc__work (EventCapNo);
// We have to be careful here: in the parallel GC, another
// thread might evacuate this closure while we're looking at it,
// so grab the info pointer just once.
- info = spark->header.info;
- if (IS_FORWARDING_PTR(info)) {
- tmp = (StgClosure*)UN_FORWARDING_PTR(info);
- /* if valuable work: shift inside the pool */
- if (closure_SHOULD_SPARK(tmp)) {
- elements[botInd] = tmp; // keep entry (new address)
- botInd++;
- n++;
- } else {
- pruned_sparks++; // discard spark
- cap->sparks_fizzled++;
- }
- } else if (HEAP_ALLOCED(spark) &&
- (Bdescr((P_)spark)->flags & BF_EVACUATED)) {
- if (closure_SHOULD_SPARK(spark)) {
- elements[botInd] = spark; // keep entry (new address)
- botInd++;
- n++;
+ if (GET_CLOSURE_TAG(spark) != 0) {
+ // Tagged pointer is a value, so the spark has fizzled. It
+ // probably never happens that we get a tagged pointer in
+ // the spark pool, because we would have pruned the spark
+ // during the previous GC cycle if it turned out to be
+ // evaluated, but it doesn't hurt to have this check for
+ // robustness.
+ pruned_sparks++;
+ cap->sparks_fizzled++;
+ } else {
+ info = spark->header.info;
+ if (IS_FORWARDING_PTR(info)) {
+ tmp = (StgClosure*)UN_FORWARDING_PTR(info);
+ /* if valuable work: shift inside the pool */
+ if (closure_SHOULD_SPARK(tmp)) {
+ elements[botInd] = tmp; // keep entry (new address)
+ botInd++;
+ n++;
+ } else {
+ pruned_sparks++; // discard spark
+ cap->sparks_fizzled++;
+ }
+ } else if (HEAP_ALLOCED(spark) &&
+ (Bdescr((P_)spark)->flags & BF_EVACUATED)) {
+ if (closure_SHOULD_SPARK(spark)) {
+ elements[botInd] = spark; // keep entry (new address)
+ botInd++;
+ n++;
+ } else {
+ pruned_sparks++; // discard spark
+ cap->sparks_fizzled++;
+ }
} else {
pruned_sparks++; // discard spark
- cap->sparks_fizzled++;
+ cap->sparks_gcd++;
}
- } else {
- pruned_sparks++; // discard spark
- cap->sparks_gcd++;
}
currInd++;
rts_$1_OBJS = $$(rts_$1_C_OBJS) $$(rts_$1_S_OBJS) $$(rts_$1_CMM_OBJS)
+ifeq "$(USE_DTRACE)" "YES"
+ifeq "$(TargetOS_CPP)" "solaris2"
+# On Darwin we don't need to generate an object file containing the
+# probes defined in the DTrace script, but DTrace on Solaris requires
+# generating an object file from the probe definitions.
+rts_$1_DTRACE_OBJS = rts/dist/build/RtsProbes.$$($1_osuf)
+
+rts/dist/build/RtsProbes.$$($1_osuf) : $$(rts_$1_OBJS)
+ $(DTRACE) -G -C -Iincludes -DDTRACE -s rts/RtsProbes.d -o \
+ $$@ $$(rts_$1_OBJS)
+endif
+endif
+
rts_dist_$1_CC_OPTS += -DRtsWay=\"rts_$1\"
# Making a shared library for the RTS.
"$$(rts_dist_HC)" -package-name rts -shared -dynamic -dynload deploy \
-no-auto-link-packages `cat rts/libs.depend` $$(rts_$1_OBJS) $$(ALL_RTS_DEF_LIBS) -o $$@
else
-$$(rts_$1_LIB) : $$(rts_$1_OBJS) rts/libs.depend
+$$(rts_$1_LIB) : $$(rts_$1_OBJS) $$(rts_$1_DTRACE_OBJS) rts/libs.depend
"$$(RM)" $$(RM_OPTS) $$@
"$$(rts_dist_HC)" -package-name rts -shared -dynamic -dynload deploy \
- -no-auto-link-packages `cat rts/libs.depend` $$(rts_$1_OBJS) -o $$@
+ -no-auto-link-packages `cat rts/libs.depend` $$(rts_$1_OBJS) \
+ $$(rts_$1_DTRACE_OBJS) -o $$@
ifeq "$$(darwin_HOST_OS)" "1"
# Ensure library's install name is correct before anyone links with it.
install_name_tool -id $(ghclibdir)/$$(rts_$1_LIB_NAME) $$@
endif
endif
else
-$$(rts_$1_LIB) : $$(rts_$1_OBJS)
+$$(rts_$1_LIB) : $$(rts_$1_OBJS) $$(rts_$1_DTRACE_OBJS)
"$$(RM)" $$(RM_OPTS) $$@
- echo $$(rts_$1_OBJS) | "$$(XARGS)" $$(XARGS_OPTS) "$$(AR)" $$(AR_OPTS) $$(EXTRA_AR_ARGS) $$@
+ echo $$(rts_$1_OBJS) $$(rts_$1_DTRACE_OBJS) | "$$(XARGS)" $$(XARGS_OPTS) "$$(AR)" \
+ $$(AR_OPTS) $$(EXTRA_AR_ARGS) $$@
endif
endif
# to force it to use a different gcc, we need to give the path in
# the option cpppath.
-DTRACEPROBES_SRC = rts/RtsProbes.d
-$(DTRACEPROBES_H): $(DTRACEPROBES_SRC) includes/ghcplatform.h | $(dir $@)/.
- "$(DTRACE)" $(filter -I%,$(rts_CC_OPTS)) -C -x cpppath=$(WhatGccIsCalled) -h -o $@ -s $<
+ifeq "$(TargetOS_CPP)" "darwin"
+# Darwin has a flag to tell dtrace which cpp to use.
+# Unfortunately, this isn't supported on Solaris (see the Solaris Dynamic
+# Tracing Guide, Chapter 16, for the configuration variables available on
+# Solaris).
+DTRACE_FLAGS = -x cpppath=$(WhatGccIsCalled)
+endif
+DTRACEPROBES_SRC = rts/RtsProbes.d
+$(DTRACEPROBES_H): $(DTRACEPROBES_SRC) includes/ghcplatform.h | $$(dir $$@)/.
+ "$(DTRACE)" $(filter -I%,$(rts_CC_OPTS)) -C $(DTRACE_FLAGS) -h -o $@ -s $<
endif
# -----------------------------------------------------------------------------