mkSplitMarkerLabel,
mkDirty_MUT_VAR_Label,
mkUpdInfoLabel,
- mkSeqInfoLabel,
mkIndStaticInfoLabel,
mkMainCapabilityLabel,
mkMAP_FROZEN_infoLabel,
mkSplitMarkerLabel = RtsLabel (RtsCode SLIT("__stg_split_marker"))
mkDirty_MUT_VAR_Label = RtsLabel (RtsCode SLIT("dirty_MUT_VAR"))
mkUpdInfoLabel = RtsLabel (RtsInfo SLIT("stg_upd_frame"))
-mkSeqInfoLabel = RtsLabel (RtsInfo SLIT("stg_seq_frame"))
mkIndStaticInfoLabel = RtsLabel (RtsInfo SLIT("stg_IND_STATIC"))
mkMainCapabilityLabel = RtsLabel (RtsData SLIT("MainCapability"))
mkMAP_FROZEN_infoLabel = RtsLabel (RtsInfo SLIT("stg_MUT_ARR_PTRS_FROZEN0"))
-- selector, closure type, description, type
{ basicInfo $3 (mkIntCLit (fromIntegral $5)) 0 $7 $9 $11 }
- | 'INFO_TABLE_RET' '(' NAME ',' INT ',' INT ',' INT maybe_vec ')'
- { retInfo $3 $5 $7 $9 $10 }
-
-maybe_vec :: { [CmmLit] }
- : {- empty -} { [] }
- | ',' NAME maybe_vec { CmmLabel (mkRtsCodeLabelFS $2) : $3 }
+ | 'INFO_TABLE_RET' '(' NAME ',' INT ',' INT ',' INT ')'
+ { retInfo $3 $5 $7 $9 }
body :: { ExtCode }
: {- empty -} { return () }
( FSLIT("GET_FUN_INFO"), \ [x] -> funInfoTable (closureInfoPtr x) ),
( FSLIT("INFO_TYPE"), \ [x] -> infoTableClosureType x ),
( FSLIT("INFO_PTRS"), \ [x] -> infoTablePtrs x ),
- ( FSLIT("INFO_NPTRS"), \ [x] -> infoTableNonPtrs x ),
- ( FSLIT("RET_VEC"), \ [info, conZ] -> retVec info conZ )
+ ( FSLIT("INFO_NPTRS"), \ [x] -> infoTableNonPtrs x )
]
-- we understand a subset of C-- primitives:
stmts <- getCgStmtsEC ec
code (forkCgStmts stmts)
-retInfo name size live_bits cl_type vector = do
+retInfo name size live_bits cl_type = do
let liveness = smallLiveness (fromIntegral size) (fromIntegral live_bits)
info_lbl = mkRtsRetInfoLabelFS name
(info1,info2) = mkRetInfoTable info_lbl liveness NoC_SRT
- (fromIntegral cl_type) vector
+ (fromIntegral cl_type)
return (info_lbl, info1, info2)
stdInfo name ptrs nptrs srt_bitmap cl_type desc_str ty_str =
constructSlowCall, slowArgs, slowCallPattern,
-- Returns
- CtrlReturnConvention(..),
- ctrlReturnConvAlg,
dataReturnConvPrim,
getSequelAmode
) where
import Maybes
import Id
import Name
-import TyCon
import Bitmap
import Util
import StaticFlags
stg_ap_pat = mkRtsApFastLabel arg_pat
(arg_pat, these, rest) = matchSlowPattern amodes
-enterRtsRetLabel arg_pat
- | tablesNextToCode = mkRtsRetInfoLabel arg_pat
- | otherwise = mkRtsRetLabel arg_pat
-
-- | 'slowArgs' takes a list of function arguments and prepares them for
-- pushing on the stack for "extra" arguments to a function which requires
-- fewer arguments than we currently have.
--
-------------------------------------------------------------------------
--- A @CtrlReturnConvention@ says how {\em control} is returned.
-
-data CtrlReturnConvention
- = VectoredReturn Int -- size of the vector table (family size)
- | UnvectoredReturn Int -- family size
-
-ctrlReturnConvAlg :: TyCon -> CtrlReturnConvention
-ctrlReturnConvAlg tycon
- = case (tyConFamilySize tycon) of
- size -> -- we're supposed to know...
- if (size > (1::Int) && size <= mAX_FAMILY_SIZE_FOR_VEC_RETURNS) then
- VectoredReturn size
- else
- UnvectoredReturn size
- -- NB: unvectored returns Include size 0 (no constructors), so that
- -- the following perverse code compiles (it crashed GHC in 5.02)
- -- data T1
- -- data T2 = T2 !T1 Int
- -- The only value of type T1 is bottom, which never returns anyway.
-
dataReturnConvPrim :: CgRep -> CmmReg
dataReturnConvPrim PtrArg = CmmGlobal (VanillaReg 1)
dataReturnConvPrim NonPtrArg = CmmGlobal (VanillaReg 1)
-- getSequelAmode returns an amode which refers to an info table. The info
--- table will always be of the RET(_VEC)?_(BIG|SMALL) kind. We're careful
+-- table will always be of the RET_(BIG|SMALL) kind. We're careful
-- not to handle real code pointers, just in case we're compiling for
-- an unregisterised/untailcallish architecture, where info pointers and
-- code pointers aren't the same.
OnStack -> do { sp_rel <- getSpRelOffset virt_sp
; returnFC (CmmLoad sp_rel wordRep) }
- UpdateCode -> returnFC (CmmLit (CmmLabel mkUpdInfoLabel))
- CaseAlts lbl _ _ True -> returnFC (CmmLit (CmmLabel mkSeqInfoLabel))
- CaseAlts lbl _ _ False -> returnFC (CmmLit (CmmLabel lbl))
+ UpdateCode -> returnFC (CmmLit (CmmLabel mkUpdInfoLabel))
+ CaseAlts lbl _ _ -> returnFC (CmmLit (CmmLabel lbl))
}
-------------------------------------------------------------------------
(do { deAllocStackTop retAddrSizeW
; cgEvalAlts maybe_cc_slot bndr srt alt_type alts })
- ; setEndOfBlockInfo (maybeReserveSeqFrame alt_type scrut_eob_info)
+ ; setEndOfBlockInfo scrut_eob_info
(performTailCall fun_info arg_amodes save_assts) }
\end{code}
(do { deAllocStackTop retAddrSizeW
; cgEvalAlts maybe_cc_slot bndr srt alt_type alts })
- ; setEndOfBlockInfo (maybeReserveSeqFrame alt_type scrut_eob_info)
- (cgExpr expr)
+ ; setEndOfBlockInfo scrut_eob_info (cgExpr expr)
}
\end{code}
follow the layout of closures when we're profiling. The CCS could be
anywhere within the record).
-\begin{code}
-maybeReserveSeqFrame PolyAlt (EndOfBlockInfo args_sp (CaseAlts amode stuff bndr _))
- = EndOfBlockInfo (args_sp + retAddrSizeW) (CaseAlts amode stuff bndr True)
-maybeReserveSeqFrame other scrut_eob_info = scrut_eob_info
-\end{code}
-
-
%************************************************************************
%* *
Inline primops
; restoreCurrentCostCentre cc_slot True
; cgPrimAlts GCMayHappen alt_type reg alts }
- ; lbl <- emitDirectReturnTarget (idName bndr) abs_c srt
- ; returnFC (CaseAlts lbl Nothing bndr False) }
+ ; lbl <- emitReturnTarget (idName bndr) abs_c srt
+ ; returnFC (CaseAlts lbl Nothing bndr) }
cgEvalAlts cc_slot bndr srt (UbxTupAlt _) [(con,args,_,rhs)]
= -- Unboxed tuple case
ASSERT2( case con of { DataAlt _ -> True; other -> False },
text "cgEvalAlts: dodgy case of unboxed tuple type" )
do { -- forkAbsC for the RHS, so that the envt is
- -- not changed for the emitDirectReturn call
+ -- not changed for the emitReturn call
abs_c <- forkProc $ do
{ (live_regs, ptrs, nptrs, _) <- bindUnboxedTupleComponents args
-- Restore the CC *after* binding the tuple components,
-- and finally the code for the alternative
; unbxTupleHeapCheck live_regs ptrs nptrs noStmts
(cgExpr rhs) }
- ; lbl <- emitDirectReturnTarget (idName bndr) abs_c srt
- ; returnFC (CaseAlts lbl Nothing bndr False) }
+ ; lbl <- emitReturnTarget (idName bndr) abs_c srt
+ ; returnFC (CaseAlts lbl Nothing bndr) }
cgEvalAlts cc_slot bndr srt alt_type alts
= -- Algebraic and polymorphic case
; (alts, mb_deflt) <- cgAlgAlts GCMayHappen cc_slot alt_type alts
; (lbl, branches) <- emitAlgReturnTarget (idName bndr)
- alts mb_deflt srt ret_conv
+ alts mb_deflt srt fam_sz
- ; returnFC (CaseAlts lbl branches bndr False) }
+ ; returnFC (CaseAlts lbl branches bndr) }
where
- ret_conv = case alt_type of
- AlgAlt tc -> ctrlReturnConvAlg tc
- PolyAlt -> UnvectoredReturn 0
+ fam_sz = case alt_type of
+ AlgAlt tc -> tyConFamilySize tc
+ PolyAlt -> 0
\end{code}
= ASSERT( amodes `lengthIs` dataConRepArity con )
do { EndOfBlockInfo _ sequel <- getEndOfBlockInfo
; case sequel of
- CaseAlts _ (Just (alts, deflt_lbl)) bndr _
+ CaseAlts _ (Just (alts, deflt_lbl)) bndr
-> -- Ho! We know the constructor so we can
-- go straight to the right alternative
case assocMaybe alts (dataConTagZ con) of {
other_sequel -- The usual case
| isUnboxedTupleCon con -> returnUnboxedTuple amodes
- | otherwise -> build_it_then (emitKnownConReturnCode con)
+ | otherwise -> build_it_then emitReturnInstr
}
where
jump_to lbl = stmtC (CmmJump (CmmLit lbl) [])
body_code = do {
-- NB: We don't set CC when entering data (WDP 94/06)
tickyReturnOldCon (length arg_things)
- ; performReturn (emitKnownConReturnCode data_con) }
+ ; performReturn emitReturnInstr }
-- noStmts: Ptr to thing already in Node
; whenC (not (isNullaryRepDataCon data_con))
-- Dynamic-Closure first, to reduce forward references
; emit_info static_cl_info tickyEnterStaticCon }
-
- where
\end{code}
-- so save in a temp if non-trivial
; this_pkg <- getThisPackage
; stmtC (CmmAssign nodeReg (tagToClosure this_pkg tycon amode'))
- ; performReturn (emitAlgReturnCode tycon amode') }
+ ; performReturn emitReturnInstr }
where
-- If you're reading this code in the attempt to figure
-- out why the compiler panic'ed here, it is probably because
| ReturnsPrim VoidRep <- result_info
= do cgPrimOp [] primop args emptyVarSet
- performReturn emitDirectReturnInstr
+ performReturn emitReturnInstr
| ReturnsPrim rep <- result_info
= do cgPrimOp [dataReturnConvPrim (primRepToCgRep rep)]
primop args emptyVarSet
- performReturn emitDirectReturnInstr
+ performReturn emitReturnInstr
| ReturnsAlg tycon <- result_info, isUnboxedTupleTyCon tycon
= do (reps, regs, _hints) <- newUnboxedTupleRegs res_ty
this_pkg <- getThisPackage
cgPrimOp [tag_reg] primop args emptyVarSet
stmtC (CmmAssign nodeReg (tagToClosure this_pkg tycon (CmmReg tag_reg)))
- performReturn (emitAlgReturnCode tycon (CmmReg tag_reg))
+ performReturn emitReturnInstr
where
result_info = getPrimOpResultInfo primop
\end{code}
emitInfoTableAndCode,
dataConTagZ,
getSRTInfo,
- emitDirectReturnTarget, emitAlgReturnTarget,
- emitDirectReturnInstr, emitVectoredReturnInstr,
+ emitReturnTarget, emitAlgReturnTarget,
+ emitReturnInstr,
mkRetInfoTable,
mkStdInfoTable,
stdInfoTableSizeB,
getConstrTag,
infoTable, infoTableClosureType,
infoTablePtrs, infoTableNonPtrs,
- funInfoTable,
- retVec
+ funInfoTable
) where
import Name
import DataCon
import Unique
-import DynFlags
import StaticFlags
-import ListSetOps
import Maybes
import Constants
--
-- Tables next to code:
--
--- <reversed vector table>
-- <srt slot>
-- <standard info table>
-- ret-addr --> <entry code (if any)>
-- ret-addr --> <ptr to entry code>
-- <standard info table>
-- <srt slot>
--- <forward vector table>
--
--- * The vector table is only present for vectored returns
---
--- * The SRT slot is only there if either
--- (a) there is SRT info to record, OR
--- (b) if the return is vectored
--- The latter (b) is necessary so that the vector is in a
--- predictable place
-
-vectorSlot :: CmmExpr -> CmmExpr -> CmmExpr
--- Get the vector slot from the info pointer
-vectorSlot info_amode zero_indexed_tag
- | tablesNextToCode
- = cmmOffsetExprW (cmmOffsetW info_amode (- (stdInfoTableSizeW + 2)))
- (cmmNegate zero_indexed_tag)
- -- The "2" is one for the SRT slot, and one more
- -- to get to the first word of the vector
-
- | otherwise
- = cmmOffsetExprW (cmmOffsetW info_amode (stdInfoTableSizeW + 2))
- zero_indexed_tag
- -- The "2" is one for the entry-code slot and one for the SRT slot
-
-retVec :: CmmExpr -> CmmExpr -> CmmExpr
--- Get a return vector from the info pointer
-retVec info_amode zero_indexed_tag
- = let slot = vectorSlot info_amode zero_indexed_tag
- table_slot = CmmLoad slot wordRep
-#if defined(x86_64_TARGET_ARCH)
- offset_slot = CmmMachOp (MO_S_Conv I32 I64) [CmmLoad slot I32]
- -- offsets are 32-bits on x86-64, due to the inability of
- -- the tools to handle 64-bit PC-relative relocations. See also
- -- PprMach.pprDataItem, and InfoTables.h:OFFSET_FIELD().
-#else
- offset_slot = table_slot
-#endif
- in if tablesNextToCode
- then CmmMachOp (MO_Add wordRep) [offset_slot, info_amode]
- else table_slot
+-- * The SRT slot is only there if there is SRT info to record
emitReturnTarget
:: Name
-> CgStmts -- The direct-return code (if any)
- -- (empty for vectored returns)
- -> [CmmLit] -- Vector of return points
- -- (empty for non-vectored returns)
-> SRT
-> FCode CLabel
-emitReturnTarget name stmts vector srt
+emitReturnTarget name stmts srt
= do { live_slots <- getLiveStackSlots
; liveness <- buildContLiveness name live_slots
; srt_info <- getSRTInfo name srt
; let
- cl_type = case (null vector, isBigLiveness liveness) of
- (True, True) -> rET_BIG
- (True, False) -> rET_SMALL
- (False, True) -> rET_VEC_BIG
- (False, False) -> rET_VEC_SMALL
+ cl_type | isBigLiveness liveness = rET_BIG
+ | otherwise = rET_SMALL
(std_info, extra_bits) =
- mkRetInfoTable info_lbl liveness srt_info cl_type vector
+ mkRetInfoTable info_lbl liveness srt_info cl_type
; blks <- cgStmtsToBlocks stmts
; emitInfoTableAndCode info_lbl std_info extra_bits args blks
-> Liveness -- liveness
-> C_SRT -- SRT Info
-> Int -- type (eg. rET_SMALL)
- -> [CmmLit] -- vector
-> ([CmmLit],[CmmLit])
-mkRetInfoTable info_lbl liveness srt_info cl_type vector
- = (std_info, extra_bits)
+mkRetInfoTable info_lbl liveness srt_info cl_type
+ = (std_info, srt_slot)
where
(srt_label, srt_len) = srtLabelAndLength srt_info info_lbl
- srt_slot | need_srt = [srt_label]
- | otherwise = []
-
- need_srt = needsSRT srt_info || not (null vector)
- -- If there's a vector table then we must allocate
- -- an SRT slot, so that the vector table is at a
- -- known offset from the info pointer
+ srt_slot | needsSRT srt_info = [srt_label]
+ | otherwise = []
liveness_lit = makeRelativeRefTo info_lbl $ mkLivenessCLit liveness
std_info = mkStdInfoTable zeroCLit zeroCLit cl_type srt_len liveness_lit
- extra_bits = srt_slot ++ map (makeRelativeRefTo info_lbl) vector
-
-
-emitDirectReturnTarget
- :: Name
- -> CgStmts -- The direct-return code
- -> SRT
- -> FCode CLabel
-emitDirectReturnTarget name code srt
- = emitReturnTarget name code [] srt
emitAlgReturnTarget
:: Name -- Just for its unique
-> [(ConTagZ, CgStmts)] -- Tagged branches
-> Maybe CgStmts -- Default branch (if any)
-> SRT -- Continuation's SRT
- -> CtrlReturnConvention
+ -> Int -- family size
-> FCode (CLabel, SemiTaggingStuff)
-emitAlgReturnTarget name branches mb_deflt srt ret_conv
- = case ret_conv of
- UnvectoredReturn fam_sz -> do
- { blks <- getCgStmts $
+emitAlgReturnTarget name branches mb_deflt srt fam_sz
+ = do { blks <- getCgStmts $
emitSwitch tag_expr branches mb_deflt 0 (fam_sz - 1)
-- NB: tag_expr is zero-based
- ; lbl <- emitDirectReturnTarget name blks srt
+ ; lbl <- emitReturnTarget name blks srt
; return (lbl, Nothing) }
-- Nothing: the internal branches in the switch don't have
-- global labels, so we can't use them at the 'call site'
-
- VectoredReturn fam_sz -> do
- { let tagged_lbls = zip (map fst branches) $
- map (CmmLabel . mkAltLabel uniq . fst) branches
- deflt_lbl | isJust mb_deflt = CmmLabel $ mkDefaultLabel uniq
- | otherwise = mkIntCLit 0
- ; let vector = [ assocDefault deflt_lbl tagged_lbls i
- | i <- [0..fam_sz-1]]
- ; lbl <- emitReturnTarget name noCgStmts vector srt
- ; mapFCs emit_alt branches
- ; emit_deflt mb_deflt
- ; return (lbl, Just (tagged_lbls, deflt_lbl)) }
where
- uniq = getUnique name
tag_expr = getConstrTag (CmmReg nodeReg)
- emit_alt :: (Int, CgStmts) -> FCode (Int, CmmLit)
- -- Emit the code for the alternative as a top-level
- -- code block returning a label for it
- emit_alt (tag, stmts) = do { let lbl = mkAltLabel uniq tag
- ; blks <- cgStmtsToBlocks stmts
- ; emitProc [] lbl [] blks
- ; return (tag, CmmLabel lbl) }
-
- emit_deflt (Just stmts) = do { let lbl = mkDefaultLabel uniq
- ; blks <- cgStmtsToBlocks stmts
- ; emitProc [] lbl [] blks
- ; return (CmmLabel lbl) }
- emit_deflt Nothing = return (mkIntCLit 0)
- -- Nothing case: the simplifier might have eliminated a case
- -- so we may have e.g. case xs of
- -- [] -> e
- -- In that situation the default should never be taken,
- -- so we just use a NULL pointer
-
--------------------------------
-emitDirectReturnInstr :: Code
-emitDirectReturnInstr
+emitReturnInstr :: Code
+emitReturnInstr
= do { info_amode <- getSequelAmode
; stmtC (CmmJump (entryCode info_amode) []) }
-emitVectoredReturnInstr :: CmmExpr -- _Zero-indexed_ constructor tag
- -> Code
-emitVectoredReturnInstr zero_indexed_tag
- = do { info_amode <- getSequelAmode
- -- HACK! assign info_amode to a temp, because retVec
- -- uses it twice and the NCG doesn't have any CSE yet.
- -- Only do this for the NCG, because gcc is too stupid
- -- to optimise away the extra tmp (grrr).
- ; dflags <- getDynFlags
- ; x <- if hscTarget dflags == HscAsm
- then do z <- newTemp wordRep
- stmtC (CmmAssign z info_amode)
- return (CmmReg z)
- else
- return info_amode
- ; let target = retVec x zero_indexed_tag
- ; stmtC (CmmJump target []) }
-
-
-------------------------------------------------------------------------
--
-- Generating a standard info table
import CLabel
import ClosureInfo
import CostCentre
-import Id
import Var
import SMRep
import BasicTypes
-- Ignore the label that comes back from
-- mkRetDirectTarget. It must be conjured up elswhere
- ; emitDirectReturnTarget (idName bndr) abs_c srt
+ ; emitReturnTarget (idName bndr) abs_c srt
; return () })
; returnFC (bndr, letNoEscapeIdInfo bndr vSp lf_info) }
-- case this might be the label of a return vector
SemiTaggingStuff
Id -- The case binder, only used to see if it's dead
- Bool -- True <=> polymorphic, push a SEQ frame too
type SemiTaggingStuff
= Maybe -- Maybe[1] we don't have any semi-tagging stuff...
module CgTailCall (
cgTailCall, performTailCall,
performReturn, performPrimReturn,
- emitKnownConReturnCode, emitAlgReturnCode,
returnUnboxedTuple, ccallReturnUnboxedTuple,
pushUnboxedTuple,
tailCallPrimOp,
import CLabel
import Type
import Id
-import DataCon
import StgSyn
-import TyCon
import PrimOp
import Outputable
-- As with any return, Node must point to it.
ReturnIt -> do
{ emitSimultaneously (node_asst `plusStmts` pending_assts)
- ; doFinalJump sp False emitDirectReturnInstr }
+ ; doFinalJump sp False emitReturnInstr }
-- A real constructor. Don't bother entering it,
-- just do the right sort of return instead.
-- As with any return, Node must point to it.
ReturnCon con -> do
{ emitSimultaneously (node_asst `plusStmts` pending_assts)
- ; doFinalJump sp False (emitKnownConReturnCode con) }
+ ; doFinalJump sp False emitReturnInstr }
JumpToIt lbl -> do
{ emitSimultaneously (opt_node_asst `plusStmts` pending_assts)
-- and do the jump
; jump_code }
--- -----------------------------------------------------------------------------
+-- ----------------------------------------------------------------------------
-- A general return (just a special case of doFinalJump, above)
-performReturn :: Code -- The code to execute to actually do the return
+performReturn :: Code -- The code to execute to actually do the return
-> Code
performReturn finish_code
= do { EndOfBlockInfo args_sp sequel <- getEndOfBlockInfo
; doFinalJump args_sp False{-not a LNE-} finish_code }
--- -----------------------------------------------------------------------------
+-- ----------------------------------------------------------------------------
-- Primitive Returns
-- Just load the return value into the right register, and return.
performPrimReturn rep amode
= do { whenC (not (isVoidArg rep))
(stmtC (CmmAssign ret_reg amode))
- ; performReturn emitDirectReturnInstr }
+ ; performReturn emitReturnInstr }
where
ret_reg = dataReturnConvPrim rep
--- -----------------------------------------------------------------------------
--- Algebraic constructor returns
-
--- Constructor is built on the heap; Node is set.
--- All that remains is to do the right sort of jump.
-
-emitKnownConReturnCode :: DataCon -> Code
-emitKnownConReturnCode con
- = emitAlgReturnCode (dataConTyCon con)
- (CmmLit (mkIntCLit (dataConTagZ con)))
- -- emitAlgReturnCode requires zero-indexed tag
-
-emitAlgReturnCode :: TyCon -> CmmExpr -> Code
--- emitAlgReturnCode is used both by emitKnownConReturnCode,
--- and by by PrimOps that return enumerated types (i.e.
--- all the comparison operators).
-emitAlgReturnCode tycon tag
- = do { case ctrlReturnConvAlg tycon of
- VectoredReturn fam_sz -> do { tickyVectoredReturn fam_sz
- ; emitVectoredReturnInstr tag }
- UnvectoredReturn _ -> emitDirectReturnInstr
- }
-
-
-- ---------------------------------------------------------------------------
-- Unboxed tuple returns
; tickyUnboxedTupleReturn (length amodes)
; (final_sp, assts) <- pushUnboxedTuple args_sp amodes
; emitSimultaneously assts
- ; doFinalJump final_sp False{-not a LNE-} emitDirectReturnInstr }
+ ; doFinalJump final_sp False{-not a LNE-} emitReturnInstr }
pushUnboxedTuple :: VirtualSpOffset -- Sp at which to start pushing
-> [(CgRep, CmmExpr)] -- amodes of the components
pushReturnAddress :: EndOfBlockInfo -> Code
-pushReturnAddress (EndOfBlockInfo args_sp sequel@(CaseAlts lbl _ _ False))
+pushReturnAddress (EndOfBlockInfo args_sp sequel@(CaseAlts lbl _ _))
= do { sp_rel <- getSpRelOffset args_sp
; stmtC (CmmStore sp_rel (mkLblExpr lbl)) }
--- For a polymorphic case, we have two return addresses to push: the case
--- return, and stg_seq_frame_info which turns a possible vectored return
--- into a direct one.
-pushReturnAddress (EndOfBlockInfo args_sp sequel@(CaseAlts lbl _ _ True))
- = do { sp_rel <- getSpRelOffset (args_sp-1)
- ; stmtC (CmmStore sp_rel (mkLblExpr lbl))
- ; sp_rel <- getSpRelOffset args_sp
- ; stmtC (CmmStore sp_rel (CmmLit (CmmLabel mkSeqInfoLabel))) }
-
pushReturnAddress _ = nopC
-- -----------------------------------------------------------------------------
profHdrSize, thunkHdrSize,
smRepClosureType, smRepClosureTypeInt,
- rET_SMALL, rET_VEC_SMALL, rET_BIG, rET_VEC_BIG
+ rET_SMALL, rET_BIG
) where
#include "HsVersions.h"
-- We export these ones
rET_SMALL = (RET_SMALL :: Int)
-rET_VEC_SMALL = (RET_VEC_SMALL :: Int)
rET_BIG = (RET_BIG :: Int)
-rET_VEC_BIG = (RET_VEC_BIG :: Int)
\end{code}
A section of code-generator-related MAGIC CONSTANTS.
\begin{code}
-mAX_FAMILY_SIZE_FOR_VEC_RETURNS = (MAX_VECTORED_RTN::Int) -- pretty arbitrary
--- If you change this, you may need to change runtimes/standard/Update.lhc
-\end{code}
-
-\begin{code}
mAX_Vanilla_REG = (MAX_VANILLA_REG :: Int)
mAX_Float_REG = (MAX_FLOAT_REG :: Int)
mAX_Double_REG = (MAX_DOUBLE_REG :: Int)
import TysPrim
import TysWiredIn
import Util
-import Constants
import Outputable
import FastString
import OccName
`nlHsFunTy`
nlHsTyVar (getRdrName intPrimTyCon)
- lots_of_constructors = tyConFamilySize tycon > mAX_FAMILY_SIZE_FOR_VEC_RETURNS
+ lots_of_constructors = tyConFamilySize tycon > 8
+ -- was: mAX_FAMILY_SIZE_FOR_VEC_RETURNS
+ -- but we don't do vectored returns any more.
mk_stuff :: DataCon -> ([LPat RdrName], LHsExpr RdrName)
mk_stuff con = ([nlWildConPat con],
#define IND_STATIC 32
#define RET_BCO 33
#define RET_SMALL 34
-#define RET_VEC_SMALL 35
-#define RET_BIG 36
-#define RET_VEC_BIG 37
-#define RET_DYN 38
-#define RET_FUN 39
-#define UPDATE_FRAME 40
-#define CATCH_FRAME 41
-#define STOP_FRAME 42
-#define CAF_BLACKHOLE 43
-#define BLACKHOLE 44
-#define SE_BLACKHOLE 45
-#define SE_CAF_BLACKHOLE 46
-#define MVAR 47
-#define ARR_WORDS 48
-#define MUT_ARR_PTRS_CLEAN 49
-#define MUT_ARR_PTRS_DIRTY 50
-#define MUT_ARR_PTRS_FROZEN0 51
-#define MUT_ARR_PTRS_FROZEN 52
-#define MUT_VAR_CLEAN 53
-#define MUT_VAR_DIRTY 54
-#define WEAK 55
-#define STABLE_NAME 56
-#define TSO 57
-#define BLOCKED_FETCH 58
-#define FETCH_ME 59
-#define FETCH_ME_BQ 60
-#define RBH 61
-#define EVACUATED 62
-#define REMOTE_REF 63
-#define TVAR_WATCH_QUEUE 64
-#define INVARIANT_CHECK_QUEUE 65
-#define ATOMIC_INVARIANT 66
-#define TVAR 67
-#define TREC_CHUNK 68
-#define TREC_HEADER 69
-#define ATOMICALLY_FRAME 70
-#define CATCH_RETRY_FRAME 71
-#define CATCH_STM_FRAME 72
-#define N_CLOSURE_TYPES 73
+#define RET_BIG 35
+#define RET_DYN 36
+#define RET_FUN 37
+#define UPDATE_FRAME 38
+#define CATCH_FRAME 39
+#define STOP_FRAME 40
+#define CAF_BLACKHOLE 41
+#define BLACKHOLE 42
+#define SE_BLACKHOLE 43
+#define SE_CAF_BLACKHOLE 44
+#define MVAR 45
+#define ARR_WORDS 46
+#define MUT_ARR_PTRS_CLEAN 47
+#define MUT_ARR_PTRS_DIRTY 48
+#define MUT_ARR_PTRS_FROZEN0 49
+#define MUT_ARR_PTRS_FROZEN 50
+#define MUT_VAR_CLEAN 51
+#define MUT_VAR_DIRTY 52
+#define WEAK 53
+#define STABLE_NAME 54
+#define TSO 55
+#define BLOCKED_FETCH 56
+#define FETCH_ME 57
+#define FETCH_ME_BQ 58
+#define RBH 59
+#define EVACUATED 60
+#define REMOTE_REF 61
+#define TVAR_WATCH_QUEUE 62
+#define INVARIANT_CHECK_QUEUE 63
+#define ATOMIC_INVARIANT 64
+#define TVAR 65
+#define TREC_CHUNK 66
+#define TREC_HEADER 67
+#define ATOMICALLY_FRAME 68
+#define CATCH_RETRY_FRAME 69
+#define CATCH_STM_FRAME 70
+#define N_CLOSURE_TYPES 71
#endif /* CLOSURETYPES_H */
#define MAX_LONG_REG 1
/* -----------------------------------------------------------------------------
- * Maximum number of constructors in a data type for direct-returns.
- *
- * NB. There are various places that assume the value of this
- * constant, such as the polymorphic return frames for updates
- * (stg_upd_frame_info) and catch frames (stg_catch_frame_info).
- * -------------------------------------------------------------------------- */
-
-#define MAX_VECTORED_RTN 8
-
-/* -----------------------------------------------------------------------------
Semi-Tagging constants
Old Comments about this stuff:
#else
StgInfoTable i;
StgSRT *srt; /* pointer to the SRT table */
- StgFunPtr vector[FLEXIBLE_ARRAY];
#endif
} StgRetInfoTable;
RTS_RET_INFO(stg_upd_frame_info);
RTS_RET_INFO(stg_marked_upd_frame_info);
RTS_RET_INFO(stg_noupd_frame_info);
-RTS_RET_INFO(stg_seq_frame_info);
RTS_RET_INFO(stg_catch_frame_info);
RTS_RET_INFO(stg_catch_retry_frame_info);
RTS_RET_INFO(stg_atomically_frame_info);
RTS_ENTRY(stg_upd_frame_ret);
RTS_ENTRY(stg_marked_upd_frame_ret);
-RTS_ENTRY(stg_seq_frame_ret);
/* Entry code for constructors created by the bytecode interpreter */
RTS_FUN(stg_interp_constr_entry);
-RTS_FUN(stg_interp_constr1_entry);
-RTS_FUN(stg_interp_constr2_entry);
-RTS_FUN(stg_interp_constr3_entry);
-RTS_FUN(stg_interp_constr4_entry);
-RTS_FUN(stg_interp_constr5_entry);
-RTS_FUN(stg_interp_constr6_entry);
-RTS_FUN(stg_interp_constr7_entry);
-RTS_FUN(stg_interp_constr8_entry);
/* Magic glue code for when compiled code returns a value in R1/F1/D1
or a VoidRep to the interpreter. */
RTS_INFO(stg_IND_info);
RTS_INFO(stg_IND_direct_info);
-RTS_INFO(stg_IND_0_info);
-RTS_INFO(stg_IND_1_info);
-RTS_INFO(stg_IND_2_info);
-RTS_INFO(stg_IND_3_info);
-RTS_INFO(stg_IND_4_info);
-RTS_INFO(stg_IND_5_info);
-RTS_INFO(stg_IND_6_info);
-RTS_INFO(stg_IND_7_info);
RTS_INFO(stg_IND_STATIC_info);
RTS_INFO(stg_IND_PERM_info);
RTS_INFO(stg_IND_OLDGEN_info);
RTS_ENTRY(stg_IND_entry);
RTS_ENTRY(stg_IND_direct_entry);
-RTS_ENTRY(stg_IND_0_entry);
-RTS_ENTRY(stg_IND_1_entry);
-RTS_ENTRY(stg_IND_2_entry);
-RTS_ENTRY(stg_IND_3_entry);
-RTS_ENTRY(stg_IND_4_entry);
-RTS_ENTRY(stg_IND_5_entry);
-RTS_ENTRY(stg_IND_6_entry);
-RTS_ENTRY(stg_IND_7_entry);
RTS_ENTRY(stg_IND_STATIC_entry);
RTS_ENTRY(stg_IND_PERM_entry);
RTS_ENTRY(stg_IND_OLDGEN_entry);
return sizeofW(StgRetFun) + ((StgRetFun *)frame)->size;
case RET_BIG:
- case RET_VEC_BIG:
return 1 + GET_LARGE_BITMAP(&info->i)->size;
case RET_BCO:
EXTERN StgInt RET_OLD_ctr INIT(0);
EXTERN StgInt RET_UNBOXED_TUP_ctr INIT(0);
-EXTERN StgInt VEC_RETURN_ctr INIT(0);
-
EXTERN StgInt RET_SEMI_loads_avoided INIT(0);
/* End of counter declarations. */
/* IND_STATIC = */ ( _NS|_STA| _IND ),
/* RET_BCO = */ ( _BTM ),
/* RET_SMALL = */ ( _BTM| _SRT ),
-/* RET_VEC_SMALL = */ ( _BTM| _SRT ),
/* RET_BIG = */ ( _SRT ),
-/* RET_VEC_BIG = */ ( _SRT ),
/* RET_DYN = */ ( _SRT ),
/* RET_FUN = */ ( 0 ),
/* UPDATE_FRAME = */ ( _BTM ),
/* CATCH_STM_FRAME = */ ( _BTM )
};
-#if N_CLOSURE_TYPES != 73
+#if N_CLOSURE_TYPES != 71
#error Closure types changed: update ClosureFlags.c!
#endif
-------------------------------------------------------------------------- */
#ifdef REG_R1
-#define CATCH_FRAME_ENTRY_TEMPLATE(label,ret) \
- label \
- { \
- Sp = Sp + SIZEOF_StgCatchFrame; \
- jump ret; \
- }
-#else
-#define CATCH_FRAME_ENTRY_TEMPLATE(label,ret) \
- label \
- { \
- W_ rval; \
- rval = Sp(0); \
- Sp = Sp + SIZEOF_StgCatchFrame; \
- Sp(0) = rval; \
- jump ret; \
- }
-#endif
-
-#ifdef REG_R1
#define SP_OFF 0
#else
#define SP_OFF 1
#endif
-CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_0_ret,%RET_VEC(Sp(SP_OFF),0))
-CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_1_ret,%RET_VEC(Sp(SP_OFF),1))
-CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_2_ret,%RET_VEC(Sp(SP_OFF),2))
-CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_3_ret,%RET_VEC(Sp(SP_OFF),3))
-CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_4_ret,%RET_VEC(Sp(SP_OFF),4))
-CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_5_ret,%RET_VEC(Sp(SP_OFF),5))
-CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_6_ret,%RET_VEC(Sp(SP_OFF),6))
-CATCH_FRAME_ENTRY_TEMPLATE(stg_catch_frame_7_ret,%RET_VEC(Sp(SP_OFF),7))
-
-#if MAX_VECTORED_RTN > 8
-#error MAX_VECTORED_RTN has changed: please modify stg_catch_frame too.
-#endif
-
#if defined(PROFILING)
#define CATCH_FRAME_BITMAP 7
#define CATCH_FRAME_WORDS 4
INFO_TABLE_RET(stg_catch_frame,
CATCH_FRAME_WORDS, CATCH_FRAME_BITMAP,
- CATCH_FRAME,
- stg_catch_frame_0_ret,
- stg_catch_frame_1_ret,
- stg_catch_frame_2_ret,
- stg_catch_frame_3_ret,
- stg_catch_frame_4_ret,
- stg_catch_frame_5_ret,
- stg_catch_frame_6_ret,
- stg_catch_frame_7_ret)
-CATCH_FRAME_ENTRY_TEMPLATE(,%ENTRY_CODE(Sp(SP_OFF)))
+ CATCH_FRAME)
+#ifdef REG_R1
+ {
+ Sp = Sp + SIZEOF_StgCatchFrame;
+ jump Sp(SP_OFF);
+ }
+#else
+ {
+ W_ rval;
+ rval = Sp(0);
+ Sp = Sp + SIZEOF_StgCatchFrame;
+ Sp(0) = rval;
+ jump Sp(SP_OFF);
+ }
+#endif
/* -----------------------------------------------------------------------------
* The catch infotable
case RET_DYN:
case RET_BCO:
case RET_SMALL:
- case RET_VEC_SMALL:
case RET_BIG:
- case RET_VEC_BIG:
// others
case BLOCKED_FETCH:
case FETCH_ME:
SymX(stg_block_1) \
SymX(stg_block_takemvar) \
SymX(stg_block_putmvar) \
- SymX(stg_seq_frame_info) \
MAIN_CAP_SYM \
SymX(MallocFailHook) \
SymX(OnExitHook) \
SymX(xorIntegerzh_fast) \
SymX(yieldzh_fast) \
SymX(stg_interp_constr_entry) \
- SymX(stg_interp_constr1_entry) \
- SymX(stg_interp_constr2_entry) \
- SymX(stg_interp_constr3_entry) \
- SymX(stg_interp_constr4_entry) \
- SymX(stg_interp_constr5_entry) \
- SymX(stg_interp_constr6_entry) \
- SymX(stg_interp_constr7_entry) \
- SymX(stg_interp_constr8_entry) \
SymX(allocateExec) \
SymX(freeExec) \
SymX(getAllocations) \
// Catch retry frame ------------------------------------------------------------
-#define CATCH_RETRY_FRAME_ERROR(label) \
- label { foreign "C" barf("catch_retry_frame incorrectly entered!"); }
-
-CATCH_RETRY_FRAME_ERROR(stg_catch_retry_frame_0_ret)
-CATCH_RETRY_FRAME_ERROR(stg_catch_retry_frame_1_ret)
-CATCH_RETRY_FRAME_ERROR(stg_catch_retry_frame_2_ret)
-CATCH_RETRY_FRAME_ERROR(stg_catch_retry_frame_3_ret)
-CATCH_RETRY_FRAME_ERROR(stg_catch_retry_frame_4_ret)
-CATCH_RETRY_FRAME_ERROR(stg_catch_retry_frame_5_ret)
-CATCH_RETRY_FRAME_ERROR(stg_catch_retry_frame_6_ret)
-CATCH_RETRY_FRAME_ERROR(stg_catch_retry_frame_7_ret)
-
-#if MAX_VECTORED_RTN > 8
-#error MAX_VECTORED_RTN has changed: please modify stg_catch_retry_frame too.
-#endif
-
#if defined(PROFILING)
#define CATCH_RETRY_FRAME_BITMAP 7
#define CATCH_RETRY_FRAME_WORDS 5
INFO_TABLE_RET(stg_catch_retry_frame,
CATCH_RETRY_FRAME_WORDS, CATCH_RETRY_FRAME_BITMAP,
- CATCH_RETRY_FRAME,
- stg_catch_retry_frame_0_ret,
- stg_catch_retry_frame_1_ret,
- stg_catch_retry_frame_2_ret,
- stg_catch_retry_frame_3_ret,
- stg_catch_retry_frame_4_ret,
- stg_catch_retry_frame_5_ret,
- stg_catch_retry_frame_6_ret,
- stg_catch_retry_frame_7_ret)
+ CATCH_RETRY_FRAME)
{
W_ r, frame, trec, outer;
IF_NOT_REG_R1(W_ rval; rval = Sp(0); Sp_adj(1); )
}
-// Atomically frame -------------------------------------------------------------
-
-
-#define ATOMICALLY_FRAME_ERROR(label) \
- label { foreign "C" barf("atomically_frame incorrectly entered!"); }
-
-ATOMICALLY_FRAME_ERROR(stg_atomically_frame_0_ret)
-ATOMICALLY_FRAME_ERROR(stg_atomically_frame_1_ret)
-ATOMICALLY_FRAME_ERROR(stg_atomically_frame_2_ret)
-ATOMICALLY_FRAME_ERROR(stg_atomically_frame_3_ret)
-ATOMICALLY_FRAME_ERROR(stg_atomically_frame_4_ret)
-ATOMICALLY_FRAME_ERROR(stg_atomically_frame_5_ret)
-ATOMICALLY_FRAME_ERROR(stg_atomically_frame_6_ret)
-ATOMICALLY_FRAME_ERROR(stg_atomically_frame_7_ret)
-
-#if MAX_VECTORED_RTN > 8
-#error MAX_VECTORED_RTN has changed: please modify stg_atomically_frame too.
-#endif
+// Atomically frame ------------------------------------------------------------
#if defined(PROFILING)
#define ATOMICALLY_FRAME_BITMAP 3
#define ATOMICALLY_FRAME_WORDS 2
#endif
-
INFO_TABLE_RET(stg_atomically_frame,
ATOMICALLY_FRAME_WORDS, ATOMICALLY_FRAME_BITMAP,
- ATOMICALLY_FRAME,
- stg_atomically_frame_0_ret,
- stg_atomically_frame_1_ret,
- stg_atomically_frame_2_ret,
- stg_atomically_frame_3_ret,
- stg_atomically_frame_4_ret,
- stg_atomically_frame_5_ret,
- stg_atomically_frame_6_ret,
- stg_atomically_frame_7_ret)
+ ATOMICALLY_FRAME)
{
W_ frame, trec, valid, next_invariant, q, outer;
IF_NOT_REG_R1(W_ rval; rval = Sp(0); Sp_adj(1); )
INFO_TABLE_RET(stg_atomically_waiting_frame,
ATOMICALLY_FRAME_WORDS, ATOMICALLY_FRAME_BITMAP,
- ATOMICALLY_FRAME,
- stg_atomically_frame_0_ret,
- stg_atomically_frame_1_ret,
- stg_atomically_frame_2_ret,
- stg_atomically_frame_3_ret,
- stg_atomically_frame_4_ret,
- stg_atomically_frame_5_ret,
- stg_atomically_frame_6_ret,
- stg_atomically_frame_7_ret)
+ ATOMICALLY_FRAME)
{
W_ frame, trec, valid;
IF_NOT_REG_R1(W_ rval; rval = Sp(0); Sp_adj(1); )
// STM catch frame --------------------------------------------------------------
-#define CATCH_STM_FRAME_ENTRY_TEMPLATE(label,ret) \
- label \
- { \
- IF_NOT_REG_R1(W_ rval; rval = Sp(0); Sp_adj(1); ) \
- W_ r, frame, trec, outer; \
- frame = Sp; \
- trec = StgTSO_trec(CurrentTSO); \
- "ptr" outer = foreign "C" stmGetEnclosingTRec(trec "ptr") []; \
- r = foreign "C" stmCommitNestedTransaction(MyCapability() "ptr", trec "ptr") []; \
- if (r != 0) { \
- /* Commit succeeded */ \
- StgTSO_trec(CurrentTSO) = outer; \
- Sp = Sp + SIZEOF_StgCatchSTMFrame; \
- IF_NOT_REG_R1(Sp_adj(-1); Sp(0) = rval;) \
- jump ret; \
- } else { \
- /* Commit failed */ \
- W_ new_trec; \
- "ptr" new_trec = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") []; \
- StgTSO_trec(CurrentTSO) = new_trec; \
- R1 = StgCatchSTMFrame_code(frame); \
- jump stg_ap_v_fast; \
- } \
- }
-
#ifdef REG_R1
#define SP_OFF 0
#else
#define SP_OFF 1
#endif
-CATCH_STM_FRAME_ENTRY_TEMPLATE(stg_catch_stm_frame_0_ret,%RET_VEC(Sp(SP_OFF),0))
-CATCH_STM_FRAME_ENTRY_TEMPLATE(stg_catch_stm_frame_1_ret,%RET_VEC(Sp(SP_OFF),1))
-CATCH_STM_FRAME_ENTRY_TEMPLATE(stg_catch_stm_frame_2_ret,%RET_VEC(Sp(SP_OFF),2))
-CATCH_STM_FRAME_ENTRY_TEMPLATE(stg_catch_stm_frame_3_ret,%RET_VEC(Sp(SP_OFF),3))
-CATCH_STM_FRAME_ENTRY_TEMPLATE(stg_catch_stm_frame_4_ret,%RET_VEC(Sp(SP_OFF),4))
-CATCH_STM_FRAME_ENTRY_TEMPLATE(stg_catch_stm_frame_5_ret,%RET_VEC(Sp(SP_OFF),5))
-CATCH_STM_FRAME_ENTRY_TEMPLATE(stg_catch_stm_frame_6_ret,%RET_VEC(Sp(SP_OFF),6))
-CATCH_STM_FRAME_ENTRY_TEMPLATE(stg_catch_stm_frame_7_ret,%RET_VEC(Sp(SP_OFF),7))
-
-#if MAX_VECTORED_RTN > 8
-#error MAX_VECTORED_RTN has changed: please modify stg_catch_stm_frame too.
-#endif
-
#if defined(PROFILING)
#define CATCH_STM_FRAME_BITMAP 3
#define CATCH_STM_FRAME_WORDS 4
INFO_TABLE_RET(stg_catch_stm_frame,
CATCH_STM_FRAME_WORDS, CATCH_STM_FRAME_BITMAP,
- CATCH_STM_FRAME,
- stg_catch_stm_frame_0_ret,
- stg_catch_stm_frame_1_ret,
- stg_catch_stm_frame_2_ret,
- stg_catch_stm_frame_3_ret,
- stg_catch_stm_frame_4_ret,
- stg_catch_stm_frame_5_ret,
- stg_catch_stm_frame_6_ret,
- stg_catch_stm_frame_7_ret)
-CATCH_STM_FRAME_ENTRY_TEMPLATE(,%ENTRY_CODE(Sp(SP_OFF)))
+ CATCH_STM_FRAME)
+ {
+ IF_NOT_REG_R1(W_ rval; rval = Sp(0); Sp_adj(1); )
+ W_ r, frame, trec, outer;
+ frame = Sp;
+ trec = StgTSO_trec(CurrentTSO);
+ "ptr" outer = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
+ r = foreign "C" stmCommitNestedTransaction(MyCapability() "ptr", trec "ptr") [];
+ if (r != 0) {
+ /* Commit succeeded */
+ StgTSO_trec(CurrentTSO) = outer;
+ Sp = Sp + SIZEOF_StgCatchSTMFrame;
+ IF_NOT_REG_R1(Sp_adj(-1); Sp(0) = rval;)
+	jump %ENTRY_CODE(Sp(SP_OFF));
+ } else {
+ /* Commit failed */
+ W_ new_trec;
+ "ptr" new_trec = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
+ StgTSO_trec(CurrentTSO) = new_trec;
+ R1 = StgCatchSTMFrame_code(frame);
+ jump stg_ap_v_fast;
+ }
+ }
// Primop definition ------------------------------------------------------------
/* Cannot happen -- use default case.
case RET_BCO:
case RET_SMALL:
- case RET_VEC_SMALL:
case RET_BIG:
- case RET_VEC_BIG:
case RET_DYN:
case RET_FUN:
*/
}
case RET_SMALL:
- case RET_VEC_SMALL:
debugBelch("RET_SMALL (%p)\n", info);
bitmap = info->layout.bitmap;
printSmallBitmap(spBottom, sp+1,
}
case RET_BIG:
- case RET_VEC_BIG:
barf("todo");
case RET_FUN:
"IND_STATIC",
"RET_BCO",
"RET_SMALL",
- "RET_VEC_SMALL",
"RET_BIG",
- "RET_VEC_BIG",
"RET_DYN",
"RET_FUN",
"UPDATE_FRAME",
, "RET_BCO"
, "RET_SMALL"
- , "RET_VEC_SMALL"
, "RET_BIG"
- , "RET_VEC_BIG"
, "RET_DYN"
, "UPDATE_FRAME"
, "CATCH_FRAME"
case RET_DYN:
case RET_BCO:
case RET_SMALL:
- case RET_VEC_SMALL:
case RET_BIG:
- case RET_VEC_BIG:
// invalid objects
case IND:
case BLOCKED_FETCH:
case STOP_FRAME:
case RET_BCO:
case RET_SMALL:
- case RET_VEC_SMALL:
case RET_BIG:
- case RET_VEC_BIG:
// invalid objects
case IND:
case BLOCKED_FETCH:
case RET_DYN:
case RET_BCO:
case RET_SMALL:
- case RET_VEC_SMALL:
case RET_BIG:
- case RET_VEC_BIG:
// other cases
case IND:
case BLOCKED_FETCH:
case CATCH_RETRY_FRAME:
case ATOMICALLY_FRAME:
case RET_SMALL:
- case RET_VEC_SMALL:
bitmap = BITMAP_BITS(info->i.layout.bitmap);
size = BITMAP_SIZE(info->i.layout.bitmap);
p++;
// large bitmap (> 32 entries, or > 64 on a 64-bit machine)
case RET_BIG:
- case RET_VEC_BIG:
size = GET_LARGE_BITMAP(&info->i)->size;
p++;
retain_large_bitmap(p, GET_LARGE_BITMAP(&info->i),
// small bitmap cases (<= 32 entries)
case STOP_FRAME:
case RET_SMALL:
- case RET_VEC_SMALL:
size = BITMAP_SIZE(info->i.layout.bitmap);
checkSmallBitmap((StgPtr)c + 1,
BITMAP_BITS(info->i.layout.bitmap), size);
}
case RET_BIG: // large bitmap (> 32 entries)
- case RET_VEC_BIG:
size = GET_LARGE_BITMAP(&info->i)->size;
checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
return 1 + size;
case RET_BCO:
case RET_SMALL:
- case RET_VEC_SMALL:
case RET_BIG:
- case RET_VEC_BIG:
case RET_DYN:
case UPDATE_FRAME:
case STOP_FRAME:
jump %ENTRY_CODE(Sp(0));
}
-stg_interp_constr1_entry { jump %RET_VEC(Sp(0),0); }
-stg_interp_constr2_entry { jump %RET_VEC(Sp(0),1); }
-stg_interp_constr3_entry { jump %RET_VEC(Sp(0),2); }
-stg_interp_constr4_entry { jump %RET_VEC(Sp(0),3); }
-stg_interp_constr5_entry { jump %RET_VEC(Sp(0),4); }
-stg_interp_constr6_entry { jump %RET_VEC(Sp(0),5); }
-stg_interp_constr7_entry { jump %RET_VEC(Sp(0),6); }
-stg_interp_constr8_entry { jump %RET_VEC(Sp(0),7); }
-
/* Some info tables to be used when compiled code returns a value to
the interpreter, i.e. the interpreter pushes one of these onto the
stack before entering a value. What the code does is to
INFO_TABLE_RET( stg_ctoi_R1p,
0/*size*/, 0/*bitmap*/, /* special layout! */
- RET_BCO,
- RET_LBL(stg_ctoi_R1p),
- RET_LBL(stg_ctoi_R1p),
- RET_LBL(stg_ctoi_R1p),
- RET_LBL(stg_ctoi_R1p),
- RET_LBL(stg_ctoi_R1p),
- RET_LBL(stg_ctoi_R1p),
- RET_LBL(stg_ctoi_R1p),
- RET_LBL(stg_ctoi_R1p))
+ RET_BCO)
{
Sp_adj(-2);
Sp(1) = R1;
jump stg_yield_to_interpreter;
}
-#if MAX_VECTORED_RTN != 8
-#error MAX_VECTORED_RTN has changed: please modify stg_ctoi_R1p too.
-#endif
-
/*
* When the returned value is a pointer, but unlifted, in R1 ...
*/
/* ----------------------------------------------------------------------------
Info tables for indirections.
- SPECIALISED INDIRECTIONS: we have a specialised indirection for each
- kind of return (direct, vectored 0-7), so that we can avoid entering
- the object when we know what kind of return it will do. The update
- code (Updates.hc) updates objects with the appropriate kind of
+   SPECIALISED INDIRECTIONS: we have a specialised indirection for
+   direct returns, so that we can avoid entering the object when we
+   know it points directly to a value.  The update code (Updates.cmm)
+   updates objects with the appropriate kind of
indirection. We only do this for young-gen indirections.
------------------------------------------------------------------------- */
jump %GET_ENTRY(R1);
}
-#define IND_SPEC(label,ret) \
-INFO_TABLE(label,1,0,IND,"IND","IND") \
-{ \
- TICK_ENT_DYN_IND(); /* tick */ \
- R1 = StgInd_indirectee(R1); \
- TICK_ENT_VIA_NODE(); \
- jump ret; \
+INFO_TABLE(stg_IND_direct,1,0,IND,"IND","IND")
+{
+ TICK_ENT_DYN_IND(); /* tick */
+ R1 = StgInd_indirectee(R1);
+ TICK_ENT_VIA_NODE();
+ jump %ENTRY_CODE(Sp(0));
}
-IND_SPEC(stg_IND_direct, %ENTRY_CODE(Sp(0)))
-IND_SPEC(stg_IND_0, %RET_VEC(Sp(0),0))
-IND_SPEC(stg_IND_1, %RET_VEC(Sp(0),1))
-IND_SPEC(stg_IND_2, %RET_VEC(Sp(0),2))
-IND_SPEC(stg_IND_3, %RET_VEC(Sp(0),3))
-IND_SPEC(stg_IND_4, %RET_VEC(Sp(0),4))
-IND_SPEC(stg_IND_5, %RET_VEC(Sp(0),5))
-IND_SPEC(stg_IND_6, %RET_VEC(Sp(0),6))
-IND_SPEC(stg_IND_7, %RET_VEC(Sp(0),7))
-
INFO_TABLE(stg_IND_STATIC,1,0,IND_STATIC,"IND_STATIC","IND_STATIC")
{
TICK_ENT_STATIC_IND(); /* tick */
/* -----------------------------------------------------------------------------
Returning from the STG world.
-
- This is a polymorphic return address, meaning that any old constructor
- can be returned, we don't care (actually, it's probably going to be
- an IOok constructor, which will indirect through the vector table
- slot 0).
-------------------------------------------------------------------------- */
#if defined(PROFILING)
#define STOP_THREAD_WORDS 0
#endif
-/* A polymorhpic return address, where all the vector slots point to the
- direct entry point. */
INFO_TABLE_RET( stg_stop_thread, STOP_THREAD_WORDS, STOP_THREAD_BITMAP,
- STOP_FRAME,
- RET_LBL(stg_stop_thread),
- RET_LBL(stg_stop_thread),
- RET_LBL(stg_stop_thread),
- RET_LBL(stg_stop_thread),
- RET_LBL(stg_stop_thread),
- RET_LBL(stg_stop_thread),
- RET_LBL(stg_stop_thread),
- RET_LBL(stg_stop_thread) )
+ STOP_FRAME)
{
/*
The final exit.
fprintf(tf,"%7ld (%5.1f%%) from entering a new constructor\n\t\t [the rest from entering an existing constructor]\n",
tot_returns_of_new,
PC(INTAVG(tot_returns_of_new,tot_returns)));
- fprintf(tf,"%7ld (%5.1f%%) vectored [the rest unvectored]\n",
- VEC_RETURN_ctr,
- PC(INTAVG(VEC_RETURN_ctr,tot_returns)));
/* krc: comment out some of this stuff temporarily */
PC(INTAVG(RET_UNBOXED_TUP_hst[i],
RET_UNBOXED_TUP_ctr))); }
fprintf(tf, "\n");
- fprintf(tf, "\nRET_VEC_RETURN : %7ld: ", VEC_RETURN_ctr);
- for (i = 0; i < 9; i++) { fprintf(tf, "%5.1f%%",
- PC(INTAVG(RET_VEC_RETURN_hst[i],VEC_RETURN_ctr))); }
- fprintf(tf, "\n");
*/
fprintf(tf,"\nUPDATE FRAMES: %ld (%ld omitted from thunks)",
PR_CTR(RET_NEW_ctr);
PR_CTR(RET_OLD_ctr);
PR_CTR(RET_UNBOXED_TUP_ctr);
- PR_CTR(VEC_RETURN_ctr);
/* krc: put off till later... */
#if FALSE
PR_HST(RET_UNBOXED_TUP_hst,6);
PR_HST(RET_UNBOXED_TUP_hst,7);
PR_HST(RET_UNBOXED_TUP_hst,8);
- PR_HST(RET_VEC_RETURN_hst,0);
- PR_HST(RET_VEC_RETURN_hst,1);
- PR_HST(RET_VEC_RETURN_hst,2);
- PR_HST(RET_VEC_RETURN_hst,3);
- PR_HST(RET_VEC_RETURN_hst,4);
- PR_HST(RET_VEC_RETURN_hst,5);
- PR_HST(RET_VEC_RETURN_hst,6);
- PR_HST(RET_VEC_RETURN_hst,7);
- PR_HST(RET_VEC_RETURN_hst,8);
#endif /* FALSE */
PR_CTR(UPDF_OMITTED_ctr);
#include "Updates.h"
#include "StgLdvProf.h"
-/*
- The update frame return address must be *polymorphic*, that means
- we have to cope with both vectored and non-vectored returns. This
- is done by putting the return vector right before the info table, and
- having a standard direct return address after the info table (pointed
- to by the return address itself, as usual).
-
- Each entry in the vector table points to a specialised entry code fragment
- that knows how to return after doing the update. It would be possible to
- use a single generic piece of code that simply entered the return value
- to return, but it's quicker this way. The direct return code of course
- just does another direct return when it's finished.
-*/
-
/* on entry to the update code
(1) R1 points to the closure being returned
(2) Sp points to the update frame
code), since we don't mind duplicating this jump.
*/
-#define UPD_FRAME_ENTRY_TEMPLATE(label,ind_info,ret) \
- label \
+#define UPD_FRAME_ENTRY_TEMPLATE \
{ \
W_ updatee; \
\
/* ToDo: it might be a PAP, so we should check... */ \
TICK_UPD_CON_IN_NEW(sizeW_fromITBL(%GET_STD_INFO(updatee))); \
\
- UPD_SPEC_IND(updatee, ind_info, R1, jump (ret)); \
+ UPD_SPEC_IND(updatee, stg_IND_direct_info, R1, jump %ENTRY_CODE(Sp(0))); \
}
-UPD_FRAME_ENTRY_TEMPLATE(stg_upd_frame_0_ret,stg_IND_0_info,%RET_VEC(Sp(0),0))
-UPD_FRAME_ENTRY_TEMPLATE(stg_upd_frame_1_ret,stg_IND_1_info,%RET_VEC(Sp(0),1))
-UPD_FRAME_ENTRY_TEMPLATE(stg_upd_frame_2_ret,stg_IND_2_info,%RET_VEC(Sp(0),2))
-UPD_FRAME_ENTRY_TEMPLATE(stg_upd_frame_3_ret,stg_IND_3_info,%RET_VEC(Sp(0),3))
-UPD_FRAME_ENTRY_TEMPLATE(stg_upd_frame_4_ret,stg_IND_4_info,%RET_VEC(Sp(0),4))
-UPD_FRAME_ENTRY_TEMPLATE(stg_upd_frame_5_ret,stg_IND_5_info,%RET_VEC(Sp(0),5))
-UPD_FRAME_ENTRY_TEMPLATE(stg_upd_frame_6_ret,stg_IND_6_info,%RET_VEC(Sp(0),6))
-UPD_FRAME_ENTRY_TEMPLATE(stg_upd_frame_7_ret,stg_IND_7_info,%RET_VEC(Sp(0),7))
-
-#if MAX_VECTORED_RTN > 8
-#error MAX_VECTORED_RTN has changed: please modify stg_upd_frame too.
-#endif
-
-/*
- Make sure this table is big enough to handle the maximum vectored
- return size!
- */
-
#if defined(PROFILING)
#define UPD_FRAME_BITMAP 3
#define UPD_FRAME_WORDS 3
*/
INFO_TABLE_RET( stg_upd_frame,
- UPD_FRAME_WORDS, UPD_FRAME_BITMAP, UPDATE_FRAME,
- stg_upd_frame_0_ret,
- stg_upd_frame_1_ret,
- stg_upd_frame_2_ret,
- stg_upd_frame_3_ret,
- stg_upd_frame_4_ret,
- stg_upd_frame_5_ret,
- stg_upd_frame_6_ret,
- stg_upd_frame_7_ret
- )
-UPD_FRAME_ENTRY_TEMPLATE(,stg_IND_direct_info,%ENTRY_CODE(Sp(0)))
+ UPD_FRAME_WORDS, UPD_FRAME_BITMAP, UPDATE_FRAME)
+UPD_FRAME_ENTRY_TEMPLATE
INFO_TABLE_RET( stg_marked_upd_frame,
- UPD_FRAME_WORDS, UPD_FRAME_BITMAP, UPDATE_FRAME,
- stg_upd_frame_0_ret,
- stg_upd_frame_1_ret,
- stg_upd_frame_2_ret,
- stg_upd_frame_3_ret,
- stg_upd_frame_4_ret,
- stg_upd_frame_5_ret,
- stg_upd_frame_6_ret,
- stg_upd_frame_7_ret
- )
-UPD_FRAME_ENTRY_TEMPLATE(,stg_IND_direct_info,%ENTRY_CODE(Sp(0)))
-
-/*-----------------------------------------------------------------------------
- Seq frames
-
- We don't have a primitive seq# operator: it is just a 'case'
- expression whose scrutinee has either a polymorphic or function type
- (constructor types can be handled by normal 'case' expressions).
-
- To handle a polymorphic/function typed seq, we push a SEQ frame on
- the stack. This is a polymorphic activation record that just pops
- itself and returns (in a non-vectored way) when entered. The
- purpose of the SEQ frame is to avoid having to make a polymorphic return
- point for each polymorphic case expression.
-
- Another way of looking at it: the SEQ frame turns a vectored return
- into a direct one.
- -------------------------------------------------------------------------- */
-
-#if MAX_VECTORED_RTN > 8
-#error MAX_VECTORED_RTN has changed: please modify stg_seq_frame too.
-#endif
-
-INFO_TABLE_RET( stg_seq_frame, 0/* words */, 0/* bitmap */, RET_SMALL,
- RET_LBL(stg_seq_frame), /* 0 */
- RET_LBL(stg_seq_frame), /* 1 */
- RET_LBL(stg_seq_frame), /* 2 */
- RET_LBL(stg_seq_frame), /* 3 */
- RET_LBL(stg_seq_frame), /* 4 */
- RET_LBL(stg_seq_frame), /* 5 */
- RET_LBL(stg_seq_frame), /* 6 */
- RET_LBL(stg_seq_frame) /* 7 */
- )
-{
- Sp_adj(1);
- jump %ENTRY_CODE(Sp(0));
-}
+ UPD_FRAME_WORDS, UPD_FRAME_BITMAP, UPDATE_FRAME)
+UPD_FRAME_ENTRY_TEMPLATE
case STOP_FRAME:
case CATCH_FRAME:
case RET_SMALL:
- case RET_VEC_SMALL:
bitmap = BITMAP_BITS(info->i.layout.bitmap);
size = BITMAP_SIZE(info->i.layout.bitmap);
p++;
// large bitmap (> 32 entries, or 64 on a 64-bit machine)
case RET_BIG:
- case RET_VEC_BIG:
p++;
size = GET_LARGE_BITMAP(&info->i)->size;
thread_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
case RET_BCO:
case RET_SMALL:
- case RET_VEC_SMALL:
case RET_BIG:
- case RET_VEC_BIG:
case RET_DYN:
case UPDATE_FRAME:
case STOP_FRAME:
case STOP_FRAME:
case CATCH_FRAME:
case RET_SMALL:
- case RET_VEC_SMALL:
bitmap = BITMAP_BITS(info->i.layout.bitmap);
size = BITMAP_SIZE(info->i.layout.bitmap);
// NOTE: the payload starts immediately after the info-ptr, we
// large bitmap (> 32 entries, or > 64 on a 64-bit machine)
case RET_BIG:
- case RET_VEC_BIG:
{
nat size;