import LlvmCodeGen.Regs
import BlockId
-import CgUtils ( activeStgRegs )
+import CgUtils ( activeStgRegs, callerSaves )
import CLabel
-import Cmm
-import qualified PprCmm
+import OldCmm
+import qualified OldPprCmm as PprCmm
import OrdList
import BasicTypes
import Unique
import Util
+import Data.List ( partition )
+import Control.Monad ( liftM )
+
type LlvmStatements = OrdList LlvmStatement
+
-- -----------------------------------------------------------------------------
--- | Top-level of the llvm proc codegen
+-- | Top-level of the LLVM proc Code generator
--
genLlvmProc :: LlvmEnv -> RawCmmTop -> UniqSM (LlvmEnv, [LlvmCmmTop])
genLlvmProc env (CmmData _ _)
= return (env, [])
-genLlvmProc env (CmmProc _ _ _ (ListGraph []))
+genLlvmProc env (CmmProc _ _ (ListGraph []))
= return (env, [])
-genLlvmProc env (CmmProc info lbl params (ListGraph blocks))
+genLlvmProc env (CmmProc info lbl (ListGraph blocks))
= do
(env', lmblocks, lmdata) <- basicBlocksCodeGen env blocks ([], [])
- let proc = CmmProc info lbl params (ListGraph lmblocks)
+ let proc = CmmProc info lbl (ListGraph lmblocks)
let tops = lmdata ++ [proc]
return (env', tops)
basicBlocksCodeGen env ([]) (blocks, tops)
= do let (blocks', allocs) = mapAndUnzip dominateAllocs blocks
let allocs' = concat allocs
- let ((BasicBlock id fstmts):rblocks) = blocks'
- let fblocks = (BasicBlock id (funPrologue ++ allocs' ++ fstmts)):rblocks
+ let ((BasicBlock id fstmts):rblks) = blocks'
+ fplog <- funPrologue
+ let fblocks = (BasicBlock id (fplog ++ allocs' ++ fstmts)):rblks
return (env, fblocks, tops)
basicBlocksCodeGen env (block:blocks) (lblocks', ltops')
basicBlocksCodeGen env' blocks (lblocks, ltops)
+-- | Allocations need to be extracted so they can be moved to the entry
+-- of a function to make sure they dominate all possible paths in the CFG.
+-- Returns the block with its 'Alloca' assignments removed, together with
+-- the extracted allocations; 'partition' preserves the relative order of
+-- both the kept and the extracted statements.
+dominateAllocs :: LlvmBasicBlock -> (LlvmBasicBlock, [LlvmStatement])
+dominateAllocs (BasicBlock id stmts)
+ = let (allocs, stmts') = partition isAlloc stmts
+ isAlloc (Assignment _ (Alloca _ _)) = True
+ isAlloc _other = False
+ in (BasicBlock id stmts', allocs)
+
+
-- | Generate code for one block
basicBlockCodeGen :: LlvmEnv
-> CmmBasicBlock
return (env', [BasicBlock id (fromOL instrs)], top)
--- | Allocations need to be extracted so they can be moved to the entry
--- of a function to make sure they dominate all posible paths in the CFG.
-dominateAllocs :: LlvmBasicBlock -> (LlvmBasicBlock, [LlvmStatement])
-dominateAllocs (BasicBlock id stmts)
- = (BasicBlock id allstmts, allallocs)
- where
- (allstmts, allallocs) = foldl split ([],[]) stmts
- split (stmts', allocs) s@(Assignment _ (Alloca _ _))
- = (stmts', allocs ++ [s])
- split (stmts', allocs) other
- = (stmts' ++ [other], allocs)
-
-
-- -----------------------------------------------------------------------------
-- * CmmStmt code generation
--
-- A statement conversion return data.
--- * LlvmEnv: The new enviornment
--- * LlvmStatements: The compiled llvm statements.
+-- * LlvmEnv: The new environment
+-- * LlvmStatements: The compiled LLVM statements.
-- * LlvmCmmTop: Any global data needed.
type StmtData = (LlvmEnv, LlvmStatements, [LlvmCmmTop])
-- CPS, only tail calls, no return's
-- Actually, there are a few return statements that occur because of hand
- -- written cmm code.
+ -- written Cmm code.
CmmReturn _
-> return (env, unitOL $ Return Nothing, [])
genCall :: LlvmEnv -> CmmCallTarget -> HintedCmmFormals -> HintedCmmActuals
-> CmmReturnInfo -> UniqSM StmtData
--- Write barrier needs to be handled specially as it is implemented as an llvm
+-- Write barrier needs to be handled specially as it is implemented as an LLVM
-- intrinsic function.
+#if i386_TARGET_ARCH || x86_64_TARGET_ARCH || sparc_TARGET_ARCH
+genCall env (CmmPrim MO_WriteBarrier) _ _ _ = return (env, nilOL, [])
+
+#else
genCall env (CmmPrim MO_WriteBarrier) _ _ _ = do
let fname = fsLit "llvm.memory.barrier"
let funSig = LlvmFunctionDecl fname ExternallyVisible CC_Ccc LMVoid
where
lmTrue :: LlvmVar
lmTrue = LMLitVar $ LMIntLit (-1) i1
+#endif
-- Handle all other foreign calls and prim ops.
genCall env target res args ret = do
- -- paramater types
+ -- parameter types
let arg_type (CmmHinted _ AddrHint) = i8Ptr
-- cast pointers to i8*. Llvm equivalent of void*
arg_type (CmmHinted expr _ ) = cmmToLlvmType $ cmmExprType expr
ret_type t = panic $ "genCall: Too many return values! Can only handle"
++ " 0 or 1, given " ++ show (length t) ++ "."
- -- extract cmm call convention
+ -- extract Cmm call convention
let cconv = case target of
CmmCallee _ conv -> conv
CmmPrim _ -> PrimCallConv
- -- translate to llvm call convention
+ -- translate to LLVM call convention
let lmconv = case cconv of
#if i386_TARGET_ARCH || x86_64_TARGET_ARCH
StdCallConv -> CC_X86_Stdcc
let funTy name = LMFunction $ LlvmFunctionDecl name ExternallyVisible
lmconv retTy FixedArgs argTy llvmFunAlign
- -- get paramter values
+ -- get parameter values
(env1, argVars, stmts1, top1) <- arg_vars env args ([], nilOL, [])
-- get the return register
Nothing Nothing False
return (env1, fun, nilOL, [])
- Just _ -> do
+ Just ty' -> do
-- label in module but not function pointer, convert
let fty@(LMFunction sig) = funTy name
- let fun = LMGlobalVar name fty (funcLinkage sig)
+ let fun = LMGlobalVar name (pLift ty') (funcLinkage sig)
Nothing Nothing False
(v1, s1) <- doExpr (pLift fty)
$ Cast LM_Bitcast fun (pLift fty)
| ret == CmmNeverReturns = unitOL $ Unreachable
| otherwise = nilOL
+ {- In LLVM we pass the STG registers around everywhere in function calls.
+ So this means LLVM considers them live across the entire function, when
+ in reality they usually aren't. For caller-save registers across C calls
+ the saving and restoring of them is done by the Cmm code generator,
+ using Cmm local vars. So to stop LLVM saving them as well (and saving
+ all of them, since it thinks they're always live), we trash them just
+ before the call by assigning the 'undef' value to them. The ones we
+ need are restored from the Cmm local var and the ones we don't need
+ are fine to be trashed.
+ -}
+ let trashStmts = concatOL $ map trashReg activeStgRegs
+ where trashReg r =
+ let reg = lmGlobalRegVar r
+ ty = (pLower . getVarType) reg
+ trash = unitOL $ Store (LMLitVar $ LMUndefLit ty) reg
+ in case callerSaves r of
+ True -> trash
+ False -> nilOL
+
+ let stmts = stmts1 `appOL` stmts2 `appOL` trashStmts
+
-- make the actual call
case retTy of
LMVoid -> do
let s1 = Expr $ Call ccTy fptr argVars fnAttrs
- let allStmts = stmts1 `appOL` stmts2 `snocOL` s1 `appOL` retStmt
+ let allStmts = stmts `snocOL` s1 `appOL` retStmt
return (env2, allStmts, top1 ++ top2)
_ -> do
+ (v1, s1) <- doExpr retTy $ Call ccTy fptr argVars fnAttrs
let (creg, _) = ret_reg res
let (env3, vreg, stmts3, top3) = getCmmReg env2 (CmmLocal creg)
- let allStmts = stmts1 `appOL` stmts2 `appOL` stmts3
- (v1, s1) <- doExpr retTy $ Call ccTy fptr argVars fnAttrs
+ let allStmts = stmts `snocOL` s1 `appOL` stmts3
if retTy == pLower (getVarType vreg)
then do
let s2 = Store v1 vreg
- return (env3, allStmts `snocOL` s1 `snocOL` s2
- `appOL` retStmt, top1 ++ top2 ++ top3)
+ return (env3, allStmts `snocOL` s2 `appOL` retStmt,
+ top1 ++ top2 ++ top3)
else do
let ty = pLower $ getVarType vreg
let op = case ty of
(v2, s2) <- doExpr ty $ Cast op v1 ty
let s3 = Store v2 vreg
- return (env3, allStmts `snocOL` s1 `snocOL` s2 `snocOL` s3
- `appOL` retStmt, top1 ++ top2 ++ top3)
+ return (env3, allStmts `snocOL` s2 `snocOL` s3
+ `appOL` retStmt, top1 ++ top2 ++ top3)
-- | Conversion of call arguments.
= case mop of
MO_F32_Exp -> fsLit "expf"
MO_F32_Log -> fsLit "logf"
- MO_F32_Sqrt -> fsLit "sqrtf"
- MO_F32_Pwr -> fsLit "powf"
+ MO_F32_Sqrt -> fsLit "llvm.sqrt.f32"
+ MO_F32_Pwr -> fsLit "llvm.pow.f32"
- MO_F32_Sin -> fsLit "sinf"
- MO_F32_Cos -> fsLit "cosf"
+ MO_F32_Sin -> fsLit "llvm.sin.f32"
+ MO_F32_Cos -> fsLit "llvm.cos.f32"
MO_F32_Tan -> fsLit "tanf"
MO_F32_Asin -> fsLit "asinf"
MO_F64_Exp -> fsLit "exp"
MO_F64_Log -> fsLit "log"
- MO_F64_Sqrt -> fsLit "sqrt"
- MO_F64_Pwr -> fsLit "pow"
+ MO_F64_Sqrt -> fsLit "llvm.sqrt.f64"
+ MO_F64_Pwr -> fsLit "llvm.pow.f64"
- MO_F64_Sin -> fsLit "sin"
- MO_F64_Cos -> fsLit "cos"
+ MO_F64_Sin -> fsLit "llvm.sin.f64"
+ MO_F64_Cos -> fsLit "llvm.cos.f64"
MO_F64_Tan -> fsLit "tan"
MO_F64_Asin -> fsLit "asin"
genAssign env reg val = do
let (env1, vreg, stmts1, top1) = getCmmReg env reg
(env2, vval, stmts2, top2) <- exprToVar env1 val
- let s1 = Store vval vreg
- return (env2, stmts1 `appOL` stmts2 `snocOL` s1, top1 ++ top2)
+ let stmts = stmts1 `appOL` stmts2
+
+ let ty = (pLower . getVarType) vreg
+ case isPointer ty && getVarType vval == llvmWord of
+ -- Some registers are pointer types, so need to cast value to pointer
+ True -> do
+ (v, s1) <- doExpr ty $ Cast LM_Inttoptr vval ty
+ let s2 = Store v vreg
+ return (env2, stmts `snocOL` s1 `snocOL` s2, top1 ++ top2)
+
+ False -> do
+ let s1 = Store vval vreg
+ return (env2, stmts `snocOL` s1, top1 ++ top2)
-- | CmmStore operation
genStore :: LlvmEnv -> CmmExpr -> CmmExpr -> UniqSM StmtData
-genStore env addr val = do
+
+-- First we try to detect a few common cases and produce better code for
+-- these than the default case. We are mostly trying to detect Cmm code
+-- like I32[Sp + n] and use 'getelementptr' operations instead of the
+-- generic case that uses casts and pointer arithmetic
+genStore env addr@(CmmReg (CmmGlobal r)) val
+ = genStore_fast env addr r 0 val
+
+genStore env addr@(CmmRegOff (CmmGlobal r) n) val
+ = genStore_fast env addr r n val
+
+genStore env addr@(CmmMachOp (MO_Add _) [
+ (CmmReg (CmmGlobal r)),
+ (CmmLit (CmmInt n _))])
+ val
+ = genStore_fast env addr r (fromInteger n) val
+
+genStore env addr@(CmmMachOp (MO_Sub _) [
+ (CmmReg (CmmGlobal r)),
+ (CmmLit (CmmInt n _))])
+ val
+ = genStore_fast env addr r (negate $ fromInteger n) val
+
+-- generic case
+genStore env addr val = genStore_slow env addr val
+
+-- | CmmStore operation
+-- This is a special case for storing to a global register pointer
+-- offset such as I32[Sp+8].
+genStore_fast :: LlvmEnv -> CmmExpr -> GlobalReg -> Int -> CmmExpr
+ -> UniqSM StmtData
+genStore_fast env addr r n val
+ = let gr = lmGlobalRegVar r
+ grt = (pLower . getVarType) gr
+ -- Convert the byte offset n into an element index into the
+ -- pointed-to type; a non-zero remainder means n is not a multiple
+ -- of the element size, so getelementptr cannot be used.
+ (ix,rem) = n `divMod` ((llvmWidthInBits . pLower) grt `div` 8)
+ in case isPointer grt && rem == 0 of
+ True -> do
+ (env', vval, stmts, top) <- exprToVar env val
+ (gv, s1) <- doExpr grt $ Load gr
+ (ptr, s2) <- doExpr grt $ GetElemPtr True gv [toI32 ix]
+ -- We might need a different pointer type, so check
+ case pLower grt == getVarType vval of
+ -- we're fine
+ True -> do
+ let s3 = Store vval ptr
+ return (env', stmts `snocOL` s1 `snocOL` s2
+ `snocOL` s3, top)
+
+ -- cast to pointer type needed
+ False -> do
+ let ty = (pLift . getVarType) vval
+ (ptr', s3) <- doExpr ty $ Cast LM_Bitcast ptr ty
+ let s4 = Store vval ptr'
+ return (env', stmts `snocOL` s1 `snocOL` s2
+ `snocOL` s3 `snocOL` s4, top)
+
+ -- If it's a bit type then we use the slow method since
+ -- we can't avoid casting anyway.
+ False -> genStore_slow env addr val
+
+
+-- | CmmStore operation
+-- Generic case. Uses casts and pointer arithmetic if needed.
+genStore_slow :: LlvmEnv -> CmmExpr -> CmmExpr -> UniqSM StmtData
+genStore_slow env addr val = do
(env1, vaddr, stmts1, top1) <- exprToVar env addr
(env2, vval, stmts2, top2) <- exprToVar env1 val
- if getVarType vaddr == llvmWord
- then do
+
+ let stmts = stmts1 `appOL` stmts2
+ case getVarType vaddr of
+ -- sometimes we need to cast an int to a pointer before storing
+ LMPointer ty@(LMPointer _) | getVarType vval == llvmWord -> do
+ (v, s1) <- doExpr ty $ Cast LM_Inttoptr vval ty
+ let s2 = Store v vaddr
+ return (env2, stmts `snocOL` s1 `snocOL` s2, top1 ++ top2)
+
+ LMPointer _ -> do
+ let s1 = Store vval vaddr
+ return (env2, stmts `snocOL` s1, top1 ++ top2)
+
+ i@(LMInt _) | i == llvmWord -> do
let vty = pLift $ getVarType vval
(vptr, s1) <- doExpr vty $ Cast LM_Inttoptr vaddr vty
let s2 = Store vval vptr
- return (env2, stmts1 `appOL` stmts2 `snocOL` s1 `snocOL` s2,
- top1 ++ top2)
+ return (env2, stmts `snocOL` s1 `snocOL` s2, top1 ++ top2)
- else
- panic $ "genStore: ptr not of word size! (" ++ show vaddr ++ ")"
+ other ->
+ pprPanic "genStore: ptr not right type!"
+ (PprCmm.pprExpr addr <+> text (
+ "Size of Ptr: " ++ show llvmPtrBits ++
+ ", Size of var: " ++ show (llvmWidthInBits other) ++
+ ", Var: " ++ show vaddr))
-- | Unconditional branch
-- | Switch branch
--
--- N.B. we remove Nothing's from the list of branches, as they are 'undefined'.
+-- N.B. We remove Nothing's from the list of branches, as they are 'undefined'.
-- However, they may be defined one day, so we better document this behaviour.
genSwitch :: LlvmEnv -> CmmExpr -> [Maybe BlockId] -> UniqSM StmtData
genSwitch env cond maybe_ids = do
let ty = getVarType vc
let pairs = [ (ix, id) | (ix,Just id) <- zip ([0..]::[Integer]) maybe_ids ]
- let labels = map (\(ix, b) -> (mkIntLit ix ty, blockIdToLlvm b)) pairs
+ let labels = map (\(ix, b) -> (mkIntLit ty ix, blockIdToLlvm b)) pairs
-- out of range is undefied, so lets just branch to first label
let (_, defLbl) = head labels
-> genLit env lit
CmmLoad e' ty
- -> genCmmLoad env e' ty
+ -> genLoad env e' ty
-- Cmmreg in expression is the value, so must load. If you want actual
-- reg pointer, call getCmmReg directly.
CmmReg r -> do
let (env', vreg, stmts, top) = getCmmReg env r
(v1, s1) <- doExpr (pLower $ getVarType vreg) $ Load vreg
- return (env', v1, stmts `snocOL` s1 , top)
+ case (isPointer . getVarType) v1 of
+ True -> do
+ -- Cmm wants the value, so pointer types must be cast to ints
+ (v2, s2) <- doExpr llvmWord $ Cast LM_Ptrtoint v1 llvmWord
+ return (env', v2, stmts `snocOL` s1 `snocOL` s2, top)
+
+ False -> return (env', v1, stmts `snocOL` s1, top)
CmmMachOp op exprs
-> genMachOp env opt op exprs
genMachOp env _ op [x] = case op of
MO_Not w ->
- let all1 = mkIntLit (-1::Int) (widthToLlvmInt w)
+ let all1 = mkIntLit (widthToLlvmInt w) (-1::Int)
in negate (widthToLlvmInt w) all1 LM_MO_Xor
MO_S_Neg w ->
- let all0 = mkIntLit (0::Int) (widthToLlvmInt w)
+ let all0 = mkIntLit (widthToLlvmInt w) (0::Int)
in negate (widthToLlvmInt w) all0 LM_MO_Sub
MO_F_Neg w ->
let all0 = LMLitVar $ LMFloatLit (-0) (widthToLlvmFloat w)
- in negate (widthToLlvmFloat w) all0 LM_MO_Sub
+ in negate (widthToLlvmFloat w) all0 LM_MO_FSub
MO_SF_Conv _ w -> fiConv (widthToLlvmFloat w) LM_Sitofp
MO_FS_Conv _ w -> fiConv (widthToLlvmInt w) LM_Fptosi
return (env', v1, stmts `snocOL` s1, top)
let toWidth = llvmWidthInBits ty
-- LLVM doesn't like trying to convert to same width, so
- -- need to check for that as we do get cmm code doing it.
+ -- need to check for that as we do get Cmm code doing it.
case widthInBits from of
w | w < toWidth -> sameConv' expand
w | w > toWidth -> sameConv' reduce
_w -> return x'
+-- Handle GlobalRegs pointers
+genMachOp env opt o@(MO_Add _) e@[(CmmReg (CmmGlobal r)), (CmmLit (CmmInt n _))]
+ = genMachOp_fast env opt o r (fromInteger n) e
+
+genMachOp env opt o@(MO_Sub _) e@[(CmmReg (CmmGlobal r)), (CmmLit (CmmInt n _))]
+ = genMachOp_fast env opt o r (negate . fromInteger $ n) e
+
+-- Generic case
+genMachOp env opt op e = genMachOp_slow env opt op e
+
+
+-- | Handle CmmMachOp expressions
+-- This is a specialised method that handles Global register manipulations like
+-- 'Sp - 16', using the getelementptr instruction.
+genMachOp_fast :: LlvmEnv -> EOption -> MachOp -> GlobalReg -> Int -> [CmmExpr]
+ -> UniqSM ExprData
+genMachOp_fast env opt op r n e
+ = let gr = lmGlobalRegVar r
+ grt = (pLower . getVarType) gr
+ -- Byte offset n becomes an element index; a non-zero remainder
+ -- means the offset is not element-aligned, so fall back.
+ (ix,rem) = n `divMod` ((llvmWidthInBits . pLower) grt `div` 8)
+ in case isPointer grt && rem == 0 of
+ True -> do
+ (gv, s1) <- doExpr grt $ Load gr
+ (ptr, s2) <- doExpr grt $ GetElemPtr True gv [toI32 ix]
+ -- Cmm expects a word-sized value, so cast the pointer back.
+ (var, s3) <- doExpr llvmWord $ Cast LM_Ptrtoint ptr llvmWord
+ return (env, var, unitOL s1 `snocOL` s2 `snocOL` s3, [])
+
+ False -> genMachOp_slow env opt op e
+
+
+-- | Handle CmmMachOp expressions
+-- This handles all the cases not handled by the specialised genMachOp_fast.
+genMachOp_slow :: LlvmEnv -> EOption -> MachOp -> [CmmExpr] -> UniqSM ExprData
-- Binary MachOp
-genMachOp env opt op [x, y] = case op of
+genMachOp_slow env opt op [x, y] = case op of
MO_Eq _ -> genBinComp opt LM_CMP_Eq
MO_Ne _ -> genBinComp opt LM_CMP_Ne
MO_F_Lt _ -> genBinComp opt LM_CMP_Flt
MO_F_Le _ -> genBinComp opt LM_CMP_Fle
- MO_F_Add _ -> genBinMach LM_MO_Add
- MO_F_Sub _ -> genBinMach LM_MO_Sub
- MO_F_Mul _ -> genBinMach LM_MO_Mul
+ MO_F_Add _ -> genBinMach LM_MO_FAdd
+ MO_F_Sub _ -> genBinMach LM_MO_FSub
+ MO_F_Mul _ -> genBinMach LM_MO_FMul
MO_F_Quot _ -> genBinMach LM_MO_FDiv
MO_And _ -> genBinMach LM_MO_And
-- ++ "\ne2: " ++ (show.llvmSDoc.PprCmm.pprExpr $ y)
-- | Need to use EOption here as Cmm expects word size results from
- -- comparisons while llvm return i1. Need to extend to llvmWord type
+ -- comparisons while LLVM return i1. Need to extend to llvmWord type
-- if expected
genBinComp opt cmp = do
ed@(env', v1, stmts, top) <- binLlvmOp (\_ -> i1) $ Compare cmp
let word = getVarType vx
let word2 = LMInt $ 2 * (llvmWidthInBits $ getVarType vx)
let shift = llvmWidthInBits word
- let shift1 = mkIntLit (shift - 1) llvmWord
- let shift2 = mkIntLit shift llvmWord
+ let shift1 = toIWord (shift - 1)
+ let shift2 = toIWord shift
if isInt word
then do
else
panic $ "isSMulOK: Not bit type! (" ++ show word ++ ")"
-
-- More then two expression, invalid!
-genMachOp _ _ _ _ = panic "genMachOp: More then 2 expressions in MachOp!"
-
-
--- | Handle CmmLoad expression
-genCmmLoad :: LlvmEnv -> CmmExpr -> CmmType -> UniqSM ExprData
-genCmmLoad env e ty = do
+genMachOp_slow _ _ _ _ = panic "genMachOp: More then 2 expressions in MachOp!"
+
+
+-- | Handle CmmLoad expression.
+genLoad :: LlvmEnv -> CmmExpr -> CmmType -> UniqSM ExprData
+
+-- First we try to detect a few common cases and produce better code for
+-- these than the default case. We are mostly trying to detect Cmm code
+-- like I32[Sp + n] and use 'getelementptr' operations instead of the
+-- generic case that uses casts and pointer arithmetic.
+-- Only global-register bases (optionally offset by a constant) qualify
+-- for the fast path; everything else goes through genLoad_slow.
+genLoad env e@(CmmReg (CmmGlobal r)) ty
+ = genLoad_fast env e r 0 ty
+
+genLoad env e@(CmmRegOff (CmmGlobal r) n) ty
+ = genLoad_fast env e r n ty
+
+genLoad env e@(CmmMachOp (MO_Add _) [
+ (CmmReg (CmmGlobal r)),
+ (CmmLit (CmmInt n _))])
+ ty
+ = genLoad_fast env e r (fromInteger n) ty
+
+genLoad env e@(CmmMachOp (MO_Sub _) [
+ (CmmReg (CmmGlobal r)),
+ (CmmLit (CmmInt n _))])
+ ty
+ = genLoad_fast env e r (negate $ fromInteger n) ty
+
+-- generic case
+genLoad env e ty = genLoad_slow env e ty
+
+-- | Handle CmmLoad expression.
+-- This is a special case for loading from a global register pointer
+-- offset such as I32[Sp+8].
+genLoad_fast :: LlvmEnv -> CmmExpr -> GlobalReg -> Int -> CmmType
+ -> UniqSM ExprData
+genLoad_fast env e r n ty =
+ let gr = lmGlobalRegVar r
+ grt = (pLower . getVarType) gr
+ ty' = cmmToLlvmType ty
+ -- Byte offset n becomes an element index; a non-zero remainder
+ -- means the offset is not element-aligned, so fall back.
+ (ix,rem) = n `divMod` ((llvmWidthInBits . pLower) grt `div` 8)
+ in case isPointer grt && rem == 0 of
+ True -> do
+ (gv, s1) <- doExpr grt $ Load gr
+ (ptr, s2) <- doExpr grt $ GetElemPtr True gv [toI32 ix]
+ -- We might need a different pointer type, so check
+ case grt == ty' of
+ -- we're fine
+ True -> do
+ (var, s3) <- doExpr ty' $ Load ptr
+ return (env, var, unitOL s1 `snocOL` s2 `snocOL` s3,
+ [])
+
+ -- cast to pointer type needed
+ False -> do
+ let pty = pLift ty'
+ (ptr', s3) <- doExpr pty $ Cast LM_Bitcast ptr pty
+ (var, s4) <- doExpr ty' $ Load ptr'
+ return (env, var, unitOL s1 `snocOL` s2 `snocOL` s3
+ `snocOL` s4, [])
+
+ -- If it's a bit type then we use the slow method since
+ -- we can't avoid casting anyway.
+ False -> genLoad_slow env e ty
+
+
+-- | Handle Cmm load expression.
+-- Generic case. Uses casts and pointer arithmetic if needed.
+genLoad_slow :: LlvmEnv -> CmmExpr -> CmmType -> UniqSM ExprData
+genLoad_slow env e ty = do
(env', iptr, stmts, tops) <- exprToVar env e
- let ety = getVarType iptr
- case (isInt ety) of
- True | llvmPtrBits == llvmWidthInBits ety -> do
+ case getVarType iptr of
+ LMPointer _ -> do
+ (dvar, load) <- doExpr (cmmToLlvmType ty) $ Load iptr
+ return (env', dvar, stmts `snocOL` load, tops)
+
+ i@(LMInt _) | i == llvmWord -> do
let pty = LMPointer $ cmmToLlvmType ty
(ptr, cast) <- doExpr pty $ Cast LM_Inttoptr iptr pty
(dvar, load) <- doExpr (cmmToLlvmType ty) $ Load ptr
return (env', dvar, stmts `snocOL` cast `snocOL` load, tops)
- | otherwise
- -> pprPanic
- ("exprToVar: can't cast to pointer as int not of "
- ++ "pointer size!")
+ other -> pprPanic "exprToVar: CmmLoad expression is not right type!"
(PprCmm.pprExpr e <+> text (
"Size of Ptr: " ++ show llvmPtrBits ++
- ", Size of var: " ++ show (llvmWidthInBits ety) ++
+ ", Size of var: " ++ show (llvmWidthInBits other) ++
", Var: " ++ show iptr))
- False -> panic "exprToVar: CmmLoad expression is not of type int!"
-
-- | Handle CmmReg expression
--
-- We allocate CmmReg on the stack. This avoids having to map a CmmReg to an
-- equivalent SSA form and avoids having to deal with Phi node insertion.
--- This is also the approach recommended by llvm developers.
+-- This is also the approach recommended by LLVM developers.
getCmmReg :: LlvmEnv -> CmmReg -> ExprData
getCmmReg env r@(CmmLocal (LocalReg un _))
= let exists = varLookup un env
-- | Generate code for a literal
genLit :: LlvmEnv -> CmmLit -> UniqSM ExprData
genLit env (CmmInt i w)
- = return (env, mkIntLit i (LMInt $ widthInBits w), nilOL, [])
+ = return (env, mkIntLit (LMInt $ widthInBits w) i, nilOL, [])
genLit env (CmmFloat r w)
= return (env, LMLitVar $ LMFloatLit (fromRational r) (widthToLlvmFloat w),
ty = funLookup label env
lmty = cmmToLlvmType $ cmmLitType cmm
in case ty of
- -- Make generic external label defenition and then pointer to it
+ -- Make generic external label definition and then pointer to it
Nothing -> do
let glob@(var, _) = genStringLabelRef label
let ldata = [CmmData Data [([glob], [])]]
let env' = funInsert label (pLower $ getVarType var) env
(v1, s1) <- doExpr lmty $ Cast LM_Ptrtoint var llvmWord
return (env', v1, unitOL s1, ldata)
+
-- Referenced data exists in this module, retrieve type and make
-- pointer to it.
Just ty' -> do
genLit env (CmmLabelOff label off) = do
(env', vlbl, stmts, stat) <- genLit env (CmmLabel label)
- let voff = mkIntLit off llvmWord
+ let voff = toIWord off
(v1, s1) <- doExpr (getVarType vlbl) $ LlvmOp LM_MO_Add vlbl voff
return (env', v1, stmts `snocOL` s1, stat)
genLit env (CmmLabelDiffOff l1 l2 off) = do
(env1, vl1, stmts1, stat1) <- genLit env (CmmLabel l1)
(env2, vl2, stmts2, stat2) <- genLit env1 (CmmLabel l2)
- let voff = mkIntLit off llvmWord
+ let voff = toIWord off
let ty1 = getVarType vl1
let ty2 = getVarType vl2
if (isInt ty1) && (isInt ty2)
--
-- | Function prologue. Load STG arguments into variables for function.
-funPrologue :: [LlvmStatement]
-funPrologue = concat $ map getReg activeStgRegs
+funPrologue :: UniqSM [LlvmStatement]
+funPrologue = liftM concat $ mapM getReg activeStgRegs
where getReg rr =
let reg = lmGlobalRegVar rr
arg = lmGlobalRegArg rr
alloc = Assignment reg $ Alloca (pLower $ getVarType reg) 1
- store = Store arg reg
- in [alloc, store]
+ in return [alloc, Store arg reg]
-- | Function epilogue. Load STG variables to use as argument for call.
funEpilogue :: UniqSM ([LlvmVar], LlvmStatements)
funEpilogue = do
let loadExpr r = do
- (v,s) <- doExpr (pLower $ getVarType r) $ Load r
+ let reg = lmGlobalRegVar r
+ (v,s) <- doExpr (pLower $ getVarType reg) $ Load reg
return (v, unitOL s)
- loads <- mapM (loadExpr . lmGlobalRegVar) activeStgRegs
+ loads <- mapM loadExpr activeStgRegs
let (vars, stmts) = unzip loads
return (vars, concatOL stmts)
= let fn = strCLabel_llvm lbl
ty = funLookup fn env
in case ty of
- Just ty'@(LMFunction sig) -> do
-- Function in module in right form
+ Just ty'@(LMFunction sig) -> do
let fun = LMGlobalVar fn ty' (funcLinkage sig) Nothing Nothing False
return (env, fun, nilOL, [])
- Just ty' -> do
+
-- label in module but not function pointer, convert
+ Just ty' -> do
let fun = LMGlobalVar fn (pLift ty') ExternallyVisible
Nothing Nothing False
(v1, s1) <- doExpr (pLift llvmFunTy) $
Cast LM_Bitcast fun (pLift llvmFunTy)
return (env, v1, unitOL s1, [])
- Nothing -> do
+
-- label not in module, create external reference
+ Nothing -> do
let ty' = LMFunction $ llvmFunSig lbl ExternallyVisible
let fun = LMGlobalVar fn ty' ExternallyVisible Nothing Nothing False
let top = CmmData Data [([],[ty'])]
blockIdToLlvm :: BlockId -> LlvmVar
blockIdToLlvm bid = LMLocalVar (getUnique bid) LMLabel
-
-- | Create Llvm int Literal
-mkIntLit :: Integral a => a -> LlvmType -> LlvmVar
-mkIntLit i ty = LMLitVar $ LMIntLit (toInteger i) ty
+mkIntLit :: Integral a => LlvmType -> a -> LlvmVar
+mkIntLit ty i = LMLitVar $ LMIntLit (toInteger i) ty
+
+-- | Convert int type to an 'LlvmVar' of word or i32 size
+toI32, toIWord :: Integral a => a -> LlvmVar
+toI32 = mkIntLit i32
+toIWord = mkIntLit llvmWord
-- | Error functions