X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=compiler%2FnativeGen%2FAsmCodeGen.lhs;h=f954d524c9ef056eae015fd7f9ec44e64b1474e2;hb=f96e9aa0444de0e673b3c4055c6e43299639bc5b;hp=85fb4372e56a4ac280ac51bef21f124132790aba;hpb=28c556a5e0ed5c2687f19ec6ef8853b79ad65518;p=ghc-hetmet.git diff --git a/compiler/nativeGen/AsmCodeGen.lhs b/compiler/nativeGen/AsmCodeGen.lhs index 85fb437..f954d52 100644 --- a/compiler/nativeGen/AsmCodeGen.lhs +++ b/compiler/nativeGen/AsmCodeGen.lhs @@ -17,7 +17,7 @@ import MachRegs import MachCodeGen import PprMach import RegisterAlloc -import RegAllocInfo ( jumpDests ) +import RegAllocInfo import NCGMonad import PositionIndependentCode @@ -25,20 +25,17 @@ import Cmm import CmmOpt ( cmmMiniInline, cmmMachOpFold ) import PprCmm ( pprStmt, pprCmms ) import MachOp -import CLabel ( CLabel, mkSplitMarkerLabel, mkAsmTempLabel ) -#if powerpc_TARGET_ARCH -import CLabel ( mkRtsCodeLabel ) -#endif +import CLabel import UniqFM import Unique ( Unique, getUnique ) import UniqSupply import FastTypes import List ( groupBy, sortBy ) -import CLabel ( pprCLabel ) import ErrUtils ( dumpIfSet_dyn ) -import DynFlags ( DynFlags, DynFlag(..), dopt ) +import DynFlags import StaticFlags ( opt_Static, opt_PIC ) +import Util import Config ( cProjectVersion ) import Digraph @@ -111,12 +108,12 @@ The machine-dependent bits break down as follows: -- NB. We *lazilly* compile each block of code for space reasons. -nativeCodeGen :: DynFlags -> [Cmm] -> UniqSupply -> IO Pretty.Doc +nativeCodeGen :: DynFlags -> [RawCmm] -> UniqSupply -> IO Pretty.Doc nativeCodeGen dflags cmms us = let (res, _) = initUs us $ cgCmm (concat (map add_split cmms)) - cgCmm :: [CmmTop] -> UniqSM (Cmm, Pretty.Doc, [CLabel]) + cgCmm :: [RawCmmTop] -> UniqSM (RawCmm, Pretty.Doc, [CLabel]) cgCmm tops = lazyMapUs (cmmNativeGen dflags) tops `thenUs` \ results -> case unzip3 results of { (cmms,docs,imps) -> @@ -199,7 +196,7 @@ nativeCodeGen dflags cmms us -- Complete native code generation phase for a single top-level chunk -- of Cmm. 
-cmmNativeGen :: DynFlags -> CmmTop -> UniqSM (CmmTop, Pretty.Doc, [CLabel]) +cmmNativeGen :: DynFlags -> RawCmmTop -> UniqSM (RawCmmTop, Pretty.Doc, [CLabel]) cmmNativeGen dflags cmm = {-# SCC "fixAssigns" #-} fixAssignsTop cmm `thenUs` \ fixed_cmm -> @@ -212,8 +209,10 @@ cmmNativeGen dflags cmm genMachCode cmm `thenUs` \ (pre_regalloc, lastMinuteImports) -> {-# SCC "regAlloc" #-} mapUs regAlloc pre_regalloc `thenUs` \ with_regs -> + {-# SCC "shortcutBranches" #-} + shortcutBranches dflags with_regs `bind` \ shorted -> {-# SCC "sequenceBlocks" #-} - map sequenceTop with_regs `bind` \ sequenced -> + map sequenceTop shorted `bind` \ sequenced -> {-# SCC "x86fp_kludge" #-} map x86fp_kludge sequenced `bind` \ final_mach_code -> {-# SCC "vcat" #-} @@ -245,7 +244,7 @@ cmmNativeGen dflags cmm sequenceTop :: NatCmmTop -> NatCmmTop sequenceTop top@(CmmData _ _) = top sequenceTop (CmmProc info lbl params blocks) = - CmmProc info lbl params (sequenceBlocks blocks) + CmmProc info lbl params (makeFarBranches $ sequenceBlocks blocks) -- The algorithm is very simple (and stupid): we make a graph out of -- the blocks where there is an edge from one block to another iff the @@ -289,6 +288,88 @@ reorder id accum (b@(block,id',out) : rest) | id == id' = (True, (block,id,out) : reverse accum ++ rest) | otherwise = reorder id (b:accum) rest + +-- ----------------------------------------------------------------------------- +-- Making far branches + +-- Conditional branches on PowerPC are limited to +-32KB; if our Procs get too +-- big, we have to work around this limitation. + +makeFarBranches :: [NatBasicBlock] -> [NatBasicBlock] + +#if powerpc_TARGET_ARCH +makeFarBranches blocks + | last blockAddresses < nearLimit = blocks + | otherwise = zipWith handleBlock blockAddresses blocks + where + blockAddresses = scanl (+) 0 $ map blockLen blocks + blockLen (BasicBlock _ instrs) = length instrs + + handleBlock addr (BasicBlock id instrs) + = BasicBlock id (zipWith makeFar [addr..] instrs) + + makeFar addr (BCC ALWAYS tgt) = BCC ALWAYS tgt + makeFar addr (BCC cond tgt) + | abs (addr - targetAddr) >= nearLimit + = BCCFAR cond tgt + | otherwise + = BCC cond tgt + where Just targetAddr = lookupUFM blockAddressMap tgt + makeFar addr other = other + + nearLimit = 7000 -- 8192 instructions are allowed; let's keep some + -- distance, as we have a few pseudo-insns that are + -- pretty-printed as multiple instructions, + -- and it's just not worth the effort to calculate + -- things exactly + + blockAddressMap = listToUFM $ zip (map blockId blocks) blockAddresses +#else +makeFarBranches = id +#endif + +-- ----------------------------------------------------------------------------- +-- Shortcut branches + +shortcutBranches :: DynFlags -> [NatCmmTop] -> [NatCmmTop] +shortcutBranches dflags tops + | optLevel dflags < 1 = tops -- only with -O or higher + | otherwise = map (apply_mapping mapping) tops' + where + (tops', mappings) = mapAndUnzip build_mapping tops + mapping = foldr plusUFM emptyUFM mappings + +build_mapping top@(CmmData _ _) = (top, emptyUFM) +build_mapping (CmmProc info lbl params []) + = (CmmProc info lbl params [], emptyUFM) +build_mapping (CmmProc info lbl params (head:blocks)) + = (CmmProc info lbl params (head:others), mapping) + -- drop the shorted blocks, but don't ever drop the first one, + -- because it is pointed to by a global label. + where + -- find all the blocks that just consist of a jump that can be + -- shorted. 
+ (shortcut_blocks, others) = partitionWith split blocks + split (BasicBlock id [insn]) | Just dest <- canShortcut insn + = Left (id,dest) + split other = Right other + + -- build a mapping from BlockId to JumpDest for shorting branches + mapping = foldl add emptyUFM shortcut_blocks + add ufm (id,dest) = addToUFM ufm id dest + +apply_mapping ufm (CmmData sec statics) + = CmmData sec (map (shortcutStatic (lookupUFM ufm)) statics) + -- we need to get the jump tables, so apply the mapping to the entries + -- of a CmmData too. +apply_mapping ufm (CmmProc info lbl params blocks) + = CmmProc info lbl params (map short_bb blocks) + where + short_bb (BasicBlock id insns) = BasicBlock id $! map short_insn insns + short_insn i = shortcutJump (lookupUFM ufm) i + -- shortcutJump should apply the mapping repeatedly, + -- just in case we can short multiple branches. + -- ----------------------------------------------------------------------------- -- Instruction selection @@ -309,7 +390,7 @@ reorder id accum (b@(block,id',out) : rest) -- Switching between the two monads whilst carrying along the same -- Unique supply breaks abstraction. Is that bad? -genMachCode :: CmmTop -> UniqSM ([NatCmmTop], [CLabel]) +genMachCode :: RawCmmTop -> UniqSM ([NatCmmTop], [CLabel]) genMachCode cmm_top = do { initial_us <- getUs @@ -331,7 +412,7 @@ genMachCode cmm_top -- the generic optimiser below, to avoid having two separate passes -- over the Cmm. -fixAssignsTop :: CmmTop -> UniqSM CmmTop +fixAssignsTop :: RawCmmTop -> UniqSM RawCmmTop fixAssignsTop top@(CmmData _ _) = returnUs top fixAssignsTop (CmmProc info lbl params blocks) = mapUs fixAssignsBlock blocks `thenUs` \ blocks' -> @@ -363,16 +444,12 @@ fixAssign (CmmAssign (CmmGlobal reg) src) where reg_or_addr = get_GlobalReg_reg_or_addr reg -fixAssign (CmmCall target results args vols) +{- +fixAssign (CmmCall target results args) = mapAndUnzipUs fixResult results `thenUs` \ (results',stores) -> - returnUs (caller_save ++ - CmmCall target results' args vols : - caller_restore ++ + returnUs (CmmCall target results' args : concat stores) where - -- we also save/restore any caller-saves STG registers here - (caller_save, caller_restore) = callerSaveVolatileRegs vols - fixResult g@(CmmGlobal reg,hint) = case get_GlobalReg_reg_or_addr reg of Left realreg -> returnUs (g, []) @@ -383,6 +460,7 @@ fixAssign (CmmCall target results args vols) [CmmStore baseRegAddr (CmmReg local)]) fixResult other = returnUs (other,[]) +-} fixAssign other_stmt = returnUs [other_stmt] @@ -412,7 +490,7 @@ Ideas for other things we could do (ToDo): temp assignments, and certain assigns to mem...) -} -cmmToCmm :: CmmTop -> (CmmTop, [CLabel]) +cmmToCmm :: RawCmmTop -> (RawCmmTop, [CLabel]) cmmToCmm top@(CmmData _ _) = (top, []) cmmToCmm (CmmProc info lbl params blocks) = runCmmOpt $ do blocks' <- mapM cmmBlockConFold (cmmMiniInline blocks) @@ -458,7 +536,7 @@ cmmStmtConFold stmt -> do addr' <- cmmExprConFold JumpReference addr return $ CmmJump addr' regs - CmmCall target regs args vols + CmmCall target regs args srt -> do target' <- case target of CmmForeignCall e conv -> do e' <- cmmExprConFold CallReference e @@ -467,7 +545,7 @@ cmmStmtConFold stmt args' <- mapM (\(arg, hint) -> do arg' <- cmmExprConFold DataReference arg return (arg', hint)) args - return $ CmmCall target' regs args' vols + return $ CmmCall target' regs args' srt CmmCondBranch test dest -> do test' <- cmmExprConFold DataReference test
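
The makeFarBranches pass added above is easiest to follow stripped of the PowerPC-specific types. What follows is a minimal, self-contained sketch of the same idea: compute each block's start offset by counting instructions, then turn any conditional branch whose target is too far away into its "far" form. The constructor names BCC, BCCFAR and ALWAYS, and the 7000-slot nearLimit, come from the patch; the toy Instr, Block and Cond types and the use of Data.Map instead of GHC's NatBasicBlock and UniqFM are illustrative assumptions, not GHC's actual representation.

import qualified Data.Map as M

-- Toy model of the pass; Instr, Block and Cond are illustrative, not GHC's types.
data Cond = ALWAYS | EQQ | NEQ deriving (Eq, Show)

data Instr id
  = BCC Cond id        -- conditional branch with a limited displacement
  | BCCFAR Cond id     -- "far" form: conditional skip over an unconditional long jump
  | Other              -- anything else, assumed to occupy one instruction slot
  deriving Show

data Block id = Block id [Instr id] deriving Show

nearLimit :: Int
nearLimit = 7000       -- stay well inside the 8192-slot window, as the patch does

makeFarBranches :: Ord id => [Block id] -> [Block id]
makeFarBranches blocks
  | last addrs < nearLimit = blocks                 -- small proc: every branch is in range
  | otherwise              = zipWith fixBlock addrs blocks
  where
    addrs   = scanl (+) 0 (map blockLen blocks)     -- start offset of each block
    blockLen (Block _ is) = length is
    addrMap = M.fromList [ (i, a) | (Block i _, a) <- zip blocks addrs ]

    fixBlock addr (Block i is) = Block i (zipWith fixInstr [addr ..] is)

    fixInstr _    i@(BCC ALWAYS _) = i              -- unconditional: full branch range
    fixInstr addr i@(BCC cond tgt)
      | Just t <- M.lookup tgt addrMap
      , abs (addr - t) >= nearLimit = BCCFAR cond tgt
      | otherwise                   = i
    fixInstr _ other = other

The scanl-based address map mirrors the patch: block offsets are approximated by instruction counts, and the margin below the real 8192-instruction limit leaves slack for pseudo-instructions that pretty-print as several machine instructions.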
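
The shortcutBranches pass works the same way in miniature: blocks that consist of a single unconditional jump are dropped (never the first block, which the proc's global label points at), and every branch that targeted them is redirected, following chains so that several hops collapse at once. Below is a standalone sketch under the same kind of toy representation; Jmp, CondJmp, Op and shortcutBlocks are illustrative names of my own, whereas the real pass in the patch works over NatCmmTop using canShortcut, shortcutJump, shortcutStatic and a UniqFM mapping.

import qualified Data.Map as M
import Data.Either (partitionEithers)

-- Toy model again; all names here are illustrative.
data Instr id = Jmp id | CondJmp id | Op deriving Show
data Block id = Block id [Instr id] deriving Show

-- Drop blocks that are nothing but a single unconditional jump and retarget
-- every branch that pointed at them.  The entry block is never dropped: it is
-- what the proc's global label points at.
shortcutBlocks :: Ord id => [Block id] -> [Block id]
shortcutBlocks []             = []
shortcutBlocks (entry : rest) = map retarget (entry : kept)
  where
    (trivial, kept) = partitionEithers (map classify rest)
    classify (Block i [Jmp dest]) = Left (i, dest)   -- candidate for shortcutting
    classify b                    = Right b
    mapping = M.fromList trivial

    -- Follow chains of trivial blocks until we reach a block we kept.
    -- A production version must also guard against cycles of such blocks.
    resolve i = maybe i resolve (M.lookup i mapping)

    retarget (Block i is) = Block i (map short is)
    short (Jmp d)     = Jmp (resolve d)
    short (CondJmp d) = CondJmp (resolve d)
    short other       = other

Applying the same mapping to data sections, as apply_mapping does via shortcutStatic for CmmData, is what keeps jump tables consistent with the blocks that were dropped.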