X-Git-Url: http://git.megacz.com/?p=ghc-hetmet.git;a=blobdiff_plain;f=compiler%2FnativeGen%2FAsmCodeGen.lhs;h=29f4be42fe22413a9293e3954cea9a4a91edd74f;hp=7b49f7ac7c61460a8492785a7452f3a6de804095;hb=176fa33f17dd78355cc572e006d2ab26898e2c69;hpb=7bf4c6c4d89f63af5b6a2be8e69cec7e8416dbaa

diff --git a/compiler/nativeGen/AsmCodeGen.lhs b/compiler/nativeGen/AsmCodeGen.lhs
index 7b49f7a..29f4be4 100644
--- a/compiler/nativeGen/AsmCodeGen.lhs
+++ b/compiler/nativeGen/AsmCodeGen.lhs
@@ -7,11 +7,11 @@
 -- -----------------------------------------------------------------------------
 
 \begin{code}
-{-# OPTIONS_GHC -w #-}
+{-# OPTIONS -w #-}
 -- The above warning supression flag is a temporary kludge.
 -- While working on this module you are encouraged to remove it and fix
 -- any warnings in the module. See
--- http://hackage.haskell.org/trac/ghc/wiki/WorkingConventions#Warnings
+-- http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#Warnings
 -- for details
 
 module AsmCodeGen ( nativeCodeGen ) where
@@ -35,19 +35,18 @@ import qualified GraphColor as Color
 import Cmm
 import CmmOpt ( cmmMiniInline, cmmMachOpFold )
-import PprCmm ( pprStmt, pprCmms, pprCmm )
-import MachOp
+import PprCmm
 import CLabel
 import State
 import UniqFM
 import Unique ( Unique, getUnique )
 import UniqSupply
-import FastTypes
 import List ( groupBy, sortBy )
-import ErrUtils ( dumpIfSet_dyn )
 import DynFlags
+#if powerpc_TARGET_ARCH
 import StaticFlags ( opt_Static, opt_PIC )
+#endif
 import Util
 import Config ( cProjectVersion )
 import Module
@@ -129,7 +128,7 @@ nativeCodeGen dflags h us cmms
     let split_cmms = concat $ map add_split cmms
 
     (imports, prof)
-        <- cmmNativeGens dflags h us split_cmms [] []
+        <- cmmNativeGens dflags h us split_cmms [] [] 0
 
     let (native, colorStats, linearStats)
        = unzip3 prof
@@ -174,18 +173,18 @@ nativeCodeGen dflags h us cmms
      | dopt Opt_SplitObjs dflags = split_marker : tops
      | otherwise = tops
 
-    split_marker = CmmProc [] mkSplitMarkerLabel [] []
+    split_marker = CmmProc [] mkSplitMarkerLabel [] (ListGraph [])
 
 -- | Do native code generation on all these cmms.
 --
-cmmNativeGens dflags h us [] impAcc profAcc
+cmmNativeGens dflags h us [] impAcc profAcc count
     = return (reverse impAcc, reverse profAcc)
 
-cmmNativeGens dflags h us (cmm : cmms) impAcc profAcc
+cmmNativeGens dflags h us (cmm : cmms) impAcc profAcc count
  = do
     (us', native, imports, colorStats, linearStats)
-        <- cmmNativeGen dflags us cmm
+        <- cmmNativeGen dflags us cmm count
 
     Pretty.printDoc Pretty.LeftMode h
        $ {-# SCC "pprNativeCode" #-} Pretty.vcat $ map pprNatCmmTop native
 
@@ -196,13 +195,18 @@ cmmNativeGens dflags h us (cmm : cmms) impAcc profAcc
            then native
            else []
 
-    -- force evaulation of imports and lsPprNative to avoid space leak
+    let count' = count + 1;
+
+
+    -- force evaulation all this stuff to avoid space leaks
     seqString (showSDoc $ vcat $ map ppr imports) `seq` return ()
-    lsPprNative `seq` return ()
+    lsPprNative `seq` return ()
+    count' `seq` return ()
 
     cmmNativeGens dflags h us' cmms
        (imports : impAcc)
        ((lsPprNative, colorStats, linearStats) : profAcc)
+       count'
 
  where seqString [] = ()
       seqString (x:xs) = x `seq` seqString xs `seq` ()
 
@@ -214,15 +218,17 @@ cmmNativeGens dflags h us (cmm : cmms) impAcc profAcc
 cmmNativeGen
    :: DynFlags
    -> UniqSupply
-   -> RawCmmTop
+   -> RawCmmTop                         -- ^ the cmm to generate code for
+   -> Int                               -- ^ sequence number of this top thing
    -> IO ( UniqSupply
-         , [NatCmmTop]
-         , [CLabel]
-         , Maybe [Color.RegAllocStats]
-         , Maybe [Linear.RegAllocStats])
+         , [NatCmmTop]                  -- native code
+         , [CLabel]                     -- things imported by this cmm
+         , Maybe [Color.RegAllocStats]  -- stats for the coloring register allocator
+         , Maybe [Linear.RegAllocStats]) -- stats for the linear register allocators
 
-cmmNativeGen dflags us cmm
+cmmNativeGen dflags us cmm count
  = do
+    -- rewrite assignments to global regs
     let (fixed_cmm, usFix) =
        {-# SCC "fixAssignsTop" #-}
@@ -259,7 +265,8 @@ cmmNativeGen dflags us cmm
     -- allocate registers
     (alloced, usAlloc, ppr_raStatsColor, ppr_raStatsLinear) <-
-     if dopt Opt_RegsGraph dflags
+     if ( dopt Opt_RegsGraph dflags
+       || dopt Opt_RegsIterative dflags)
     then do
        -- the regs usable for allocation
       let alloc_regs
@@ -268,32 +275,15 @@ cmmNativeGen dflags us cmm
               emptyUFM
           $ map RealReg allocatableRegs
 
-       -- aggressively coalesce moves between virtual regs
-       let (coalesced, usCoalesce)
-           = {-# SCC "regCoalesce" #-}
-             initUs usLive $ regCoalesce withLiveness
-
-       dumpIfSet_dyn dflags
-           Opt_D_dump_asm_coalesce "Reg-Reg moves coalesced"
-           (vcat $ map ppr coalesced)
-
-       -- if any of these dump flags are turned on we want to hang on to
-       -- intermediate structures in the allocator - otherwise tell the
-       -- allocator to ditch them early so we don't end up creating space leaks.
-       let generateRegAllocStats = or
-           [ dopt Opt_D_dump_asm_regalloc_stages dflags
-           , dopt Opt_D_dump_asm_stats dflags
-           , dopt Opt_D_dump_asm_conflicts dflags ]
-
        -- graph coloring register allocation
       let ((alloced, regAllocStats), usAlloc)
-           = {-# SCC "regAlloc(color)" #-}
-             initUs usCoalesce
+           = {-# SCC "RegAlloc" #-}
+             initUs usLive
             $ Color.regAlloc
-               generateRegAllocStats
+               dflags
               alloc_regs
               (mkUniqSet [0..maxSpillSlots])
-               coalesced
+               withLiveness
 
        -- dump out what happened during register allocation
       dumpIfSet_dyn dflags
@@ -303,7 +293,8 @@ cmmNativeGen dflags us cmm
       dumpIfSet_dyn dflags
           Opt_D_dump_asm_regalloc_stages "Build/spill stages"
           (vcat $ map (\(stage, stats)
-              -> text "-- Stage " <> int stage
+              -> text "# --------------------------"
+              $$ text "# cmm " <> int count <> text " Stage " <> int stage
              $$ ppr stats)
           $ zip [0..] regAllocStats)
@@ -321,7 +312,7 @@ cmmNativeGen dflags us cmm
     else do
        -- do linear register allocation
       let ((alloced, regAllocStats), usAlloc)
-           = {-# SCC "regAlloc(linear)" #-}
+           = {-# SCC "RegAlloc" #-}
             initUs usLive
             $ liftM unzip
             $ mapUs Linear.regAlloc withLiveness
 
@@ -370,11 +361,8 @@ cmmNativeGen dflags us cmm
 #if i386_TARGET_ARCH
 x86fp_kludge :: NatCmmTop -> NatCmmTop
 x86fp_kludge top@(CmmData _ _) = top
-x86fp_kludge top@(CmmProc info lbl params code) =
-   CmmProc info lbl params (map bb_i386_insert_ffrees code)
-   where
-       bb_i386_insert_ffrees (BasicBlock id instrs) =
-           BasicBlock id (i386_insert_ffrees instrs)
+x86fp_kludge top@(CmmProc info lbl params (ListGraph code)) =
+   CmmProc info lbl params (ListGraph $ i386_insert_ffrees code)
 #endif
 
@@ -444,8 +432,8 @@ makeImportsDoc imports
 sequenceTop :: NatCmmTop -> NatCmmTop
 sequenceTop top@(CmmData _ _) = top
-sequenceTop (CmmProc info lbl params blocks) =
-  CmmProc info lbl params (makeFarBranches $ sequenceBlocks blocks)
+sequenceTop (CmmProc info lbl params (ListGraph blocks)) =
+  CmmProc info lbl params (ListGraph $ makeFarBranches $ sequenceBlocks blocks)
 
 -- The algorithm is very simple (and stupid): we make a graph out of
 -- the blocks where there is an edge from one block to another iff the
@@ -454,6 +442,9 @@ sequenceTop (CmmProc info lbl params blocks) =
 -- output the block, then if it has an out edge, we move the
 -- destination of the out edge to the front of the list, and continue.
 
+-- FYI, the classic layout for basic blocks uses postorder DFS; this
+-- algorithm is implemented in cmm/ZipCfg.hs (NR 6 Sep 2007).
+
 sequenceBlocks :: [NatBasicBlock] -> [NatBasicBlock]
 sequenceBlocks [] = []
 sequenceBlocks (entry:blocks) =
@@ -461,7 +452,7 @@ sequenceBlocks (entry:blocks) =
   -- the first block is the entry point ==> it must remain at the start.
 
 sccBlocks :: [NatBasicBlock] -> [SCC (NatBasicBlock,Unique,[Unique])]
-sccBlocks blocks = stronglyConnCompR (map mkNode blocks)
+sccBlocks blocks = stronglyConnCompFromEdgedVerticesR (map mkNode blocks)
 
 getOutEdges :: [Instr] -> [Unique]
 getOutEdges instrs = case jumpDests (last instrs) [] of
@@ -541,10 +532,10 @@ shortcutBranches dflags tops
     mapping = foldr plusUFM emptyUFM mappings
 
 build_mapping top@(CmmData _ _) = (top, emptyUFM)
-build_mapping (CmmProc info lbl params [])
-  = (CmmProc info lbl params [], emptyUFM)
-build_mapping (CmmProc info lbl params (head:blocks))
-  = (CmmProc info lbl params (head:others), mapping)
+build_mapping (CmmProc info lbl params (ListGraph []))
+  = (CmmProc info lbl params (ListGraph []), emptyUFM)
+build_mapping (CmmProc info lbl params (ListGraph (head:blocks)))
+  = (CmmProc info lbl params (ListGraph (head:others)), mapping)
        -- drop the shorted blocks, but don't ever drop the first one,
        -- because it is pointed to by a global label.
   where
@@ -563,8 +554,8 @@ apply_mapping ufm (CmmData sec statics)
   = CmmData sec (map (shortcutStatic (lookupUFM ufm)) statics)
   -- we need to get the jump tables, so apply the mapping to the entries
   -- of a CmmData too.
-apply_mapping ufm (CmmProc info lbl params blocks)
-  = CmmProc info lbl params (map short_bb blocks)
+apply_mapping ufm (CmmProc info lbl params (ListGraph blocks))
+  = CmmProc info lbl params (ListGraph $ map short_bb blocks)
   where
     short_bb (BasicBlock id insns) = BasicBlock id $! map short_insn insns
     short_insn i = shortcutJump (lookupUFM ufm) i
@@ -614,9 +605,9 @@ genMachCode dflags cmm_top
 
 fixAssignsTop :: RawCmmTop -> UniqSM RawCmmTop
 fixAssignsTop top@(CmmData _ _) = returnUs top
-fixAssignsTop (CmmProc info lbl params blocks) =
+fixAssignsTop (CmmProc info lbl params (ListGraph blocks)) =
   mapUs fixAssignsBlock blocks `thenUs` \ blocks' ->
-  returnUs (CmmProc info lbl params blocks')
+  returnUs (CmmProc info lbl params (ListGraph blocks'))
 
 fixAssignsBlock :: CmmBasicBlock -> UniqSM CmmBasicBlock
 fixAssignsBlock (BasicBlock id stmts) =
@@ -671,9 +662,9 @@ Ideas for other things we could do (ToDo):
 
 cmmToCmm :: DynFlags -> RawCmmTop -> (RawCmmTop, [CLabel])
 cmmToCmm _ top@(CmmData _ _) = (top, [])
-cmmToCmm dflags (CmmProc info lbl params blocks) = runCmmOpt dflags $ do
+cmmToCmm dflags (CmmProc info lbl params (ListGraph blocks)) = runCmmOpt dflags $ do
   blocks' <- mapM cmmBlockConFold (cmmMiniInline blocks)
-  return $ CmmProc info lbl params blocks'
+  return $ CmmProc info lbl params (ListGraph blocks')
 
 newtype CmmOptM a = CmmOptM (([CLabel], DynFlags) -> (# a, [CLabel] #))
@@ -724,9 +715,9 @@ cmmStmtConFold stmt
                e' <- cmmExprConFold CallReference e
                return $ CmmCallee e' conv
            other -> return other
-        args' <- mapM (\(arg, hint) -> do
+        args' <- mapM (\(CmmHinted arg hint) -> do
                 arg' <- cmmExprConFold DataReference arg
-                return (arg', hint)) args
+                return (CmmHinted arg' hint)) args
        return $ CmmCall target' regs args' srt returns
 
    CmmCondBranch test dest
@@ -767,23 +758,27 @@ cmmExprConFold referenceKind expr
    -> do dflags <- getDynFlagsCmmOpt
          dynRef <- cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
-         return $ cmmMachOpFold (MO_Add wordRep) [
+         return $ cmmMachOpFold (MO_Add wordWidth) [
             dynRef,
-            (CmmLit $ CmmInt (fromIntegral off) wordRep)
+            (CmmLit $ CmmInt (fromIntegral off) wordWidth)
           ]
 
 #if powerpc_TARGET_ARCH
    -- On powerpc (non-PIC), it's easier to jump directly to a label than
   -- to use the register table, so we replace these registers
   -- with the corresponding labels:
+   CmmReg (CmmGlobal EagerBlackholeInfo)
+     | not opt_PIC
+     -> cmmExprConFold referenceKind $
+        CmmLit (CmmLabel (mkRtsCodeLabel (sLit "__stg_EAGER_BLACKHOLE_INFO")))
    CmmReg (CmmGlobal GCEnter1)
      | not opt_PIC
      -> cmmExprConFold referenceKind $
-        CmmLit (CmmLabel (mkRtsCodeLabel SLIT( "__stg_gc_enter_1")))
+        CmmLit (CmmLabel (mkRtsCodeLabel (sLit "__stg_gc_enter_1")))
    CmmReg (CmmGlobal GCFun)
      | not opt_PIC
      -> cmmExprConFold referenceKind $
-        CmmLit (CmmLabel (mkRtsCodeLabel SLIT( "__stg_gc_fun")))
+        CmmLit (CmmLabel (mkRtsCodeLabel (sLit "__stg_gc_fun")))
 #endif
 
    CmmReg (CmmGlobal mid)
@@ -799,7 +794,7 @@ cmmExprConFold referenceKind expr
      -> case mid of
           BaseReg -> cmmExprConFold DataReference baseRegAddr
           other -> cmmExprConFold DataReference
-               (CmmLoad baseRegAddr (globalRegRep mid))
+               (CmmLoad baseRegAddr (globalRegType mid))
      -- eliminate zero offsets
    CmmRegOff reg 0
      -> cmmExprConFold referenceKind (CmmReg reg)
@@ -811,10 +806,10 @@ cmmExprConFold referenceKind expr
      -> case get_GlobalReg_reg_or_addr mid of
          Left realreg -> return expr
          Right baseRegAddr
-            -> cmmExprConFold DataReference (CmmMachOp (MO_Add wordRep) [
+            -> cmmExprConFold DataReference (CmmMachOp (MO_Add wordWidth) [
                CmmReg (CmmGlobal mid),
               CmmLit (CmmInt (fromIntegral offset)
-                      wordRep)])
+                      wordWidth)])
 
    other
      -> return other
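

-- ---------------------------------------------------------------------------
-- Editor's sketch, not part of the patch above: a minimal, standalone
-- illustration of the layout heuristic described in the comment above
-- sequenceBlocks in this diff: build a graph with an edge from a block to the
-- block it jumps to, then greedily place each jump target straight after its
-- source so the jump can become a fall-through.  Block, BlockId, outEdge and
-- layout are invented stand-ins for this example; the real code works on
-- NatBasicBlocks and first sorts the graph into SCCs (see sccBlocks above).

module LayoutSketch where

import Data.List (partition)

type BlockId = Int

data Block = Block
    { blockId :: BlockId
    , outEdge :: Maybe BlockId   -- the block this one jumps to, if any
    }

-- Emit the first block, then pull the target of its out edge to the front of
-- the remaining list, so that block is laid out immediately afterwards.
layout :: [Block] -> [Block]
layout []         = []
layout (b : rest) = b : layout (pull (outEdge b) rest)
  where
    -- move the named block, if it is still unplaced, to the front of the list
    pull Nothing    bs = bs
    pull (Just dst) bs =
        case partition ((== dst) . blockId) bs of
            ([hit], others) -> hit : others
            _               -> bs

-- Example: layout [Block 1 (Just 3), Block 2 Nothing, Block 3 (Just 2)]
-- places the blocks in the order 1, 3, 2, turning both jumps into
-- fall-throughs.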