+ -- Insert a split marker before the top-level decls when C splitting
+ -- is requested (opt_EnsureSplittableC); otherwise pass them through.
+ add_split (Cmm tops)
+ | opt_EnsureSplittableC = split_marker : tops
+ | otherwise = tops
+
+ -- An empty proc whose label marks a module-split point.
+ split_marker = CmmProc [] mkSplitMarkerLabel [] []
+
+#if darwin_TARGET_OS || (powerpc_TARGET_ARCH && linux_TARGET_OS)
+ -- Generate "symbol stubs" for all external symbols that might
+ -- come from a dynamic library.
+{- dyld_stubs imps = Pretty.vcat $ map pprDyldSymbolStub $
+ map head $ group $ sort imps-}
+
+ -- (Hack) sometimes two Labels pretty-print the same, but have
+ -- different uniques; so we compare their text versions...
+ -- NOTE(review): dedup is therefore on rendered text, not on the
+ -- CLabel itself; one stub (plus the GOT declaration) per distinct
+ -- printed name.
+ dyld_stubs imps
+ | needImportedSymbols
+ = Pretty.vcat $
+ (pprGotDeclaration :) $
+ map (pprImportedSymbol . fst . head) $
+ groupBy (\(_,a) (_,b) -> a == b) $
+ sortBy (\(_,a) (_,b) -> compare a b) $
+ map doPpr $
+ imps
+ | otherwise
+ = Pretty.empty
+
+ -- Pair each label with its rendered assembly-style text for the
+ -- sort/group above.
+ where doPpr lbl = (lbl, Pretty.render $ pprCLabel lbl astyle)
+ astyle = mkCodeStyle AsmStyle
+#else
+ -- Non-Darwin / non-ppc-Linux targets need no symbol stubs.
+ dyld_stubs imps = Pretty.empty
+#endif
+
+#ifndef NCG_DEBUG
+ my_vcat sds = Pretty.vcat sds
+#else
+ -- Debug build: interleave a visible marker comment between the
+ -- docs so NCG output sections can be spotted in the .s file.
+ my_vcat sds = Pretty.vcat (
+ intersperse (
+ Pretty.char ' '
+ Pretty.$$ Pretty.ptext SLIT("# ___ncg_debug_marker")
+ Pretty.$$ Pretty.char ' '
+ )
+ sds
+ )
+#endif
+
+
+-- Complete native code generation phase for a single top-level chunk
+-- of Cmm.
+
+-- | Complete native code generation for one top-level Cmm chunk:
+-- fix assignments, run the generic Cmm optimiser, select instructions,
+-- allocate registers, sequence blocks, apply the x86 FP kludge, and
+-- pretty-print. Returns the (possibly emptied) Cmm for -ddump-opt-cmm,
+-- the rendered assembly Doc, and all imported labels encountered.
+-- Note the two monads: `thenUs` sequences in UniqSM, while `bind` is
+-- just pure application used to name intermediate results.
+cmmNativeGen :: DynFlags -> CmmTop -> UniqSM (CmmTop, Pretty.Doc, [CLabel])
+cmmNativeGen dflags cmm
+ = {-# SCC "fixAssigns" #-}
+ fixAssignsTop cmm `thenUs` \ fixed_cmm ->
+ {-# SCC "genericOpt" #-}
+ cmmToCmm fixed_cmm `bind` \ (cmm, imports) ->
+ -- Only keep the optimised Cmm alive if it will actually be dumped;
+ -- otherwise substitute an empty data decl so it can be GC'd.
+ (if dopt Opt_D_dump_opt_cmm dflags -- space leak avoidance
+ then cmm
+ else CmmData Text []) `bind` \ ppr_cmm ->
+ {-# SCC "genMachCode" #-}
+ genMachCode cmm `thenUs` \ (pre_regalloc, lastMinuteImports) ->
+ {-# SCC "regAlloc" #-}
+ map regAlloc pre_regalloc `bind` \ with_regs ->
+ {-# SCC "sequenceBlocks" #-}
+ map sequenceTop with_regs `bind` \ sequenced ->
+ {-# SCC "x86fp_kludge" #-}
+ map x86fp_kludge sequenced `bind` \ final_mach_code ->
+ {-# SCC "vcat" #-}
+ Pretty.vcat (map pprNatCmmTop final_mach_code) `bind` \ final_sdoc ->
+
+ -- Trailing empty text forces a final newline after the chunk.
+ returnUs (ppr_cmm, final_sdoc Pretty.$$ Pretty.text "", lastMinuteImports ++ imports)
+ where
+ -- On i386, insert ffree instructions to clear the x87 FP stack
+ -- where needed; a no-op on every other architecture.
+ x86fp_kludge :: NatCmmTop -> NatCmmTop
+ x86fp_kludge top@(CmmData _ _) = top
+#if i386_TARGET_ARCH
+ x86fp_kludge top@(CmmProc info lbl params code) =
+ CmmProc info lbl params (map bb_i386_insert_ffrees code)
+ where
+ bb_i386_insert_ffrees (BasicBlock id instrs) =
+ BasicBlock id (i386_insert_ffrees instrs)
+#else
+ x86fp_kludge top = top
+#endif
+
+-- -----------------------------------------------------------------------------
+-- Sequencing the basic blocks
+
+-- Cmm BasicBlocks are self-contained entities: they always end in a
+-- jump, either non-local or to another basic block in the same proc.
+-- In this phase, we attempt to place the basic blocks in a sequence
+-- such that as many of the local jumps as possible turn into
+-- fallthroughs.
+
+-- | Reorder a proc's basic blocks to maximise fallthroughs;
+-- data sections pass through untouched.
+sequenceTop :: NatCmmTop -> NatCmmTop
+sequenceTop top = case top of
+ CmmData _ _ -> top
+ CmmProc info lbl params bbs -> CmmProc info lbl params (sequenceBlocks bbs)
+
+-- The algorithm is very simple (and stupid): we make a graph out of
+-- the blocks where there is an edge from one block to another iff the
+-- first block ends by jumping to the second. Then we topologically
+-- sort this graph. Then traverse the list: for each block, we first
+-- output the block, then if it has an out edge, we move the
+-- destination of the out edge to the front of the list, and continue.
+
+-- | Place the blocks so local jumps become fallthroughs where possible.
+sequenceBlocks :: [NatBasicBlock] -> [NatBasicBlock]
+sequenceBlocks [] = []
+sequenceBlocks (entry : rest) = seqBlocks (mkNode entry : others)
+ where
+ -- the first block is the entry point ==> it must remain at the start;
+ -- the remaining blocks are taken in reversed SCC order.
+ others = reverse (flattenSCCs (sccBlocks rest))
+
+-- | Build the jump graph over the blocks and split it into
+-- strongly connected components.
+sccBlocks :: [NatBasicBlock] -> [SCC (NatBasicBlock,Unique,[Unique])]
+sccBlocks = stronglyConnCompR . map mkNode
+
+-- | The out edge of a basic block, derived from its final instruction.
+-- We're only interested in the last instruction of the block, and only
+-- if it has a single destination; multi-way jumps yield no edge.
+getOutEdges :: [Instr] -> [Unique]
+getOutEdges [] = [] -- defensive: previously `last []` would have crashed here
+getOutEdges instrs = case jumpDests (last instrs) [] of
+ [one] -> [getUnique one]
+ _many -> []
+
+-- | Graph node for the SCC computation: the block itself, keyed by its
+-- label's unique, together with its out edges. (Signature added for
+-- consistency with the other top-level bindings; it matches the tuple
+-- type in sccBlocks' signature.)
+mkNode :: NatBasicBlock -> (NatBasicBlock, Unique, [Unique])
+mkNode block@(BasicBlock id instrs) = (block, getUnique id, getOutEdges instrs)
+
+-- Emit blocks in order; when a block's single jump target can be
+-- pulled to the front of the remaining list, drop the jump (the
+-- `init instrs`) so control falls through instead.
+seqBlocks [] = []
+seqBlocks ((block,_,[]) : rest)
+ = block : seqBlocks rest
+seqBlocks ((block@(BasicBlock id instrs),_,[next]) : rest)
+ -- `init instrs` removes the final jump: safe only because
+ -- can_fallthrough guarantees `next` is placed immediately after.
+ | can_fallthrough = BasicBlock id (init instrs) : seqBlocks rest'
+ | otherwise = block : seqBlocks rest'
+ where
+ (can_fallthrough, rest') = reorder next [] rest
+ -- TODO: we should do a better job for cycles; try to maximise the
+ -- fallthroughs within a loop.
+-- getOutEdges yields at most one edge, so >1 out edges is impossible.
+seqBlocks _ = panic "AsmCodegen:seqBlocks"
+
+-- | Look for the node labelled @want@ in @nodes@; if present, move it
+-- to the front of the remainder (after the already-scanned prefix
+-- @seen@, kept in reverse). The Bool reports whether it was found.
+reorder want seen nodes =
+ case break (\(_, lbl, _) -> want == lbl) nodes of
+ (before, (blk, _, outs) : after)
+ -> (True, (blk, want, outs) : reverse seen ++ before ++ after)
+ (_, [])
+ -> (False, reverse seen ++ nodes)
+
+-- -----------------------------------------------------------------------------
+-- Instruction selection
+
+-- Native code instruction selection for a chunk of stix code. For
+-- this part of the computation, we switch from the UniqSM monad to
+-- the NatM monad. The latter carries not only a Unique, but also an
+-- Int denoting the current C stack pointer offset in the generated
+-- code; this is needed for creating correct spill offsets on
+-- architectures which don't offer, or for which it would be
+-- prohibitively expensive to employ, a frame pointer register. Viz,
+-- x86.
+
+-- The offset is measured in bytes, and indicates the difference
+-- between the current (simulated) C stack-ptr and the value it was at
+-- the beginning of the block. For stacks which grow down, this value
+-- should be either zero or negative.
+
+-- Switching between the two monads whilst carrying along the same
+-- Unique supply breaks abstraction. Is that bad?
+
+-- | Instruction selection for one Cmm chunk. Runs cmmTopCodeGen in the
+-- NatM monad (Unique supply + simulated C stack delta), threading the
+-- UniqSM supply through by hand — note the extra initial_us argument.
+genMachCode :: CmmTop -> UniqSM ([NatCmmTop], [CLabel])
+
+genMachCode cmm_top initial_us
+ = let initial_st = mkNatM_State initial_us 0
+ (new_tops, final_st) = initNat initial_st (cmmTopCodeGen cmm_top)
+ -- unpack the final NatM state: supply, stack delta, imports
+ -- (definition continues beyond this chunk)
+ final_us = natm_us final_st
+ final_delta = natm_delta final_st
+ final_imports = natm_imports final_st