-- -----------------------------------------------------------------------------
--
-- (c) The University of Glasgow 1993-2004
--
-- This is the top-level module in the native code generator.
--
-- -----------------------------------------------------------------------------

-- The above warning suppression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and fix
-- any warnings in the module. See
--     http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#Warnings
-- for details

module AsmCodeGen ( nativeCodeGen ) where
#include "HsVersions.h"
#include "nativeGen/NCG.h"

#elif i386_TARGET_ARCH || x86_64_TARGET_ARCH

#elif sparc_TARGET_ARCH
import SPARC.ShortcutJump

#elif powerpc_TARGET_ARCH

#else
#error "AsmCodeGen: unknown architecture"

#endif
import RegAlloc.Liveness
import qualified RegAlloc.Linear.Main           as Linear

import qualified GraphColor                     as Color
import qualified RegAlloc.Graph.Main            as Color
import qualified RegAlloc.Graph.Stats           as Color
import qualified RegAlloc.Graph.Coalesce        as Color
import qualified RegAlloc.Graph.TrivColorable   as Color

import qualified SPARC.CodeGen.Expand           as SPARC

import CmmOpt           ( cmmMiniInline, cmmMachOpFold )
import Unique           ( Unique, getUnique )

#if powerpc_TARGET_ARCH
import StaticFlags      ( opt_Static, opt_PIC )
#endif

import Config           ( cProjectVersion )

import qualified Pretty
The native-code generator has machine-independent and
machine-dependent modules.

This module ("AsmCodeGen") is the top-level machine-independent
module.  Before entering machine-dependent land, we do some
machine-independent optimisations (defined below) on the
'CmmStmts's.

We convert to the machine-specific 'Instr' datatype with
'cmmCodeGen', assuming an infinite supply of registers.  We then use
a machine-independent register allocator ('regAlloc') to rejoin
reality.  Obviously, 'regAlloc' has machine-specific helper
functions (see about "RegAllocInfo" below).

Finally, we order the basic blocks of the function so as to minimise
the number of jumps between blocks, by utilising fallthrough wherever
possible.
The machine-dependent bits break down as follows:

  * ["MachRegs"]  Everything about the target platform's machine
    registers (and immediate operands, and addresses, which tend to
    intermingle/interact with registers).

  * ["MachInstrs"]  Includes the 'Instr' datatype (possibly should
    have a module of its own), plus a miscellany of other things
    (e.g., 'targetDoubleSize', 'smStablePtrTable', ...)

  * ["MachCodeGen"]  is where 'Cmm' stuff turns into
    machine instructions.

  * ["PprMach"]  'pprInstr' turns an 'Instr' into text (well, really
    a 'Doc').

  * ["RegAllocInfo"]  In the register allocator, we manipulate
    'MRegsState's, which are 'BitSet's, one bit per machine register.
    When we want to say something about a specific machine register
    (e.g., ``it gets clobbered by this instruction''), we set/unset
    its bit.  Obviously, we do this 'BitSet' thing for efficiency
    reasons.

    The 'RegAllocInfo' module collects together the machine-specific
    info needed to do register allocation.

  * ["RegisterAlloc"]  The (machine-independent) register allocator.
-- -----------------------------------------------------------------------------
-- Top-level of the native codegen
nativeCodeGen :: DynFlags -> Handle -> UniqSupply -> [RawCmm] -> IO ()
nativeCodeGen dflags h us cmms
 = do
        let split_cmms  = concat $ map add_split cmms

        -- BufHandle is a performance hack.  We could hide it inside
        -- Pretty if it weren't for the fact that we do lots of little
        -- printDocs here (in order to do codegen in constant space).
        bufh <- newBufHandle h
        (imports, prof) <- cmmNativeGens dflags bufh us split_cmms [] [] 0
        bFlush bufh
        let (native, colorStats, linearStats)
                = unzip3 prof

        -- dump native code
        dumpIfSet_dyn dflags
                Opt_D_dump_asm "Asm code"
                (vcat $ map (docToSDoc . pprNatCmmTop) $ concat native)
        -- dump global NCG stats for graph coloring allocator
        (case concat $ catMaybes colorStats of
          []    -> return ()
          stats -> do
                -- build the global register conflict graph
                let graphGlobal
                        = foldl Color.union Color.initGraph
                        $ [ Color.raGraph stat
                                | stat@Color.RegAllocStatsStart{} <- stats]

                dumpSDoc dflags Opt_D_dump_asm_stats "NCG stats"
                        $ Color.pprStats stats graphGlobal

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_conflicts "Register conflict graph"
                        $ Color.dotGraph
                                targetRegDotColor
                                (Color.trivColorable
                                        targetVirtualRegSqueeze
                                        targetRealRegSqueeze)
                        $ graphGlobal)
        -- dump global NCG stats for linear allocator
        (case concat $ catMaybes linearStats of
          []    -> return ()
          stats -> dumpSDoc dflags Opt_D_dump_asm_stats "NCG stats"
                        $ Linear.pprStats (concat native) stats)

        -- write out the imports
        Pretty.printDoc Pretty.LeftMode h
                $ makeImportsDoc dflags (concat imports)

        return  ()
 where  add_split (Cmm tops)
                | dopt Opt_SplitObjs dflags = split_marker : tops
                | otherwise                 = tops

        split_marker = CmmProc [] mkSplitMarkerLabel [] (ListGraph [])
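
-- A note on the split marker (added commentary): it is an empty proc
-- whose only effect is to emit the __stg_split_marker label; with
-- -split-objs the driver later cuts the assembly file into separate
-- object files at those labels.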
-- | Do native code generation on all these cmms.
cmmNativeGens dflags h us [] impAcc profAcc count
        = return (reverse impAcc, reverse profAcc)
cmmNativeGens dflags h us (cmm : cmms) impAcc profAcc count
 = do
        (us', native, imports, colorStats, linearStats)
                <- cmmNativeGen dflags us cmm count

        Pretty.bufLeftRender h
                $ {-# SCC "pprNativeCode" #-} Pretty.vcat $ map pprNatCmmTop native

        -- carefully evaluate this strictly.  Binding it with 'let'
        -- and then using 'seq' doesn't work, because the let
        -- apparently gets inlined first.
        lsPprNative <- return $!
                if  dopt Opt_D_dump_asm dflags
                 || dopt Opt_D_dump_asm_stats dflags
                        then native
                        else []

        count' <- return $! count + 1

        -- force evaluation of all this stuff to avoid space leaks
        seqString (showSDoc $ vcat $ map ppr imports) `seq` return ()

        cmmNativeGens dflags h us' cmms
                        (imports : impAcc)
                        ((lsPprNative, colorStats, linearStats) : profAcc)
                        count'
 where  seqString []     = ()
        seqString (x:xs) = x `seq` seqString xs `seq` ()
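
-- A minimal illustration of the forcing idiom used above (example only,
-- not called anywhere): 'return $!' forces the value when the action is
-- run, whereas a plain 'let' binding would build a lazy thunk that
-- retains its inputs.
seqExample :: [Int] -> IO Int
seqExample xs
 = do   len <- return $! length xs      -- 'length xs' evaluated right here
        return len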
-- | Complete native code generation phase for a single top-level chunk of Cmm.
--      Dumping the output of each stage along the way.
--      Global conflict graph and NCG stats
cmmNativeGen
        :: DynFlags
        -> UniqSupply
        -> RawCmmTop                            -- ^ the cmm to generate code for
        -> Int                                  -- ^ sequence number of this top thing
        -> IO   ( UniqSupply
                , [NatCmmTop Instr]                     -- native code
                , [CLabel]                              -- things imported by this cmm
                , Maybe [Color.RegAllocStats Instr]     -- stats for the coloring register allocator
                , Maybe [Linear.RegAllocStats])         -- stats for the linear register allocators
cmmNativeGen dflags us cmm count
 = do
        -- rewrite assignments to global regs
        let (fixed_cmm, usFix)  =
                {-# SCC "fixAssignsTop" #-}
                initUs us $ fixAssignsTop cmm
        -- cmm to cmm optimisations
        let (opt_cmm, imports) =
                {-# SCC "cmmToCmm" #-}
                cmmToCmm dflags fixed_cmm

        dumpIfSet_dyn dflags
                Opt_D_dump_opt_cmm "Optimised Cmm"
                (pprCmm $ Cmm [opt_cmm])
        -- generate native code from cmm
        let ((native, lastMinuteImports), usGen) =
                {-# SCC "genMachCode" #-}
                initUs usFix $ genMachCode dflags opt_cmm

        dumpIfSet_dyn dflags
                Opt_D_dump_asm_native "Native code"
                (vcat $ map (docToSDoc . pprNatCmmTop) native)
        -- tag instructions with register liveness information
        let (withLiveness, usLive) =
                {-# SCC "regLiveness" #-}
                initUs usGen $ mapUs regLiveness native

        dumpIfSet_dyn dflags
                Opt_D_dump_asm_liveness "Liveness annotations added"
                (vcat $ map ppr withLiveness)
        -- allocate registers
        (alloced, usAlloc, ppr_raStatsColor, ppr_raStatsLinear) <-
         if ( dopt Opt_RegsGraph dflags
           || dopt Opt_RegsIterative dflags)
          then do
                -- the regs usable for allocation
                let (alloc_regs :: UniqFM (UniqSet RealReg))
                        = foldr (\r -> plusUFM_C unionUniqSets
                                        $ unitUFM (targetClassOfRealReg r) (unitUniqSet r))
                                emptyUFM
                        $ allocatableRegs

                -- do the graph coloring register allocation
                let ((alloced, regAllocStats), usAlloc)
                        = {-# SCC "RegAlloc" #-}
                          initUs usLive
                          $ Color.regAlloc
                                dflags
                                alloc_regs
                                (mkUniqSet [0..maxSpillSlots])
                                withLiveness
                -- dump out what happened during register allocation
                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc "Registers allocated"
                        (vcat $ map (docToSDoc . pprNatCmmTop) alloced)

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc_stages "Build/spill stages"
                        (vcat   $ map (\(stage, stats)
                                        -> text "# --------------------------"
                                        $$ text "# cmm " <> int count <> text " Stage " <> int stage
                                        $$ ppr stats)
                                $ zip [0..] regAllocStats)
                let mPprStats =
                        if dopt Opt_D_dump_asm_stats dflags
                         then Just regAllocStats else Nothing

                -- force evaluation of the Maybe to avoid space leak
                mPprStats `seq` return ()

                return  ( alloced, usAlloc
                        , mPprStats
                        , Nothing)

          else do
                -- do linear register allocation
                let ((alloced, regAllocStats), usAlloc)
                        = {-# SCC "RegAlloc" #-}
                          initUs usLive
                          $ liftM unzip
                          $ mapUs Linear.regAlloc withLiveness

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc "Registers allocated"
                        (vcat $ map (docToSDoc . pprNatCmmTop) alloced)

                let mPprStats =
                        if dopt Opt_D_dump_asm_stats dflags
                         then Just (catMaybes regAllocStats) else Nothing

                -- force evaluation of the Maybe to avoid space leak
                mPprStats `seq` return ()

                return  ( alloced, usAlloc
                        , Nothing
                        , mPprStats)
        ---- shortcut branches
        let shorted     =
                {-# SCC "shortcutBranches" #-}
                shortcutBranches dflags alloced

        ---- sequence blocks
        let sequenced   =
                {-# SCC "sequenceBlocks" #-}
                map sequenceTop shorted

        ---- x86fp_kludge
#if i386_TARGET_ARCH
        let kludged =
                {-# SCC "x86fp_kludge" #-}
                map x86fp_kludge sequenced
#else
        let kludged = sequenced
#endif
        ---- expansion of SPARC synthetic instrs
#if sparc_TARGET_ARCH
        let expanded =
                {-# SCC "sparc_expand" #-}
                map SPARC.expandTop kludged

        dumpIfSet_dyn dflags
                Opt_D_dump_asm_expanded "Synthetic instructions expanded"
                (vcat $ map (docToSDoc . pprNatCmmTop) expanded)
#else
        let expanded = kludged
#endif

        return  ( usAlloc
                , expanded
                , lastMinuteImports ++ imports
                , ppr_raStatsColor
                , ppr_raStatsLinear)
#if i386_TARGET_ARCH
x86fp_kludge :: NatCmmTop Instr -> NatCmmTop Instr
x86fp_kludge top@(CmmData _ _) = top
x86fp_kludge (CmmProc info lbl params (ListGraph code)) =
        CmmProc info lbl params (ListGraph $ i386_insert_ffrees code)
#endif
-- | Build a doc for all the imports.
makeImportsDoc :: DynFlags -> [CLabel] -> Pretty.Doc
makeImportsDoc dflags imports
 = dyld_stubs imports

#if HAVE_SUBSECTIONS_VIA_SYMBOLS
            -- On recent versions of Darwin, the linker supports
            -- dead-stripping of code and data on a per-symbol basis.
            -- There's a hack to make this work in PprMach.pprNatCmmTop.
            Pretty.$$ Pretty.text ".subsections_via_symbols"
#endif
#if HAVE_GNU_NONEXEC_STACK
            -- On recent GNU ELF systems one can mark an object file
            -- as not requiring an executable stack.  If all objects
            -- linked into a program have this note then the program
            -- will not use an executable stack, which is good for
            -- security.  GHC generated code does not need an executable
            -- stack so add the note in:
            Pretty.$$ Pretty.text ".section .note.GNU-stack,\"\",@progbits"
#endif
#if !defined(darwin_TARGET_OS)
            -- And just because every other compiler does, let's stick in
            -- an identifier directive: .ident "GHC x.y.z"
            Pretty.$$ let compilerIdent = Pretty.text "GHC" Pretty.<+>
                                          Pretty.text cProjectVersion
                       in Pretty.text ".ident" Pretty.<+>
                          Pretty.doubleQuotes compilerIdent
#endif

 where
        -- Generate "symbol stubs" for all external symbols that might
        -- come from a dynamic library.
        dyld_stubs :: [CLabel] -> Pretty.Doc
{-      dyld_stubs imps = Pretty.vcat $ map pprDyldSymbolStub $
                                    map head $ group $ sort imps -}
        arch = platformArch $ targetPlatform dflags
        os   = platformOS   $ targetPlatform dflags

        -- (Hack) sometimes two Labels pretty-print the same, but have
        -- different uniques; so we compare their text versions...
        dyld_stubs imps
                | needImportedSymbols arch os
                = Pretty.vcat $
                        (pprGotDeclaration arch os :) $
                        map ( pprImportedSymbol arch os . fst . head) $
                        groupBy (\(_,a) (_,b) -> a == b) $
                        sortBy  (\(_,a) (_,b) -> compare a b) $
                        map doPpr $
                        imps
                | otherwise
                = Pretty.empty

        doPpr lbl = (lbl, Pretty.render $ pprCLabel lbl astyle)
        astyle = mkCodeStyle AsmStyle
-- -----------------------------------------------------------------------------
-- Sequencing the basic blocks

-- Cmm BasicBlocks are self-contained entities: they always end in a
-- jump, either non-local or to another basic block in the same proc.
-- In this phase, we attempt to place the basic blocks in a sequence
-- such that as many of the local jumps as possible turn into
-- fallthroughs.
sequenceTop :: NatCmmTop Instr -> NatCmmTop Instr
sequenceTop top@(CmmData _ _) = top
sequenceTop (CmmProc info lbl params (ListGraph blocks)) =
        CmmProc info lbl params (ListGraph $ makeFarBranches $ sequenceBlocks blocks)
-- The algorithm is very simple (and stupid): we make a graph out of
-- the blocks where there is an edge from one block to another iff the
-- first block ends by jumping to the second.  Then we topologically
-- sort this graph.  Then traverse the list: for each block, we first
-- output the block, then if it has an out edge, we move the
-- destination of the out edge to the front of the list, and continue.

-- FYI, the classic layout for basic blocks uses postorder DFS; this
-- algorithm is implemented in cmm/ZipCfg.hs (NR 6 Sep 2007).
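
-- A toy rendering of that traversal (illustrative only; 'ToyBlock' and
-- 'toySequence' are hypothetical names, not used by the compiler).  Each
-- block carries its id and the id it jumps to, if any; after placing a
-- block we pull its jump target to the front so the jump can become a
-- fallthrough.  E.g. blocks [1->3, 2, 3->2] come out as [1, 3, 2].
data ToyBlock = ToyBlock { toyId :: Int, toyJump :: Maybe Int }

toySequence :: [ToyBlock] -> [ToyBlock]
toySequence []     = []
toySequence (b:bs)
 = case toyJump b of
        Just t  | (before, target:after) <- break ((== t) . toyId) bs
                -> b : toySequence (target : before ++ after)
        _       -> b : toySequence bs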
sequenceBlocks
        :: Instruction instr
        => [NatBasicBlock instr]
        -> [NatBasicBlock instr]

sequenceBlocks [] = []
sequenceBlocks (entry:blocks) =
  seqBlocks (mkNode entry : reverse (flattenSCCs (sccBlocks blocks)))
  -- the first block is the entry point ==> it must remain at the start.
sccBlocks
        :: Instruction instr
        => [NatBasicBlock instr]
        -> [SCC ( NatBasicBlock instr
                , Unique
                , [Unique])]

sccBlocks blocks = stronglyConnCompFromEdgedVerticesR (map mkNode blocks)
-- we're only interested in the last instruction of
-- the block, and only if it has a single destination.
getOutEdges
        :: Instruction instr
        => [instr] -> [Unique]

getOutEdges instrs
        = case jumpDestsOfInstr (last instrs) of
                [one] -> [getUnique one]
                _many -> []

mkNode block@(BasicBlock id instrs) = (block, getUnique id, getOutEdges instrs)
seqBlocks [] = []
seqBlocks ((block,_,[]) : rest)
  = block : seqBlocks rest
seqBlocks ((block@(BasicBlock id instrs),_,[next]) : rest)
  | can_fallthrough = BasicBlock id (init instrs) : seqBlocks rest'
  | otherwise       = block : seqBlocks rest'
  where
        (can_fallthrough, rest') = reorder next [] rest
        -- TODO: we should do a better job for cycles; try to maximise the
        -- fallthroughs within a loop.
seqBlocks _ = panic "AsmCodegen:seqBlocks"
reorder id accum [] = (False, reverse accum)
reorder id accum (b@(block,id',out) : rest)
  | id == id'  = (True, (block,id,out) : reverse accum ++ rest)
  | otherwise  = reorder id (b:accum) rest
-- -----------------------------------------------------------------------------
-- Making far branches

-- Conditional branches on PowerPC are limited to +-32KB; if our Procs get too
-- big, we have to work around this limitation.

makeFarBranches
        :: [NatBasicBlock Instr]
        -> [NatBasicBlock Instr]
#if powerpc_TARGET_ARCH
makeFarBranches blocks
    | last blockAddresses < nearLimit = blocks
    | otherwise = zipWith handleBlock blockAddresses blocks
    where
        blockAddresses = scanl (+) 0 $ map blockLen blocks
        blockLen (BasicBlock _ instrs) = length instrs

        handleBlock addr (BasicBlock id instrs)
                = BasicBlock id (zipWith makeFar [addr..] instrs)

        makeFar addr (BCC ALWAYS tgt) = BCC ALWAYS tgt
        makeFar addr (BCC cond tgt)
            | abs (addr - targetAddr) >= nearLimit
            = BCCFAR cond tgt
            | otherwise
            = BCC cond tgt
            where Just targetAddr = lookupUFM blockAddressMap tgt
        makeFar addr other = other

        nearLimit = 7000 -- 8192 instructions are allowed; let's keep some
                         -- distance, as we have a few pseudo-insns that are
                         -- pretty-printed as multiple instructions,
                         -- and it's just not worth the effort to calculate
                         -- things exactly

        blockAddressMap = listToUFM $ zip (map blockId blocks) blockAddresses
#else
makeFarBranches = id
#endif
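
-- Back-of-the-envelope check of the limit above (added commentary): the
-- PowerPC conditional branch has a signed 16-bit byte displacement, so
-- it reaches +-2^15 bytes; at 4 bytes per instruction that is
-- 2^15 / 4 = 8192 instructions, and nearLimit = 7000 leaves ~1200
-- instructions of slack for pseudo-instructions that expand to several
-- real ones.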
-- -----------------------------------------------------------------------------
-- Shortcut branches

shortcutBranches
        :: DynFlags
        -> [NatCmmTop Instr]
        -> [NatCmmTop Instr]

shortcutBranches dflags tops
  | optLevel dflags < 1 = tops    -- only with -O or higher
  | otherwise           = map (apply_mapping mapping) tops'
  where
    (tops', mappings) = mapAndUnzip build_mapping tops
    mapping = foldr plusUFM emptyUFM mappings
    build_mapping top@(CmmData _ _) = (top, emptyUFM)
    build_mapping (CmmProc info lbl params (ListGraph []))
      = (CmmProc info lbl params (ListGraph []), emptyUFM)
    build_mapping (CmmProc info lbl params (ListGraph (head:blocks)))
      = (CmmProc info lbl params (ListGraph (head:others)), mapping)
        -- drop the shorted blocks, but don't ever drop the first one,
        -- because it is pointed to by a global label.
      where
        -- find all the blocks that just consist of a jump that can be
        -- shorted.
        -- Don't completely eliminate loops here -- that can leave a dangling jump!
        (_, shortcut_blocks, others) = foldl split (emptyBlockSet, [], []) blocks
        split (s, shortcut_blocks, others) b@(BasicBlock id [insn])
            | Just (DestBlockId dest) <- canShortcut insn,
              (elemBlockSet dest s) || dest == id -- loop checks
            = (s, shortcut_blocks, b : others)
        split (s, shortcut_blocks, others) (BasicBlock id [insn])
            | Just dest <- canShortcut insn
            = (extendBlockSet s id, (id,dest) : shortcut_blocks, others)
        split (s, shortcut_blocks, others) other = (s, shortcut_blocks, other : others)
        -- build a mapping from BlockId to JumpDest for shorting branches
        mapping = foldl add emptyUFM shortcut_blocks
        add ufm (id,dest) = addToUFM ufm id dest
    apply_mapping ufm (CmmData sec statics)
      = CmmData sec (map (shortcutStatic (lookupUFM ufm)) statics)
      -- we need to get the jump tables, so apply the mapping to the entries
      -- of a CmmData too.
    apply_mapping ufm (CmmProc info lbl params (ListGraph blocks))
      = CmmProc info lbl params (ListGraph $ map short_bb blocks)
      where
        short_bb (BasicBlock id insns) = BasicBlock id $! map short_insn insns
        short_insn i = shortcutJump (lookupUFM ufm) i
                -- shortcutJump should apply the mapping repeatedly,
                -- just in case we can short multiple branches.
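
-- A toy rendering of the transitive shortcutting idea (illustrative
-- only; 'toyResolve' is a hypothetical name, and the real work is done
-- by the target's 'canShortcut'/'shortcutJump'): follow the mapping
-- until it runs out, so a chain b -> c -> d collapses to d.  This
-- terminates provided the mapping is acyclic, which build_mapping's
-- loop checks guarantee.
toyResolve :: Eq id => [(id, id)] -> id -> id
toyResolve mapping dest
 = case lookup dest mapping of
        Just dest' -> toyResolve mapping dest'
        Nothing    -> dest

-- e.g. toyResolve [(2,3),(3,4)] 2 == 4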
-- -----------------------------------------------------------------------------
-- Instruction selection

-- Native code instruction selection for a chunk of stix code.  For
-- this part of the computation, we switch from the UniqSM monad to
-- the NatM monad.  The latter carries not only a Unique, but also an
-- Int denoting the current C stack pointer offset in the generated
-- code; this is needed for creating correct spill offsets on
-- architectures which don't offer, or for which it would be
-- prohibitively expensive to employ, a frame pointer register.  Viz,
-- x86.

-- The offset is measured in bytes, and indicates the difference
-- between the current (simulated) C stack-ptr and the value it was at
-- the beginning of the block.  For stacks which grow down, this value
-- should be either zero or negative.
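
-- For intuition (added commentary, x86 syntax): if the selected code
-- adjusts the C stack with
--
--      subl $12, %esp          -- delta becomes -12
--
-- then, until %esp is restored, a spill slot at logical offset 0 has to
-- be addressed as 12(%esp); the delta carried by NatM is exactly this
-- correction term.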
-- Switching between the two monads whilst carrying along the same
-- Unique supply breaks abstraction.  Is that bad?
genMachCode
        :: DynFlags
        -> RawCmmTop
        -> UniqSM ( [NatCmmTop Instr], [CLabel] )

genMachCode dflags cmm_top
  = do  { initial_us <- getUs
        ; let initial_st           = mkNatM_State initial_us 0 dflags
              (new_tops, final_st) = initNat initial_st (cmmTopCodeGen dflags cmm_top)
              final_delta          = natm_delta final_st
              final_imports        = natm_imports final_st
        ; if   final_delta == 0
          then return (new_tops, final_imports)
          else pprPanic "genMachCode: nonzero final delta" (int final_delta)
        }
-- -----------------------------------------------------------------------------
-- Fixup assignments to global registers so that they assign to
-- locations within the RegTable, if appropriate.

-- Note that we currently don't fixup reads here: they're done by
-- the generic optimiser below, to avoid having two separate passes
-- over the Cmm.
fixAssignsTop :: RawCmmTop -> UniqSM RawCmmTop
fixAssignsTop top@(CmmData _ _) = returnUs top
fixAssignsTop (CmmProc info lbl params (ListGraph blocks)) =
  mapUs fixAssignsBlock blocks `thenUs` \ blocks' ->
  returnUs (CmmProc info lbl params (ListGraph blocks'))

fixAssignsBlock :: CmmBasicBlock -> UniqSM CmmBasicBlock
fixAssignsBlock (BasicBlock id stmts) =
  fixAssigns stmts `thenUs` \ stmts' ->
  returnUs (BasicBlock id stmts')
fixAssigns :: [CmmStmt] -> UniqSM [CmmStmt]
fixAssigns stmts =
  mapUs fixAssign stmts `thenUs` \ stmtss ->
  returnUs (concat stmtss)
fixAssign :: CmmStmt -> UniqSM [CmmStmt]
fixAssign (CmmAssign (CmmGlobal reg) src)
  | Left  realreg     <- reg_or_addr
  = returnUs [CmmAssign (CmmGlobal reg) src]
  | Right baseRegAddr <- reg_or_addr
  = returnUs [CmmStore baseRegAddr src]
        -- Replace register leaves with appropriate StixTrees for
        -- the given target.  GlobalRegs which map to a reg on this
        -- arch are left unchanged.  Assigning to BaseReg is always
        -- illegal, so we check for that.
  where
        reg_or_addr = get_GlobalReg_reg_or_addr reg

fixAssign other_stmt = returnUs [other_stmt]
-- -----------------------------------------------------------------------------
-- Generic Cmm optimiser

{-
Here we do:

  (a) Constant folding
  (b) Simple inlining: a temporary which is assigned to and then
      used, once, can be shorted.
  (c) Replacement of references to GlobalRegs which do not have
      machine registers by the appropriate memory load (eg.
      Hp ==> *(BaseReg + 34) ).
  (d) Position independent code and dynamic linking
        (i)  introduce the appropriate indirections
             and position independent refs
        (ii) compile a list of imported symbols

Ideas for other things we could do (ToDo):

  - shortcut jumps-to-jumps
  - eliminate dead code blocks
  - simple CSE: if an expr is assigned to a temp, then replace later occs of
    that expr with the temp, until the expr is no longer valid (can push through
    temp assignments, and certain assigns to mem...)
-}
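
-- A concrete instance of (b) above (added commentary, Cmm-ish
-- pseudocode; the real transformation is 'cmmMiniInline' from CmmOpt):
--
--      _tmp = R1 + 8;                 ==>     ...(R1 + 8)...
--      ...(_tmp)...    -- _tmp used exactly once
--
-- which is only valid while nothing between the assignment and the use
-- writes to R1.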
cmmToCmm :: DynFlags -> RawCmmTop -> (RawCmmTop, [CLabel])
cmmToCmm _ top@(CmmData _ _) = (top, [])
cmmToCmm dflags (CmmProc info lbl params (ListGraph blocks)) = runCmmOpt dflags $ do
  blocks' <- mapM cmmBlockConFold (cmmMiniInline blocks)
  return $ CmmProc info lbl params (ListGraph blocks')
newtype CmmOptM a = CmmOptM (([CLabel], DynFlags) -> (# a, [CLabel] #))

instance Monad CmmOptM where
  return x = CmmOptM $ \(imports, _) -> (# x, imports #)
  (CmmOptM f) >>= g =
    CmmOptM $ \(imports, dflags) ->
      case f (imports, dflags) of
        (# x, imports' #) ->
          case g x of
            CmmOptM g' -> g' (imports', dflags)
addImportCmmOpt :: CLabel -> CmmOptM ()
addImportCmmOpt lbl = CmmOptM $ \(imports, dflags) -> (# (), lbl:imports #)

getDynFlagsCmmOpt :: CmmOptM DynFlags
getDynFlagsCmmOpt = CmmOptM $ \(imports, dflags) -> (# dflags, imports #)

runCmmOpt :: DynFlags -> CmmOptM a -> (a, [CLabel])
runCmmOpt dflags (CmmOptM f) = case f ([], dflags) of
                        (# result, imports #) -> (result, imports)
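
-- Usage sketch (added commentary; 'someLabel' stands in for a real
-- CLabel):
--
--      let (x, imps) = runCmmOpt dflags $ do
--              addImportCmmOpt someLabel
--              return 42
--
-- yields x == 42 and imps == [someLabel]; this is how cmmToCmm collects
-- the import list it returns alongside the optimised proc.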
cmmBlockConFold :: CmmBasicBlock -> CmmOptM CmmBasicBlock
cmmBlockConFold (BasicBlock id stmts) = do
  stmts' <- mapM cmmStmtConFold stmts
  return $ BasicBlock id stmts'
cmmStmtConFold stmt
   = case stmt of
        CmmAssign reg src
           -> do src' <- cmmExprConFold DataReference src
                 return $ case src' of
                   CmmReg reg' | reg == reg' -> CmmNop
                   new_src -> CmmAssign reg new_src

        CmmStore addr src
           -> do addr' <- cmmExprConFold DataReference addr
                 src'  <- cmmExprConFold DataReference src
                 return $ CmmStore addr' src'

        CmmJump addr regs
           -> do addr' <- cmmExprConFold JumpReference addr
                 return $ CmmJump addr' regs
        CmmCall target regs args srt returns
           -> do target' <- case target of
                              CmmCallee e conv -> do
                                e' <- cmmExprConFold CallReference e
                                return $ CmmCallee e' conv
                              other -> return other
                 args' <- mapM (\(CmmHinted arg hint) -> do
                                  arg' <- cmmExprConFold DataReference arg
                                  return (CmmHinted arg' hint)) args
                 return $ CmmCall target' regs args' srt returns
        CmmCondBranch test dest
           -> do test' <- cmmExprConFold DataReference test
                 return $ case test' of
                   CmmLit (CmmInt 0 _) ->
                     CmmComment (mkFastString ("deleted: " ++
                                        showSDoc (pprStmt stmt)))

                   CmmLit (CmmInt n _) -> CmmBranch dest
                   other -> CmmCondBranch test' dest

        CmmSwitch expr ids
           -> do expr' <- cmmExprConFold DataReference expr
                 return $ CmmSwitch expr' ids

        other
           -> return other
cmmExprConFold referenceKind expr
   = case expr of
        CmmLoad addr rep
           -> do addr' <- cmmExprConFold DataReference addr
                 return $ CmmLoad addr' rep

        CmmMachOp mop args
           -- For MachOps, we first optimize the children, and then we try
           -- our hand at some constant-folding.
           -> do args' <- mapM (cmmExprConFold DataReference) args
                 return $ cmmMachOpFold mop args'
        CmmLit (CmmLabel lbl)
           -> do
                dflags <- getDynFlagsCmmOpt
                cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
        CmmLit (CmmLabelOff lbl off)
           -> do
                dflags <- getDynFlagsCmmOpt
                dynRef <- cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
                return $ cmmMachOpFold (MO_Add wordWidth) [
                    dynRef,
                    (CmmLit $ CmmInt (fromIntegral off) wordWidth)
                  ]
#if powerpc_TARGET_ARCH
        -- On powerpc (non-PIC), it's easier to jump directly to a label than
        -- to use the register table, so we replace these registers
        -- with the corresponding labels:
        CmmReg (CmmGlobal EagerBlackholeInfo)
          | not opt_PIC
          -> cmmExprConFold referenceKind $
             CmmLit (CmmLabel (mkRtsCodeLabel (sLit "__stg_EAGER_BLACKHOLE_INFO")))
        CmmReg (CmmGlobal GCEnter1)
          | not opt_PIC
          -> cmmExprConFold referenceKind $
             CmmLit (CmmLabel (mkRtsCodeLabel (sLit "__stg_gc_enter_1")))
        CmmReg (CmmGlobal GCFun)
          | not opt_PIC
          -> cmmExprConFold referenceKind $
             CmmLit (CmmLabel (mkRtsCodeLabel (sLit "__stg_gc_fun")))
#endif
        CmmReg (CmmGlobal mid)
           -- Replace register leaves with appropriate StixTrees for
           -- the given target.  MagicIds which map to a reg on this
           -- arch are left unchanged.  For the rest, BaseReg is taken
           -- to mean the address of the reg table in MainCapability,
           -- and for all others we generate an indirection to its
           -- location in the register table.
           -> case get_GlobalReg_reg_or_addr mid of
                Left  realreg -> return expr
                Right baseRegAddr
                   -> case mid of
                        BaseReg -> cmmExprConFold DataReference baseRegAddr
                        other   -> cmmExprConFold DataReference
                                        (CmmLoad baseRegAddr (globalRegType mid))
        -- eliminate zero offsets
        CmmRegOff reg 0
           -> cmmExprConFold referenceKind (CmmReg reg)

        CmmRegOff (CmmGlobal mid) offset
           -- RegOff leaves are just a shorthand form.  If the reg maps
           -- to a real reg, we keep the shorthand, otherwise, we just
           -- expand it and defer to the above code.
           -> case get_GlobalReg_reg_or_addr mid of
                Left  realreg -> return expr
                Right baseRegAddr
                   -> cmmExprConFold DataReference (CmmMachOp (MO_Add wordWidth) [
                        CmmReg (CmmGlobal mid),
                        CmmLit (CmmInt (fromIntegral offset)
                                       wordWidth)])

        other
           -> return other
-- -----------------------------------------------------------------------------