-- -----------------------------------------------------------------------------
--
-- (c) The University of Glasgow 1993-2004
--
-- This is the top-level module in the native code generator.
--
-- -----------------------------------------------------------------------------

module AsmCodeGen ( nativeCodeGen ) where
#include "HsVersions.h"
#include "nativeGen/NCG.h"

#elif i386_TARGET_ARCH || x86_64_TARGET_ARCH

#elif sparc_TARGET_ARCH
import SPARC.CodeGen.Expand
import SPARC.ShortcutJump

#elif powerpc_TARGET_ARCH

#error "AsmCodeGen: unknown architecture"

import RegAlloc.Liveness
import qualified RegAlloc.Linear.Main           as Linear

import qualified GraphColor                     as Color
import qualified RegAlloc.Graph.Main            as Color
import qualified RegAlloc.Graph.Stats           as Color
import qualified RegAlloc.Graph.TrivColorable   as Color

import CgUtils          ( fixStgRegisters )
import CmmOpt           ( cmmMiniInline, cmmMachOpFold )
import Unique           ( Unique, getUnique )
import qualified Pretty
import Distribution.System
The native-code generator has machine-independent and
machine-dependent modules.

This module ("AsmCodeGen") is the top-level machine-independent
module.  Before entering machine-dependent land, we do some
machine-independent optimisations (defined below) on the 'Cmm's.

We convert to the machine-specific 'Instr' datatype with
'cmmCodeGen', assuming an infinite supply of registers.  We then use
a machine-independent register allocator ('regAlloc') to rejoin
reality.  Obviously, 'regAlloc' has machine-specific helper
functions (see the notes about "RegAllocInfo" below).

Finally, we order the basic blocks of the function so as to minimise
the number of jumps between blocks, by utilising fallthrough wherever
possible.
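Schematically, the machine-independent part of the driver amounts to the
composition below (a conceptual sketch only; the real driver also threads
a UniqSupply, dump flags and allocator statistics through each stage):

    codegen = sequenceBlocks    -- order blocks to maximise fallthrough
            . regAlloc          -- map virtual registers onto real ones
            . cmmCodeGen        -- instruction selection (virtual regs)
            . cmmToCmm          -- generic Cmm-to-Cmm optimisations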
The machine-dependent bits break down as follows:

  * ["MachRegs"]  Everything about the target platform's machine
    registers (and immediate operands, and addresses, which tend to
    intermingle/interact with registers).

  * ["MachInstrs"]  Includes the 'Instr' datatype (possibly should
    have a module of its own), plus a miscellany of other things
    (e.g., 'targetDoubleSize', 'smStablePtrTable', ...)

  * ["MachCodeGen"]  is where 'Cmm' stuff turns into
    machine instructions.

  * ["PprMach"]  'pprInstr' turns an 'Instr' into text (well, really
    a 'Doc').

  * ["RegAllocInfo"]  In the register allocator, we manipulate
    'MRegsState's, which are 'BitSet's, one bit per machine register.
    When we want to say something about a specific machine register
    (e.g., ``it gets clobbered by this instruction''), we set/unset
    its bit.  Obviously, we do this 'BitSet' thing for efficiency
    reasons.

    The 'RegAllocInfo' module collects together the machine-specific
    info needed to do register allocation.

  * ["RegisterAlloc"]  The (machine-independent) register allocator.
-- -----------------------------------------------------------------------------
-- Top-level of the native codegen
nativeCodeGen :: DynFlags -> Handle -> UniqSupply -> [RawCmm] -> IO ()
nativeCodeGen dflags h us cmms
 = do
        let split_cmms  = concat $ map add_split cmms

        -- BufHandle is a performance hack.  We could hide it inside
        -- Pretty if it weren't for the fact that we do lots of little
        -- printDocs here (in order to do codegen in constant space).
        bufh <- newBufHandle h
        (imports, prof) <- cmmNativeGens dflags bufh us split_cmms [] [] 0

        let (native, colorStats, linearStats)
                = unzip3 prof

        dumpIfSet_dyn dflags
                Opt_D_dump_asm "Asm code"
                (vcat $ map (docToSDoc . pprNatCmmTop) $ concat native)

        -- dump global NCG stats for graph coloring allocator
        (case concat $ catMaybes colorStats of
          []    -> return ()
          stats -> do
                -- build the global register conflict graph
                let graphGlobal
                        = foldl Color.union Color.initGraph
                        $ [ Color.raGraph stat
                                | stat@Color.RegAllocStatsStart{} <- stats]

                dumpSDoc dflags Opt_D_dump_asm_stats "NCG stats"
                        $ Color.pprStats stats graphGlobal

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_conflicts "Register conflict graph"
                        $ Color.dotGraph
                                targetRegDotColor
                                (Color.trivColorable
                                        targetVirtualRegSqueeze
                                        targetRealRegSqueeze)
                        $ graphGlobal)

        -- dump global NCG stats for linear allocator
        (case concat $ catMaybes linearStats of
                []      -> return ()
                stats   -> dumpSDoc dflags Opt_D_dump_asm_stats "NCG stats"
                                $ Linear.pprStats (concat native) stats)

        -- write out the imports
        Pretty.printDoc Pretty.LeftMode h
                $ makeImportsDoc dflags (concat imports)

 where  add_split (Cmm tops)
                | dopt Opt_SplitObjs dflags = split_marker : tops
                | otherwise                 = tops

        split_marker = CmmProc [] mkSplitMarkerLabel (ListGraph [])
-- | Do native code generation on all these cmms.
--
cmmNativeGens :: DynFlags
              -> BufHandle
              -> UniqSupply
              -> [RawCmmTop]
              -> [[CLabel]]
              -> [ ([NatCmmTop Instr],
                    Maybe [Color.RegAllocStats Instr],
                    Maybe [Linear.RegAllocStats]) ]
              -> Int
              -> IO ( [[CLabel]],
                      [([NatCmmTop Instr],
                        Maybe [Color.RegAllocStats Instr],
                        Maybe [Linear.RegAllocStats])] )

cmmNativeGens _ _ _ [] impAcc profAcc _
        = return (reverse impAcc, reverse profAcc)

cmmNativeGens dflags h us (cmm : cmms) impAcc profAcc count
 = do
        (us', native, imports, colorStats, linearStats)
                <- cmmNativeGen dflags us cmm count

        Pretty.bufLeftRender h
                $ {-# SCC "pprNativeCode" #-} Pretty.vcat $ map pprNatCmmTop native

        -- Carefully evaluate this strictly.  Binding it with 'let'
        -- and then using 'seq' doesn't work, because the let
        -- apparently gets inlined first.
        lsPprNative <- return $!
                if  dopt Opt_D_dump_asm dflags
                 || dopt Opt_D_dump_asm_stats dflags
                        then native
                        else []

        count' <- return $! count + 1

        -- force evaluation of all this stuff to avoid space leaks
        seqString (showSDoc $ vcat $ map ppr imports) `seq` return ()

        cmmNativeGens dflags h us' cmms
                        (imports : impAcc)
                        ((lsPprNative, colorStats, linearStats) : profAcc)
                        count'

 where  seqString []            = ()
        seqString (x:xs)        = x `seq` seqString xs `seq` ()
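-- A standalone illustration of the forcing idiom used above (the names are
-- made up): in IO, binding with ($!) guarantees the value is in WHNF before
-- the next action runs, whereas a 'let' plus 'seq' can be rearranged by the
-- simplifier.
--
-- > import Control.Exception (evaluate)
-- >
-- > forceLength :: [Int] -> IO Int
-- > forceLength xs = do
-- >     n <- return $! length xs      -- forced here, before we continue
-- >     -- equivalently: n <- evaluate (length xs)
-- >     return n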
-- | Complete native code generation phase for a single top-level chunk of Cmm.
--      Dumps the output of each stage along the way.
--      Also collects the global conflict graph and NCG stats.
cmmNativeGen
        :: DynFlags
        -> UniqSupply
        -> RawCmmTop                            -- ^ the cmm to generate code for
        -> Int                                  -- ^ sequence number of this top thing
        -> IO   ( UniqSupply
                , [NatCmmTop Instr]                 -- native code
                , [CLabel]                          -- things imported by this cmm
                , Maybe [Color.RegAllocStats Instr] -- stats for the coloring register allocator
                , Maybe [Linear.RegAllocStats])     -- stats for the linear register allocator
cmmNativeGen dflags us cmm count
 = do
        -- rewrite assignments to global regs
        let fixed_cmm =
                {-# SCC "fixStgRegisters" #-}
                fixStgRegisters cmm

        -- cmm to cmm optimisations
        let (opt_cmm, imports) =
                {-# SCC "cmmToCmm" #-}
                cmmToCmm dflags fixed_cmm

        dumpIfSet_dyn dflags
                Opt_D_dump_opt_cmm "Optimised Cmm"
                (pprCmm $ Cmm [opt_cmm])

        -- generate native code from cmm
        let ((native, lastMinuteImports), usGen) =
                {-# SCC "genMachCode" #-}
                initUs us $ genMachCode dflags opt_cmm

        dumpIfSet_dyn dflags
                Opt_D_dump_asm_native "Native code"
                (vcat $ map (docToSDoc . pprNatCmmTop) native)

        -- tag instructions with register liveness information
        let (withLiveness, usLive) =
                {-# SCC "regLiveness" #-}
                initUs usGen
                        $ mapUs regLiveness
                        $ map natCmmTopToLive native

        dumpIfSet_dyn dflags
                Opt_D_dump_asm_liveness "Liveness annotations added"
                (vcat $ map ppr withLiveness)

        -- allocate registers
        (alloced, usAlloc, ppr_raStatsColor, ppr_raStatsLinear) <-
         if ( dopt Opt_RegsGraph dflags
           || dopt Opt_RegsIterative dflags)
          then do
                -- the regs usable for allocation
                let (alloc_regs :: UniqFM (UniqSet RealReg))
                        = foldr (\r -> plusUFM_C unionUniqSets
                                        $ unitUFM (targetClassOfRealReg r) (unitUniqSet r))
                                emptyUFM
                        $ allocatableRegs

                -- do the graph coloring register allocation
                let ((alloced, regAllocStats), usAlloc)
                        = {-# SCC "RegAlloc" #-}
                          initUs usLive
                          $ Color.regAlloc
                                dflags
                                alloc_regs
                                (mkUniqSet [0..maxSpillSlots])
                                withLiveness

                -- dump out what happened during register allocation
                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc "Registers allocated"
                        (vcat $ map (docToSDoc . pprNatCmmTop) alloced)

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc_stages "Build/spill stages"
                        (vcat   $ map (\(stage, stats)
                                        -> text "# --------------------------"
                                        $$ text "# cmm " <> int count <> text " Stage " <> int stage
                                        $$ ppr stats)
                                $ zip [0..] regAllocStats)

                let mPprStats =
                        if dopt Opt_D_dump_asm_stats dflags
                         then Just regAllocStats else Nothing

                -- force evaluation of the Maybe to avoid space leak
                mPprStats `seq` return ()

                return  ( alloced, usAlloc
                        , mPprStats
                        , Nothing)

          else do
                -- do linear register allocation
                let ((alloced, regAllocStats), usAlloc)
                        = {-# SCC "RegAlloc" #-}
                          initUs usLive
                          $ liftM unzip
                          $ mapUs Linear.regAlloc withLiveness

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc "Registers allocated"
                        (vcat $ map (docToSDoc . pprNatCmmTop) alloced)

                let mPprStats =
                        if dopt Opt_D_dump_asm_stats dflags
                         then Just (catMaybes regAllocStats) else Nothing

                -- force evaluation of the Maybe to avoid space leak
                mPprStats `seq` return ()

                return  ( alloced, usAlloc
                        , Nothing
                        , mPprStats)

        ---- generate jump tables
        let tabled =
                {-# SCC "generateJumpTables" #-}
                alloced ++ generateJumpTables alloced

        ---- shortcut branches
        let shorted =
                {-# SCC "shortcutBranches" #-}
                shortcutBranches dflags tabled

        ---- sequence blocks
        let sequenced =
                {-# SCC "sequenceBlocks" #-}
                map sequenceTop shorted

        ---- x86fp_kludge
        let kludged =
                {-# SCC "x86fp_kludge" #-}
                map x86fp_kludge sequenced

        ---- expansion of SPARC synthetic instrs
#if sparc_TARGET_ARCH
        let expanded =
                {-# SCC "sparc_expand" #-}
                map expandTop kludged

        dumpIfSet_dyn dflags
                Opt_D_dump_asm_expanded "Synthetic instructions expanded"
                (vcat $ map (docToSDoc . pprNatCmmTop) expanded)
#else
        let expanded = kludged
#endif

        return  ( usAlloc
                , expanded
                , lastMinuteImports ++ imports
                , ppr_raStatsColor
                , ppr_raStatsLinear)
-- | Clean up the x87 floating-point stack on x86 by letting
--   'i386_insert_ffrees' insert the necessary ffree instructions.
x86fp_kludge :: NatCmmTop Instr -> NatCmmTop Instr
x86fp_kludge top@(CmmData _ _) = top
x86fp_kludge (CmmProc info lbl (ListGraph code)) =
        CmmProc info lbl (ListGraph $ i386_insert_ffrees code)
-- | Build a doc for all the imports.
--
makeImportsDoc :: DynFlags -> [CLabel] -> Pretty.Doc
makeImportsDoc dflags imports
 = dyld_stubs imports

#if HAVE_SUBSECTIONS_VIA_SYMBOLS
            -- On recent versions of Darwin, the linker supports
            -- dead-stripping of code and data on a per-symbol basis.
            -- There's a hack to make this work in PprMach.pprNatCmmTop.
            Pretty.$$ Pretty.text ".subsections_via_symbols"
#endif

#if HAVE_GNU_NONEXEC_STACK
            -- On recent GNU ELF systems one can mark an object file
            -- as not requiring an executable stack.  If all objects
            -- linked into a program have this note then the program
            -- will not use an executable stack, which is good for
            -- security.  GHC-generated code does not need an executable
            -- stack, so add the note in:
            Pretty.$$ Pretty.text ".section .note.GNU-stack,\"\",@progbits"
#endif

#if !defined(darwin_TARGET_OS)
            -- And just because every other compiler does, let's stick in
            -- an identifier directive: .ident "GHC x.y.z"
            Pretty.$$ let compilerIdent = Pretty.text "GHC" Pretty.<+>
                                          Pretty.text cProjectVersion
                       in Pretty.text ".ident" Pretty.<+>
                          Pretty.doubleQuotes compilerIdent
#endif

 where
        -- Generate "symbol stubs" for all external symbols that might
        -- come from a dynamic library.
        dyld_stubs :: [CLabel] -> Pretty.Doc
{-      dyld_stubs imps = Pretty.vcat $ map pprDyldSymbolStub $
                                    map head $ group $ sort imps -}

        arch    = platformArch  $ targetPlatform dflags
        os      = platformOS    $ targetPlatform dflags

        -- (Hack) sometimes two Labels pretty-print the same, but have
        -- different uniques; so we compare their text versions...
        dyld_stubs imps
                | needImportedSymbols arch os
                = Pretty.vcat $
                        (pprGotDeclaration arch os :) $
                        map ( pprImportedSymbol arch os . fst . head) $
                        groupBy (\(_,a) (_,b) -> a == b) $
                        sortBy  (\(_,a) (_,b) -> compare a b) $
                        map doPpr $
                        imps
                | otherwise
                = Pretty.empty

        doPpr lbl = (lbl, Pretty.render $ pprCLabel lbl astyle)
        astyle = mkCodeStyle AsmStyle
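-- The sort/group trick above, in isolation: keep one representative per
-- distinct rendering.  A self-contained sketch (the names here are made up,
-- not part of this module):
--
-- > import Data.Function (on)
-- > import Data.List (groupBy, sortBy)
-- > import Data.Ord (comparing)
-- >
-- > dedupeOn :: Ord b => (a -> b) -> [a] -> [a]
-- > dedupeOn render
-- >         = map (fst . head)
-- >         . groupBy ((==) `on` snd)
-- >         . sortBy (comparing snd)
-- >         . map (\x -> (x, render x))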
-- -----------------------------------------------------------------------------
-- Sequencing the basic blocks

-- Cmm BasicBlocks are self-contained entities: they always end in a
-- jump, either non-local or to another basic block in the same proc.
-- In this phase, we attempt to place the basic blocks in a sequence
-- such that as many of the local jumps as possible turn into
-- fallthroughs.

sequenceTop :: NatCmmTop Instr -> NatCmmTop Instr
sequenceTop top@(CmmData _ _) = top
sequenceTop (CmmProc info lbl (ListGraph blocks)) =
  CmmProc info lbl (ListGraph $ makeFarBranches $ sequenceBlocks blocks)

-- The algorithm is very simple (and stupid): we make a graph out of
-- the blocks where there is an edge from one block to another iff the
-- first block ends by jumping to the second.  Then we topologically
-- sort this graph.  Then traverse the list: for each block, we first
-- output the block, then if it has an out edge, we move the
-- destination of the out edge to the front of the list, and continue.

-- FYI, the classic layout for basic blocks uses postorder DFS; this
-- algorithm is implemented in Hoopl.
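-- To make the greedy step concrete, here is a self-contained toy version
-- over a made-up block type (illustrative only; the real code below works
-- on 'NatBasicBlock's and an SCC-sorted list):
--
-- > import Data.List (partition)
-- >
-- > data Blk = Blk { lbl :: Int, out :: Maybe Int } deriving Show
-- >
-- > layout :: [Blk] -> [Blk]
-- > layout [] = []
-- > layout (b : rest) =
-- >     case out b of
-- >         Just t | (match, others) <- partition ((== t) . lbl) rest
-- >                , (next : extra)  <- match
-- >                -> b : layout (next : extra ++ others)
-- >         _ -> b : layout rest
-- >
-- > -- layout [Blk 0 (Just 2), Blk 1 Nothing, Blk 2 (Just 1)]
-- > --   ==> blocks in order [0, 2, 1]: both jumps become fallthroughs.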
sequenceBlocks
        :: Instruction instr
        => [NatBasicBlock instr]
        -> [NatBasicBlock instr]

sequenceBlocks [] = []
sequenceBlocks (entry:blocks) =
  seqBlocks (mkNode entry : reverse (flattenSCCs (sccBlocks blocks)))
  -- the first block is the entry point ==> it must remain at the start.

sccBlocks
        :: Instruction instr
        => [NatBasicBlock instr]
        -> [SCC ( NatBasicBlock instr
                , Unique
                , [Unique])]

sccBlocks blocks = stronglyConnCompFromEdgedVerticesR (map mkNode blocks)

-- we're only interested in the last instruction of
-- the block, and only if it has a single destination.
getOutEdges
        :: Instruction instr
        => [instr] -> [Unique]

getOutEdges instrs
        = case jumpDestsOfInstr (last instrs) of
                [one] -> [getUnique one]
                _many -> []

mkNode :: (Instruction t)
       => GenBasicBlock t
       -> (GenBasicBlock t, Unique, [Unique])
mkNode block@(BasicBlock id instrs) = (block, getUnique id, getOutEdges instrs)

seqBlocks :: (Eq t) => [(GenBasicBlock t1, t, [t])] -> [GenBasicBlock t1]
seqBlocks [] = []
seqBlocks ((block,_,[]) : rest)
  = block : seqBlocks rest
seqBlocks ((block@(BasicBlock id instrs),_,[next]) : rest)
  | can_fallthrough = BasicBlock id (init instrs) : seqBlocks rest'
  | otherwise       = block : seqBlocks rest'
  where
        (can_fallthrough, rest') = reorder next [] rest
        -- TODO: we should do a better job for cycles; try to maximise the
        -- fallthroughs within a loop.
seqBlocks _ = panic "AsmCodegen:seqBlocks"

reorder :: (Eq a) => a -> [(t, a, t1)] -> [(t, a, t1)] -> (Bool, [(t, a, t1)])
reorder  _ accum [] = (False, reverse accum)
reorder id accum (b@(block,id',out) : rest)
  | id == id'  = (True, (block,id,out) : reverse accum ++ rest)
  | otherwise  = reorder id (b:accum) rest
-- -----------------------------------------------------------------------------
-- Making far branches

-- Conditional branches on PowerPC are limited to +-32KB; if our Procs get too
-- big, we have to work around this limitation.

makeFarBranches
        :: [NatBasicBlock Instr]
        -> [NatBasicBlock Instr]

#if powerpc_TARGET_ARCH
makeFarBranches blocks
    | last blockAddresses < nearLimit = blocks
    | otherwise = zipWith handleBlock blockAddresses blocks
    where
        blockAddresses = scanl (+) 0 $ map blockLen blocks
        blockLen (BasicBlock _ instrs) = length instrs

        handleBlock addr (BasicBlock id instrs)
                = BasicBlock id (zipWith makeFar [addr..] instrs)

        makeFar _ (BCC ALWAYS tgt) = BCC ALWAYS tgt
        makeFar addr (BCC cond tgt)
            | abs (addr - targetAddr) >= nearLimit
            = BCCFAR cond tgt
            | otherwise
            = BCC cond tgt
            where Just targetAddr = lookupUFM blockAddressMap tgt
        makeFar _ other = other

        nearLimit = 7000 -- 8192 instructions are allowed; let's keep some
                         -- distance, as we have a few pseudo-insns that are
                         -- pretty-printed as multiple instructions,
                         -- and it's just not worth the effort to calculate
                         -- things exactly

        blockAddressMap = listToUFM $ zip (map blockId blocks) blockAddresses
#else
makeFarBranches = id
#endif
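-- The address arithmetic in miniature (standalone, illustrative names):
-- cumulative block start addresses come from a scanl over block lengths,
-- and a conditional branch needs the far form when the distance to its
-- target exceeds the limit.
--
-- > blockStarts :: [Int] -> [Int]
-- > blockStarts = scanl (+) 0
-- > -- blockStarts [3,5,2] ==> [0,3,8,10]
-- >
-- > needsFarBranch :: Int -> Int -> Int -> Bool
-- > needsFarBranch limit src tgt = abs (src - tgt) >= limit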
-- -----------------------------------------------------------------------------
-- Generate jump tables

-- Analyzes all native code and generates data sections for all jump
-- table instructions.
generateJumpTables
        :: [NatCmmTop Instr] -> [NatCmmTop Instr]
generateJumpTables xs = concatMap f xs
    where f (CmmProc _ _ (ListGraph xs)) = concatMap g xs
          f _                            = []
          g (BasicBlock _ xs) = catMaybes (map generateJumpTableForInstr xs)
-- -----------------------------------------------------------------------------
-- Shortcut branches

shortcutBranches :: DynFlags -> [NatCmmTop Instr] -> [NatCmmTop Instr]
shortcutBranches dflags tops
  | optLevel dflags < 1 = tops    -- only with -O or higher
  | otherwise           = map (apply_mapping mapping) tops'
  where
    (tops', mappings) = mapAndUnzip build_mapping tops
    mapping = foldr plusUFM emptyUFM mappings

build_mapping :: GenCmmTop d t (ListGraph Instr)
              -> (GenCmmTop d t (ListGraph Instr), UniqFM JumpDest)
build_mapping top@(CmmData _ _) = (top, emptyUFM)
build_mapping (CmmProc info lbl (ListGraph []))
  = (CmmProc info lbl (ListGraph []), emptyUFM)
build_mapping (CmmProc info lbl (ListGraph (head:blocks)))
  = (CmmProc info lbl (ListGraph (head:others)), mapping)
        -- drop the shorted blocks, but don't ever drop the first one,
        -- because it is pointed to by a global label.
  where
    -- find all the blocks that just consist of a jump that can be
    -- shorted.
    -- Don't completely eliminate loops here -- that can leave a dangling jump!
    (_, shortcut_blocks, others) = foldl split (emptyBlockSet, [], []) blocks
    split (s, shortcut_blocks, others) b@(BasicBlock id [insn])
        | Just (DestBlockId dest) <- canShortcut insn,
          (setMember dest s) || dest == id  -- loop checks
        = (s, shortcut_blocks, b : others)
    split (s, shortcut_blocks, others) (BasicBlock id [insn])
        | Just dest <- canShortcut insn
        = (setInsert id s, (id,dest) : shortcut_blocks, others)
    split (s, shortcut_blocks, others) other = (s, shortcut_blocks, other : others)

    -- build a mapping from BlockId to JumpDest for shorting branches
    mapping = foldl add emptyUFM shortcut_blocks
    add ufm (id,dest) = addToUFM ufm id dest

apply_mapping :: UniqFM JumpDest
              -> GenCmmTop CmmStatic h (ListGraph Instr)
              -> GenCmmTop CmmStatic h (ListGraph Instr)
apply_mapping ufm (CmmData sec statics)
  = CmmData sec (map (shortcutStatic (lookupUFM ufm)) statics)
  -- we need to get the jump tables, so apply the mapping to the entries
  -- of a CmmData too.
apply_mapping ufm (CmmProc info lbl (ListGraph blocks))
  = CmmProc info lbl (ListGraph $ map short_bb blocks)
  where
    short_bb (BasicBlock id insns) = BasicBlock id $! map short_insn insns
    short_insn i = shortcutJump (lookupUFM ufm) i
                 -- shortcutJump should apply the mapping repeatedly,
                 -- just in case we can short multiple branches.
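-- The "apply the mapping repeatedly" idea in isolation: follow a
-- destination mapping transitively, with a fuel bound so an unexpected
-- cycle cannot loop forever.  A standalone sketch with made-up names:
--
-- > import qualified Data.Map as M
-- >
-- > follow :: M.Map Int Int -> Int -> Int
-- > follow m = go (M.size m)            -- fuel: at most |m| hops
-- >   where
-- >     go 0 l = l
-- >     go n l = case M.lookup l m of
-- >                Just l' -> go (n - 1) l'
-- >                Nothing -> l
-- >
-- > -- follow (M.fromList [(1,2),(2,3)]) 1  ==>  3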
-- -----------------------------------------------------------------------------
-- Instruction selection

-- Native code instruction selection for a chunk of Cmm code.  For
-- this part of the computation, we switch from the UniqSM monad to
-- the NatM monad.  The latter carries not only a Unique, but also an
-- Int denoting the current C stack pointer offset in the generated
-- code; this is needed for creating correct spill offsets on
-- architectures which don't offer, or for which it would be
-- prohibitively expensive to employ, a frame pointer register.  Viz,
-- x86.

-- The offset is measured in bytes, and indicates the difference
-- between the current (simulated) C stack-ptr and the value it was at
-- the beginning of the block.  For stacks which grow down, this value
-- should be either zero or negative.

-- Switching between the two monads whilst carrying along the same
-- Unique supply breaks abstraction.  Is that bad?
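-- A skeletal version of such a monad (illustrative only; the real NatM
-- also carries imports and DynFlags): the state is a unique counter plus
-- the simulated stack-pointer delta in bytes.
--
-- > newtype NatLite a = NatLite { runNatLite :: (Int, Int) -> (a, (Int, Int)) }
-- >
-- > getDelta :: NatLite Int
-- > getDelta = NatLite $ \s@(_, d) -> (d, s)
-- >
-- > setDelta :: Int -> NatLite ()
-- > setDelta d = NatLite $ \(u, _) -> ((), (u, d))
-- >
-- > newUnique :: NatLite Int
-- > newUnique = NatLite $ \(u, d) -> (u, (u + 1, d))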
genMachCode
        :: DynFlags
        -> RawCmmTop
        -> UniqSM ( [NatCmmTop Instr]
                  , [CLabel])

genMachCode dflags cmm_top
  = do  { initial_us <- getUs
        ; let initial_st           = mkNatM_State initial_us 0 dflags
              (new_tops, final_st) = initNat initial_st (cmmTopCodeGen dflags cmm_top)
              final_delta          = natm_delta final_st
              final_imports        = natm_imports final_st
        ; if   final_delta == 0
          then return (new_tops, final_imports)
          else pprPanic "genMachCode: nonzero final delta" (int final_delta)
        }
-- -----------------------------------------------------------------------------
-- Generic Cmm optimiser

  (b) Simple inlining: a temporary which is assigned to and then
      used, once, can be shorted (see the sketch after this list).
  (c) Position independent code and dynamic linking
        (i)  introduce the appropriate indirections
             and position independent refs
        (ii) compile a list of imported symbols

Ideas for other things we could do (ToDo):

  - shortcut jumps-to-jumps
  - eliminate dead code blocks
  - simple CSE: if an expr is assigned to a temp, then replace later occs of
    that expr with the temp, until the expr is no longer valid (can push through
    temp assignments, and certain assigns to mem...)
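A toy rendition of (b) on a micro expression language (all names made up,
not this module's real data types):

-- > data E = Lit Int | Tmp String | Add E E   deriving Show
-- > data S = Assign String E | Use E          deriving Show
-- >
-- > uses :: String -> E -> Int
-- > uses t (Tmp t')  = if t == t' then 1 else 0
-- > uses t (Add a b) = uses t a + uses t b
-- > uses _ _         = 0
-- >
-- > subst :: String -> E -> E -> E
-- > subst t e (Tmp t') | t == t' = e
-- > subst t e (Add a b)          = Add (subst t e a) (subst t e b)
-- > subst _ _ other              = other
-- >
-- > -- An assignment to a temporary that the next statement reads exactly
-- > -- once is substituted forward and dropped.
-- > inline :: [S] -> [S]
-- > inline (Assign t e : Use body : rest)
-- >     | uses t body == 1 = Use (subst t e body) : inline rest
-- > inline (s : rest)      = s : inline rest
-- > inline []              = []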
cmmToCmm :: DynFlags -> RawCmmTop -> (RawCmmTop, [CLabel])
cmmToCmm _ top@(CmmData _ _) = (top, [])
cmmToCmm dflags (CmmProc info lbl (ListGraph blocks)) = runCmmOpt dflags $ do
  blocks' <- mapM cmmBlockConFold (cmmMiniInline blocks)
  return $ CmmProc info lbl (ListGraph blocks')
newtype CmmOptM a = CmmOptM (([CLabel], DynFlags) -> (# a, [CLabel] #))

instance Monad CmmOptM where
  return x = CmmOptM $ \(imports, _) -> (# x, imports #)
  (CmmOptM f) >>= g =
    CmmOptM $ \(imports, dflags) ->
        case f (imports, dflags) of
          (# x, imports' #) ->
            case g x of
              CmmOptM g' -> g' (imports', dflags)

addImportCmmOpt :: CLabel -> CmmOptM ()
addImportCmmOpt lbl = CmmOptM $ \(imports, _dflags) -> (# (), lbl:imports #)

getDynFlagsCmmOpt :: CmmOptM DynFlags
getDynFlagsCmmOpt = CmmOptM $ \(imports, dflags) -> (# dflags, imports #)

runCmmOpt :: DynFlags -> CmmOptM a -> (a, [CLabel])
runCmmOpt dflags (CmmOptM f) = case f ([], dflags) of
                        (# result, imports #) -> (result, imports)
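-- How the pieces combine, as a hypothetical usage ('someLabel' is made up):
-- the monad threads DynFlags in and accumulates imported labels out.
--
-- > demo :: DynFlags -> (Int, [CLabel])
-- > demo dflags = runCmmOpt dflags $ do
-- >     addImportCmmOpt someLabel       -- record an import
-- >     return 42
-- > -- result: (42, [someLabel])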
cmmBlockConFold :: CmmBasicBlock -> CmmOptM CmmBasicBlock
cmmBlockConFold (BasicBlock id stmts) = do
  stmts' <- mapM cmmStmtConFold stmts
  return $ BasicBlock id stmts'

cmmStmtConFold :: CmmStmt -> CmmOptM CmmStmt
cmmStmtConFold stmt
   = case stmt of
        CmmAssign reg src
           -> do src' <- cmmExprConFold DataReference src
                 return $ case src' of
                   CmmReg reg' | reg == reg' -> CmmNop
                   new_src -> CmmAssign reg new_src

        CmmStore addr src
           -> do addr' <- cmmExprConFold DataReference addr
                 src'  <- cmmExprConFold DataReference src
                 return $ CmmStore addr' src'

        CmmJump addr regs
           -> do addr' <- cmmExprConFold JumpReference addr
                 return $ CmmJump addr' regs

        CmmCall target regs args srt returns
           -> do target' <- case target of
                              CmmCallee e conv -> do
                                e' <- cmmExprConFold CallReference e
                                return $ CmmCallee e' conv
                              other -> return other
                 args' <- mapM (\(CmmHinted arg hint) -> do
                                  arg' <- cmmExprConFold DataReference arg
                                  return (CmmHinted arg' hint)) args
                 return $ CmmCall target' regs args' srt returns

        CmmCondBranch test dest
           -> do test' <- cmmExprConFold DataReference test
                 return $ case test' of
                   CmmLit (CmmInt 0 _) ->
                     CmmComment (mkFastString ("deleted: " ++
                                        showSDoc (pprStmt stmt)))

                   CmmLit (CmmInt _ _) -> CmmBranch dest
                   _other -> CmmCondBranch test' dest

        CmmSwitch expr ids
           -> do expr' <- cmmExprConFold DataReference expr
                 return $ CmmSwitch expr' ids

        other
           -> return other
cmmExprConFold :: ReferenceKind -> CmmExpr -> CmmOptM CmmExpr
cmmExprConFold referenceKind expr
   = case expr of
        CmmLoad addr rep
           -> do addr' <- cmmExprConFold DataReference addr
                 return $ CmmLoad addr' rep

        CmmMachOp mop args
           -- For MachOps, we first optimize the children, and then we try
           -- our hand at some constant-folding.
           -> do args' <- mapM (cmmExprConFold DataReference) args
                 return $ cmmMachOpFold mop args'

        CmmLit (CmmLabel lbl)
           -> do
                dflags <- getDynFlagsCmmOpt
                cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl

        CmmLit (CmmLabelOff lbl off)
           -> do
                dflags <- getDynFlagsCmmOpt
                dynRef <- cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
                return $ cmmMachOpFold (MO_Add wordWidth) [
                    dynRef,
                    (CmmLit $ CmmInt (fromIntegral off) wordWidth)
                  ]

        -- On powerpc (non-PIC), it's easier to jump directly to a label than
        -- to use the register table, so we replace these registers
        -- with the corresponding labels:
        CmmReg (CmmGlobal EagerBlackholeInfo)
          | cTargetArch == PPC && not opt_PIC
          -> cmmExprConFold referenceKind $
             CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "__stg_EAGER_BLACKHOLE_info")))

        CmmReg (CmmGlobal GCEnter1)
          | cTargetArch == PPC && not opt_PIC
          -> cmmExprConFold referenceKind $
             CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "__stg_gc_enter_1")))

        CmmReg (CmmGlobal GCFun)
          | cTargetArch == PPC && not opt_PIC
          -> cmmExprConFold referenceKind $
             CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "__stg_gc_fun")))
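-- The effect of 'cmmMachOpFold' above, in miniature: a sketch over a toy
-- expression type (not the real 'CmmExpr'), folding additions of literals
-- bottom-up and applying a simple algebraic identity.
--
-- > data Expr = I Integer | Add Expr Expr   deriving Show
-- >
-- > fold :: Expr -> Expr
-- > fold (Add a b) =
-- >     case (fold a, fold b) of
-- >         (I x, I y) -> I (x + y)     -- constant-fold
-- >         (I 0, e)   -> e             -- 0 + e == e
-- >         (e, I 0)   -> e             -- e + 0 == e
-- >         (a', b')   -> Add a' b'
-- > fold e = e
-- >
-- > -- fold (Add (I 3) (Add (I 4) (I 5)))  ==>  I 12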