-- -----------------------------------------------------------------------------
--
-- (c) The University of Glasgow 1993-2004
--
-- This is the top-level module in the native code generator.
--
-- -----------------------------------------------------------------------------

module AsmCodeGen ( nativeCodeGen ) where

#include "HsVersions.h"
#include "nativeGen/NCG.h"

#if i386_TARGET_ARCH || x86_64_TARGET_ARCH
import X86.CodeGen
import X86.Regs
import X86.Instr
import X86.Ppr

#elif sparc_TARGET_ARCH
import SPARC.CodeGen
import SPARC.CodeGen.Expand
import SPARC.Regs
import SPARC.Instr
import SPARC.Ppr
import SPARC.ShortcutJump

#elif powerpc_TARGET_ARCH
import PPC.CodeGen
import PPC.Regs
import PPC.Instr
import PPC.Ppr

#else
#error "AsmCodeGen: unknown architecture"

#endif

import RegAlloc.Liveness
import qualified RegAlloc.Linear.Main           as Linear

import qualified GraphColor                     as Color
import qualified RegAlloc.Graph.Main            as Color
import qualified RegAlloc.Graph.Stats           as Color
import qualified RegAlloc.Graph.TrivColorable   as Color

import CgUtils          ( fixStgRegisters )
import CmmOpt           ( cmmMiniInline, cmmMachOpFold )
import Unique           ( Unique, getUnique )
import qualified Pretty
import Distribution.System

The native-code generator has machine-independent and
machine-dependent modules.

This module ("AsmCodeGen") is the top-level machine-independent
module. Before entering machine-dependent land, we do some
machine-independent optimisations (defined below) on the
'CmmStmts's.

We convert to the machine-specific 'Instr' datatype with
'cmmCodeGen', assuming an infinite supply of registers. We then use
a machine-independent register allocator ('regAlloc') to rejoin
reality. Obviously, 'regAlloc' has machine-specific helper
functions (see the section on "RegAllocInfo" below).

Finally, we order the basic blocks of the function so as to minimise
the number of jumps between blocks, by utilising fallthrough wherever
possible.
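
As a rough sketch (illustrative only; the concrete functions appear
further down this module), each top-level chunk of Cmm flows through:

    cmmToCmm  -->  genMachCode  -->  regAlloc  -->  sequenceTop  -->  pprNatCmmTop
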
The machine-dependent bits break down as follows:

  * ["MachRegs"] Everything about the target platform's machine
    registers (and immediate operands, and addresses, which tend to
    intermingle/interact with registers).

  * ["MachInstrs"] Includes the 'Instr' datatype (possibly should
    have a module of its own), plus a miscellany of other things
    (e.g., 'targetDoubleSize', 'smStablePtrTable', ...)

  * ["MachCodeGen"] is where 'Cmm' stuff turns into
    machine instructions.

  * ["PprMach"] 'pprInstr' turns an 'Instr' into text (well, really
    a 'Doc').

  * ["RegAllocInfo"] In the register allocator, we manipulate
    'MRegsState's, which are 'BitSet's, one bit per machine register.
    When we want to say something about a specific machine register
    (e.g., "it gets clobbered by this instruction"), we set/unset
    its bit. Obviously, we do this 'BitSet' thing for efficiency
    reasons.

    The 'RegAllocInfo' module collects together the machine-specific
    info needed to do register allocation.

  * ["RegisterAlloc"] The (machine-independent) register allocator.

-- -----------------------------------------------------------------------------
-- Top-level of the native codegen

nativeCodeGen :: DynFlags -> Handle -> UniqSupply -> [RawCmm] -> IO ()
nativeCodeGen dflags h us cmms
 = do
        let split_cmms  = concat $ map add_split cmms

        -- BufHandle is a performance hack. We could hide it inside
        -- Pretty if it weren't for the fact that we do lots of little
        -- printDocs here (in order to do codegen in constant space).
        bufh <- newBufHandle h
        (imports, prof) <- cmmNativeGens dflags bufh us split_cmms [] [] 0
        bufFlush bufh

        let (native, colorStats, linearStats)
                = unzip3 prof

        -- dump native code
        dumpIfSet_dyn dflags
                Opt_D_dump_asm "Asm code"
                (vcat $ map (docToSDoc . pprNatCmmTop) $ concat native)

        -- dump global NCG stats for graph coloring allocator
        (case concat $ catMaybes colorStats of
          []    -> return ()
          stats -> do
                -- build the global register conflict graph
                let graphGlobal
                        = foldl Color.union Color.initGraph
                        $ [ Color.raGraph stat
                                | stat@Color.RegAllocStatsStart{} <- stats]

                dumpSDoc dflags Opt_D_dump_asm_stats "NCG stats"
                        $ Color.pprStats stats graphGlobal

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_conflicts "Register conflict graph"
                        $ Color.dotGraph
                                targetRegDotColor
                                (Color.trivColorable
                                        targetVirtualRegSqueeze
                                        targetRealRegSqueeze)
                        $ graphGlobal)

        -- dump global NCG stats for linear allocator
        (case concat $ catMaybes linearStats of
          []    -> return ()
          stats -> dumpSDoc dflags Opt_D_dump_asm_stats "NCG stats"
                        $ Linear.pprStats (concat native) stats)

        -- write out the imports
        Pretty.printDoc Pretty.LeftMode h
                $ makeImportsDoc dflags (concat imports)

        return ()

 where  add_split (Cmm tops)
                | dopt Opt_SplitObjs dflags = split_marker : tops
                | otherwise                 = tops

        split_marker = CmmProc [] mkSplitMarkerLabel (ListGraph [])

-- | Do native code generation on all these cmms.
--
cmmNativeGens :: DynFlags
              -> BufHandle
              -> UniqSupply
              -> [RawCmmTop]
              -> [[CLabel]]
              -> [ ([NatCmmTop Instr],
                    Maybe [Color.RegAllocStats Instr],
                    Maybe [Linear.RegAllocStats]) ]
              -> Int
              -> IO ( [[CLabel]],
                      [([NatCmmTop Instr],
                        Maybe [Color.RegAllocStats Instr],
                        Maybe [Linear.RegAllocStats])] )

cmmNativeGens _ _ _ [] impAcc profAcc _
        = return (reverse impAcc, reverse profAcc)

cmmNativeGens dflags h us (cmm : cmms) impAcc profAcc count
 = do
        (us', native, imports, colorStats, linearStats)
                <- cmmNativeGen dflags us cmm count

        Pretty.bufLeftRender h
                $ {-# SCC "pprNativeCode" #-} Pretty.vcat $ map pprNatCmmTop native

        -- carefully evaluate this strictly. Binding it with 'let'
        -- and then using 'seq' doesn't work, because the let
        -- apparently gets inlined first.
        lsPprNative <- return $!
                if   dopt Opt_D_dump_asm dflags
                  || dopt Opt_D_dump_asm_stats dflags
                        then native
                        else []

        count' <- return $! count + 1

        -- force evaluation of all this stuff to avoid space leaks
        seqString (showSDoc $ vcat $ map ppr imports) `seq` return ()

        cmmNativeGens dflags h us' cmms
                (imports : impAcc)
                ((lsPprNative, colorStats, linearStats) : profAcc)
                count'

 where  seqString []            = ()
        seqString (x:xs)        = x `seq` seqString xs `seq` ()

-- | Complete native code generation phase for a single top-level chunk of Cmm.
--      Dumps the output of each stage along the way.
--      Also returns the information needed for the global dumps: the
--      register conflict graph and the NCG stats.
cmmNativeGen
        :: DynFlags
        -> UniqSupply
        -> RawCmmTop                                    -- ^ the cmm to generate code for
        -> Int                                          -- ^ sequence number of this top thing
        -> IO   ( UniqSupply
                , [NatCmmTop Instr]                     -- native code
                , [CLabel]                              -- things imported by this cmm
                , Maybe [Color.RegAllocStats Instr]     -- stats for the coloring register allocator
                , Maybe [Linear.RegAllocStats])         -- stats for the linear register allocators

cmmNativeGen dflags us cmm count
 = do
        -- rewrite assignments to global regs
        let fixed_cmm =
                {-# SCC "fixStgRegisters" #-}
                fixStgRegisters cmm

        -- cmm to cmm optimisations
        let (opt_cmm, imports) =
                {-# SCC "cmmToCmm" #-}
                cmmToCmm dflags fixed_cmm

        dumpIfSet_dyn dflags
                Opt_D_dump_opt_cmm "Optimised Cmm"
                (pprCmm $ Cmm [opt_cmm])

        -- generate native code from cmm
        let ((native, lastMinuteImports), usGen) =
                {-# SCC "genMachCode" #-}
                initUs us $ genMachCode dflags opt_cmm

        dumpIfSet_dyn dflags
                Opt_D_dump_asm_native "Native code"
                (vcat $ map (docToSDoc . pprNatCmmTop) native)

        -- tag instructions with register liveness information
        let (withLiveness, usLive) =
                {-# SCC "regLiveness" #-}
                initUs usGen
                        $ mapUs regLiveness
                        $ map natCmmTopToLive native

        dumpIfSet_dyn dflags
                Opt_D_dump_asm_liveness "Liveness annotations added"
                (vcat $ map ppr withLiveness)

        -- allocate registers
        (alloced, usAlloc, ppr_raStatsColor, ppr_raStatsLinear) <-
         if ( dopt Opt_RegsGraph dflags
           || dopt Opt_RegsIterative dflags)
          then do
                -- the regs usable for allocation
                let (alloc_regs :: UniqFM (UniqSet RealReg))
                        = foldr (\r -> plusUFM_C unionUniqSets
                                        $ unitUFM (targetClassOfRealReg r) (unitUniqSet r))
                                emptyUFM
                                allocatableRegs

                -- do the graph coloring register allocation
                let ((alloced, regAllocStats), usAlloc)
                        = {-# SCC "RegAlloc" #-}
                          initUs usLive
                          $ Color.regAlloc
                                dflags
                                alloc_regs
                                (mkUniqSet [0..maxSpillSlots])
                                withLiveness

                -- dump out what happened during register allocation
                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc "Registers allocated"
                        (vcat $ map (docToSDoc . pprNatCmmTop) alloced)

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc_stages "Build/spill stages"
                        (vcat   $ map (\(stage, stats)
                                        -> text "# --------------------------"
                                        $$ text "# cmm " <> int count <> text " Stage " <> int stage
                                        $$ ppr stats)
                                $ zip [0..] regAllocStats)

                let mPprStats =
                        if dopt Opt_D_dump_asm_stats dflags
                         then Just regAllocStats else Nothing

                -- force evaluation of the Maybe to avoid space leak
                mPprStats `seq` return ()

                return  ( alloced, usAlloc
                        , mPprStats
                        , Nothing)

          else do
                -- do linear register allocation
                let ((alloced, regAllocStats), usAlloc)
                        = {-# SCC "RegAlloc" #-}
                          initUs usLive
                          $ liftM unzip
                          $ mapUs Linear.regAlloc withLiveness

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc "Registers allocated"
                        (vcat $ map (docToSDoc . pprNatCmmTop) alloced)

                let mPprStats =
                        if dopt Opt_D_dump_asm_stats dflags
                         then Just (catMaybes regAllocStats) else Nothing

                -- force evaluation of the Maybe to avoid space leak
                mPprStats `seq` return ()

                return  ( alloced, usAlloc
                        , Nothing
                        , mPprStats)

        ---- generate jump tables
        let tabled      =
                {-# SCC "generateJumpTables" #-}
                alloced ++ generateJumpTables alloced

        ---- shortcut branches
        let shorted     =
                {-# SCC "shortcutBranches" #-}
                shortcutBranches dflags tabled

        ---- sequence blocks
        let sequenced   =
                {-# SCC "sequenceBlocks" #-}
                map sequenceTop shorted

        ---- x86fp_kludge
        let kludged =
#if i386_TARGET_ARCH
                {-# SCC "x86fp_kludge" #-}
                map x86fp_kludge sequenced
#else
                sequenced
#endif

        ---- expansion of SPARC synthetic instrs
#if sparc_TARGET_ARCH
        let expanded =
                {-# SCC "sparc_expand" #-}
                map expandTop kludged

        dumpIfSet_dyn dflags
                Opt_D_dump_asm_expanded "Synthetic instructions expanded"
                (vcat $ map (docToSDoc . pprNatCmmTop) expanded)
#else
        let expanded = kludged
#endif

        return  ( usAlloc
                , expanded
                , lastMinuteImports ++ imports
                , ppr_raStatsColor
                , ppr_raStatsLinear)
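
-- On i386, virtual floating-point registers are mapped onto the x87
-- register stack, so i386_insert_ffrees adds the ffree instructions
-- needed to clear that stack before control can leave a proc.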
#if i386_TARGET_ARCH
x86fp_kludge :: NatCmmTop Instr -> NatCmmTop Instr
x86fp_kludge top@(CmmData _ _) = top
x86fp_kludge (CmmProc info lbl (ListGraph code)) =
        CmmProc info lbl (ListGraph $ i386_insert_ffrees code)
#endif

-- | Build a doc for all the imports.
--
makeImportsDoc :: DynFlags -> [CLabel] -> Pretty.Doc
makeImportsDoc dflags imports
 = dyld_stubs imports

#if HAVE_SUBSECTIONS_VIA_SYMBOLS
            -- On recent versions of Darwin, the linker supports
            -- dead-stripping of code and data on a per-symbol basis.
            -- There's a hack to make this work in PprMach.pprNatCmmTop.
            Pretty.$$ Pretty.text ".subsections_via_symbols"
#endif

#if HAVE_GNU_NONEXEC_STACK
            -- On recent GNU ELF systems one can mark an object file
            -- as not requiring an executable stack. If all objects
            -- linked into a program have this note then the program
            -- will not use an executable stack, which is good for
            -- security. GHC generated code does not need an executable
            -- stack, so add the note in:
            Pretty.$$ Pretty.text ".section .note.GNU-stack,\"\",@progbits"
#endif

#if !defined(darwin_TARGET_OS)
            -- And just because every other compiler does, let's stick in
            -- an identifier directive: .ident "GHC x.y.z"
            Pretty.$$ let compilerIdent = Pretty.text "GHC" Pretty.<+>
                                          Pretty.text cProjectVersion
                      in Pretty.text ".ident" Pretty.<+>
                         Pretty.doubleQuotes compilerIdent
#endif

 where
        -- Generate "symbol stubs" for all external symbols that might
        -- come from a dynamic library.
        dyld_stubs :: [CLabel] -> Pretty.Doc
{-      dyld_stubs imps = Pretty.vcat $ map pprDyldSymbolStub $
                                        map head $ group $ sort imps -}

        arch    = platformArch  $ targetPlatform dflags
        os      = platformOS    $ targetPlatform dflags

        -- (Hack) sometimes two Labels pretty-print the same, but have
        -- different uniques; so we compare their text versions...
        dyld_stubs imps
                | needImportedSymbols arch os
                = Pretty.vcat $
                        (pprGotDeclaration arch os :) $
                        map (pprImportedSymbol arch os . fst . head) $
                        groupBy (\(_,a) (_,b) -> a == b) $
                        sortBy  (\(_,a) (_,b) -> compare a b) $
                        map doPpr $
                        imps
                | otherwise
                = Pretty.empty

        doPpr lbl = (lbl, Pretty.render $ pprCLabel lbl astyle)
        astyle    = mkCodeStyle AsmStyle

-- -----------------------------------------------------------------------------
-- Sequencing the basic blocks

-- Cmm BasicBlocks are self-contained entities: they always end in a
-- jump, either non-local or to another basic block in the same proc.
-- In this phase, we attempt to place the basic blocks in a sequence
-- such that as many of the local jumps as possible turn into
-- fallthroughs.

sequenceTop :: NatCmmTop Instr -> NatCmmTop Instr
sequenceTop top@(CmmData _ _) = top
sequenceTop (CmmProc info lbl (ListGraph blocks)) =
  CmmProc info lbl (ListGraph $ makeFarBranches $ sequenceBlocks blocks)

-- The algorithm is very simple (and stupid): we make a graph out of
-- the blocks where there is an edge from one block to another iff the
-- first block ends by jumping to the second. Then we topologically
-- sort this graph. Then traverse the list: for each block, we first
-- output the block, then if it has an out edge, we move the
-- destination of the out edge to the front of the list, and continue.

-- FYI, the classic layout for basic blocks uses postorder DFS; this
-- algorithm is implemented in Hoopl.
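
-- A minimal standalone sketch of that greedy step (illustrative only:
-- 'sequenceToy' is a made-up name, blocks are just a name plus an
-- optional local jump target, and the topological sort is omitted):
sequenceToy :: [(String, Maybe String)] -> [String]
sequenceToy [] = []
sequenceToy ((name, Nothing)  : rest) = name : sequenceToy rest
sequenceToy ((name, Just tgt) : rest) = name : sequenceToy (pull rest)
  where
    -- bring the jump target (if still unplaced) to the front,
    -- turning the jump into a fallthrough
    pull bs = case break ((== tgt) . fst) bs of
                (before, found : after) -> found : before ++ after
                (_, [])                 -> bs

-- e.g. sequenceToy [("A", Just "C"), ("B", Nothing), ("C", Just "B")]
--      == ["A","C","B"], turning both local jumps into fallthroughs.
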
sequenceBlocks
        :: Instruction instr
        => [NatBasicBlock instr]
        -> [NatBasicBlock instr]

sequenceBlocks [] = []
sequenceBlocks (entry:blocks) =
  seqBlocks (mkNode entry : reverse (flattenSCCs (sccBlocks blocks)))
  -- the first block is the entry point ==> it must remain at the start.

sccBlocks
        :: Instruction instr
        => [NatBasicBlock instr]
        -> [SCC ( NatBasicBlock instr
                , Unique
                , [Unique])]

sccBlocks blocks = stronglyConnCompFromEdgedVerticesR (map mkNode blocks)

-- we're only interested in the last instruction of
-- the block, and only if it has a single destination.
getOutEdges
        :: Instruction instr
        => [instr] -> [Unique]

getOutEdges instrs
        = case jumpDestsOfInstr (last instrs) of
                [one] -> [getUnique one]
                _many -> []

mkNode :: (Instruction t)
       => GenBasicBlock t
       -> (GenBasicBlock t, Unique, [Unique])
mkNode block@(BasicBlock id instrs) = (block, getUnique id, getOutEdges instrs)

seqBlocks :: (Eq t) => [(GenBasicBlock t1, t, [t])] -> [GenBasicBlock t1]
seqBlocks [] = []
seqBlocks ((block,_,[]) : rest)
  = block : seqBlocks rest
seqBlocks ((block@(BasicBlock id instrs),_,[next]) : rest)
  | can_fallthrough = BasicBlock id (init instrs) : seqBlocks rest'
  | otherwise       = block : seqBlocks rest'
  where
        (can_fallthrough, rest') = reorder next [] rest
        -- TODO: we should do a better job for cycles; try to maximise the
        -- fallthroughs within a loop.
seqBlocks _ = panic "AsmCodegen:seqBlocks"

reorder :: (Eq a) => a -> [(t, a, t1)] -> [(t, a, t1)] -> (Bool, [(t, a, t1)])
reorder  _ accum [] = (False, reverse accum)
reorder id accum (b@(block,id',out) : rest)
  | id == id'  = (True, (block,id,out) : reverse accum ++ rest)
  | otherwise  = reorder id (b:accum) rest

-- -----------------------------------------------------------------------------
-- Making far branches

-- Conditional branches on PowerPC are limited to +-32KB; if our Procs get too
-- big, we have to work around this limitation.
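
-- An illustrative sketch of the standard workaround (the 'Toy*' names
-- are made up; the real rewrite below uses the PPC Instr type): an
-- out-of-range conditional branch becomes a branch with the opposite
-- sense that skips over an unconditional jump, whose range is much larger.

data ToyInstr
        = ToyBcc Bool String    -- conditional branch to label, limited reach
        | ToyJmp String         -- unconditional jump, much larger reach
        | ToyLabel String       -- branch target
        deriving Show

-- rewrite "branch to tgt if cond holds" once tgt is out of reach
toyFarBranch :: Bool -> String -> [ToyInstr]
toyFarBranch cond tgt
        = [ ToyBcc (not cond) "over"    -- nearby: skip the jump below
          , ToyJmp tgt                  -- far-reaching unconditional jump
          , ToyLabel "over" ]
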
makeFarBranches
        :: [NatBasicBlock Instr]
        -> [NatBasicBlock Instr]

#if powerpc_TARGET_ARCH
makeFarBranches blocks
    | last blockAddresses < nearLimit = blocks
    | otherwise = zipWith handleBlock blockAddresses blocks
    where
        blockAddresses = scanl (+) 0 $ map blockLen blocks
        blockLen (BasicBlock _ instrs) = length instrs

        handleBlock addr (BasicBlock id instrs)
                = BasicBlock id (zipWith makeFar [addr..] instrs)

        makeFar _ (BCC ALWAYS tgt) = BCC ALWAYS tgt
        makeFar addr (BCC cond tgt)
            | abs (addr - targetAddr) >= nearLimit
            = BCCFAR cond tgt
            | otherwise
            = BCC cond tgt
            where Just targetAddr = lookupUFM blockAddressMap tgt
        makeFar _ other = other

        nearLimit = 7000 -- 8192 instructions are allowed; let's keep some
                         -- distance, as we have a few pseudo-insns that are
                         -- pretty-printed as multiple instructions,
                         -- and it's just not worth the effort to calculate
                         -- things exactly

        blockAddressMap = listToUFM $ zip (map blockId blocks) blockAddresses
#else
makeFarBranches = id
#endif

-- -----------------------------------------------------------------------------
-- Generate jump tables

-- Analyzes all native code and generates data sections for all jump
-- table instructions.
generateJumpTables
        :: [NatCmmTop Instr] -> [NatCmmTop Instr]
generateJumpTables xs = concatMap f xs
    where f (CmmProc _ _ (ListGraph xs)) = concatMap g xs
          f _                            = []
          g (BasicBlock _ xs) = catMaybes (map generateJumpTableForInstr xs)
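
-- Note that only the new CmmData sections are returned here; the caller
-- appends them to the original procs (see 'tabled' in cmmNativeGen above).
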
-- -----------------------------------------------------------------------------
-- Shortcut branches

shortcutBranches
        :: DynFlags
        -> [NatCmmTop Instr]
        -> [NatCmmTop Instr]

shortcutBranches dflags tops
  | optLevel dflags < 1 = tops        -- only with -O or higher
  | otherwise           = map (apply_mapping mapping) tops'
  where
    (tops', mappings) = mapAndUnzip build_mapping tops
    mapping = foldr plusUFM emptyUFM mappings

build_mapping :: GenCmmTop d t (ListGraph Instr)
              -> (GenCmmTop d t (ListGraph Instr), UniqFM JumpDest)
build_mapping top@(CmmData _ _) = (top, emptyUFM)
build_mapping (CmmProc info lbl (ListGraph []))
  = (CmmProc info lbl (ListGraph []), emptyUFM)
build_mapping (CmmProc info lbl (ListGraph (head:blocks)))
  = (CmmProc info lbl (ListGraph (head:others)), mapping)
        -- drop the shorted blocks, but don't ever drop the first one,
        -- because it is pointed to by a global label.
  where
    -- find all the blocks that just consist of a jump that can be
    -- shorted.
    -- Don't completely eliminate loops here -- that can leave a dangling jump!
    (_, shortcut_blocks, others) = foldl split (emptyBlockSet, [], []) blocks
    split (s, shortcut_blocks, others) b@(BasicBlock id [insn])
        | Just (DestBlockId dest) <- canShortcut insn,
          (setMember dest s) || dest == id  -- loop checks
        = (s, shortcut_blocks, b : others)
    split (s, shortcut_blocks, others) (BasicBlock id [insn])
        | Just dest <- canShortcut insn
        = (setInsert id s, (id,dest) : shortcut_blocks, others)
    split (s, shortcut_blocks, others) other = (s, shortcut_blocks, other : others)

    -- build a mapping from BlockId to JumpDest for shorting branches
    mapping = foldl add emptyUFM shortcut_blocks
    add ufm (id,dest) = addToUFM ufm id dest

apply_mapping :: UniqFM JumpDest
              -> GenCmmTop CmmStatic h (ListGraph Instr)
              -> GenCmmTop CmmStatic h (ListGraph Instr)
apply_mapping ufm (CmmData sec statics)
  = CmmData sec (map (shortcutStatic (lookupUFM ufm)) statics)
  -- we need to get the jump tables, so apply the mapping to the entries
  -- of a CmmData too.
apply_mapping ufm (CmmProc info lbl (ListGraph blocks))
  = CmmProc info lbl (ListGraph $ map short_bb blocks)
  where
    short_bb (BasicBlock id insns) = BasicBlock id $! map short_insn insns
    short_insn i = shortcutJump (lookupUFM ufm) i
        -- shortcutJump should apply the mapping repeatedly,
        -- just in case we can short multiple branches.
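
-- An illustrative toy of why the loop check in 'build_mapping' matters
-- ('followToy' is a made-up name): shortcutting chases chains of
-- destinations, and only terminates because blocks that would close a
-- cycle are never added to the mapping.
followToy :: [(String, String)] -> String -> String
followToy m l = case lookup l m of
                  Just l' -> followToy m l'     -- keep chasing the chain
                  Nothing -> l

-- e.g. followToy [("b","c"),("c","d")] "b" == "d", whereas a cyclic
-- mapping such as [("b","b")] would make it loop forever.
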
-- -----------------------------------------------------------------------------
-- Instruction selection

-- Native code instruction selection for a chunk of stix code. For
-- this part of the computation, we switch from the UniqSM monad to
-- the NatM monad. The latter carries not only a Unique, but also an
-- Int denoting the current C stack pointer offset in the generated
-- code; this is needed for creating correct spill offsets on
-- architectures which don't offer, or for which it would be
-- prohibitively expensive to employ, a frame pointer register. Viz,
-- x86.

-- The offset is measured in bytes, and indicates the difference
-- between the current (simulated) C stack-ptr and the value it was at
-- the beginning of the block. For stacks which grow down, this value
-- should be either zero or negative.
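
-- A minimal sketch of the delta idea (made-up names, not NatM itself):
-- every push or pop of the simulated C stack adjusts a running delta,
-- and a spill slot fixed relative to the entry-time stack pointer is
-- addressed from the current one by subtracting that delta.
spillOffsetToy
        :: Int          -- delta: current sp minus sp at block entry (<= 0)
        -> Int          -- slot offset relative to sp at block entry
        -> Int          -- offset to use relative to the current sp
spillOffsetToy delta slot = slot - delta
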
-- Switching between the two monads whilst carrying along the same
-- Unique supply breaks abstraction. Is that bad?

genMachCode
        :: DynFlags
        -> RawCmmTop
        -> UniqSM
                ( [NatCmmTop Instr]
                , [CLabel])

genMachCode dflags cmm_top
  = do  { initial_us <- getUs
        ; let initial_st           = mkNatM_State initial_us 0 dflags
              (new_tops, final_st) = initNat initial_st (cmmTopCodeGen dflags cmm_top)
              final_delta          = natm_delta final_st
              final_imports        = natm_imports final_st
        ; if   final_delta == 0
          then return (new_tops, final_imports)
          else pprPanic "genMachCode: nonzero final delta" (int final_delta)
        }

-- -----------------------------------------------------------------------------
-- Generic Cmm optimiser

Here we do:

  (a) Constant folding
  (b) Simple inlining: a temporary which is assigned to and then
      used, once, can be shorted.
  (c) Position independent code and dynamic linking
        (i)  introduce the appropriate indirections
             and position independent refs
        (ii) compile a list of imported symbols

Ideas for other things we could do (ToDo):

  - shortcut jumps-to-jumps
  - eliminate dead code blocks
  - simple CSE: if an expr is assigned to a temp, then replace later occs of
    that expr with the temp, until the expr is no longer valid (can push through
    temp assignments, and certain assigns to mem...)
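
For instance, (b) turns

    x = y + 1;   // x is assigned once and used once
    z = x * 2;

into

    z = (y + 1) * 2;

(an illustrative pseudo-Cmm sketch; the real transformation is
'cmmMiniInline' from CmmOpt).
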
cmmToCmm :: DynFlags -> RawCmmTop -> (RawCmmTop, [CLabel])
cmmToCmm _ top@(CmmData _ _) = (top, [])
cmmToCmm dflags (CmmProc info lbl (ListGraph blocks)) = runCmmOpt dflags $ do
  blocks' <- mapM cmmBlockConFold (cmmMiniInline blocks)
  return $ CmmProc info lbl (ListGraph blocks')

newtype CmmOptM a = CmmOptM (([CLabel], DynFlags) -> (# a, [CLabel] #))
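-- CmmOptM is a combined reader/state monad: it threads the DynFlags as
-- a read-only environment and accumulates the [CLabel] import list as
-- state. The unboxed result tuple avoids an allocation on every bind.
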
instance Monad CmmOptM where
  return x = CmmOptM $ \(imports, _) -> (# x, imports #)
  (CmmOptM f) >>= g =
    CmmOptM $ \(imports, dflags) ->
        case f (imports, dflags) of
          (# x, imports' #) ->
            case g x of
              CmmOptM g' -> g' (imports', dflags)

addImportCmmOpt :: CLabel -> CmmOptM ()
addImportCmmOpt lbl = CmmOptM $ \(imports, _dflags) -> (# (), lbl:imports #)

getDynFlagsCmmOpt :: CmmOptM DynFlags
getDynFlagsCmmOpt = CmmOptM $ \(imports, dflags) -> (# dflags, imports #)

runCmmOpt :: DynFlags -> CmmOptM a -> (a, [CLabel])
runCmmOpt dflags (CmmOptM f) = case f ([], dflags) of
                                 (# result, imports #) -> (result, imports)

cmmBlockConFold :: CmmBasicBlock -> CmmOptM CmmBasicBlock
cmmBlockConFold (BasicBlock id stmts) = do
  stmts' <- mapM cmmStmtConFold stmts
  return $ BasicBlock id stmts'

cmmStmtConFold :: CmmStmt -> CmmOptM CmmStmt
cmmStmtConFold stmt
   = case stmt of
        CmmAssign reg src
           -> do src' <- cmmExprConFold DataReference src
                 return $ case src' of
                   CmmReg reg' | reg == reg' -> CmmNop
                   new_src -> CmmAssign reg new_src

        CmmStore addr src
           -> do addr' <- cmmExprConFold DataReference addr
                 src'  <- cmmExprConFold DataReference src
                 return $ CmmStore addr' src'

        CmmJump addr regs
           -> do addr' <- cmmExprConFold JumpReference addr
                 return $ CmmJump addr' regs

        CmmCall target regs args srt returns
           -> do target' <- case target of
                              CmmCallee e conv -> do
                                e' <- cmmExprConFold CallReference e
                                return $ CmmCallee e' conv
                              other -> return other
                 args' <- mapM (\(CmmHinted arg hint) -> do
                                  arg' <- cmmExprConFold DataReference arg
                                  return (CmmHinted arg' hint)) args
                 return $ CmmCall target' regs args' srt returns

        CmmCondBranch test dest
           -> do test' <- cmmExprConFold DataReference test
                 return $ case test' of
                   CmmLit (CmmInt 0 _) ->
                     CmmComment (mkFastString ("deleted: " ++
                                        showSDoc (pprStmt stmt)))

                   CmmLit (CmmInt _ _) -> CmmBranch dest
                   _other -> CmmCondBranch test' dest

        CmmSwitch expr ids
           -> do expr' <- cmmExprConFold DataReference expr
                 return $ CmmSwitch expr' ids

        other
           -> return other

cmmExprConFold :: ReferenceKind -> CmmExpr -> CmmOptM CmmExpr
cmmExprConFold referenceKind expr
   = case expr of
        CmmLoad addr rep
           -> do addr' <- cmmExprConFold DataReference addr
                 return $ CmmLoad addr' rep

        CmmMachOp mop args
           -- For MachOps, we first optimize the children, and then we try
           -- our hand at some constant-folding.
           -> do args' <- mapM (cmmExprConFold DataReference) args
                 return $ cmmMachOpFold mop args'

        CmmLit (CmmLabel lbl)
           -> do
                dflags <- getDynFlagsCmmOpt
                cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl

        CmmLit (CmmLabelOff lbl off)
           -> do
                dflags <- getDynFlagsCmmOpt
                dynRef <- cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
                return $ cmmMachOpFold (MO_Add wordWidth) [
                    dynRef,
                    (CmmLit $ CmmInt (fromIntegral off) wordWidth)
                  ]

        -- On powerpc (non-PIC), it's easier to jump directly to a label than
        -- to use the register table, so we replace these registers
        -- with the corresponding labels:
        CmmReg (CmmGlobal EagerBlackholeInfo)
          | cTargetArch == PPC && not opt_PIC
          -> cmmExprConFold referenceKind $
             CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "__stg_EAGER_BLACKHOLE_info")))
        CmmReg (CmmGlobal GCEnter1)
          | cTargetArch == PPC && not opt_PIC
          -> cmmExprConFold referenceKind $
             CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "__stg_gc_enter_1")))
        CmmReg (CmmGlobal GCFun)
          | cTargetArch == PPC && not opt_PIC
          -> cmmExprConFold referenceKind $
             CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "__stg_gc_fun")))