-- -----------------------------------------------------------------------------
-- (c) The University of Glasgow 1993-2004
-- This is the top-level module in the native code generator.
-- -----------------------------------------------------------------------------

module AsmCodeGen ( nativeCodeGen ) where

#include "HsVersions.h"
#include "nativeGen/NCG.h"

#if i386_TARGET_ARCH || x86_64_TARGET_ARCH
#elif sparc_TARGET_ARCH
import SPARC.CodeGen.Expand
import SPARC.ShortcutJump
#elif powerpc_TARGET_ARCH
#error "AsmCodeGen: unknown architecture"

import RegAlloc.Liveness
import qualified RegAlloc.Linear.Main as Linear

import qualified GraphColor as Color
import qualified RegAlloc.Graph.Main as Color
import qualified RegAlloc.Graph.Stats as Color
import qualified RegAlloc.Graph.TrivColorable as Color

import CgUtils ( fixStgRegisters )
import CmmOpt ( cmmEliminateDeadBlocks, cmmMiniInline, cmmMachOpFold )
import Unique ( Unique, getUnique )
import qualified Pretty
The native-code generator has machine-independent and
machine-dependent modules.

This module ("AsmCodeGen") is the top-level machine-independent
module. Before entering machine-dependent land, we do some
machine-independent optimisations (defined below) on the Cmm.

We convert to the machine-specific 'Instr' datatype with
'cmmCodeGen', assuming an infinite supply of registers. We then use
a machine-independent register allocator ('regAlloc') to rejoin
reality. Obviously, 'regAlloc' has machine-specific helper
functions (see about "RegAllocInfo" below).

Finally, we order the basic blocks of the function so as to minimise
the number of jumps between blocks, by utilising fallthrough wherever
possible.

The machine-dependent bits break down as follows:

  * ["MachRegs"] Everything about the target platform's machine
    registers (and immediate operands, and addresses, which tend to
    intermingle/interact with registers).

  * ["MachInstrs"] Includes the 'Instr' datatype (possibly should
    have a module of its own), plus a miscellany of other things
    (e.g., 'targetDoubleSize', 'smStablePtrTable', ...)

  * ["MachCodeGen"] is where 'Cmm' stuff turns into
    machine instructions.

  * ["PprMach"] 'pprInstr' turns an 'Instr' into text (well, really a 'Doc').

  * ["RegAllocInfo"] In the register allocator, we manipulate
    'MRegsState's, which are 'BitSet's, one bit per machine register.
    When we want to say something about a specific machine register
    (e.g., ``it gets clobbered by this instruction''), we set/unset
    its bit. Obviously, we do this 'BitSet' thing for efficiency
    reasons.

    The 'RegAllocInfo' module collects together the machine-specific
    info needed to do register allocation.

  * ["RegisterAlloc"] The (machine-independent) register allocator.
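As a rough orientation (a sketch, not an authoritative list -- see
'cmmNativeGen' below; the architecture-specific steps are conditional),
each top-level Cmm chunk flows through the passes in this order:

        fixStgRegisters -> cmmToCmm -> genMachCode -> regLiveness
          -> register allocation (graph-colouring or linear)
          -> x86fp_kludge (x86 only) -> generateJumpTables
          -> shortcutBranches -> sequenceTop -> expandTop (SPARC only)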
-- -----------------------------------------------------------------------------
-- Top-level of the native codegen

nativeCodeGen :: DynFlags -> Handle -> UniqSupply -> [RawCmm] -> IO ()
nativeCodeGen dflags h us cmms
        let split_cmms  = concat $ map add_split cmms

        -- BufHandle is a performance hack.  We could hide it inside
        -- Pretty if it weren't for the fact that we do lots of little
        -- printDocs here (in order to do codegen in constant space).
        bufh <- newBufHandle h
        (imports, prof) <- cmmNativeGens dflags bufh us split_cmms [] [] 0

        let (native, colorStats, linearStats)
                Opt_D_dump_asm "Asm code"
                (vcat $ map (docToSDoc . pprNatCmmTop) $ concat native)

        -- dump global NCG stats for graph coloring allocator
        (case concat $ catMaybes colorStats of
                -- build the global register conflict graph
                        = foldl Color.union Color.initGraph
                        $ [ Color.raGraph stat
                                | stat@Color.RegAllocStatsStart{} <- stats]

                dumpSDoc dflags Opt_D_dump_asm_stats "NCG stats"
                        $ Color.pprStats stats graphGlobal

                        Opt_D_dump_asm_conflicts "Register conflict graph"
                        targetVirtualRegSqueeze
                        targetRealRegSqueeze)

        -- dump global NCG stats for linear allocator
        (case concat $ catMaybes linearStats of
                stats -> dumpSDoc dflags Opt_D_dump_asm_stats "NCG stats"
                        $ Linear.pprStats (concat native) stats)

        -- write out the imports
        Pretty.printDoc Pretty.LeftMode h
                $ makeImportsDoc dflags (concat imports)

 where  add_split (Cmm tops)
                | dopt Opt_SplitObjs dflags = split_marker : tops

        split_marker = CmmProc [] mkSplitMarkerLabel (ListGraph [])
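-- A minimal sketch of how a driver might invoke the native code generator.
-- The output file name and the 'dflags' and 'rawCmms' bindings here are
-- hypothetical driver plumbing, not taken from this module:
--
--      do us <- mkSplitUniqSupply 'n'
--         withFile "out.s" WriteMode $ \h ->
--             nativeCodeGen dflags h us rawCmms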
-- | Do native code generation on all these cmms.
cmmNativeGens :: DynFlags
        -> [ ([NatCmmTop Instr],
              Maybe [Color.RegAllocStats Instr],
              Maybe [Linear.RegAllocStats]) ]
              Maybe [Color.RegAllocStats Instr],
              Maybe [Linear.RegAllocStats])] )

cmmNativeGens _ _ _ [] impAcc profAcc _
        = return (reverse impAcc, reverse profAcc)

cmmNativeGens dflags h us (cmm : cmms) impAcc profAcc count
        (us', native, imports, colorStats, linearStats)
                <- cmmNativeGen dflags us cmm count

        Pretty.bufLeftRender h
                $ {-# SCC "pprNativeCode" #-} Pretty.vcat $ map pprNatCmmTop native

        -- carefully evaluate this strictly.  Binding it with 'let'
        -- and then using 'seq' doesn't work, because the let
        -- apparently gets inlined first.
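        -- (For contrast, a sketch of the binding that does *not* force the
        -- value here, per the note above; it is not code from this module:
        --      let lsPprNative = ... in lsPprNative `seq` ...
        -- the let is apparently inlined away before the seq can evaluate it.)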
        lsPprNative <- return $!
                if  dopt Opt_D_dump_asm dflags
                 || dopt Opt_D_dump_asm_stats dflags

        count' <- return $! count + 1;

        -- force evaluation of all this stuff to avoid space leaks
        seqString (showSDoc $ vcat $ map ppr imports) `seq` return ()

        cmmNativeGens dflags h us' cmms
                ((lsPprNative, colorStats, linearStats) : profAcc)

 where  seqString []     = ()
        seqString (x:xs) = x `seq` seqString xs `seq` ()
-- | Complete native code generation phase for a single top-level chunk of Cmm.
--      Dumps the output of each stage along the way.
--      Global conflict graph and NCG stats
        -> RawCmmTop            -- ^ the cmm to generate code for
        -> Int                  -- ^ sequence number of this top thing
        , [NatCmmTop Instr]                     -- native code
        , [CLabel]                              -- things imported by this cmm
        , Maybe [Color.RegAllocStats Instr]     -- stats for the coloring register allocator
        , Maybe [Linear.RegAllocStats])         -- stats for the linear register allocator
cmmNativeGen dflags us cmm count

        -- rewrite assignments to global regs
                {-# SCC "fixStgRegisters" #-}

        -- cmm to cmm optimisations
        let (opt_cmm, imports) =
                {-# SCC "cmmToCmm" #-}
                cmmToCmm dflags fixed_cmm

                Opt_D_dump_opt_cmm "Optimised Cmm"
                (pprCmm $ Cmm [opt_cmm])

        -- generate native code from cmm
        let ((native, lastMinuteImports), usGen) =
                {-# SCC "genMachCode" #-}
                initUs us $ genMachCode dflags opt_cmm

                Opt_D_dump_asm_native "Native code"
                (vcat $ map (docToSDoc . pprNatCmmTop) native)

        -- tag instructions with register liveness information
        let (withLiveness, usLive) =
                {-# SCC "regLiveness" #-}
                $ map natCmmTopToLive native

                Opt_D_dump_asm_liveness "Liveness annotations added"
                (vcat $ map ppr withLiveness)

        -- allocate registers
        (alloced, usAlloc, ppr_raStatsColor, ppr_raStatsLinear) <-
         if ( dopt Opt_RegsGraph dflags
           || dopt Opt_RegsIterative dflags)

                -- the regs usable for allocation
                let (alloc_regs :: UniqFM (UniqSet RealReg))
                        = foldr (\r -> plusUFM_C unionUniqSets
                                        $ unitUFM (targetClassOfRealReg r) (unitUniqSet r))

                -- do the graph coloring register allocation
                let ((alloced, regAllocStats), usAlloc)
                        = {-# SCC "RegAlloc" #-}
                          (mkUniqSet [0..maxSpillSlots])
                -- dump out what happened during register allocation
                        Opt_D_dump_asm_regalloc "Registers allocated"
                        (vcat $ map (docToSDoc . pprNatCmmTop) alloced)

                        Opt_D_dump_asm_regalloc_stages "Build/spill stages"
                        (vcat $ map (\(stage, stats)
                                -> text "# --------------------------"
                                $$ text "# cmm " <> int count <> text " Stage " <> int stage
                                $ zip [0..] regAllocStats)

                        if dopt Opt_D_dump_asm_stats dflags
                         then Just regAllocStats else Nothing

                -- force evaluation of the Maybe to avoid space leak
                mPprStats `seq` return ()

                return  ( alloced, usAlloc

                -- do linear register allocation
                let ((alloced, regAllocStats), usAlloc)
                        = {-# SCC "RegAlloc" #-}
                          $ mapUs Linear.regAlloc withLiveness

                        Opt_D_dump_asm_regalloc "Registers allocated"
                        (vcat $ map (docToSDoc . pprNatCmmTop) alloced)

                        if dopt Opt_D_dump_asm_stats dflags
                         then Just (catMaybes regAllocStats) else Nothing

                -- force evaluation of the Maybe to avoid space leak
                mPprStats `seq` return ()

                return  ( alloced, usAlloc
        ---- x86fp_kludge.  This pass inserts ffree instructions to clear
        ---- the FPU stack on x86.  The x86 ABI requires that the FPU stack
        ---- is clear, and library functions can return odd results if it is not.
        ---- NB. must happen before shortcutBranches, because that
        ---- generates JXX_GBLs which we can't fix up in x86fp_kludge.
                {-# SCC "x86fp_kludge" #-}
                map x86fp_kludge alloced

        ---- generate jump tables
                {-# SCC "generateJumpTables" #-}
                generateJumpTables kludged

        ---- shortcut branches
                {-# SCC "shortcutBranches" #-}
                shortcutBranches dflags tabled

                {-# SCC "sequenceBlocks" #-}
                map sequenceTop shorted

        ---- expansion of SPARC synthetic instrs
#if sparc_TARGET_ARCH
                {-# SCC "sparc_expand" #-}
                map expandTop sequenced

                Opt_D_dump_asm_expanded "Synthetic instructions expanded"
                (vcat $ map (docToSDoc . pprNatCmmTop) expanded)

                , lastMinuteImports ++ imports

x86fp_kludge :: NatCmmTop Instr -> NatCmmTop Instr
x86fp_kludge top@(CmmData _ _) = top
x86fp_kludge (CmmProc info lbl (ListGraph code)) =
        CmmProc info lbl (ListGraph $ i386_insert_ffrees code)
-- | Build a doc for all the imports.
makeImportsDoc :: DynFlags -> [CLabel] -> Pretty.Doc
makeImportsDoc dflags imports

#if HAVE_SUBSECTIONS_VIA_SYMBOLS
                -- On recent versions of Darwin, the linker supports
                -- dead-stripping of code and data on a per-symbol basis.
                -- There's a hack to make this work in PprMach.pprNatCmmTop.
            Pretty.$$ Pretty.text ".subsections_via_symbols"

#if HAVE_GNU_NONEXEC_STACK
                -- On recent GNU ELF systems one can mark an object file
                -- as not requiring an executable stack.  If all objects
                -- linked into a program have this note then the program
                -- will not use an executable stack, which is good for
                -- security.  GHC-generated code does not need an executable
                -- stack so add the note in:
            Pretty.$$ Pretty.text ".section .note.GNU-stack,\"\",@progbits"

#if !defined(darwin_TARGET_OS)
                -- And just because every other compiler does, let's stick in
                -- an identifier directive: .ident "GHC x.y.z"
            Pretty.$$ let compilerIdent = Pretty.text "GHC" Pretty.<+>
                                          Pretty.text cProjectVersion
                       in Pretty.text ".ident" Pretty.<+>
                          Pretty.doubleQuotes compilerIdent

        -- Generate "symbol stubs" for all external symbols that might
        -- come from a dynamic library.
        dyld_stubs :: [CLabel] -> Pretty.Doc
{-      dyld_stubs imps = Pretty.vcat $ map pprDyldSymbolStub $
                              map head $ group $ sort imps -}

        arch = platformArch $ targetPlatform dflags
        os   = platformOS   $ targetPlatform dflags

        -- (Hack) sometimes two Labels pretty-print the same, but have
        -- different uniques; so we compare their text versions...
                | needImportedSymbols arch os
                  (pprGotDeclaration arch os :) $
                  map ( pprImportedSymbol arch os . fst . head) $
                  groupBy (\(_,a) (_,b) -> a == b) $
                  sortBy (\(_,a) (_,b) -> compare a b) $

        doPpr lbl = (lbl, renderWithStyle (pprCLabel lbl) astyle)
        astyle = mkCodeStyle AsmStyle
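        -- For orientation, on an ELF (non-Darwin) target the trailer built
        -- above comes out roughly like this in the emitted assembly (a
        -- sketch; the version string comes from cProjectVersion):
        --
        --      .section .note.GNU-stack,"",@progbits
        --      .ident "GHC x.y.z"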
-- -----------------------------------------------------------------------------
-- Sequencing the basic blocks

-- Cmm BasicBlocks are self-contained entities: they always end in a
-- jump, either non-local or to another basic block in the same proc.
-- In this phase, we attempt to place the basic blocks in a sequence
-- such that as many of the local jumps as possible turn into fallthroughs.

sequenceTop top@(CmmData _ _) = top
sequenceTop (CmmProc info lbl (ListGraph blocks)) =
  CmmProc info lbl (ListGraph $ makeFarBranches $ sequenceBlocks blocks)

-- The algorithm is very simple (and stupid): we make a graph out of
-- the blocks where there is an edge from one block to another iff the
-- first block ends by jumping to the second.  Then we topologically
-- sort this graph.  Then traverse the list: for each block, we first
-- output the block, then if it has an out edge, we move the
-- destination of the out edge to the front of the list, and continue.

-- FYI, the classic layout for basic blocks uses postorder DFS; this
-- algorithm is implemented in Hoopl.
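-- As a rough illustration (the block names are hypothetical): given
--
--      A: ... jmp C        B: ... jmp D        C: ... jmp B
--
-- we emit A, pull its target C to the front, then pull C's target B,
-- giving the order A, C, B -- so the jumps A->C and C->B can become
-- fallthroughs and be dropped.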
        => [NatBasicBlock instr]
        -> [NatBasicBlock instr]

sequenceBlocks [] = []
sequenceBlocks (entry:blocks) =
  seqBlocks (mkNode entry : reverse (flattenSCCs (sccBlocks blocks)))
  -- the first block is the entry point ==> it must remain at the start.

        => [NatBasicBlock instr]
        -> [SCC ( NatBasicBlock instr

sccBlocks blocks = stronglyConnCompFromEdgedVerticesR (map mkNode blocks)

-- we're only interested in the last instruction of
-- the block, and only if it has a single destination.
        => [instr] -> [Unique]
        = case jumpDestsOfInstr (last instrs) of
                [one] -> [getUnique one]

mkNode :: (Instruction t)
       -> (GenBasicBlock t, Unique, [Unique])
mkNode block@(BasicBlock id instrs) = (block, getUnique id, getOutEdges instrs)

seqBlocks :: (Eq t) => [(GenBasicBlock t1, t, [t])] -> [GenBasicBlock t1]
seqBlocks ((block,_,[]) : rest)
  = block : seqBlocks rest
seqBlocks ((block@(BasicBlock id instrs),_,[next]) : rest)
  | can_fallthrough = BasicBlock id (init instrs) : seqBlocks rest'
  | otherwise       = block : seqBlocks rest'
        (can_fallthrough, rest') = reorder next [] rest
        -- TODO: we should do a better job for cycles; try to maximise the
        -- fallthroughs within a loop.
seqBlocks _ = panic "AsmCodegen:seqBlocks"

reorder :: (Eq a) => a -> [(t, a, t1)] -> [(t, a, t1)] -> (Bool, [(t, a, t1)])
reorder  _ accum [] = (False, reverse accum)
reorder id accum (b@(block,id',out) : rest)
  | id == id'  = (True, (block,id,out) : reverse accum ++ rest)
  | otherwise  = reorder id (b:accum) rest

-- -----------------------------------------------------------------------------
-- Making far branches

-- Conditional branches on PowerPC are limited to +-32KB; if our Procs get too
-- big, we have to work around this limitation.
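-- (The arithmetic behind the limit used below: PowerPC instructions are
-- 4 bytes each, so a +-32KB branch displacement reaches about 8192
-- instructions; nearLimit stays well under that to leave slack for
-- pseudo-instructions that expand to several real ones.)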
        :: [NatBasicBlock Instr]
        -> [NatBasicBlock Instr]

#if powerpc_TARGET_ARCH
makeFarBranches blocks
    | last blockAddresses < nearLimit = blocks
    | otherwise = zipWith handleBlock blockAddresses blocks

        blockAddresses = scanl (+) 0 $ map blockLen blocks
        blockLen (BasicBlock _ instrs) = length instrs

        handleBlock addr (BasicBlock id instrs)
                = BasicBlock id (zipWith makeFar [addr..] instrs)

        makeFar _ (BCC ALWAYS tgt) = BCC ALWAYS tgt
        makeFar addr (BCC cond tgt)
            | abs (addr - targetAddr) >= nearLimit
            where Just targetAddr = lookupUFM blockAddressMap tgt
        makeFar _ other = other

        nearLimit = 7000 -- 8192 instructions are allowed; let's keep some
                         -- distance, as we have a few pseudo-insns that are
                         -- pretty-printed as multiple instructions,
                         -- and it's just not worth the effort to calculate
        blockAddressMap = listToUFM $ zip (map blockId blocks) blockAddresses

-- -----------------------------------------------------------------------------
-- Generate jump tables

-- Analyzes all native code and generates data sections for all jump
-- table instructions.
        :: [NatCmmTop Instr] -> [NatCmmTop Instr]
generateJumpTables xs = concatMap f xs
    where f p@(CmmProc _ _ (ListGraph xs)) = p : concatMap g xs
          g (BasicBlock _ xs) = catMaybes (map generateJumpTableForInstr xs)
-- -----------------------------------------------------------------------------

shortcutBranches dflags tops
  | optLevel dflags < 1 = tops      -- only with -O or higher
  | otherwise           = map (apply_mapping mapping) tops'
    (tops', mappings) = mapAndUnzip build_mapping tops
    mapping = foldr plusUFM emptyUFM mappings

build_mapping :: GenCmmTop d t (ListGraph Instr)
              -> (GenCmmTop d t (ListGraph Instr), UniqFM JumpDest)
build_mapping top@(CmmData _ _) = (top, emptyUFM)
build_mapping (CmmProc info lbl (ListGraph []))
  = (CmmProc info lbl (ListGraph []), emptyUFM)
build_mapping (CmmProc info lbl (ListGraph (head:blocks)))
  = (CmmProc info lbl (ListGraph (head:others)), mapping)
        -- drop the shorted blocks, but don't ever drop the first one,
        -- because it is pointed to by a global label.

        -- find all the blocks that just consist of a jump that can be
        -- Don't completely eliminate loops here -- that can leave a dangling jump!
        (_, shortcut_blocks, others) = foldl split (emptyBlockSet, [], []) blocks
        split (s, shortcut_blocks, others) b@(BasicBlock id [insn])
            | Just (DestBlockId dest) <- canShortcut insn,
              (setMember dest s) || dest == id -- loop checks
            = (s, shortcut_blocks, b : others)
        split (s, shortcut_blocks, others) (BasicBlock id [insn])
            | Just dest <- canShortcut insn
            = (setInsert id s, (id,dest) : shortcut_blocks, others)
        split (s, shortcut_blocks, others) other = (s, shortcut_blocks, other : others)

        -- build a mapping from BlockId to JumpDest for shorting branches
        mapping = foldl add emptyUFM shortcut_blocks
        add ufm (id,dest) = addToUFM ufm id dest

apply_mapping :: UniqFM JumpDest
              -> GenCmmTop CmmStatic h (ListGraph Instr)
              -> GenCmmTop CmmStatic h (ListGraph Instr)
apply_mapping ufm (CmmData sec statics)
  = CmmData sec (map (shortcutStatic (lookupUFM ufm)) statics)
  -- we need to get the jump tables, so apply the mapping to the entries
apply_mapping ufm (CmmProc info lbl (ListGraph blocks))
  = CmmProc info lbl (ListGraph $ map short_bb blocks)
        short_bb (BasicBlock id insns) = BasicBlock id $! map short_insn insns
        short_insn i = shortcutJump (lookupUFM ufm) i
                -- shortcutJump should apply the mapping repeatedly,
                -- just in case we can short multiple branches.
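-- A small illustration of the idea (block names are hypothetical): if block
-- B consists of nothing but a jump to C, then B is dropped from the proc and
-- every branch that targeted B is rewritten to target C directly, so
--
--      A: ... jne B        B: jmp C        C: ...
--
-- becomes        A: ... jne C        C: ...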
-- -----------------------------------------------------------------------------
-- Instruction selection

-- Native code instruction selection for a chunk of Cmm code.  For
-- this part of the computation, we switch from the UniqSM monad to
-- the NatM monad.  The latter carries not only a Unique, but also an
-- Int denoting the current C stack pointer offset in the generated
-- code; this is needed for creating correct spill offsets on
-- architectures which don't offer, or for which it would be
-- prohibitively expensive to employ, a frame pointer register.  Viz, x86.

-- The offset is measured in bytes, and indicates the difference
-- between the current (simulated) C stack-ptr and the value it was at
-- the beginning of the block.  For stacks which grow down, this value
-- should be either zero or negative.

-- Switching between the two monads whilst carrying along the same
-- Unique supply breaks abstraction.  Is that bad?
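-- (For intuition -- a hypothetical illustration, not code from this module:
-- on x86, pushing an 8-byte argument moves the simulated stack pointer down,
-- so the delta becomes -8, and a spill slot previously addressed as 8(%esp)
-- must be addressed as 16(%esp) until the stack is popped back.)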
genMachCode dflags cmm_top
  = do  { initial_us <- getUs
        ; let initial_st           = mkNatM_State initial_us 0 dflags
              (new_tops, final_st) = initNat initial_st (cmmTopCodeGen dflags cmm_top)
              final_delta          = natm_delta final_st
              final_imports        = natm_imports final_st
        ; if   final_delta == 0
          then return (new_tops, final_imports)
          else pprPanic "genMachCode: nonzero final delta" (int final_delta)

-- -----------------------------------------------------------------------------
-- Generic Cmm optimiser

(b) Simple inlining: a temporary which is assigned to and then
    used, once, can be shorted.
(c) Position independent code and dynamic linking
    (i)  introduce the appropriate indirections
         and position independent refs
    (ii) compile a list of imported symbols

Ideas for other things we could do:
  - shortcut jumps-to-jumps
  - simple CSE: if an expr is assigned to a temp, then replace later occs of
    that expr with the temp, until the expr is no longer valid (can push through
    temp assignments, and certain assigns to mem...)
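A sketch of (b), in pseudo-Cmm (the temporary name is hypothetical):

        _c1 = R1 + 8;              ==>        I32[Sp] = R1 + 8;
        I32[Sp] = _c1;

The single-use temporary is substituted straight into its one use site.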
cmmToCmm :: DynFlags -> RawCmmTop -> (RawCmmTop, [CLabel])
cmmToCmm _ top@(CmmData _ _) = (top, [])
cmmToCmm dflags (CmmProc info lbl (ListGraph blocks)) = runCmmOpt dflags $ do
  blocks' <- mapM cmmBlockConFold (cmmMiniInline (cmmEliminateDeadBlocks blocks))
  return $ CmmProc info lbl (ListGraph blocks')

newtype CmmOptM a = CmmOptM (([CLabel], DynFlags) -> (# a, [CLabel] #))

instance Monad CmmOptM where
  return x = CmmOptM $ \(imports, _) -> (# x, imports #)
             CmmOptM $ \(imports, dflags) ->
               case f (imports, dflags) of
                 CmmOptM g' -> g' (imports', dflags)

addImportCmmOpt :: CLabel -> CmmOptM ()
addImportCmmOpt lbl = CmmOptM $ \(imports, _dflags) -> (# (), lbl:imports #)

getDynFlagsCmmOpt :: CmmOptM DynFlags
getDynFlagsCmmOpt = CmmOptM $ \(imports, dflags) -> (# dflags, imports #)

runCmmOpt :: DynFlags -> CmmOptM a -> (a, [CLabel])
runCmmOpt dflags (CmmOptM f) = case f ([], dflags) of
                                 (# result, imports #) -> (result, imports)
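-- A tiny usage sketch (the label binding is hypothetical): running
--
--      runCmmOpt dflags (addImportCmmOpt someLbl >> return 42)
--
-- yields (42, [someLbl]): the result of the computation paired with the
-- imports it recorded along the way.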
cmmBlockConFold :: CmmBasicBlock -> CmmOptM CmmBasicBlock
cmmBlockConFold (BasicBlock id stmts) = do
  stmts' <- mapM cmmStmtConFold stmts
  return $ BasicBlock id stmts'

cmmStmtConFold :: CmmStmt -> CmmOptM CmmStmt
        -> do src' <- cmmExprConFold DataReference src
              return $ case src' of
                CmmReg reg' | reg == reg' -> CmmNop
                new_src -> CmmAssign reg new_src

        -> do addr' <- cmmExprConFold DataReference addr
              src'  <- cmmExprConFold DataReference src
              return $ CmmStore addr' src'

        -> do addr' <- cmmExprConFold JumpReference addr
              return $ CmmJump addr' regs

      CmmCall target regs args srt returns
        -> do target' <- case target of
                            CmmCallee e conv -> do
                              e' <- cmmExprConFold CallReference e
                              return $ CmmCallee e' conv
                            other -> return other
              args' <- mapM (\(CmmHinted arg hint) -> do
                                arg' <- cmmExprConFold DataReference arg
                                return (CmmHinted arg' hint)) args
              return $ CmmCall target' regs args' srt returns

      CmmCondBranch test dest
        -> do test' <- cmmExprConFold DataReference test
              return $ case test' of
                CmmLit (CmmInt 0 _) ->
                  CmmComment (mkFastString ("deleted: " ++
                                            showSDoc (pprStmt stmt)))
                CmmLit (CmmInt _ _) -> CmmBranch dest
                _other -> CmmCondBranch test' dest

        -> do expr' <- cmmExprConFold DataReference expr
              return $ CmmSwitch expr' ids
cmmExprConFold :: ReferenceKind -> CmmExpr -> CmmOptM CmmExpr
cmmExprConFold referenceKind expr = do
  dflags <- getDynFlagsCmmOpt
  let arch = platformArch (targetPlatform dflags)

        -> do addr' <- cmmExprConFold DataReference addr
              return $ CmmLoad addr' rep

        -- For MachOps, we first optimize the children, and then we try
        -- our hand at some constant-folding.
        -> do args' <- mapM (cmmExprConFold DataReference) args
              return $ cmmMachOpFold mop args'

      CmmLit (CmmLabel lbl)
           cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
      CmmLit (CmmLabelOff lbl off)
           dynRef <- cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
           return $ cmmMachOpFold (MO_Add wordWidth) [
               (CmmLit $ CmmInt (fromIntegral off) wordWidth)

        -- On powerpc (non-PIC), it's easier to jump directly to a label than
        -- to use the register table, so we replace these registers
        -- with the corresponding labels:
      CmmReg (CmmGlobal EagerBlackholeInfo)
        | arch == ArchPPC && not opt_PIC
        -> cmmExprConFold referenceKind $
             CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "__stg_EAGER_BLACKHOLE_info")))

      CmmReg (CmmGlobal GCEnter1)
        | arch == ArchPPC && not opt_PIC
        -> cmmExprConFold referenceKind $
             CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "__stg_gc_enter_1")))

      CmmReg (CmmGlobal GCFun)
        | arch == ArchPPC && not opt_PIC
        -> cmmExprConFold referenceKind $
             CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "__stg_gc_fun")))