-- -----------------------------------------------------------------------------
--
-- (c) The University of Glasgow 1993-2004
--
-- This is the top-level module in the native code generator.
--
-- -----------------------------------------------------------------------------

module AsmCodeGen ( nativeCodeGen ) where

#include "HsVersions.h"
#include "nativeGen/NCG.h"
#if i386_TARGET_ARCH || x86_64_TARGET_ARCH

#elif sparc_TARGET_ARCH
import SPARC.CodeGen.Expand
import SPARC.ShortcutJump

#elif powerpc_TARGET_ARCH

#else
#error "AsmCodeGen: unknown architecture"
#endif
import RegAlloc.Liveness
import qualified RegAlloc.Linear.Main           as Linear

import qualified GraphColor                     as Color
import qualified RegAlloc.Graph.Main            as Color
import qualified RegAlloc.Graph.Stats           as Color
import qualified RegAlloc.Graph.TrivColorable   as Color

import CgUtils          ( fixStgRegisters )
import CmmOpt           ( cmmEliminateDeadBlocks, cmmMiniInline, cmmMachOpFold )
import Unique           ( Unique, getUnique )
import qualified Pretty
{-
The native-code generator has machine-independent and
machine-dependent modules.

This module ("AsmCodeGen") is the top-level machine-independent
module.  Before entering machine-dependent land, we do some
machine-independent optimisations (defined below) on the
'CmmStmts's.

We convert to the machine-specific 'Instr' datatype with
'cmmCodeGen', assuming an infinite supply of registers.  We then use
a machine-independent register allocator ('regAlloc') to rejoin
reality.  Obviously, 'regAlloc' has machine-specific helper
functions (see about "RegAllocInfo" below).

Finally, we order the basic blocks of the function so as to minimise
the number of jumps between blocks, by utilising fallthrough wherever
possible.
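
(As a rough sketch of the flow through 'cmmNativeGen' below -- the stage
names are the functions defined in this module:

    fixStgRegisters -> cmmToCmm -> genMachCode -> regLiveness
      -> regAlloc -> x86fp_kludge -> generateJumpTables
      -> shortcutBranches -> sequenceTop

where 'x86fp_kludge' runs only on i386, and the SPARC synthetic-instruction
expansion runs only on SPARC.)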
The machine-dependent bits break down as follows:

  * ["MachRegs"]  Everything about the target platform's machine
    registers (and immediate operands, and addresses, which tend to
    intermingle/interact with registers).

  * ["MachInstrs"]  Includes the 'Instr' datatype (possibly should
    have a module of its own), plus a miscellany of other things
    (e.g., 'targetDoubleSize', 'smStablePtrTable', ...)

  * ["MachCodeGen"]  is where 'Cmm' stuff turns into
    machine instructions.

  * ["PprMach"]  'pprInstr' turns an 'Instr' into text (well, really
    a 'Doc').

  * ["RegAllocInfo"]  In the register allocator, we manipulate
    'MRegsState's, which are 'BitSet's, one bit per machine register.
    When we want to say something about a specific machine register
    (e.g., ``it gets clobbered by this instruction''), we set/unset
    its bit.  Obviously, we do this 'BitSet' thing for efficiency
    reasons.

    The 'RegAllocInfo' module collects together the machine-specific
    info needed to do register allocation.

  * ["RegisterAlloc"]  The (machine-independent) register allocator.
-}
-- -----------------------------------------------------------------------------
-- Top-level of the native codegen

nativeCodeGen :: DynFlags -> Handle -> UniqSupply -> [RawCmm] -> IO ()
nativeCodeGen dflags h us cmms
 = do
        let split_cmms = concat $ map add_split cmms

        -- BufHandle is a performance hack.  We could hide it inside
        -- Pretty if it weren't for the fact that we do lots of little
        -- printDocs here (in order to do codegen in constant space).
        bufh <- newBufHandle h
        (imports, prof) <- cmmNativeGens dflags bufh us split_cmms [] [] 0
        bFlush bufh

        let (native, colorStats, linearStats)
                = unzip3 prof

        dumpIfSet_dyn dflags
                Opt_D_dump_asm "Asm code"
                (vcat $ map (docToSDoc . pprNatCmmTop) $ concat native)
        -- dump global NCG stats for graph coloring allocator
        (case concat $ catMaybes colorStats of
          []    -> return ()
          stats -> do
                -- build the global register conflict graph
                let graphGlobal
                        = foldl Color.union Color.initGraph
                        $ [ Color.raGraph stat
                                | stat@Color.RegAllocStatsStart{} <- stats]

                dumpSDoc dflags Opt_D_dump_asm_stats "NCG stats"
                        $ Color.pprStats stats graphGlobal

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_conflicts "Register conflict graph"
                        $ Color.dotGraph
                                targetRegDotColor
                                (Color.trivColorable
                                        targetVirtualRegSqueeze
                                        targetRealRegSqueeze)
                        $ graphGlobal)
        -- dump global NCG stats for linear allocator
        (case concat $ catMaybes linearStats of
                []      -> return ()
                stats   -> dumpSDoc dflags Opt_D_dump_asm_stats "NCG stats"
                        $ Linear.pprStats (concat native) stats)
        -- write out the imports
        Pretty.printDoc Pretty.LeftMode h
                $ makeImportsDoc dflags (concat imports)
 where  add_split (Cmm tops)
                | dopt Opt_SplitObjs dflags = split_marker : tops
                | otherwise                 = tops

        split_marker = CmmProc [] mkSplitMarkerLabel (ListGraph [])
-- | Do native code generation on all these cmms.
--
cmmNativeGens :: DynFlags -> BufHandle -> UniqSupply
              -> [RawCmmTop] -> [[CLabel]]
              -> [ ([NatCmmTop Instr],
                    Maybe [Color.RegAllocStats Instr],
                    Maybe [Linear.RegAllocStats]) ]
              -> Int
              -> IO ( [[CLabel]],
                      [([NatCmmTop Instr],
                        Maybe [Color.RegAllocStats Instr],
                        Maybe [Linear.RegAllocStats])] )
cmmNativeGens _ _ _ [] impAcc profAcc _
        = return (reverse impAcc, reverse profAcc)
cmmNativeGens dflags h us (cmm : cmms) impAcc profAcc count
 = do
        (us', native, imports, colorStats, linearStats)
                <- cmmNativeGen dflags us cmm count

        Pretty.bufLeftRender h
                $ {-# SCC "pprNativeCode" #-} Pretty.vcat $ map pprNatCmmTop native
        -- carefully evaluate this strictly.  Binding it with 'let'
        -- and then using 'seq' doesn't work, because the let
        -- apparently gets inlined first.
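        -- (An illustrative sketch, not code from this module: with
        --      let xs = expensive in xs `seq` m
        -- the simplifier may substitute 'expensive' for 'xs', so nothing
        -- is forced at this point; whereas
        --      xs <- return $! expensive
        -- pins the forcing to this step of the IO sequence.)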
        lsPprNative <- return $!
                if  dopt Opt_D_dump_asm dflags
                 || dopt Opt_D_dump_asm_stats dflags
                        then native
                        else []
        count' <- return $! count + 1

        -- force evaluation of all this stuff to avoid space leaks
        seqString (showSDoc $ vcat $ map ppr imports) `seq` return ()
        cmmNativeGens dflags h us' cmms
                        (imports : impAcc)
                        ((lsPprNative, colorStats, linearStats) : profAcc)
                        count'
 where  seqString []            = ()
        seqString (x:xs)        = x `seq` seqString xs `seq` ()
-- | Complete native code generation phase for a single top-level chunk of Cmm,
--      dumping the output of each stage along the way.
--      Also returns the global conflict graph and NCG stats, when requested.
cmmNativeGen
        :: DynFlags
        -> UniqSupply
        -> RawCmmTop                                    -- ^ the cmm to generate code for
        -> Int                                          -- ^ sequence number of this top thing
        -> IO   ( UniqSupply
                , [NatCmmTop Instr]                     -- native code
                , [CLabel]                              -- things imported by this cmm
                , Maybe [Color.RegAllocStats Instr]     -- stats for the coloring register allocator
                , Maybe [Linear.RegAllocStats])         -- stats for the linear register allocator
cmmNativeGen dflags us cmm count
 = do
        -- rewrite assignments to global regs
        let fixed_cmm =
                {-# SCC "fixStgRegisters" #-}
                fixStgRegisters cmm
        -- cmm to cmm optimisations
        let (opt_cmm, imports) =
                {-# SCC "cmmToCmm" #-}
                cmmToCmm dflags fixed_cmm

        dumpIfSet_dyn dflags
                Opt_D_dump_opt_cmm "Optimised Cmm"
                (pprCmm $ Cmm [opt_cmm])
        -- generate native code from cmm
        let ((native, lastMinuteImports), usGen) =
                {-# SCC "genMachCode" #-}
                initUs us $ genMachCode dflags opt_cmm

        dumpIfSet_dyn dflags
                Opt_D_dump_asm_native "Native code"
                (vcat $ map (docToSDoc . pprNatCmmTop) native)
        -- tag instructions with register liveness information
        let (withLiveness, usLive) =
                {-# SCC "regLiveness" #-}
                initUs usGen
                        $ mapUs regLiveness
                        $ map natCmmTopToLive native

        dumpIfSet_dyn dflags
                Opt_D_dump_asm_liveness "Liveness annotations added"
                (vcat $ map ppr withLiveness)
        -- allocate registers
        (alloced, usAlloc, ppr_raStatsColor, ppr_raStatsLinear) <-
         if ( dopt Opt_RegsGraph dflags
           || dopt Opt_RegsIterative dflags)
          then do
                -- the regs usable for allocation
                let (alloc_regs :: UniqFM (UniqSet RealReg))
                        = foldr (\r -> plusUFM_C unionUniqSets
                                        $ unitUFM (targetClassOfRealReg r) (unitUniqSet r))
                                emptyUFM
                        $ allocatableRegs

                -- do the graph coloring register allocation
                let ((alloced, regAllocStats), usAlloc)
                        = {-# SCC "RegAlloc" #-}
                          initUs usLive
                          $ Color.regAlloc
                                dflags
                                alloc_regs
                                (mkUniqSet [0..maxSpillSlots])
                                withLiveness
                -- dump out what happened during register allocation
                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc "Registers allocated"
                        (vcat $ map (docToSDoc . pprNatCmmTop) alloced)

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc_stages "Build/spill stages"
                        (vcat   $ map (\(stage, stats)
                                        -> text "# --------------------------"
                                        $$ text "# cmm " <> int count <> text " Stage " <> int stage
                                        $$ ppr stats)
                                $ zip [0..] regAllocStats)

                let mPprStats =
                        if dopt Opt_D_dump_asm_stats dflags
                         then Just regAllocStats else Nothing

                -- force evaluation of the Maybe to avoid space leak
                mPprStats `seq` return ()
                return  ( alloced, usAlloc
                        , mPprStats
                        , Nothing)

          else do
                -- do linear register allocation
                let ((alloced, regAllocStats), usAlloc)
                        = {-# SCC "RegAlloc" #-}
                          initUs usLive
                          $ liftM unzip
                          $ mapUs Linear.regAlloc withLiveness

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc "Registers allocated"
                        (vcat $ map (docToSDoc . pprNatCmmTop) alloced)

                let mPprStats =
                        if dopt Opt_D_dump_asm_stats dflags
                         then Just (catMaybes regAllocStats) else Nothing

                -- force evaluation of the Maybe to avoid space leak
                mPprStats `seq` return ()

                return  ( alloced, usAlloc
                        , Nothing
                        , mPprStats)
        ---- x86fp_kludge.  This pass inserts ffree instructions to clear
        ---- the FPU stack on x86.  The x86 ABI requires that the FPU stack
        ---- is clear, and library functions can return odd results if it
        ---- isn't.
        ----
        ---- NB. must happen before shortcutBranches, because that
        ---- generates JXX_GBLs which we can't fix up in x86fp_kludge.
        let kludged =
#if i386_TARGET_ARCH
                {-# SCC "x86fp_kludge" #-}
                map x86fp_kludge alloced
#else
                alloced
#endif
        ---- generate jump tables
        let tabled =
                {-# SCC "generateJumpTables" #-}
                generateJumpTables kludged
        ---- shortcut branches
        let shorted =
                {-# SCC "shortcutBranches" #-}
                shortcutBranches dflags tabled
        ---- sequence blocks
        let sequenced =
                {-# SCC "sequenceBlocks" #-}
                map sequenceTop shorted
        ---- expansion of SPARC synthetic instrs
#if sparc_TARGET_ARCH
        let expanded =
                {-# SCC "sparc_expand" #-}
                map expandTop sequenced

        dumpIfSet_dyn dflags
                Opt_D_dump_asm_expanded "Synthetic instructions expanded"
                (vcat $ map (docToSDoc . pprNatCmmTop) expanded)
#else
        let expanded = sequenced
#endif
        return  ( usAlloc, expanded
                , lastMinuteImports ++ imports
                , ppr_raStatsColor, ppr_raStatsLinear)
#if i386_TARGET_ARCH
x86fp_kludge :: NatCmmTop Instr -> NatCmmTop Instr
x86fp_kludge top@(CmmData _ _) = top
x86fp_kludge (CmmProc info lbl (ListGraph code)) =
        CmmProc info lbl (ListGraph $ i386_insert_ffrees code)
#endif
-- | Build a doc for all the imports.
--
makeImportsDoc :: DynFlags -> [CLabel] -> Pretty.Doc
makeImportsDoc dflags imports
 = dyld_stubs imports
#if HAVE_SUBSECTIONS_VIA_SYMBOLS
                -- On recent versions of Darwin, the linker supports
                -- dead-stripping of code and data on a per-symbol basis.
                -- There's a hack to make this work in PprMach.pprNatCmmTop.
            Pretty.$$ Pretty.text ".subsections_via_symbols"
#endif
#if HAVE_GNU_NONEXEC_STACK
                -- On recent GNU ELF systems one can mark an object file
                -- as not requiring an executable stack.  If all objects
                -- linked into a program have this note then the program
                -- will not use an executable stack, which is good for
                -- security.  GHC-generated code does not need an executable
                -- stack, so add the note in:
            Pretty.$$ Pretty.text ".section .note.GNU-stack,\"\",@progbits"
#endif
#if !defined(darwin_TARGET_OS)
                -- And just because every other compiler does, let's stick in
                -- an identifier directive: .ident "GHC x.y.z"
            Pretty.$$ let compilerIdent = Pretty.text "GHC" Pretty.<+>
                                          Pretty.text cProjectVersion
                       in Pretty.text ".ident" Pretty.<+>
                          Pretty.doubleQuotes compilerIdent
#endif
 where
        -- Generate "symbol stubs" for all external symbols that might
        -- come from a dynamic library.
        dyld_stubs :: [CLabel] -> Pretty.Doc
{-      dyld_stubs imps = Pretty.vcat $ map pprDyldSymbolStub $
                                map head $ group $ sort imps -}
        arch    = platformArch  $ targetPlatform dflags
        os      = platformOS    $ targetPlatform dflags

        -- (Hack) sometimes two Labels pretty-print the same, but have
        -- different uniques; so we compare their text versions...
        dyld_stubs imps
                | needImportedSymbols arch os
                = Pretty.vcat $
                        (pprGotDeclaration arch os :) $
                        map ( pprImportedSymbol arch os . fst . head) $
                        groupBy (\(_,a) (_,b) -> a == b) $
                        sortBy (\(_,a) (_,b) -> compare a b) $
                        map doPpr $
                        imps
                | otherwise
                = Pretty.empty

        doPpr lbl = (lbl, renderWithStyle (pprCLabel lbl) astyle)
        astyle = mkCodeStyle AsmStyle
-- -----------------------------------------------------------------------------
-- Sequencing the basic blocks

-- Cmm BasicBlocks are self-contained entities: they always end in a
-- jump, either non-local or to another basic block in the same proc.
-- In this phase, we attempt to place the basic blocks in a sequence
-- such that as many of the local jumps as possible turn into
-- fallthroughs.
sequenceTop :: NatCmmTop Instr -> NatCmmTop Instr
sequenceTop top@(CmmData _ _) = top
sequenceTop (CmmProc info lbl (ListGraph blocks)) =
        CmmProc info lbl (ListGraph $ makeFarBranches $ sequenceBlocks blocks)
-- The algorithm is very simple (and stupid): we make a graph out of
-- the blocks where there is an edge from one block to another iff the
-- first block ends by jumping to the second.  Then we topologically
-- sort this graph.  Then traverse the list: for each block, we first
-- output the block, then if it has an out edge, we move the
-- destination of the out edge to the front of the list, and continue.

-- FYI, the classic layout for basic blocks uses postorder DFS; this
-- algorithm is implemented in Hoopl.
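
-- For example (an illustrative sketch, with made-up block names; this is
-- not code from this module):
--
--      A: ... jmp C        sequenced as:   A   (jmp C becomes fallthrough)
--      B: ... jmp D                        C   (jmp B becomes fallthrough)
--      C: ... jmp B                        B   (jmp D becomes fallthrough)
--      D: ...                              D
--
-- Each time a block's unique jump target is pulled to the front of the
-- remaining list, the jump instruction itself can be dropped (see
-- 'seqBlocks' below, which drops the final instruction when
-- can_fallthrough holds).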
sequenceBlocks
        :: Instruction instr
        => [NatBasicBlock instr]
        -> [NatBasicBlock instr]

sequenceBlocks [] = []
sequenceBlocks (entry:blocks) =
  seqBlocks (mkNode entry : reverse (flattenSCCs (sccBlocks blocks)))
  -- the first block is the entry point ==> it must remain at the start.
sccBlocks
        :: Instruction instr
        => [NatBasicBlock instr]
        -> [SCC ( NatBasicBlock instr
                , Unique
                , [Unique])]

sccBlocks blocks = stronglyConnCompFromEdgedVerticesR (map mkNode blocks)
-- we're only interested in the last instruction of
-- the block, and only if it has a single destination.
getOutEdges
        :: Instruction instr
        => [instr] -> [Unique]

getOutEdges instrs
        = case jumpDestsOfInstr (last instrs) of
                [one] -> [getUnique one]
                _many -> []

mkNode :: (Instruction t)
       => GenBasicBlock t
       -> (GenBasicBlock t, Unique, [Unique])
mkNode block@(BasicBlock id instrs) = (block, getUnique id, getOutEdges instrs)
seqBlocks :: (Eq t) => [(GenBasicBlock t1, t, [t])] -> [GenBasicBlock t1]
seqBlocks [] = []
seqBlocks ((block,_,[]) : rest)
  = block : seqBlocks rest
seqBlocks ((block@(BasicBlock id instrs),_,[next]) : rest)
  | can_fallthrough = BasicBlock id (init instrs) : seqBlocks rest'
  | otherwise       = block : seqBlocks rest'
  where
        (can_fallthrough, rest') = reorder next [] rest
        -- TODO: we should do a better job for cycles; try to maximise the
        -- fallthroughs within a loop.
seqBlocks _ = panic "AsmCodegen:seqBlocks"
reorder :: (Eq a) => a -> [(t, a, t1)] -> [(t, a, t1)] -> (Bool, [(t, a, t1)])
reorder _ accum [] = (False, reverse accum)
reorder id accum (b@(block,id',out) : rest)
  | id == id'  = (True, (block,id,out) : reverse accum ++ rest)
  | otherwise  = reorder id (b:accum) rest
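
-- A small usage sketch (hypothetical values, for illustration only):
-- searching for id 3 in [(a,1,_), (b,3,_), (c,4,_)] finds a match, so the
-- matching node is pulled to the front of the untraversed part:
--
--      reorder 3 [] [(a,1,out_a), (b,3,out_b), (c,4,out_c)]
--        == (True, [(b,3,out_b), (a,1,out_a), (c,4,out_c)])
--
-- If no node matches, the list is returned in its original order with False.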
-- -----------------------------------------------------------------------------
-- Making far branches

-- Conditional branches on PowerPC are limited to +-32KB; if our Procs get too
-- big, we have to work around this limitation.
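
-- The usual workaround, sketched here for illustration (the instruction
-- details live in the PowerPC backend, not in this module): a conditional
-- branch whose target is out of range is emitted as a short conditional
-- branch over an unconditional jump, since unconditional jumps have a much
-- larger range:
--
--      bcc   far_target        =>        b<inverse-cc> skip
--                                        b     far_target
--                                skip:   ...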
makeFarBranches
        :: [NatBasicBlock Instr]
        -> [NatBasicBlock Instr]

#if powerpc_TARGET_ARCH
makeFarBranches blocks
    | last blockAddresses < nearLimit = blocks
    | otherwise = zipWith handleBlock blockAddresses blocks
    where
        blockAddresses = scanl (+) 0 $ map blockLen blocks
        blockLen (BasicBlock _ instrs) = length instrs

        handleBlock addr (BasicBlock id instrs)
                = BasicBlock id (zipWith makeFar [addr..] instrs)

        makeFar _ (BCC ALWAYS tgt) = BCC ALWAYS tgt
        makeFar addr (BCC cond tgt)
            | abs (addr - targetAddr) >= nearLimit
            = BCCFAR cond tgt
            | otherwise
            = BCC cond tgt
            where Just targetAddr = lookupUFM blockAddressMap tgt
        makeFar _ other = other

        nearLimit = 7000 -- 8192 instructions are allowed; let's keep some
                         -- distance, as we have a few pseudo-insns that are
                         -- pretty-printed as multiple instructions,
                         -- and it's just not worth the effort to calculate
                         -- things exactly

        blockAddressMap = listToUFM $ zip (map blockId blocks) blockAddresses
#else
makeFarBranches = id
#endif
-- -----------------------------------------------------------------------------
-- Generate jump tables

-- Analyzes all native code and generates data sections for all jump
-- table instructions.
generateJumpTables
        :: [NatCmmTop Instr] -> [NatCmmTop Instr]
generateJumpTables xs = concatMap f xs
    where f p@(CmmProc _ _ (ListGraph xs)) = p : concatMap g xs
          f p = [p]
          g (BasicBlock _ xs) = catMaybes (map generateJumpTableForInstr xs)
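
-- For illustration (a sketch; the real work is in the per-architecture
-- generateJumpTableForInstr): a jump-through-table instruction, such as a
-- lowered CmmSwitch, yields a CmmData section holding the table of block
-- labels, which is emitted alongside the proc:
--
--      CmmProc ... [B1: ...; B2: ... jmp_tbl [L1, L2] ...]
--   => [ CmmProc ...
--      , CmmData <section> [label words for L1, L2] ]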
-- -----------------------------------------------------------------------------
-- Shortcut branches

shortcutBranches
        :: DynFlags
        -> [NatCmmTop Instr]
        -> [NatCmmTop Instr]

shortcutBranches dflags tops
  | optLevel dflags < 1 = tops      -- only with -O or higher
  | otherwise           = map (apply_mapping mapping) tops'
  where
    (tops', mappings) = mapAndUnzip build_mapping tops
    mapping = foldr plusUFM emptyUFM mappings
build_mapping :: GenCmmTop d t (ListGraph Instr)
              -> (GenCmmTop d t (ListGraph Instr), UniqFM JumpDest)
build_mapping top@(CmmData _ _) = (top, emptyUFM)
build_mapping (CmmProc info lbl (ListGraph []))
  = (CmmProc info lbl (ListGraph []), emptyUFM)
build_mapping (CmmProc info lbl (ListGraph (head:blocks)))
  = (CmmProc info lbl (ListGraph (head:others)), mapping)
        -- drop the shorted blocks, but don't ever drop the first one,
        -- because it is pointed to by a global label.
  where
    -- find all the blocks that just consist of a jump that can be
    -- shorted.
    -- Don't completely eliminate loops here -- that can leave a dangling jump!
    (_, shortcut_blocks, others) = foldl split (emptyBlockSet, [], []) blocks
    split (s, shortcut_blocks, others) b@(BasicBlock id [insn])
        | Just (DestBlockId dest) <- canShortcut insn,
          (setMember dest s) || dest == id  -- loop checks
        = (s, shortcut_blocks, b : others)
    split (s, shortcut_blocks, others) (BasicBlock id [insn])
        | Just dest <- canShortcut insn
        = (setInsert id s, (id,dest) : shortcut_blocks, others)
    split (s, shortcut_blocks, others) other = (s, shortcut_blocks, other : others)

    -- build a mapping from BlockId to JumpDest for shorting branches
    mapping = foldl add emptyUFM shortcut_blocks
    add ufm (id,dest) = addToUFM ufm id dest
apply_mapping :: UniqFM JumpDest
              -> GenCmmTop CmmStatic h (ListGraph Instr)
              -> GenCmmTop CmmStatic h (ListGraph Instr)
apply_mapping ufm (CmmData sec statics)
  = CmmData sec (map (shortcutStatic (lookupUFM ufm)) statics)
  -- we need to get the jump tables, so apply the mapping to the entries
  -- of a CmmData too.
apply_mapping ufm (CmmProc info lbl (ListGraph blocks))
  = CmmProc info lbl (ListGraph $ map short_bb blocks)
  where
    short_bb (BasicBlock id insns) = BasicBlock id $! map short_insn insns
    short_insn i = shortcutJump (lookupUFM ufm) i
                 -- shortcutJump should apply the mapping repeatedly,
                 -- just in case we can short multiple branches.
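
-- A worked sketch (hypothetical block names, for illustration only):
--
--      A: ... jcc B ...
--      B: jmp C            -- single-instruction block, canShortcut succeeds
--      C: ...
--
-- 'build_mapping' records B |-> C and drops block B; 'apply_mapping' then
-- rewrites the branch in A to jump straight to C.  Chains such as
-- B -> C -> D are followed all the way, because shortcutJump applies the
-- mapping repeatedly.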
-- -----------------------------------------------------------------------------
-- Instruction selection

-- Native code instruction selection for a chunk of stix code.  For
-- this part of the computation, we switch from the UniqSM monad to
-- the NatM monad.  The latter carries not only a Unique, but also an
-- Int denoting the current C stack pointer offset in the generated
-- code; this is needed for creating correct spill offsets on
-- architectures which don't offer, or for which it would be
-- prohibitively expensive to employ, a frame pointer register.  Viz,
-- x86.

-- The offset is measured in bytes, and indicates the difference
-- between the current (simulated) C stack-ptr and the value it was at
-- the beginning of the block.  For stacks which grow down, this value
-- should be either zero or negative.
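
-- (A concrete illustration, assuming a downward-growing stack: after
-- pushing two machine words of arguments for a C call, the simulated
-- offset is -2 * word size until the arguments are popped again, and any
-- spill-slot address computed meanwhile must be adjusted by that delta.)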
-- Switching between the two monads whilst carrying along the same
-- Unique supply breaks abstraction.  Is that bad?
genMachCode
        :: DynFlags
        -> RawCmmTop
        -> UniqSM ( [NatCmmTop Instr]
                  , [CLabel])

genMachCode dflags cmm_top
  = do  { initial_us <- getUs
        ; let initial_st           = mkNatM_State initial_us 0 dflags
              (new_tops, final_st) = initNat initial_st (cmmTopCodeGen dflags cmm_top)
              final_delta          = natm_delta final_st
              final_imports        = natm_imports final_st
        ; if   final_delta == 0
          then return (new_tops, final_imports)
          else pprPanic "genMachCode: nonzero final delta" (int final_delta)
        }
-- -----------------------------------------------------------------------------
-- Generic Cmm optimiser

{-
Here we do:

  (a) Constant folding
  (b) Simple inlining: a temporary which is assigned to and then
      used, once, can be shorted.
  (c) Position independent code and dynamic linking
        (i)  introduce the appropriate indirections
             and position independent refs
        (ii) compile a list of imported symbols

Ideas for other things we could do:

  - shortcut jumps-to-jumps
  - simple CSE: if an expr is assigned to a temp, then replace later occs of
    that expr with the temp, until the expr is no longer valid (can push through
    temp assignments, and certain assigns to mem...)
-}
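
{-
For example (an illustrative sketch in Cmm-like pseudocode, not taken from
a real dump): (a) and (b) together rewrite

        _tmp::I32 = 2 + 3;          ==>        R1 = 5;
        R1 = _tmp;

since the temporary is assigned once and used once, it is inlined
(cmmMiniInline), and the addition then folds to a constant (cmmMachOpFold).
-}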
cmmToCmm :: DynFlags -> RawCmmTop -> (RawCmmTop, [CLabel])
cmmToCmm _ top@(CmmData _ _) = (top, [])
cmmToCmm dflags (CmmProc info lbl (ListGraph blocks)) = runCmmOpt dflags $ do
  blocks' <- mapM cmmBlockConFold (cmmMiniInline (cmmEliminateDeadBlocks blocks))
  return $ CmmProc info lbl (ListGraph blocks')
newtype CmmOptM a = CmmOptM (([CLabel], DynFlags) -> (# a, [CLabel] #))

instance Monad CmmOptM where
  return x = CmmOptM $ \(imports, _) -> (# x, imports #)
  (CmmOptM f) >>= g =
    CmmOptM $ \(imports, dflags) ->
      case f (imports, dflags) of
        (# x, imports' #) ->
          case g x of
            CmmOptM g' -> g' (imports', dflags)
addImportCmmOpt :: CLabel -> CmmOptM ()
addImportCmmOpt lbl = CmmOptM $ \(imports, _dflags) -> (# (), lbl:imports #)

getDynFlagsCmmOpt :: CmmOptM DynFlags
getDynFlagsCmmOpt = CmmOptM $ \(imports, dflags) -> (# dflags, imports #)

runCmmOpt :: DynFlags -> CmmOptM a -> (a, [CLabel])
runCmmOpt dflags (CmmOptM f) = case f ([], dflags) of
                        (# result, imports #) -> (result, imports)
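
-- CmmOptM is a combined reader (DynFlags) and state ([CLabel]) monad over
-- an unboxed-tuple result.  A small usage sketch (hypothetical values, for
-- illustration only):
--
--      let (result, imps) = runCmmOpt dflags $ do
--              dfs <- getDynFlagsCmmOpt
--              addImportCmmOpt some_lbl        -- record an imported label
--              return (optLevel dfs)
--
-- Here 'imps' would be [some_lbl] and 'result' the optimisation level.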
cmmBlockConFold :: CmmBasicBlock -> CmmOptM CmmBasicBlock
cmmBlockConFold (BasicBlock id stmts) = do
  stmts' <- mapM cmmStmtConFold stmts
  return $ BasicBlock id stmts'
cmmStmtConFold :: CmmStmt -> CmmOptM CmmStmt
cmmStmtConFold stmt
   = case stmt of
        CmmAssign reg src
           -> do src' <- cmmExprConFold DataReference src
                 return $ case src' of
                        CmmReg reg' | reg == reg' -> CmmNop
                        new_src -> CmmAssign reg new_src

        CmmStore addr src
           -> do addr' <- cmmExprConFold DataReference addr
                 src'  <- cmmExprConFold DataReference src
                 return $ CmmStore addr' src'

        CmmJump addr regs
           -> do addr' <- cmmExprConFold JumpReference addr
                 return $ CmmJump addr' regs
        CmmCall target regs args srt returns
           -> do target' <- case target of
                              CmmCallee e conv -> do
                                e' <- cmmExprConFold CallReference e
                                return $ CmmCallee e' conv
                              other -> return other
                 args' <- mapM (\(CmmHinted arg hint) -> do
                                  arg' <- cmmExprConFold DataReference arg
                                  return (CmmHinted arg' hint)) args
                 return $ CmmCall target' regs args' srt returns
        CmmCondBranch test dest
           -> do test' <- cmmExprConFold DataReference test
                 return $ case test' of
                        CmmLit (CmmInt 0 _) ->
                          CmmComment (mkFastString ("deleted: " ++
                                        showSDoc (pprStmt stmt)))

                        CmmLit (CmmInt _ _) -> CmmBranch dest
                        _other -> CmmCondBranch test' dest

        CmmSwitch expr ids
           -> do expr' <- cmmExprConFold DataReference expr
                 return $ CmmSwitch expr' ids

        other
           -> return other
cmmExprConFold :: ReferenceKind -> CmmExpr -> CmmOptM CmmExpr
cmmExprConFold referenceKind expr = do
     dflags <- getDynFlagsCmmOpt
     let arch = platformArch (targetPlatform dflags)
     case expr of
        CmmLoad addr rep
           -> do addr' <- cmmExprConFold DataReference addr
                 return $ CmmLoad addr' rep
        CmmMachOp mop args
           -- For MachOps, we first optimize the children, and then we try
           -- our hand at some constant-folding.
           -> do args' <- mapM (cmmExprConFold DataReference) args
                 return $ cmmMachOpFold mop args'
        CmmLit (CmmLabel lbl)
           -> do
                cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
        CmmLit (CmmLabelOff lbl off)
           -> do
                 dynRef <- cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
                 return $ cmmMachOpFold (MO_Add wordWidth) [
                     dynRef,
                     (CmmLit $ CmmInt (fromIntegral off) wordWidth)
                   ]
        -- On powerpc (non-PIC), it's easier to jump directly to a label than
        -- to use the register table, so we replace these registers
        -- with the corresponding labels:
        CmmReg (CmmGlobal EagerBlackholeInfo)
          | arch == ArchPPC && not opt_PIC
          -> cmmExprConFold referenceKind $
             CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "__stg_EAGER_BLACKHOLE_info")))
        CmmReg (CmmGlobal GCEnter1)
          | arch == ArchPPC && not opt_PIC
          -> cmmExprConFold referenceKind $
             CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "__stg_gc_enter_1")))
        CmmReg (CmmGlobal GCFun)
          | arch == ArchPPC && not opt_PIC
          -> cmmExprConFold referenceKind $
             CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "__stg_gc_fun")))

        other
           -> return other