-- -----------------------------------------------------------------------------
-- (c) The University of Glasgow 1993-2004
--
-- This is the top-level module in the native code generator.
-- -----------------------------------------------------------------------------

module AsmCodeGen ( nativeCodeGen ) where

#include "HsVersions.h"
#include "nativeGen/NCG.h"
#if i386_TARGET_ARCH || x86_64_TARGET_ARCH

#elif sparc_TARGET_ARCH
import SPARC.CodeGen.Expand
import SPARC.ShortcutJump

#elif powerpc_TARGET_ARCH

#else
#error "AsmCodeGen: unknown architecture"
#endif
import RegAlloc.Liveness
import qualified RegAlloc.Linear.Main           as Linear

import qualified GraphColor                     as Color
import qualified RegAlloc.Graph.Main            as Color
import qualified RegAlloc.Graph.Stats           as Color
import qualified RegAlloc.Graph.TrivColorable   as Color

import CgUtils          ( fixStgRegisters )
import CmmOpt           ( cmmEliminateDeadBlocks, cmmMiniInline, cmmMachOpFold )
import Unique           ( Unique, getUnique )
import qualified Pretty
{-
The native-code generator has machine-independent and
machine-dependent modules.

This module ("AsmCodeGen") is the top-level machine-independent
module.  Before entering machine-dependent land, we do some
machine-independent optimisations (defined below) on the Cmm code.

We convert to the machine-specific 'Instr' datatype with
'cmmCodeGen', assuming an infinite supply of registers.  We then use
a machine-independent register allocator ('regAlloc') to rejoin
reality.  Obviously, 'regAlloc' has machine-specific helper
functions (see the notes about "RegAllocInfo" below).

Finally, we order the basic blocks of the function so as to minimise
the number of jumps between blocks, by utilising fallthrough wherever
possible.
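
As a rough, illustrative sketch only (ignoring the unique supply, dump
flags and statistics that the real driver threads through), the
per-chunk pipeline implemented by 'cmmNativeGen' below is:

    cmmToCmm  -->  genMachCode  -->  regLiveness  -->  regAlloc
              -->  generateJumpTables  -->  shortcutBranches
              -->  sequenceTop

i.e. generic Cmm-to-Cmm optimisation, instruction selection, liveness
annotation, register allocation, jump-table generation, branch
shortcutting, and finally block layout.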

The machine-dependent bits break down as follows:

  * ["MachRegs"]  Everything about the target platform's machine
    registers (and immediate operands, and addresses, which tend to
    intermingle/interact with registers).

  * ["MachInstrs"]  Includes the 'Instr' datatype (possibly should
    have a module of its own), plus a miscellany of other things
    (e.g., 'targetDoubleSize', 'smStablePtrTable', ...)

  * ["MachCodeGen"]  is where 'Cmm' stuff turns into
    machine instructions.

  * ["PprMach"]  'pprInstr' turns an 'Instr' into text (well, really
    a 'Doc').

  * ["RegAllocInfo"]  In the register allocator, we manipulate
    'MRegsState's, which are 'BitSet's, one bit per machine register.
    When we want to say something about a specific machine register
    (e.g., ``it gets clobbered by this instruction''), we set/unset
    its bit.  Obviously, we do this 'BitSet' thing for efficiency
    reasons.

    The 'RegAllocInfo' module collects together the machine-specific
    info needed to do register allocation.

  * ["RegisterAlloc"]  The (machine-independent) register allocator.
-}
-- -----------------------------------------------------------------------------
-- Top-level of the native codegen

nativeCodeGen :: DynFlags -> Handle -> UniqSupply -> [RawCmm] -> IO ()
nativeCodeGen dflags h us cmms
 = do
        let split_cmms = concat $ map add_split cmms

        -- BufHandle is a performance hack.  We could hide it inside
        -- Pretty if it weren't for the fact that we do lots of little
        -- printDocs here (in order to do codegen in constant space).
        bufh <- newBufHandle h
        (imports, prof) <- cmmNativeGens dflags bufh us split_cmms [] [] 0
        let (native, colorStats, linearStats)
                = unzip3 prof

        dumpIfSet_dyn dflags
                Opt_D_dump_asm "Asm code"
                (vcat $ map (docToSDoc . pprNatCmmTop) $ concat native)
        -- dump global NCG stats for graph coloring allocator
        (case concat $ catMaybes colorStats of
          []    -> return ()
          stats -> do
                -- build the global register conflict graph
                let graphGlobal
                        = foldl Color.union Color.initGraph
                        $ [ Color.raGraph stat
                                | stat@Color.RegAllocStatsStart{} <- stats]

                dumpSDoc dflags Opt_D_dump_asm_stats "NCG stats"
                        $ Color.pprStats stats graphGlobal
                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_conflicts "Register conflict graph"
                        $ Color.dotGraph
                                targetRegDotColor
                                (Color.trivColorable
                                        targetVirtualRegSqueeze
                                        targetRealRegSqueeze)
                        $ graphGlobal)
        -- dump global NCG stats for linear allocator
        (case concat $ catMaybes linearStats of
          []    -> return ()
          stats -> dumpSDoc dflags Opt_D_dump_asm_stats "NCG stats"
                        $ Linear.pprStats (concat native) stats)

        -- write out the imports
        Pretty.printDoc Pretty.LeftMode h
                $ makeImportsDoc dflags (concat imports)
 where  add_split (Cmm tops)
                | dopt Opt_SplitObjs dflags = split_marker : tops
                | otherwise                 = tops

        split_marker = CmmProc [] mkSplitMarkerLabel (ListGraph [])
-- | Do native code generation on all these cmms.
--
cmmNativeGens :: DynFlags
              -> BufHandle
              -> UniqSupply
              -> [RawCmmTop]
              -> [[CLabel]]
              -> [ ([NatCmmTop Instr],
                    Maybe [Color.RegAllocStats Instr],
                    Maybe [Linear.RegAllocStats]) ]
              -> Int
              -> IO ( [[CLabel]],
                      [([NatCmmTop Instr],
                        Maybe [Color.RegAllocStats Instr],
                        Maybe [Linear.RegAllocStats])] )

cmmNativeGens _ _ _ [] impAcc profAcc _
        = return (reverse impAcc, reverse profAcc)
cmmNativeGens dflags h us (cmm : cmms) impAcc profAcc count
 = do
        (us', native, imports, colorStats, linearStats)
                <- cmmNativeGen dflags us cmm count

        Pretty.bufLeftRender h
                $ {-# SCC "pprNativeCode" #-} Pretty.vcat $ map pprNatCmmTop native

        -- carefully evaluate this strictly.  Binding it with 'let'
        -- and then using 'seq' doesn't work, because the let
        -- apparently gets inlined first.
        lsPprNative <- return $!
                if    dopt Opt_D_dump_asm dflags
                   || dopt Opt_D_dump_asm_stats dflags
                        then native
                        else []

        count' <- return $! count + 1

        -- force evaluation of all this stuff to avoid space leaks
        seqString (showSDoc $ vcat $ map ppr imports) `seq` return ()

        cmmNativeGens dflags h us' cmms
                (imports : impAcc)
                ((lsPprNative, colorStats, linearStats) : profAcc)
                count'

 where  seqString []     = ()
        seqString (x:xs) = x `seq` seqString xs `seq` ()
-- | Complete native code generation phase for a single top-level chunk of Cmm.
--      Dumps the output of each stage along the way.
--      Global conflict graph and NCG stats
cmmNativeGen
        :: DynFlags
        -> UniqSupply
        -> RawCmmTop                            -- ^ the cmm to generate code for
        -> Int                                  -- ^ sequence number of this top thing
        -> IO   ( UniqSupply
                , [NatCmmTop Instr]             -- native code
                , [CLabel]                      -- things imported by this cmm
                , Maybe [Color.RegAllocStats Instr] -- stats for the coloring register allocator
                , Maybe [Linear.RegAllocStats]) -- stats for the linear register allocators

cmmNativeGen dflags us cmm count
 = do
        -- rewrite assignments to global regs
        let fixed_cmm =
                {-# SCC "fixStgRegisters" #-}
                fixStgRegisters cmm

        -- cmm to cmm optimisations
        let (opt_cmm, imports) =
                {-# SCC "cmmToCmm" #-}
                cmmToCmm dflags fixed_cmm

        dumpIfSet_dyn dflags
                Opt_D_dump_opt_cmm "Optimised Cmm"
                (pprCmm $ Cmm [opt_cmm])
        -- generate native code from cmm
        let ((native, lastMinuteImports), usGen) =
                {-# SCC "genMachCode" #-}
                initUs us $ genMachCode dflags opt_cmm

        dumpIfSet_dyn dflags
                Opt_D_dump_asm_native "Native code"
                (vcat $ map (docToSDoc . pprNatCmmTop) native)
        -- tag instructions with register liveness information
        let (withLiveness, usLive) =
                {-# SCC "regLiveness" #-}
                initUs usGen
                        $ mapUs regLiveness
                        $ map natCmmTopToLive native

        dumpIfSet_dyn dflags
                Opt_D_dump_asm_liveness "Liveness annotations added"
                (vcat $ map ppr withLiveness)
        -- allocate registers
        (alloced, usAlloc, ppr_raStatsColor, ppr_raStatsLinear) <-
         if ( dopt Opt_RegsGraph dflags
           || dopt Opt_RegsIterative dflags)
          then do
                -- the regs usable for allocation
                let (alloc_regs :: UniqFM (UniqSet RealReg))
                        = foldr (\r -> plusUFM_C unionUniqSets
                                        $ unitUFM (targetClassOfRealReg r) (unitUniqSet r))
                                emptyUFM
                                allocatableRegs

                -- do the graph coloring register allocation
                let ((alloced, regAllocStats), usAlloc)
                        = {-# SCC "RegAlloc" #-}
                          initUs usLive
                          $ Color.regAlloc
                                dflags
                                alloc_regs
                                (mkUniqSet [0..maxSpillSlots])
                                withLiveness
                -- dump out what happened during register allocation
                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc "Registers allocated"
                        (vcat $ map (docToSDoc . pprNatCmmTop) alloced)

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc_stages "Build/spill stages"
                        (vcat   $ map (\(stage, stats)
                                        -> text "# --------------------------"
                                        $$ text "# cmm " <> int count <> text " Stage " <> int stage
                                        $$ ppr stats)
                                $ zip [0..] regAllocStats)
                let mPprStats =
                        if dopt Opt_D_dump_asm_stats dflags
                         then Just regAllocStats else Nothing

                -- force evaluation of the Maybe to avoid space leak
                mPprStats `seq` return ()

                return  ( alloced, usAlloc
                        , mPprStats
                        , Nothing)
          else do
                -- do linear register allocation
                let ((alloced, regAllocStats), usAlloc)
                        = {-# SCC "RegAlloc" #-}
                          initUs usLive
                          $ liftM unzip
                          $ mapUs Linear.regAlloc withLiveness

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc "Registers allocated"
                        (vcat $ map (docToSDoc . pprNatCmmTop) alloced)

                let mPprStats =
                        if dopt Opt_D_dump_asm_stats dflags
                         then Just (catMaybes regAllocStats) else Nothing

                -- force evaluation of the Maybe to avoid space leak
                mPprStats `seq` return ()

                return  ( alloced, usAlloc
                        , Nothing
                        , mPprStats)
        ---- x86fp_kludge.  This pass inserts ffree instructions to clear
        ---- the FPU stack on x86.  The x86 ABI requires that the FPU stack
        ---- is clear, and library functions can return odd results if it
        ---- is not.
        ----
        ---- NB. must happen before shortcutBranches, because that
        ---- generates JXX_GBLs which we can't fix up in x86fp_kludge.
        let kludged =
                {-# SCC "x86fp_kludge" #-}
                map x86fp_kludge alloced
        ---- generate jump tables
        let tabled =
                {-# SCC "generateJumpTables" #-}
                generateJumpTables kludged

        ---- shortcut branches
        let shorted =
                {-# SCC "shortcutBranches" #-}
                shortcutBranches dflags tabled

        ---- sequence blocks
        let sequenced =
                {-# SCC "sequenceBlocks" #-}
                map sequenceTop shorted
        ---- expansion of SPARC synthetic instrs
#if sparc_TARGET_ARCH
        let expanded =
                {-# SCC "sparc_expand" #-}
                map expandTop sequenced

        dumpIfSet_dyn dflags
                Opt_D_dump_asm_expanded "Synthetic instructions expanded"
                (vcat $ map (docToSDoc . pprNatCmmTop) expanded)
#else
        let expanded = sequenced
#endif

        return  ( usAlloc
                , expanded
                , lastMinuteImports ++ imports
                , ppr_raStatsColor
                , ppr_raStatsLinear)

x86fp_kludge :: NatCmmTop Instr -> NatCmmTop Instr
x86fp_kludge top@(CmmData _ _) = top
x86fp_kludge (CmmProc info lbl (ListGraph code)) =
        CmmProc info lbl (ListGraph $ i386_insert_ffrees code)

-- | Build a doc for all the imports.
--
makeImportsDoc :: DynFlags -> [CLabel] -> Pretty.Doc
makeImportsDoc dflags imports
 = dyld_stubs imports

#if HAVE_SUBSECTIONS_VIA_SYMBOLS
            -- On recent versions of Darwin, the linker supports
            -- dead-stripping of code and data on a per-symbol basis.
            -- There's a hack to make this work in PprMach.pprNatCmmTop.
            Pretty.$$ Pretty.text ".subsections_via_symbols"
#endif
#if HAVE_GNU_NONEXEC_STACK
            -- On recent GNU ELF systems one can mark an object file
            -- as not requiring an executable stack.  If all objects
            -- linked into a program have this note then the program
            -- will not use an executable stack, which is good for
            -- security.  GHC-generated code does not need an executable
            -- stack, so add the note in:
            Pretty.$$ Pretty.text ".section .note.GNU-stack,\"\",@progbits"
#endif
            -- And just because every other compiler does, let's stick in
            -- an identifier directive: .ident "GHC x.y.z"
            Pretty.$$ let compilerIdent = Pretty.text "GHC" Pretty.<+>
                                          Pretty.text cProjectVersion
                      in Pretty.text ".ident" Pretty.<+>
                         Pretty.doubleQuotes compilerIdent
 where
        -- Generate "symbol stubs" for all external symbols that might
        -- come from a dynamic library.
        dyld_stubs :: [CLabel] -> Pretty.Doc
{-      dyld_stubs imps = Pretty.vcat $ map pprDyldSymbolStub $
                                        map head $ group $ sort imps -}

        arch = platformArch $ targetPlatform dflags
        os   = platformOS   $ targetPlatform dflags

        -- (Hack) sometimes two Labels pretty-print the same, but have
        -- different uniques; so we compare their text versions...
        dyld_stubs imps
                | needImportedSymbols arch os
                = Pretty.vcat $
                        (pprGotDeclaration arch os :) $
                        map ( pprImportedSymbol arch os . fst . head) $
                        groupBy (\(_,a) (_,b) -> a == b) $
                        sortBy  (\(_,a) (_,b) -> compare a b) $
                        map doPpr $
                        imps
                | otherwise
                = Pretty.empty

        doPpr lbl = (lbl, renderWithStyle (pprCLabel lbl) astyle)
        astyle = mkCodeStyle AsmStyle

-- -----------------------------------------------------------------------------
-- Sequencing the basic blocks

-- Cmm BasicBlocks are self-contained entities: they always end in a
-- jump, either non-local or to another basic block in the same proc.
-- In this phase, we attempt to place the basic blocks in a sequence
-- such that as many of the local jumps as possible turn into
-- fallthroughs.

sequenceTop :: NatCmmTop Instr -> NatCmmTop Instr
sequenceTop top@(CmmData _ _) = top
sequenceTop (CmmProc info lbl (ListGraph blocks)) =
        CmmProc info lbl (ListGraph $ makeFarBranches $ sequenceBlocks blocks)

-- The algorithm is very simple (and stupid): we make a graph out of
-- the blocks where there is an edge from one block to another iff the
-- first block ends by jumping to the second.  Then we topologically
-- sort this graph.  Then traverse the list: for each block, we first
-- output the block, then if it has an out edge, we move the
-- destination of the out edge to the front of the list, and continue.

-- FYI, the classic layout for basic blocks uses postorder DFS; this
-- algorithm is implemented in Hoopl.
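
-- As an illustrative example (a sketch, not real dump output): if block A
-- ends with a jump to C and C ends with a jump to B, the traversal emits
-- A, pulls C to the front and emits it, then pulls B to the front, giving
-- the order A, C, B; the jumps A->C and C->B become fallthroughs and are
-- dropped by seqBlocks below.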

sequenceBlocks
        :: Instruction instr
        => [NatBasicBlock instr]
        -> [NatBasicBlock instr]

sequenceBlocks [] = []
sequenceBlocks (entry:blocks) =
        seqBlocks (mkNode entry : reverse (flattenSCCs (sccBlocks blocks)))
        -- the first block is the entry point ==> it must remain at the start.

sccBlocks
        :: Instruction instr
        => [NatBasicBlock instr]
        -> [SCC ( NatBasicBlock instr
                , Unique
                , [Unique])]

sccBlocks blocks = stronglyConnCompFromEdgedVerticesR (map mkNode blocks)

-- we're only interested in the last instruction of
-- the block, and only if it has a single destination.
getOutEdges
        :: Instruction instr
        => [instr] -> [Unique]

getOutEdges instrs
        = case jumpDestsOfInstr (last instrs) of
                [one] -> [getUnique one]
                _     -> []

mkNode :: (Instruction t)
       => GenBasicBlock t
       -> (GenBasicBlock t, Unique, [Unique])
mkNode block@(BasicBlock id instrs) = (block, getUnique id, getOutEdges instrs)

seqBlocks :: (Eq t) => [(GenBasicBlock t1, t, [t])] -> [GenBasicBlock t1]
seqBlocks [] = []
seqBlocks ((block,_,[]) : rest)
        = block : seqBlocks rest
seqBlocks ((block@(BasicBlock id instrs),_,[next]) : rest)
        | can_fallthrough = BasicBlock id (init instrs) : seqBlocks rest'
        | otherwise       = block : seqBlocks rest'
        where
                (can_fallthrough, rest') = reorder next [] rest
                -- TODO: we should do a better job for cycles; try to maximise the
                -- fallthroughs within a loop.
seqBlocks _ = panic "AsmCodegen:seqBlocks"
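
-- 'reorder id accum blocks' searches 'blocks' for the block labelled 'id';
-- if found, that block is moved to the front of the returned list (so the
-- caller can fall through into it) and the Bool is True.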
reorder :: (Eq a) => a -> [(t, a, t1)] -> [(t, a, t1)] -> (Bool, [(t, a, t1)])
reorder  _ accum [] = (False, reverse accum)
reorder id accum (b@(block,id',out) : rest)
        | id == id'  = (True, (block,id,out) : reverse accum ++ rest)
        | otherwise  = reorder id (b:accum) rest

-- -----------------------------------------------------------------------------
-- Making far branches

-- Conditional branches on PowerPC are limited to +-32KB; if our Procs get too
-- big, we have to work around this limitation.
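
-- As an illustrative sketch of the workaround (the precise instructions
-- are chosen by makeFar below): a conditional branch whose target is out
-- of range,
--
--      BCC cond Lfar
--
-- is, in effect, replaced by a short conditional hop over an
-- unconditional branch,
--
--      BCC (inverse cond) Lnear ; branch-always Lfar ; Lnear: ...
--
-- so the conditional displacement stays small while the unconditional
-- branch covers the long distance.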

makeFarBranches
        :: [NatBasicBlock Instr]
        -> [NatBasicBlock Instr]
#if powerpc_TARGET_ARCH
makeFarBranches blocks
    | last blockAddresses < nearLimit = blocks
    | otherwise = zipWith handleBlock blockAddresses blocks
    where
        blockAddresses = scanl (+) 0 $ map blockLen blocks
        blockLen (BasicBlock _ instrs) = length instrs

        handleBlock addr (BasicBlock id instrs)
                = BasicBlock id (zipWith makeFar [addr..] instrs)

        makeFar _ (BCC ALWAYS tgt) = BCC ALWAYS tgt
        makeFar addr (BCC cond tgt)
            | abs (addr - targetAddr) >= nearLimit
            where Just targetAddr = lookupUFM blockAddressMap tgt
        makeFar _ other = other

        nearLimit = 7000 -- 8192 instructions are allowed; let's keep some
                         -- distance, as we have a few pseudo-insns that are
                         -- pretty-printed as multiple instructions,
                         -- and it's just not worth the effort to calculate
                         -- exactly.

        blockAddressMap = listToUFM $ zip (map blockId blocks) blockAddresses
#else
makeFarBranches = id
#endif

-- -----------------------------------------------------------------------------
-- Generate jump tables

-- Analyzes all native code and generates data sections for all jump
-- table instructions.
generateJumpTables
        :: [NatCmmTop Instr] -> [NatCmmTop Instr]
generateJumpTables xs = concatMap f xs
    where f p@(CmmProc _ _ (ListGraph xs)) = p : concatMap g xs
          f p = [p]
          g (BasicBlock _ xs) = catMaybes (map generateJumpTableForInstr xs)

-- -----------------------------------------------------------------------------
-- Shortcut branches

shortcutBranches
        :: DynFlags
        -> [NatCmmTop Instr]
        -> [NatCmmTop Instr]

shortcutBranches dflags tops
  | optLevel dflags < 1 = tops    -- only with -O or higher
  | otherwise           = map (apply_mapping mapping) tops'
  where
    (tops', mappings) = mapAndUnzip build_mapping tops
    mapping = foldr plusUFM emptyUFM mappings

build_mapping :: GenCmmTop d t (ListGraph Instr)
              -> (GenCmmTop d t (ListGraph Instr), UniqFM JumpDest)
build_mapping top@(CmmData _ _) = (top, emptyUFM)
build_mapping (CmmProc info lbl (ListGraph []))
  = (CmmProc info lbl (ListGraph []), emptyUFM)
build_mapping (CmmProc info lbl (ListGraph (head:blocks)))
  = (CmmProc info lbl (ListGraph (head:others)), mapping)
        -- drop the shorted blocks, but don't ever drop the first one,
        -- because it is pointed to by a global label.
  where
    -- find all the blocks that just consist of a jump that can be
    -- shorted.
    -- Don't completely eliminate loops here -- that can leave a dangling jump!
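
    -- As an illustrative sketch (not real output): if block L2 consists of
    -- nothing but a jump to L3, then L2 is recorded in the mapping (L2 -> L3),
    -- dropped from the proc, and apply_mapping later retargets every branch
    -- to L2 so that it points straight at L3:
    --
    --      L1: ... ; jne L2          ==>      L1: ... ; jne L3
    --      L2: jmp L3                         L3: ...
    --      L3: ...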
    (_, shortcut_blocks, others) = foldl split (emptyBlockSet, [], []) blocks
    split (s, shortcut_blocks, others) b@(BasicBlock id [insn])
        | Just (DestBlockId dest) <- canShortcut insn,
          (setMember dest s) || dest == id -- loop checks
        = (s, shortcut_blocks, b : others)
    split (s, shortcut_blocks, others) (BasicBlock id [insn])
        | Just dest <- canShortcut insn
        = (setInsert id s, (id,dest) : shortcut_blocks, others)
    split (s, shortcut_blocks, others) other = (s, shortcut_blocks, other : others)

    -- build a mapping from BlockId to JumpDest for shorting branches
    mapping = foldl add emptyUFM shortcut_blocks
    add ufm (id,dest) = addToUFM ufm id dest

apply_mapping :: UniqFM JumpDest
              -> GenCmmTop CmmStatic h (ListGraph Instr)
              -> GenCmmTop CmmStatic h (ListGraph Instr)
apply_mapping ufm (CmmData sec statics)
  = CmmData sec (map (shortcutStatic (lookupUFM ufm)) statics)
  -- we need to get the jump tables, so apply the mapping to the
  -- entries of a CmmData too.
apply_mapping ufm (CmmProc info lbl (ListGraph blocks))
  = CmmProc info lbl (ListGraph $ map short_bb blocks)
  where
    short_bb (BasicBlock id insns) = BasicBlock id $! map short_insn insns
    short_insn i = shortcutJump (lookupUFM ufm) i
                 -- shortcutJump should apply the mapping repeatedly,
                 -- just in case we can short multiple branches.

-- -----------------------------------------------------------------------------
-- Instruction selection

-- Native code instruction selection for a chunk of Cmm code.  For
-- this part of the computation, we switch from the UniqSM monad to
-- the NatM monad.  The latter carries not only a Unique, but also an
-- Int denoting the current C stack pointer offset in the generated
-- code; this is needed for creating correct spill offsets on
-- architectures which don't offer, or for which it would be
-- prohibitively expensive to employ, a frame pointer register.  Viz,
-- x86.

-- The offset is measured in bytes, and indicates the difference
-- between the current (simulated) C stack-ptr and the value it was at
-- the beginning of the block.  For stacks which grow down, this value
-- should be either zero or negative.
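
-- As an illustrative example: after the code generator pushes an 8-byte
-- argument onto the C stack, the simulated stack pointer has moved, so
-- the delta becomes -8, and any stack-relative spill addresses emitted
-- while it is live must be adjusted by 8 bytes; once the stack is popped
-- back the delta returns to 0, which genMachCode checks for at the end
-- of each top-level chunk.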

-- Switching between the two monads whilst carrying along the same
-- Unique supply breaks abstraction.  Is that bad?

genMachCode
        :: DynFlags
        -> RawCmmTop
        -> UniqSM ( [NatCmmTop Instr]
                  , [CLabel])

genMachCode dflags cmm_top
  = do  { initial_us <- getUs
        ; let initial_st           = mkNatM_State initial_us 0 dflags
              (new_tops, final_st) = initNat initial_st (cmmTopCodeGen dflags cmm_top)
              final_delta          = natm_delta final_st
              final_imports        = natm_imports final_st
        ; if   final_delta == 0
          then return (new_tops, final_imports)
          else pprPanic "genMachCode: nonzero final delta" (int final_delta)
        }

-- -----------------------------------------------------------------------------
-- Generic Cmm optimiser

{-
Here we do:

  (a) Constant folding
  (b) Simple inlining: a temporary which is assigned to and then
      used, once, can be shorted.
  (c) Position independent code and dynamic linking
        (i)  introduce the appropriate indirections
             and position independent refs
        (ii) compile a list of imported symbols

Ideas for other things we could do:

  - shortcut jumps-to-jumps
  - simple CSE: if an expr is assigned to a temp, then replace later occs of
    that expr with the temp, until the expr is no longer valid (can push through
    temp assignments, and certain assigns to mem...)
-}
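
-- As an illustrative example of (b) (a sketch, not actual -ddump-opt-cmm
-- output): in a fragment like
--
--      _tmp::I32 = R1 + 8;
--      I32[Sp] = _tmp;
--
-- where _tmp is used exactly once, cmmMiniInline substitutes the
-- right-hand side into the use and drops the assignment, leaving
--
--      I32[Sp] = R1 + 8;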

cmmToCmm :: DynFlags -> RawCmmTop -> (RawCmmTop, [CLabel])
cmmToCmm _ top@(CmmData _ _) = (top, [])
cmmToCmm dflags (CmmProc info lbl (ListGraph blocks)) = runCmmOpt dflags $ do
  blocks' <- mapM cmmBlockConFold (cmmMiniInline (cmmEliminateDeadBlocks blocks))
  return $ CmmProc info lbl (ListGraph blocks')
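
-- CmmOptM is a small combined reader/state monad for this pass: it reads
-- the DynFlags and accumulates the CLabels of imported symbols recorded
-- via addImportCmmOpt while references are rewritten.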
newtype CmmOptM a = CmmOptM (([CLabel], DynFlags) -> (# a, [CLabel] #))

instance Monad CmmOptM where
  return x = CmmOptM $ \(imports, _) -> (# x, imports #)

  (CmmOptM f) >>= g =
    CmmOptM $ \(imports, dflags) ->
        case f (imports, dflags) of
          (# x, imports' #) ->
            case g x of
              CmmOptM g' -> g' (imports', dflags)

addImportCmmOpt :: CLabel -> CmmOptM ()
addImportCmmOpt lbl = CmmOptM $ \(imports, _dflags) -> (# (), lbl:imports #)

getDynFlagsCmmOpt :: CmmOptM DynFlags
getDynFlagsCmmOpt = CmmOptM $ \(imports, dflags) -> (# dflags, imports #)

runCmmOpt :: DynFlags -> CmmOptM a -> (a, [CLabel])
runCmmOpt dflags (CmmOptM f) = case f ([], dflags) of
                        (# result, imports #) -> (result, imports)

cmmBlockConFold :: CmmBasicBlock -> CmmOptM CmmBasicBlock
cmmBlockConFold (BasicBlock id stmts) = do
  stmts' <- mapM cmmStmtConFold stmts
  return $ BasicBlock id stmts'

cmmStmtConFold :: CmmStmt -> CmmOptM CmmStmt
cmmStmtConFold stmt
   = case stmt of
        CmmAssign reg src
           -> do src' <- cmmExprConFold DataReference src
                 return $ case src' of
                        CmmReg reg' | reg == reg' -> CmmNop
                        new_src -> CmmAssign reg new_src

        CmmStore addr src
           -> do addr' <- cmmExprConFold DataReference addr
                 src'  <- cmmExprConFold DataReference src
                 return $ CmmStore addr' src'

        CmmJump addr regs
           -> do addr' <- cmmExprConFold JumpReference addr
                 return $ CmmJump addr' regs

        CmmCall target regs args srt returns
           -> do target' <- case target of
                                CmmCallee e conv -> do
                                    e' <- cmmExprConFold CallReference e
                                    return $ CmmCallee e' conv
                                other -> return other
                 args' <- mapM (\(CmmHinted arg hint) -> do
                                    arg' <- cmmExprConFold DataReference arg
                                    return (CmmHinted arg' hint)) args
                 return $ CmmCall target' regs args' srt returns

        CmmCondBranch test dest
           -> do test' <- cmmExprConFold DataReference test
                 return $ case test' of
                        CmmLit (CmmInt 0 _) ->
                            CmmComment (mkFastString ("deleted: " ++
                                        showSDoc (pprStmt stmt)))

                        CmmLit (CmmInt _ _) -> CmmBranch dest
                        _other -> CmmCondBranch test' dest

        CmmSwitch expr ids
           -> do expr' <- cmmExprConFold DataReference expr
                 return $ CmmSwitch expr' ids

        other
           -> return other

cmmExprConFold :: ReferenceKind -> CmmExpr -> CmmOptM CmmExpr
cmmExprConFold referenceKind expr = do
     dflags <- getDynFlagsCmmOpt
     let arch = platformArch (targetPlatform dflags)
     case expr of
        CmmLoad addr rep
           -> do addr' <- cmmExprConFold DataReference addr
                 return $ CmmLoad addr' rep

        CmmMachOp mop args
           -- For MachOps, we first optimize the children, and then we try
           -- our hand at some constant-folding.
           -> do args' <- mapM (cmmExprConFold DataReference) args
                 return $ cmmMachOpFold mop args'

        CmmLit (CmmLabel lbl)
           -> do
                cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
        CmmLit (CmmLabelOff lbl off)
           -> do
                dynRef <- cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
                return $ cmmMachOpFold (MO_Add wordWidth) [
                    dynRef,
                    (CmmLit $ CmmInt (fromIntegral off) wordWidth)
                  ]

        -- On powerpc (non-PIC), it's easier to jump directly to a label than
        -- to use the register table, so we replace these registers
        -- with the corresponding labels:
        CmmReg (CmmGlobal EagerBlackholeInfo)
           | arch == ArchPPC && not opt_PIC
           -> cmmExprConFold referenceKind $
              CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "__stg_EAGER_BLACKHOLE_info")))
        CmmReg (CmmGlobal GCEnter1)
           | arch == ArchPPC && not opt_PIC
           -> cmmExprConFold referenceKind $
              CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "__stg_gc_enter_1")))
        CmmReg (CmmGlobal GCFun)
           | arch == ArchPPC && not opt_PIC
           -> cmmExprConFold referenceKind $
              CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "__stg_gc_fun")))