-- -----------------------------------------------------------------------------
--
-- (c) The University of Glasgow 1993-2004
--
-- This is the top-level module in the native code generator.
--
-- -----------------------------------------------------------------------------

{-# OPTIONS -w #-}
-- The above warning suppression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and fix
-- any warnings in the module. See
--     http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#Warnings
-- for details

module AsmCodeGen ( nativeCodeGen ) where

#include "HsVersions.h"
#include "nativeGen/NCG.h"
import PositionIndependentCode

import qualified RegAllocLinear as Linear
import qualified RegAllocColor  as Color
import qualified RegAllocStats  as Color
import qualified GraphColor     as Color

import CmmOpt           ( cmmMiniInline, cmmMachOpFold )

import Unique           ( Unique, getUnique )

import List             ( groupBy, sortBy )

#if powerpc_TARGET_ARCH
import StaticFlags      ( opt_Static, opt_PIC )
#endif

import Config           ( cProjectVersion )

import qualified Pretty
{-
The native-code generator has machine-independent and
machine-dependent modules.

This module ("AsmCodeGen") is the top-level machine-independent
module.  Before entering machine-dependent land, we do some
machine-independent optimisations (defined below) on the
'CmmStmts'.

We convert to the machine-specific 'Instr' datatype with
'cmmCodeGen', assuming an infinite supply of registers.  We then use
a machine-independent register allocator ('regAlloc') to rejoin
reality.  Obviously, 'regAlloc' has machine-specific helper
functions (see about "RegAllocInfo" below).

Finally, we order the basic blocks of the function so as to minimise
the number of jumps between blocks, by utilising fallthrough wherever
possible.

The machine-dependent bits break down as follows:

  * ["MachRegs"]  Everything about the target platform's machine
    registers (and immediate operands, and addresses, which tend to
    intermingle/interact with registers).

  * ["MachInstrs"]  Includes the 'Instr' datatype (possibly should
    have a module of its own), plus a miscellany of other things
    (e.g., 'targetDoubleSize', 'smStablePtrTable', ...)

  * ["MachCodeGen"]  is where 'Cmm' stuff turns into
    machine instructions.

  * ["PprMach"]  'pprInstr' turns an 'Instr' into text (well, really
    a 'Doc').

  * ["RegAllocInfo"]  In the register allocator, we manipulate
    'MRegsState's, which are 'BitSet's, one bit per machine register.
    When we want to say something about a specific machine register
    (e.g., ``it gets clobbered by this instruction''), we set/unset
    its bit.  Obviously, we do this 'BitSet' thing for efficiency
    reasons.

    The 'RegAllocInfo' module collects together the machine-specific
    info needed to do register allocation.

  * ["RegisterAlloc"]  The (machine-independent) register allocator.
-}
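
-- As a rough sketch of the pipeline just described, each top-level Cmm
-- chunk flows through the following functions (signatures simplified from
-- the real definitions later in this module; the register allocator is
-- either Linear.regAlloc or Color.regAlloc, selected by -fregs-graph):
--
--      cmmToCmm         :: DynFlags -> RawCmmTop -> (RawCmmTop, [CLabel])
--      genMachCode      :: DynFlags -> RawCmmTop -> UniqSM ([NatCmmTop], [CLabel])
--      regLiveness, regAlloc                        -- liveness + register allocation
--      shortcutBranches :: DynFlags -> [NatCmmTop] -> [NatCmmTop]
--      sequenceTop      :: NatCmmTop -> NatCmmTop   -- block layout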
-- -----------------------------------------------------------------------------
-- Top-level of the native codegen

nativeCodeGen :: DynFlags -> Handle -> UniqSupply -> [RawCmm] -> IO ()
nativeCodeGen dflags h us cmms
 = do
        let split_cmms = concat $ map add_split cmms

        (imports, prof)
                <- cmmNativeGens dflags h us split_cmms [] []

        let (native, colorStats, linearStats)
                = unzip3 prof

        -- dump native code
        dumpIfSet_dyn dflags
                Opt_D_dump_asm "Asm code"
                (vcat $ map (docToSDoc . pprNatCmmTop) $ concat native)

        -- dump global NCG stats for graph coloring allocator
        (case concat $ catMaybes colorStats of
          []    -> return ()
          stats -> do
                -- build the global register conflict graph
                let graphGlobal
                        = foldl Color.union Color.initGraph
                        $ [ Color.raGraph stat
                                | stat@Color.RegAllocStatsStart{} <- stats]

                dumpSDoc dflags Opt_D_dump_asm_stats "NCG stats"
                        $ Color.pprStats stats graphGlobal

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_conflicts "Register conflict graph"
                        $ Color.dotGraph Color.regDotColor trivColorable
                        $ graphGlobal)

        -- dump global NCG stats for linear allocator
        (case concat $ catMaybes linearStats of
          []    -> return ()
          stats -> dumpSDoc dflags Opt_D_dump_asm_stats "NCG stats"
                        $ Linear.pprStats (concat native) stats)

        -- write out the imports
        Pretty.printDoc Pretty.LeftMode h
                $ makeImportsDoc (concat imports)

 where  add_split (Cmm tops)
                | dopt Opt_SplitObjs dflags = split_marker : tops
                | otherwise                 = tops

        split_marker = CmmProc [] mkSplitMarkerLabel [] (ListGraph [])

-- | Do native code generation on all these cmms.
cmmNativeGens dflags h us [] impAcc profAcc
        = return (reverse impAcc, reverse profAcc)

cmmNativeGens dflags h us (cmm : cmms) impAcc profAcc
 = do
        (us', native, imports, colorStats, linearStats)
                <- cmmNativeGen dflags us cmm

        Pretty.printDoc Pretty.LeftMode h
                $ {-# SCC "pprNativeCode" #-} Pretty.vcat $ map pprNatCmmTop native

        let lsPprNative =
                if  dopt Opt_D_dump_asm dflags
                 || dopt Opt_D_dump_asm_stats dflags
                        then native
                        else []

        -- force evaluation of imports and lsPprNative to avoid space leaks
        seqString (showSDoc $ vcat $ map ppr imports) `seq` return ()
        lsPprNative `seq` return ()

        cmmNativeGens dflags h us' cmms
                        (imports : impAcc)
                        ((lsPprNative, colorStats, linearStats) : profAcc)

 where  seqString []     = ()
        seqString (x:xs) = x `seq` seqString xs `seq` ()

-- | Complete native code generation phase for a single top-level chunk of Cmm.
--      Dumping the output of each stage along the way.
--      Global conflict graph and NCG stats
cmmNativeGen
        :: DynFlags -> UniqSupply -> RawCmmTop
        -> IO   ( UniqSupply, [NatCmmTop], [CLabel]
                , Maybe [Color.RegAllocStats]
                , Maybe [Linear.RegAllocStats])
cmmNativeGen dflags us cmm
 = do
        -- rewrite assignments to global regs
        let (fixed_cmm, usFix) =
                {-# SCC "fixAssignsTop" #-}
                initUs us $ fixAssignsTop cmm

        -- cmm to cmm optimisations
        let (opt_cmm, imports) =
                {-# SCC "cmmToCmm" #-}
                cmmToCmm dflags fixed_cmm

        dumpIfSet_dyn dflags
                Opt_D_dump_opt_cmm "Optimised Cmm"
                (pprCmm $ Cmm [opt_cmm])

        -- generate native code from cmm
        let ((native, lastMinuteImports), usGen) =
                {-# SCC "genMachCode" #-}
                initUs usFix $ genMachCode dflags opt_cmm

        dumpIfSet_dyn dflags
                Opt_D_dump_asm_native "Native code"
                (vcat $ map (docToSDoc . pprNatCmmTop) native)

        -- tag instructions with register liveness information
        let (withLiveness, usLive) =
                {-# SCC "regLiveness" #-}
                initUs usGen $ mapUs regLiveness native

        dumpIfSet_dyn dflags
                Opt_D_dump_asm_liveness "Liveness annotations added"
                (vcat $ map ppr withLiveness)

        -- allocate registers
        (alloced, usAlloc, ppr_raStatsColor, ppr_raStatsLinear) <-
         if dopt Opt_RegsGraph dflags
          then do
                -- the regs usable for allocation
                let alloc_regs
                        = foldr (\r -> plusUFM_C unionUniqSets
                                        $ unitUFM (regClass r) (unitUniqSet r))
                                emptyUFM
                        $ map RealReg allocatableRegs

                -- if any of these dump flags are turned on we want to hang on to
                -- intermediate structures in the allocator - otherwise tell the
                -- allocator to ditch them early so we don't end up creating space leaks.
                let generateRegAllocStats = or
                        [ dopt Opt_D_dump_asm_regalloc_stages dflags
                        , dopt Opt_D_dump_asm_stats dflags
                        , dopt Opt_D_dump_asm_conflicts dflags ]

                -- graph coloring register allocation
                let ((alloced, regAllocStats), usAlloc)
                        = {-# SCC "RegAlloc" #-}
                          initUs usLive
                          $ Color.regAlloc
                                generateRegAllocStats
                                alloc_regs
                                (mkUniqSet [0..maxSpillSlots])
                                withLiveness

                -- dump out what happened during register allocation
                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc "Registers allocated"
                        (vcat $ map (docToSDoc . pprNatCmmTop) alloced)

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc_stages "Build/spill stages"
                        (vcat $ map (\(stage, stats)
                                        -> text "-- Stage " <> int stage
                                        $$ ppr stats)
                                $ zip [0..] regAllocStats)

                let mPprStats =
                        if dopt Opt_D_dump_asm_stats dflags
                         then Just regAllocStats else Nothing

                -- force evaluation of the Maybe to avoid space leak
                mPprStats `seq` return ()

                return  ( alloced, usAlloc
                        , mPprStats
                        , Nothing)

          else do
                -- do linear register allocation
                let ((alloced, regAllocStats), usAlloc)
                        = {-# SCC "RegAlloc" #-}
                          initUs usLive
                          $ liftM unzip
                          $ mapUs Linear.regAlloc withLiveness

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc "Registers allocated"
                        (vcat $ map (docToSDoc . pprNatCmmTop) alloced)

                let mPprStats =
                        if dopt Opt_D_dump_asm_stats dflags
                         then Just (catMaybes regAllocStats) else Nothing

                -- force evaluation of the Maybe to avoid space leak
                mPprStats `seq` return ()

                return  ( alloced, usAlloc
                        , Nothing
                        , mPprStats)

        ---- shortcut branches
        let shorted =
                {-# SCC "shortcutBranches" #-}
                shortcutBranches dflags alloced

        ---- sequence blocks
        let sequenced =
                {-# SCC "sequenceBlocks" #-}
                map sequenceTop shorted

        ---- x86fp_kludge
        let final_mach_code =
#if i386_TARGET_ARCH
                {-# SCC "x86fp_kludge" #-}
                map x86fp_kludge sequenced
#else
                sequenced
#endif

        return  ( usAlloc
                , final_mach_code
                , lastMinuteImports ++ imports
                , ppr_raStatsColor
                , ppr_raStatsLinear)

x86fp_kludge :: NatCmmTop -> NatCmmTop
x86fp_kludge top@(CmmData _ _) = top
x86fp_kludge top@(CmmProc info lbl params (ListGraph code)) =
        CmmProc info lbl params (ListGraph $ map bb_i386_insert_ffrees code)
        where
                bb_i386_insert_ffrees (BasicBlock id instrs) =
                        BasicBlock id (i386_insert_ffrees instrs)
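
-- Roughly speaking, the x87 floating-point register stack must be left
-- empty across non-local transfers of control, so on i386 each basic
-- block is rewritten by i386_insert_ffrees (defined in the
-- machine-dependent code) to insert the ffree instructions that pop any
-- x87 stack slots still in use.  The kludge is only applied when
-- compiling for i386 (see final_mach_code in cmmNativeGen above).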
-- | Build a doc for all the imports.
makeImportsDoc :: [CLabel] -> Pretty.Doc
makeImportsDoc imports
 = dyld_stubs imports

#if HAVE_SUBSECTIONS_VIA_SYMBOLS
                -- On recent versions of Darwin, the linker supports
                -- dead-stripping of code and data on a per-symbol basis.
                -- There's a hack to make this work in PprMach.pprNatCmmTop.
            Pretty.$$ Pretty.text ".subsections_via_symbols"
#endif
#if HAVE_GNU_NONEXEC_STACK
                -- On recent GNU ELF systems one can mark an object file
                -- as not requiring an executable stack. If all objects
                -- linked into a program have this note then the program
                -- will not use an executable stack, which is good for
                -- security. GHC generated code does not need an executable
                -- stack so add the note in:
            Pretty.$$ Pretty.text ".section .note.GNU-stack,\"\",@progbits"
#endif
#if !defined(darwin_TARGET_OS)
                -- And just because every other compiler does, let's stick in
                -- an identifier directive: .ident "GHC x.y.z"
            Pretty.$$ let compilerIdent = Pretty.text "GHC" Pretty.<+>
                                          Pretty.text cProjectVersion
                       in Pretty.text ".ident" Pretty.<+>
                          Pretty.doubleQuotes compilerIdent
#endif
 where
        -- Generate "symbol stubs" for all external symbols that might
        -- come from a dynamic library.
        dyld_stubs :: [CLabel] -> Pretty.Doc
{-      dyld_stubs imps = Pretty.vcat $ map pprDyldSymbolStub $
                                    map head $ group $ sort imps-}

        -- (Hack) sometimes two Labels pretty-print the same, but have
        -- different uniques; so we compare their text versions...
        dyld_stubs imps
                | needImportedSymbols
                = Pretty.vcat $
                        (pprGotDeclaration :) $
                        map (pprImportedSymbol . fst . head) $
                        groupBy (\(_,a) (_,b) -> a == b) $
                        sortBy  (\(_,a) (_,b) -> compare a b) $
                        map doPpr $
                        imps
                | otherwise
                = Pretty.empty

        doPpr lbl = (lbl, Pretty.render $ pprCLabel lbl astyle)
        astyle = mkCodeStyle AsmStyle

-- -----------------------------------------------------------------------------
-- Sequencing the basic blocks

-- Cmm BasicBlocks are self-contained entities: they always end in a
-- jump, either non-local or to another basic block in the same proc.
-- In this phase, we attempt to place the basic blocks in a sequence
-- such that as many of the local jumps as possible turn into
-- fallthroughs.

sequenceTop :: NatCmmTop -> NatCmmTop
sequenceTop top@(CmmData _ _) = top
sequenceTop (CmmProc info lbl params (ListGraph blocks)) =
  CmmProc info lbl params (ListGraph $ makeFarBranches $ sequenceBlocks blocks)

-- The algorithm is very simple (and stupid): we make a graph out of
-- the blocks where there is an edge from one block to another iff the
-- first block ends by jumping to the second.  Then we topologically
-- sort this graph.  Then traverse the list: for each block, we first
-- output the block, then if it has an out edge, we move the
-- destination of the out edge to the front of the list, and continue.

-- FYI, the classic layout for basic blocks uses postorder DFS; this
-- algorithm is implemented in cmm/ZipCfg.hs (NR 6 Sep 2007).
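
-- A tiny, self-contained model of the traversal described above
-- (illustrative only; the names and the simplified block representation
-- are invented and are not part of this module).  Each block is a pair of
-- its id and the id it jumps to, if any; 'layout' emits a block and then
-- pulls its jump target to the front of the remaining list (the real
-- sequenceBlocks additionally sorts strongly connected components first):
--
--   layout :: [(Int, Maybe Int)] -> [(Int, Maybe Int)]
--   layout [] = []
--   layout (b@(_, mtarget) : rest) =
--       b : case mtarget of
--             Just t | (pre, match:post) <- break ((== t) . fst) rest
--                    -> layout (match : pre ++ post)
--             _      -> layout rest
--
-- For blocks [(1, Just 3), (2, Nothing), (3, Just 2)] this yields the
-- order 1, 3, 2, turning both local jumps into fallthroughs.
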
sequenceBlocks :: [NatBasicBlock] -> [NatBasicBlock]
sequenceBlocks [] = []
sequenceBlocks (entry:blocks) =
  seqBlocks (mkNode entry : reverse (flattenSCCs (sccBlocks blocks)))
  -- the first block is the entry point ==> it must remain at the start.

sccBlocks :: [NatBasicBlock] -> [SCC (NatBasicBlock,Unique,[Unique])]
sccBlocks blocks = stronglyConnCompR (map mkNode blocks)

getOutEdges :: [Instr] -> [Unique]
getOutEdges instrs = case jumpDests (last instrs) [] of
                        [one] -> [getUnique one]
                        _many -> []
        -- we're only interested in the last instruction of
        -- the block, and only if it has a single destination.

mkNode block@(BasicBlock id instrs) = (block, getUnique id, getOutEdges instrs)

seqBlocks [] = []
seqBlocks ((block,_,[]) : rest)
  = block : seqBlocks rest
seqBlocks ((block@(BasicBlock id instrs),_,[next]) : rest)
  | can_fallthrough = BasicBlock id (init instrs) : seqBlocks rest'
  | otherwise       = block : seqBlocks rest'
  where
        (can_fallthrough, rest') = reorder next [] rest
        -- TODO: we should do a better job for cycles; try to maximise the
        -- fallthroughs within a loop.
seqBlocks _ = panic "AsmCodegen:seqBlocks"

reorder id accum [] = (False, reverse accum)
reorder id accum (b@(block,id',out) : rest)
  | id == id'  = (True, (block,id,out) : reverse accum ++ rest)
  | otherwise  = reorder id (b:accum) rest

-- -----------------------------------------------------------------------------
-- Making far branches

-- Conditional branches on PowerPC are limited to +-32KB; if our Procs get too
-- big, we have to work around this limitation.

makeFarBranches :: [NatBasicBlock] -> [NatBasicBlock]

#if powerpc_TARGET_ARCH
makeFarBranches blocks
    | last blockAddresses < nearLimit = blocks
    | otherwise = zipWith handleBlock blockAddresses blocks
    where
        blockAddresses = scanl (+) 0 $ map blockLen blocks
        blockLen (BasicBlock _ instrs) = length instrs

        handleBlock addr (BasicBlock id instrs)
                = BasicBlock id (zipWith makeFar [addr..] instrs)

        makeFar addr (BCC ALWAYS tgt) = BCC ALWAYS tgt
        makeFar addr (BCC cond tgt)
            | abs (addr - targetAddr) >= nearLimit
            = BCCFAR cond tgt
            | otherwise
            = BCC cond tgt
            where Just targetAddr = lookupUFM blockAddressMap tgt
        makeFar addr other = other

        nearLimit = 7000   -- 8192 instructions are allowed; let's keep some
                           -- distance, as we have a few pseudo-insns that are
                           -- pretty-printed as multiple instructions,
                           -- and it's just not worth the effort to calculate
                           -- things exactly

        blockAddressMap = listToUFM $ zip (map blockId blocks) blockAddresses
#else
makeFarBranches = id
#endif
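
-- For illustration (schematic, not the exact assembly emitted by the
-- pretty-printer): a conditional branch whose target lies outside the
-- +-32KB range is replaced by its far form, which can then be printed as
-- an inverted short branch that skips over an unconditional branch to the
-- real target, e.g.
--
--      bc  cond, target        becomes         bc  !cond, $+8
--                                              b   target
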
-- -----------------------------------------------------------------------------
-- Shortcut branches

shortcutBranches :: DynFlags -> [NatCmmTop] -> [NatCmmTop]
shortcutBranches dflags tops
  | optLevel dflags < 1 = tops    -- only with -O or higher
  | otherwise           = map (apply_mapping mapping) tops'
  where
    (tops', mappings) = mapAndUnzip build_mapping tops
    mapping = foldr plusUFM emptyUFM mappings

    build_mapping top@(CmmData _ _) = (top, emptyUFM)
    build_mapping (CmmProc info lbl params (ListGraph []))
        = (CmmProc info lbl params (ListGraph []), emptyUFM)
    build_mapping (CmmProc info lbl params (ListGraph (head:blocks)))
        = (CmmProc info lbl params (ListGraph (head:others)), mapping)
        -- drop the shorted blocks, but don't ever drop the first one,
        -- because it is pointed to by a global label.
      where
        -- find all the blocks that just consist of a jump that can be
        -- shorted.
        (shortcut_blocks, others) = partitionWith split blocks
        split (BasicBlock id [insn]) | Just dest <- canShortcut insn
                                     = Left (id,dest)
        split other = Right other

        -- build a mapping from BlockId to JumpDest for shorting branches
        mapping = foldl add emptyUFM shortcut_blocks
        add ufm (id,dest) = addToUFM ufm id dest

    apply_mapping ufm (CmmData sec statics)
        = CmmData sec (map (shortcutStatic (lookupUFM ufm)) statics)
        -- we need to get the jump tables, so apply the mapping to the entries
        -- of a CmmData too.
    apply_mapping ufm (CmmProc info lbl params (ListGraph blocks))
        = CmmProc info lbl params (ListGraph $ map short_bb blocks)
      where
        short_bb (BasicBlock id insns) = BasicBlock id $! map short_insn insns
        short_insn i = shortcutJump (lookupUFM ufm) i
                -- shortcutJump should apply the mapping repeatedly,
                -- just in case we can short multiple branches.

-- -----------------------------------------------------------------------------
-- Instruction selection

-- Native code instruction selection for a chunk of Cmm code.  For
-- this part of the computation, we switch from the UniqSM monad to
-- the NatM monad.  The latter carries not only a Unique, but also an
-- Int denoting the current C stack pointer offset in the generated
-- code; this is needed for creating correct spill offsets on
-- architectures which don't offer, or for which it would be
-- prohibitively expensive to employ, a frame pointer register.  Viz,
-- x86.

-- The offset is measured in bytes, and indicates the difference
-- between the current (simulated) C stack-ptr and the value it was at
-- the beginning of the block.  For stacks which grow down, this value
-- should be either zero or negative.

-- Switching between the two monads whilst carrying along the same
-- Unique supply breaks abstraction.  Is that bad?

genMachCode :: DynFlags -> RawCmmTop -> UniqSM ([NatCmmTop], [CLabel])

genMachCode dflags cmm_top
  = do  { initial_us <- getUs
        ; let initial_st           = mkNatM_State initial_us 0 dflags
              (new_tops, final_st) = initNat initial_st (cmmTopCodeGen cmm_top)
              final_delta          = natm_delta final_st
              final_imports        = natm_imports final_st
        ; if final_delta == 0
          then return (new_tops, final_imports)
          else pprPanic "genMachCode: nonzero final delta" (int final_delta)
        }
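
-- To make the stack-delta bookkeeping above concrete (the numbers here are
-- illustrative, not taken from this module): on x86, pushing a 4-byte
-- argument for a C call moves the simulated C stack pointer down, taking
-- the NatM delta from 0 to -4; popping the argument after the call
-- restores it to 0.  Spill offsets are computed relative to this delta,
-- and genMachCode insists that the delta is back to zero at the end of
-- each top-level chunk, hence the pprPanic above.
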
-- -----------------------------------------------------------------------------
-- Fixup assignments to global registers so that they assign to
-- locations within the RegTable, if appropriate.

-- Note that we currently don't fixup reads here: they're done by
-- the generic optimiser below, to avoid having two separate passes
-- over the Cmm.

fixAssignsTop :: RawCmmTop -> UniqSM RawCmmTop
fixAssignsTop top@(CmmData _ _) = returnUs top
fixAssignsTop (CmmProc info lbl params (ListGraph blocks)) =
  mapUs fixAssignsBlock blocks `thenUs` \ blocks' ->
  returnUs (CmmProc info lbl params (ListGraph blocks'))

fixAssignsBlock :: CmmBasicBlock -> UniqSM CmmBasicBlock
fixAssignsBlock (BasicBlock id stmts) =
  fixAssigns stmts `thenUs` \ stmts' ->
  returnUs (BasicBlock id stmts')

fixAssigns :: [CmmStmt] -> UniqSM [CmmStmt]
fixAssigns stmts =
  mapUs fixAssign stmts `thenUs` \ stmtss ->
  returnUs (concat stmtss)

fixAssign :: CmmStmt -> UniqSM [CmmStmt]
fixAssign (CmmAssign (CmmGlobal reg) src)
  | Left  realreg <- reg_or_addr
  = returnUs [CmmAssign (CmmGlobal reg) src]
  | Right baseRegAddr <- reg_or_addr
  = returnUs [CmmStore baseRegAddr src]
  -- Replace register leaves with appropriate StixTrees for
  -- the given target. GlobalRegs which map to a reg on this
  -- arch are left unchanged. Assigning to BaseReg is always
  -- illegal, so we check for that.
  where
    reg_or_addr = get_GlobalReg_reg_or_addr reg

fixAssign other_stmt = returnUs [other_stmt]
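
-- For example, on a target where Hp is not pinned to a machine register,
-- get_GlobalReg_reg_or_addr returns its address in the register table, so
-- an assignment  CmmAssign (CmmGlobal Hp) e  is rewritten by fixAssign
-- into the store  CmmStore baseRegAddr e.
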
-- -----------------------------------------------------------------------------
-- Generic Cmm optimiser

{-
Here we do:

  (a) Constant folding
  (b) Simple inlining: a temporary which is assigned to and then
      used, once, can be shorted.
  (c) Replacement of references to GlobalRegs which do not have
      machine registers by the appropriate memory load (eg.
      Hp ==> *(BaseReg + 34) ).
  (d) Position independent code and dynamic linking
        (i)  introduce the appropriate indirections
             and position independent refs
        (ii) compile a list of imported symbols

Ideas for other things we could do (ToDo):

  - shortcut jumps-to-jumps
  - eliminate dead code blocks
  - simple CSE: if an expr is assigned to a temp, then replace later occs of
    that expr with the temp, until the expr is no longer valid (can push through
    temp assignments, and certain assigns to mem...)
-}

cmmToCmm :: DynFlags -> RawCmmTop -> (RawCmmTop, [CLabel])
cmmToCmm _ top@(CmmData _ _) = (top, [])
cmmToCmm dflags (CmmProc info lbl params (ListGraph blocks)) = runCmmOpt dflags $ do
  blocks' <- mapM cmmBlockConFold (cmmMiniInline blocks)
  return $ CmmProc info lbl params (ListGraph blocks')
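
-- As a concrete illustration of (b) and (c) above (schematic Cmm, not
-- output captured from this module): mini-inlining turns
--
--      _c1 = R1 + 8;  I32[Sp] = _c1;       into       I32[Sp] = R1 + 8;
--
-- when _c1 is used exactly once, and on a target where Hp has no machine
-- register a reference to Hp is replaced by a load from its slot in the
-- register table, i.e.  Hp  ==>  *(BaseReg + 34)  as in (c).
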
newtype CmmOptM a = CmmOptM (([CLabel], DynFlags) -> (# a, [CLabel] #))

instance Monad CmmOptM where
  return x = CmmOptM $ \(imports, _) -> (# x, imports #)
  (CmmOptM f) >>= g =
    CmmOptM $ \(imports, dflags) ->
        case f (imports, dflags) of
          (# x, imports' #) ->
            case g x of
              CmmOptM g' -> g' (imports', dflags)

addImportCmmOpt :: CLabel -> CmmOptM ()
addImportCmmOpt lbl = CmmOptM $ \(imports, dflags) -> (# (), lbl:imports #)

getDynFlagsCmmOpt :: CmmOptM DynFlags
getDynFlagsCmmOpt = CmmOptM $ \(imports, dflags) -> (# dflags, imports #)

runCmmOpt :: DynFlags -> CmmOptM a -> (a, [CLabel])
runCmmOpt dflags (CmmOptM f) = case f ([], dflags) of
                        (# result, imports #) -> (result, imports)
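
-- In effect, CmmOptM threads the DynFlags through as a read-only
-- environment and accumulates imported CLabels writer-style, presumably
-- using an unboxed result tuple to avoid allocating a pair at every bind.
-- runCmmOpt starts from an empty import list and hands back whatever was
-- collected by addImportCmmOpt.
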
cmmBlockConFold :: CmmBasicBlock -> CmmOptM CmmBasicBlock
cmmBlockConFold (BasicBlock id stmts) = do
  stmts' <- mapM cmmStmtConFold stmts
  return $ BasicBlock id stmts'

cmmStmtConFold stmt
   = case stmt of
        CmmAssign reg src
           -> do src' <- cmmExprConFold DataReference src
                 return $ case src' of
                        CmmReg reg' | reg == reg' -> CmmNop
                        new_src -> CmmAssign reg new_src

        CmmStore addr src
           -> do addr' <- cmmExprConFold DataReference addr
                 src'  <- cmmExprConFold DataReference src
                 return $ CmmStore addr' src'

        CmmJump addr regs
           -> do addr' <- cmmExprConFold JumpReference addr
                 return $ CmmJump addr' regs

        CmmCall target regs args srt returns
           -> do target' <- case target of
                              CmmCallee e conv -> do
                                e' <- cmmExprConFold CallReference e
                                return $ CmmCallee e' conv
                              other -> return other
                 args' <- mapM (\(arg, hint) -> do
                                  arg' <- cmmExprConFold DataReference arg
                                  return (arg', hint)) args
                 return $ CmmCall target' regs args' srt returns

        CmmCondBranch test dest
           -> do test' <- cmmExprConFold DataReference test
                 return $ case test' of
                   CmmLit (CmmInt 0 _) ->
                     CmmComment (mkFastString ("deleted: " ++
                                        showSDoc (pprStmt stmt)))

                   CmmLit (CmmInt n _) -> CmmBranch dest
                   other -> CmmCondBranch test' dest

        CmmSwitch expr ids
           -> do expr' <- cmmExprConFold DataReference expr
                 return $ CmmSwitch expr' ids

        other
           -> return other
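
-- Note that when constant folding reduces the scrutinee of a
-- CmmCondBranch to a literal, the statement is rewritten rather than
-- dropped: a literal 0 leaves a CmmComment in its place (cmmStmtConFold
-- must still return one statement per statement), and any non-zero
-- literal turns the conditional into an unconditional CmmBranch.
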
cmmExprConFold referenceKind expr
   = case expr of
        CmmLoad addr rep
           -> do addr' <- cmmExprConFold DataReference addr
                 return $ CmmLoad addr' rep

        CmmMachOp mop args
           -- For MachOps, we first optimize the children, and then we try
           -- our hand at some constant-folding.
           -> do args' <- mapM (cmmExprConFold DataReference) args
                 return $ cmmMachOpFold mop args'

        CmmLit (CmmLabel lbl)
           -> do
                dflags <- getDynFlagsCmmOpt
                cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
        CmmLit (CmmLabelOff lbl off)
           -> do
                dflags <- getDynFlagsCmmOpt
                dynRef <- cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
                return $ cmmMachOpFold (MO_Add wordRep) [
                    dynRef,
                    (CmmLit $ CmmInt (fromIntegral off) wordRep)
                  ]

#if powerpc_TARGET_ARCH
        -- On powerpc (non-PIC), it's easier to jump directly to a label than
        -- to use the register table, so we replace these registers
        -- with the corresponding labels:
        CmmReg (CmmGlobal GCEnter1)
          | not opt_PIC
          -> cmmExprConFold referenceKind $
             CmmLit (CmmLabel (mkRtsCodeLabel SLIT("__stg_gc_enter_1")))
        CmmReg (CmmGlobal GCFun)
          | not opt_PIC
          -> cmmExprConFold referenceKind $
             CmmLit (CmmLabel (mkRtsCodeLabel SLIT("__stg_gc_fun")))
#endif

        CmmReg (CmmGlobal mid)
           -- Replace register leaves with appropriate StixTrees for
           -- the given target. MagicIds which map to a reg on this
           -- arch are left unchanged. For the rest, BaseReg is taken
           -- to mean the address of the reg table in MainCapability,
           -- and for all others we generate an indirection to its
           -- location in the register table.
           -> case get_GlobalReg_reg_or_addr mid of
                Left  realreg -> return expr
                Right baseRegAddr
                   -> case mid of
                        BaseReg -> cmmExprConFold DataReference baseRegAddr
                        other   -> cmmExprConFold DataReference
                                        (CmmLoad baseRegAddr (globalRegRep mid))

        -- eliminate zero offsets
        CmmRegOff reg 0
           -> cmmExprConFold referenceKind (CmmReg reg)

        CmmRegOff (CmmGlobal mid) offset
           -- CmmRegOff leaves are just a shorthand form. If the reg maps
           -- to a real reg, we keep the shorthand, otherwise, we just
           -- expand it and defer to the above code.
           -> case get_GlobalReg_reg_or_addr mid of
                Left  realreg -> return expr
                Right baseRegAddr
                   -> cmmExprConFold DataReference (CmmMachOp (MO_Add wordRep) [
                        CmmReg (CmmGlobal mid),
                        CmmLit (CmmInt (fromIntegral offset)
                                       wordRep)])

        other
           -> return other

-- -----------------------------------------------------------------------------