-- -----------------------------------------------------------------------------
--
-- (c) The University of Glasgow 1993-2004
--
-- This is the top-level module in the native code generator.
--
-- -----------------------------------------------------------------------------
{-# OPTIONS -w #-}
-- The above warning suppression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and fix
-- any warnings in the module. See
--     http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#Warnings
-- for details
module AsmCodeGen ( nativeCodeGen ) where

#include "HsVersions.h"
#include "nativeGen/NCG.h"
import PositionIndependentCode

import qualified RegAllocLinear as Linear
import qualified RegAllocColor  as Color
import qualified RegAllocStats  as Color
import qualified GraphColor     as Color

import CmmOpt           ( cmmMiniInline, cmmMachOpFold )
import Unique           ( Unique, getUnique )
import List             ( groupBy, sortBy )

#if powerpc_TARGET_ARCH
import StaticFlags      ( opt_Static, opt_PIC )
#endif
import Config           ( cProjectVersion )

import qualified Pretty
The native-code generator has machine-independent and
machine-dependent modules.

This module ("AsmCodeGen") is the top-level machine-independent
module.  Before entering machine-dependent land, we do some
machine-independent optimisations (defined below) on the
'CmmStmts's.

We convert to the machine-specific 'Instr' datatype with
'cmmCodeGen', assuming an infinite supply of registers.  We then use
a machine-independent register allocator ('regAlloc') to rejoin
reality.  Obviously, 'regAlloc' has machine-specific helper
functions (see about "RegAllocInfo" below).

Finally, we order the basic blocks of the function so as to minimise
the number of jumps between blocks, by utilising fallthrough wherever
possible.
The machine-dependent bits break down as follows:

  * ["MachRegs"]  Everything about the target platform's machine
    registers (and immediate operands, and addresses, which tend to
    intermingle/interact with registers).

  * ["MachInstrs"]  Includes the 'Instr' datatype (possibly should
    have a module of its own), plus a miscellany of other things
    (e.g., 'targetDoubleSize', 'smStablePtrTable', ...)

  * ["MachCodeGen"]  is where 'Cmm' stuff turns into
    machine instructions.

  * ["PprMach"]  'pprInstr' turns an 'Instr' into text (well, really
    a 'Doc').

  * ["RegAllocInfo"]  In the register allocator, we manipulate
    'MRegsState's, which are 'BitSet's, one bit per machine register.
    When we want to say something about a specific machine register
    (e.g., ``it gets clobbered by this instruction''), we set/unset
    its bit.  Obviously, we do this 'BitSet' thing for efficiency
    reasons.

    The 'RegAllocInfo' module collects together the machine-specific
    info needed to do register allocation.

  * ["RegisterAlloc"]  The (machine-independent) register allocator.
-- -----------------------------------------------------------------------------
-- Top-level of the native codegen

nativeCodeGen :: DynFlags -> Handle -> UniqSupply -> [RawCmm] -> IO ()
nativeCodeGen dflags h us cmms
 = do
        let split_cmms  = concat $ map add_split cmms

        (imports, prof)
                <- cmmNativeGens dflags h us split_cmms [] [] 0

        let (native, colorStats, linearStats)
                = unzip3 prof

        -- dump native code
        dumpIfSet_dyn dflags
                Opt_D_dump_asm "Asm code"
                (vcat $ map (docToSDoc . pprNatCmmTop) $ concat native)

        -- dump global NCG stats for graph coloring allocator
        (case concat $ catMaybes colorStats of
          []    -> return ()
          stats -> do
                -- build the global register conflict graph
                let graphGlobal
                        = foldl Color.union Color.initGraph
                        $ [ Color.raGraph stat
                                | stat@Color.RegAllocStatsStart{} <- stats]

                dumpSDoc dflags Opt_D_dump_asm_stats "NCG stats"
                        $ Color.pprStats stats graphGlobal

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_conflicts "Register conflict graph"
                        $ Color.dotGraph Color.regDotColor trivColorable
                        $ graphGlobal)

        -- dump global NCG stats for linear allocator
        (case concat $ catMaybes linearStats of
                []      -> return ()
                stats   -> dumpSDoc dflags Opt_D_dump_asm_stats "NCG stats"
                                $ Linear.pprStats (concat native) stats)

        -- write out the imports
        Pretty.printDoc Pretty.LeftMode h
                $ makeImportsDoc (concat imports)

        return ()

 where  add_split (Cmm tops)
                | dopt Opt_SplitObjs dflags     = split_marker : tops
                | otherwise                     = tops

        split_marker = CmmProc [] mkSplitMarkerLabel [] (ListGraph [])
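        -- With -split-objs the driver later cuts the assembly output
        -- into one object file per split marker, so that the linker
        -- can discard unused code and data at link time.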
-- | Do native code generation on all these cmms.
--
cmmNativeGens dflags h us [] impAcc profAcc count
        = return (reverse impAcc, reverse profAcc)

cmmNativeGens dflags h us (cmm : cmms) impAcc profAcc count
 = do
        (us', native, imports, colorStats, linearStats)
                <- cmmNativeGen dflags us cmm count

        Pretty.printDoc Pretty.LeftMode h
                $ {-# SCC "pprNativeCode" #-} Pretty.vcat $ map pprNatCmmTop native

        let lsPprNative =
                if  dopt Opt_D_dump_asm dflags
                 || dopt Opt_D_dump_asm_stats dflags
                        then native
                        else []

        let count' = count + 1

        -- force evaluation of all this stuff to avoid space leaks
        seqString (showSDoc $ vcat $ map ppr imports) `seq` return ()
        lsPprNative `seq` return ()
        count' `seq` return ()

        cmmNativeGens dflags h us' cmms
                (imports : impAcc)
                ((lsPprNative, colorStats, linearStats) : profAcc)
                count'

 where  seqString []            = ()
        seqString (x:xs)        = x `seq` seqString xs `seq` ()
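        -- (seqString forces every character of the String; a bare seq
        -- would evaluate only the outermost cons cell.)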
-- | Complete native code generation phase for a single top-level chunk of Cmm.
--      Dumps the output of each stage along the way, and collects the
--      stats needed for the global conflict graph and NCG stats.
cmmNativeGen
        :: DynFlags
        -> UniqSupply
        -> RawCmmTop                            -- ^ the cmm to generate code for
        -> Int                                  -- ^ sequence number of this top thing
        -> IO   ( UniqSupply
                , [NatCmmTop]                   -- native code
                , [CLabel]                      -- things imported by this cmm
                , Maybe [Color.RegAllocStats]   -- stats for the coloring register allocator
                , Maybe [Linear.RegAllocStats]) -- stats for the linear register allocator
cmmNativeGen dflags us cmm count
 = do
        -- rewrite assignments to global regs
        let (fixed_cmm, usFix)  =
                {-# SCC "fixAssignsTop" #-}
                initUs us $ fixAssignsTop cmm

        -- cmm to cmm optimisations
        let (opt_cmm, imports) =
                {-# SCC "cmmToCmm" #-}
                cmmToCmm dflags fixed_cmm

        dumpIfSet_dyn dflags
                Opt_D_dump_opt_cmm "Optimised Cmm"
                (pprCmm $ Cmm [opt_cmm])

        -- generate native code from cmm
        let ((native, lastMinuteImports), usGen) =
                {-# SCC "genMachCode" #-}
                initUs usFix $ genMachCode dflags opt_cmm

        dumpIfSet_dyn dflags
                Opt_D_dump_asm_native "Native code"
                (vcat $ map (docToSDoc . pprNatCmmTop) native)

        -- tag instructions with register liveness information
        let (withLiveness, usLive) =
                {-# SCC "regLiveness" #-}
                initUs usGen $ mapUs regLiveness native

        dumpIfSet_dyn dflags
                Opt_D_dump_asm_liveness "Liveness annotations added"
                (vcat $ map ppr withLiveness)
        -- allocate registers
        (alloced, usAlloc, ppr_raStatsColor, ppr_raStatsLinear) <-
         if ( dopt Opt_RegsGraph dflags
           || dopt Opt_RegsIterative dflags)
          then do
                -- the regs usable for allocation
                let alloc_regs
                        = foldr (\r -> plusUFM_C unionUniqSets
                                        $ unitUFM (regClass r) (unitUniqSet r))
                                emptyUFM
                        $ map RealReg allocatableRegs

                -- graph coloring register allocation
                let ((alloced, regAllocStats), usAlloc)
                        = {-# SCC "RegAlloc" #-}
                          initUs usLive
                          $ Color.regAlloc
                                dflags
                                alloc_regs
                                (mkUniqSet [0..maxSpillSlots])
                                withLiveness

                -- dump out what happened during register allocation
                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc "Registers allocated"
                        (vcat $ map (docToSDoc . pprNatCmmTop) alloced)

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc_stages "Build/spill stages"
                        (vcat   $ map (\(stage, stats)
                                        -> text "# --------------------------"
                                        $$ text "# cmm " <> int count <> text " Stage " <> int stage
                                        $$ ppr stats)
                                $ zip [0..] regAllocStats)

                let mPprStats =
                        if dopt Opt_D_dump_asm_stats dflags
                         then Just regAllocStats else Nothing

                -- force evaluation of the Maybe to avoid space leak
                mPprStats `seq` return ()

                return  ( alloced, usAlloc
                        , mPprStats
                        , Nothing)

          else do
                -- do linear register allocation
                let ((alloced, regAllocStats), usAlloc)
                        = {-# SCC "RegAlloc" #-}
                          initUs usLive
                          $ liftM unzip
                          $ mapUs Linear.regAlloc withLiveness

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc "Registers allocated"
                        (vcat $ map (docToSDoc . pprNatCmmTop) alloced)

                let mPprStats =
                        if dopt Opt_D_dump_asm_stats dflags
                         then Just (catMaybes regAllocStats) else Nothing

                -- force evaluation of the Maybe to avoid space leak
                mPprStats `seq` return ()

                return  ( alloced, usAlloc
                        , Nothing
                        , mPprStats)
        ---- shortcut branches
        let shorted     =
                {-# SCC "shortcutBranches" #-}
                shortcutBranches dflags alloced

        ---- sequence blocks
        let sequenced   =
                {-# SCC "sequenceBlocks" #-}
                map sequenceTop shorted

        ---- x86fp_kludge
        let final_mach_code =
#if i386_TARGET_ARCH
                {-# SCC "x86fp_kludge" #-}
                map x86fp_kludge sequenced
#else
                sequenced
#endif
        return  ( usAlloc
                , final_mach_code
                , lastMinuteImports ++ imports
                , ppr_raStatsColor
                , ppr_raStatsLinear)

#if i386_TARGET_ARCH
x86fp_kludge :: NatCmmTop -> NatCmmTop
x86fp_kludge top@(CmmData _ _) = top
x86fp_kludge top@(CmmProc info lbl params (ListGraph code)) =
        CmmProc info lbl params (ListGraph $ map bb_i386_insert_ffrees code)
 where
        bb_i386_insert_ffrees (BasicBlock id instrs) =
                BasicBlock id (i386_insert_ffrees instrs)
#endif
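-- (The x87 kludge: the x86 NCG uses the floating-point register stack
-- as if it were a set of ordinary registers, so before control can
-- leave a block the stack must be emptied; i386_insert_ffrees adds the
-- required ffree instructions.)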
-- | Build a doc for all the imports.
--
makeImportsDoc :: [CLabel] -> Pretty.Doc
makeImportsDoc imports
 = dyld_stubs imports

#if HAVE_SUBSECTIONS_VIA_SYMBOLS
                -- On recent versions of Darwin, the linker supports
                -- dead-stripping of code and data on a per-symbol basis.
                -- There's a hack to make this work in PprMach.pprNatCmmTop.
            Pretty.$$ Pretty.text ".subsections_via_symbols"
#endif

#if HAVE_GNU_NONEXEC_STACK
                -- On recent GNU ELF systems one can mark an object file
                -- as not requiring an executable stack. If all objects
                -- linked into a program have this note then the program
                -- will not use an executable stack, which is good for
                -- security. GHC generated code does not need an executable
                -- stack so add the note in:
            Pretty.$$ Pretty.text ".section .note.GNU-stack,\"\",@progbits"
#endif

#if !defined(darwin_TARGET_OS)
                -- And just because every other compiler does, let's stick in
                -- an identifier directive: .ident "GHC x.y.z"
            Pretty.$$ let compilerIdent = Pretty.text "GHC" Pretty.<+>
                                          Pretty.text cProjectVersion
                       in Pretty.text ".ident" Pretty.<+>
                          Pretty.doubleQuotes compilerIdent
#endif
 where
        -- Generate "symbol stubs" for all external symbols that might
        -- come from a dynamic library.
        dyld_stubs :: [CLabel] -> Pretty.Doc
{-      dyld_stubs imps = Pretty.vcat $ map pprDyldSymbolStub $
                                    map head $ group $ sort imps-}

        -- (Hack) sometimes two Labels pretty-print the same, but have
        -- different uniques; so we compare their text versions...
        dyld_stubs imps
                | needImportedSymbols
                = Pretty.vcat $
                        (pprGotDeclaration :) $
                        map (pprImportedSymbol . fst . head) $
                        groupBy (\(_,a) (_,b) -> a == b) $
                        sortBy (\(_,a) (_,b) -> compare a b) $
                        map doPpr $
                        imps
                | otherwise
                = Pretty.empty

        doPpr lbl = (lbl, Pretty.render $ pprCLabel lbl astyle)
        astyle = mkCodeStyle AsmStyle
-- -----------------------------------------------------------------------------
-- Sequencing the basic blocks

-- Cmm BasicBlocks are self-contained entities: they always end in a
-- jump, either non-local or to another basic block in the same proc.
-- In this phase, we attempt to place the basic blocks in a sequence
-- such that as many of the local jumps as possible turn into
-- fallthroughs.

sequenceTop :: NatCmmTop -> NatCmmTop
sequenceTop top@(CmmData _ _) = top
sequenceTop (CmmProc info lbl params (ListGraph blocks)) =
  CmmProc info lbl params (ListGraph $ makeFarBranches $ sequenceBlocks blocks)

-- The algorithm is very simple (and stupid): we make a graph out of
-- the blocks where there is an edge from one block to another iff the
-- first block ends by jumping to the second.  Then we topologically
-- sort this graph.  Then traverse the list: for each block, we first
-- output the block, then if it has an out edge, we move the
-- destination of the out edge to the front of the list, and continue.

-- FYI, the classic layout for basic blocks uses postorder DFS; this
-- algorithm is implemented in cmm/ZipCfg.hs (NR 6 Sep 2007).
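-- For example (block names illustrative only): given blocks
--      A: ... jmp C;    B: ... jmp D;    C: ... jmp B
-- we output A, pull C to the front of the remaining list and drop the
-- now-redundant jmp (a fallthrough), then do the same for B, giving
-- the layout A C B with two local jumps eliminated.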
sequenceBlocks :: [NatBasicBlock] -> [NatBasicBlock]
sequenceBlocks [] = []
sequenceBlocks (entry:blocks) =
  seqBlocks (mkNode entry : reverse (flattenSCCs (sccBlocks blocks)))
  -- the first block is the entry point ==> it must remain at the start.

sccBlocks :: [NatBasicBlock] -> [SCC (NatBasicBlock,Unique,[Unique])]
sccBlocks blocks = stronglyConnCompR (map mkNode blocks)

getOutEdges :: [Instr] -> [Unique]
getOutEdges instrs = case jumpDests (last instrs) [] of
                        [one] -> [getUnique one]
                        _many -> []
        -- we're only interested in the last instruction of
        -- the block, and only if it has a single destination.

mkNode block@(BasicBlock id instrs) = (block, getUnique id, getOutEdges instrs)

seqBlocks [] = []
seqBlocks ((block,_,[]) : rest)
  = block : seqBlocks rest
seqBlocks ((block@(BasicBlock id instrs),_,[next]) : rest)
  | can_fallthrough = BasicBlock id (init instrs) : seqBlocks rest'
  | otherwise       = block : seqBlocks rest'
  where
        (can_fallthrough, rest') = reorder next [] rest
        -- TODO: we should do a better job for cycles; try to maximise the
        -- fallthroughs within a loop.
seqBlocks _ = panic "AsmCodegen:seqBlocks"

reorder id accum [] = (False, reverse accum)
reorder id accum (b@(block,id',out) : rest)
  | id == id'  = (True, (block,id,out) : reverse accum ++ rest)
  | otherwise  = reorder id (b:accum) rest
-- -----------------------------------------------------------------------------
-- Making far branches

-- Conditional branches on PowerPC are limited to +-32KB; if our Procs get too
-- big, we have to work around this limitation.
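-- (Sketch of the workaround: a conditional branch whose target is out
-- of range is replaced by a far-branch pseudo-instruction, which the
-- pretty-printer expands into a short branch on the negated condition
-- around an unconditional jump to the real target.)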
makeFarBranches :: [NatBasicBlock] -> [NatBasicBlock]

#if powerpc_TARGET_ARCH
makeFarBranches blocks
    | last blockAddresses < nearLimit = blocks
    | otherwise = zipWith handleBlock blockAddresses blocks
    where
        blockAddresses = scanl (+) 0 $ map blockLen blocks
        blockLen (BasicBlock _ instrs) = length instrs

        handleBlock addr (BasicBlock id instrs)
                = BasicBlock id (zipWith makeFar [addr..] instrs)

        makeFar addr (BCC ALWAYS tgt) = BCC ALWAYS tgt
        makeFar addr (BCC cond tgt)
            | abs (addr - targetAddr) >= nearLimit
            = BCCFAR cond tgt
            | otherwise
            = BCC cond tgt
            where Just targetAddr = lookupUFM blockAddressMap tgt
        makeFar addr other = other

        nearLimit = 7000 -- 8192 instructions are allowed; let's keep some
                         -- distance, as we have a few pseudo-insns that are
                         -- pretty-printed as multiple instructions,
                         -- and it's just not worth the effort to calculate
                         -- things exactly

        blockAddressMap = listToUFM $ zip (map blockId blocks) blockAddresses
#else
makeFarBranches = id
#endif
-- -----------------------------------------------------------------------------
-- Shortcut branches

shortcutBranches :: DynFlags -> [NatCmmTop] -> [NatCmmTop]
shortcutBranches dflags tops
  | optLevel dflags < 1 = tops    -- only with -O or higher
  | otherwise           = map (apply_mapping mapping) tops'
  where
    (tops', mappings) = mapAndUnzip build_mapping tops
    mapping = foldr plusUFM emptyUFM mappings

    build_mapping top@(CmmData _ _) = (top, emptyUFM)
    build_mapping (CmmProc info lbl params (ListGraph []))
        = (CmmProc info lbl params (ListGraph []), emptyUFM)
    build_mapping (CmmProc info lbl params (ListGraph (head:blocks)))
        = (CmmProc info lbl params (ListGraph (head:others)), mapping)
        -- drop the shorted blocks, but don't ever drop the first one,
        -- because it is pointed to by a global label.
        where
          -- find all the blocks that just consist of a jump that can be
          -- shorted.
          (shortcut_blocks, others) = partitionWith split blocks
          split (BasicBlock id [insn]) | Just dest <- canShortcut insn
                                       = Left (id, dest)
          split other = Right other

          -- build a mapping from BlockId to JumpDest for shorting branches
          mapping = foldl add emptyUFM shortcut_blocks
          add ufm (id,dest) = addToUFM ufm id dest

    apply_mapping ufm (CmmData sec statics)
        = CmmData sec (map (shortcutStatic (lookupUFM ufm)) statics)
        -- we need to get the jump tables, so apply the mapping to the entries
        -- of a CmmData too.
    apply_mapping ufm (CmmProc info lbl params (ListGraph blocks))
        = CmmProc info lbl params (ListGraph $ map short_bb blocks)
        where
          short_bb (BasicBlock id insns) = BasicBlock id $! map short_insn insns
          short_insn i = shortcutJump (lookupUFM ufm) i
                -- shortcutJump should apply the mapping repeatedly,
                -- just in case we can short multiple branches.
-- -----------------------------------------------------------------------------
-- Instruction selection

-- Native code instruction selection for a chunk of Cmm code.  For
-- this part of the computation, we switch from the UniqSM monad to
-- the NatM monad.  The latter carries not only a Unique, but also an
-- Int denoting the current C stack pointer offset in the generated
-- code; this is needed for creating correct spill offsets on
-- architectures which don't offer, or for which it would be
-- prohibitively expensive to employ, a frame pointer register.  Viz,
-- x86.

-- The offset is measured in bytes, and indicates the difference
-- between the current (simulated) C stack-ptr and the value it was at
-- the beginning of the block.  For stacks which grow down, this value
-- should be either zero or negative.

-- Switching between the two monads whilst carrying along the same
-- Unique supply breaks abstraction.  Is that bad?
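-- A rough sketch of the state NatM threads (field names as used in
-- genMachCode below; the real record carries more, e.g. DynFlags):
--
--      data NatM_State = NatM_State
--              { natm_us      :: UniqSupply  -- the unique supply
--              , natm_delta   :: Int         -- simulated C stack delta, bytes
--              , natm_imports :: [CLabel] }  -- imported symbols seen so far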
genMachCode :: DynFlags -> RawCmmTop -> UniqSM ([NatCmmTop], [CLabel])

genMachCode dflags cmm_top
  = do  { initial_us <- getUs
        ; let initial_st           = mkNatM_State initial_us 0 dflags
              (new_tops, final_st) = initNat initial_st (cmmTopCodeGen cmm_top)
              final_delta          = natm_delta final_st
              final_imports        = natm_imports final_st
        ; if   final_delta == 0
          then return (new_tops, final_imports)
          else pprPanic "genMachCode: nonzero final delta" (int final_delta)
    }
-- -----------------------------------------------------------------------------
-- Fixup assignments to global registers so that they assign to
-- locations within the RegTable, if appropriate.

-- Note that we currently don't fixup reads here: they're done by
-- the generic optimiser below, to avoid having two separate passes
-- over the Cmm.

fixAssignsTop :: RawCmmTop -> UniqSM RawCmmTop
fixAssignsTop top@(CmmData _ _) = returnUs top
fixAssignsTop (CmmProc info lbl params (ListGraph blocks)) =
  mapUs fixAssignsBlock blocks `thenUs` \ blocks' ->
  returnUs (CmmProc info lbl params (ListGraph blocks'))

fixAssignsBlock :: CmmBasicBlock -> UniqSM CmmBasicBlock
fixAssignsBlock (BasicBlock id stmts) =
  fixAssigns stmts `thenUs` \ stmts' ->
  returnUs (BasicBlock id stmts')

fixAssigns :: [CmmStmt] -> UniqSM [CmmStmt]
fixAssigns stmts =
  mapUs fixAssign stmts `thenUs` \ stmtss ->
  returnUs (concat stmtss)

fixAssign :: CmmStmt -> UniqSM [CmmStmt]
fixAssign (CmmAssign (CmmGlobal reg) src)
  | Left  realreg     <- reg_or_addr
  = returnUs [CmmAssign (CmmGlobal reg) src]
  | Right baseRegAddr <- reg_or_addr
  = returnUs [CmmStore baseRegAddr src]
  where
        -- Replace register leaves with appropriate CmmExprs for
        -- the given target. GlobalRegs which map to a reg on this
        -- arch are left unchanged. Assigning to BaseReg is always
        -- illegal, so we check for that.
        reg_or_addr = get_GlobalReg_reg_or_addr reg

fixAssign other_stmt = returnUs [other_stmt]
-- -----------------------------------------------------------------------------
-- Generic Cmm optimiser

{-
Here we do:

  (a) Constant folding
  (b) Simple inlining: a temporary which is assigned to and then
      used, once, can be shorted.
  (c) Replacement of references to GlobalRegs which do not have
      machine registers by the appropriate memory load (eg.
      Hp ==> *(BaseReg + 34) ).
  (d) Position independent code and dynamic linking
        (i)  introduce the appropriate indirections
             and position independent refs
        (ii) compile a list of imported symbols

Ideas for other things we could do (ToDo):

  - shortcut jumps-to-jumps
  - eliminate dead code blocks
  - simple CSE: if an expr is assigned to a temp, then replace later occs of
    that expr with the temp, until the expr is no longer valid (can push through
    temp assignments, and certain assigns to mem...)
-}
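-- For example (offsets illustrative only), (b) and (c) together rewrite
--      x = R1 + 1;  I64[Hp] = x;
-- into
--      I64[*(BaseReg + 34)] = R1 + 1;
-- on an architecture where Hp does not live in a machine register.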
cmmToCmm :: DynFlags -> RawCmmTop -> (RawCmmTop, [CLabel])
cmmToCmm _ top@(CmmData _ _) = (top, [])
cmmToCmm dflags (CmmProc info lbl params (ListGraph blocks)) = runCmmOpt dflags $ do
  blocks' <- mapM cmmBlockConFold (cmmMiniInline blocks)
  return $ CmmProc info lbl params (ListGraph blocks')
newtype CmmOptM a = CmmOptM (([CLabel], DynFlags) -> (# a, [CLabel] #))
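-- CmmOptM is, in effect, a combined reader (DynFlags) and state
-- ([CLabel] import accumulator) monad; the unboxed-tuple result merely
-- avoids allocating a pair on every bind.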
instance Monad CmmOptM where
  return x = CmmOptM $ \(imports, _) -> (# x, imports #)
  (CmmOptM f) >>= g =
    CmmOptM $ \(imports, dflags) ->
        case f (imports, dflags) of
          (# x, imports' #) ->
            case g x of
              CmmOptM g' -> g' (imports', dflags)
addImportCmmOpt :: CLabel -> CmmOptM ()
addImportCmmOpt lbl = CmmOptM $ \(imports, dflags) -> (# (), lbl:imports #)

getDynFlagsCmmOpt :: CmmOptM DynFlags
getDynFlagsCmmOpt = CmmOptM $ \(imports, dflags) -> (# dflags, imports #)

runCmmOpt :: DynFlags -> CmmOptM a -> (a, [CLabel])
runCmmOpt dflags (CmmOptM f) = case f ([], dflags) of
                        (# result, imports #) -> (result, imports)
cmmBlockConFold :: CmmBasicBlock -> CmmOptM CmmBasicBlock
cmmBlockConFold (BasicBlock id stmts) = do
  stmts' <- mapM cmmStmtConFold stmts
  return $ BasicBlock id stmts'

cmmStmtConFold :: CmmStmt -> CmmOptM CmmStmt
cmmStmtConFold stmt
   = case stmt of
        CmmAssign reg src
           -> do src' <- cmmExprConFold DataReference src
                 return $ case src' of
                   CmmReg reg' | reg == reg' -> CmmNop
                   new_src -> CmmAssign reg new_src

        CmmStore addr src
           -> do addr' <- cmmExprConFold DataReference addr
                 src'  <- cmmExprConFold DataReference src
                 return $ CmmStore addr' src'

        CmmJump addr regs
           -> do addr' <- cmmExprConFold JumpReference addr
                 return $ CmmJump addr' regs
        CmmCall target regs args srt returns
           -> do target' <- case target of
                              CmmCallee e conv -> do
                                e' <- cmmExprConFold CallReference e
                                return $ CmmCallee e' conv
                              other -> return other
                 args' <- mapM (\(CmmHinted arg hint) -> do
                                  arg' <- cmmExprConFold DataReference arg
                                  return (CmmHinted arg' hint)) args
                 return $ CmmCall target' regs args' srt returns

        CmmCondBranch test dest
           -> do test' <- cmmExprConFold DataReference test
                 return $ case test' of
                   CmmLit (CmmInt 0 _) ->
                     CmmComment (mkFastString ("deleted: " ++
                                        showSDoc (pprStmt stmt)))

                   CmmLit (CmmInt n _) -> CmmBranch dest
                   other -> CmmCondBranch test' dest

        CmmSwitch expr ids
           -> do expr' <- cmmExprConFold DataReference expr
                 return $ CmmSwitch expr' ids

        other
           -> return other

cmmExprConFold :: ReferenceKind -> CmmExpr -> CmmOptM CmmExpr
cmmExprConFold referenceKind expr
   = case expr of
        CmmLoad addr rep
           -> do addr' <- cmmExprConFold DataReference addr
                 return $ CmmLoad addr' rep
        CmmMachOp mop args
           -- For MachOps, we first optimize the children, and then we try
           -- our hand at some constant-folding.
           -> do args' <- mapM (cmmExprConFold DataReference) args
                 return $ cmmMachOpFold mop args'

        CmmLit (CmmLabel lbl)
           -> do
                dflags <- getDynFlagsCmmOpt
                cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
        CmmLit (CmmLabelOff lbl off)
           -> do
                dflags <- getDynFlagsCmmOpt
                dynRef <- cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
                return $ cmmMachOpFold (MO_Add wordRep) [
                    dynRef,
                    (CmmLit $ CmmInt (fromIntegral off) wordRep)
                  ]
#if powerpc_TARGET_ARCH
        -- On powerpc (non-PIC), it's easier to jump directly to a label than
        -- to use the register table, so we replace these registers
        -- with the corresponding labels:
        CmmReg (CmmGlobal GCEnter1)
          | not opt_PIC
          -> cmmExprConFold referenceKind $
             CmmLit (CmmLabel (mkRtsCodeLabel SLIT("__stg_gc_enter_1")))
        CmmReg (CmmGlobal GCFun)
          | not opt_PIC
          -> cmmExprConFold referenceKind $
             CmmLit (CmmLabel (mkRtsCodeLabel SLIT("__stg_gc_fun")))
#endif

        CmmReg (CmmGlobal mid)
           -- Replace register leaves with appropriate CmmExprs for
           -- the given target. MagicIds which map to a reg on this
           -- arch are left unchanged. For the rest, BaseReg is taken
           -- to mean the address of the reg table in MainCapability,
           -- and for all others we generate an indirection to its
           -- location in the register table.
           -> case get_GlobalReg_reg_or_addr mid of
                Left  realreg -> return expr
                Right baseRegAddr
                   -> case mid of
                        BaseReg -> cmmExprConFold DataReference baseRegAddr
                        other   -> cmmExprConFold DataReference
                                        (CmmLoad baseRegAddr (globalRegRep mid))

        -- eliminate zero offsets
        CmmRegOff reg 0
           -> cmmExprConFold referenceKind (CmmReg reg)

        CmmRegOff (CmmGlobal mid) offset
           -- RegOff leaves are just a shorthand form. If the reg maps
           -- to a real reg, we keep the shorthand, otherwise, we just
           -- expand it and defer to the above code.
           -> case get_GlobalReg_reg_or_addr mid of
                Left  realreg -> return expr
                Right baseRegAddr
                   -> cmmExprConFold DataReference (CmmMachOp (MO_Add wordRep) [
                        CmmReg (CmmGlobal mid),
                        CmmLit (CmmInt (fromIntegral offset)
                                       wordRep)])
        other
           -> return other

-- -----------------------------------------------------------------------------