-- -----------------------------------------------------------------------------
--
-- (c) The University of Glasgow 1993-2004
--
-- This is the top-level module in the native code generator.
--
-- -----------------------------------------------------------------------------
{-# OPTIONS -w #-}
-- The above warning suppression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and fix
-- any warnings in the module. See
--     http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#Warnings
-- for details
module AsmCodeGen ( nativeCodeGen ) where

#include "HsVersions.h"
#include "nativeGen/NCG.h"
import PositionIndependentCode

import qualified RegAllocLinear as Linear
import qualified RegAllocColor  as Color
import qualified RegAllocStats  as Color
import qualified GraphColor     as Color

import CmmOpt           ( cmmMiniInline, cmmMachOpFold )
import PprCmm           ( pprStmt, pprCmms, pprCmm )
import Unique           ( Unique, getUnique )
import List             ( groupBy, sortBy )
import ErrUtils         ( dumpIfSet_dyn )
import StaticFlags      ( opt_Static, opt_PIC )
import Config           ( cProjectVersion )

import qualified Pretty
{-
The native-code generator has machine-independent and
machine-dependent modules.

This module ("AsmCodeGen") is the top-level machine-independent
module.  Before entering machine-dependent land, we do some
machine-independent optimisations (defined below) on the
'CmmStmts's.

We convert to the machine-specific 'Instr' datatype with
'cmmCodeGen', assuming an infinite supply of registers.  We then use
a machine-independent register allocator ('regAlloc') to rejoin
reality.  Obviously, 'regAlloc' has machine-specific helper
functions (see about "RegAllocInfo" below).

Finally, we order the basic blocks of the function so as to minimise
the number of jumps between blocks, by utilising fallthrough wherever
possible.
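
As a rough sketch (illustrative only; the real stages below also thread
a UniqSupply and DynFlags through, and collect import lists and
statistics along the way), the pipeline for one top-level chunk is:

    codeGen :: RawCmmTop -> [NatCmmTop]
    codeGen = map (sequenceTop . regAlloc)  -- registers, then block order
            . cmmCodeGen                    -- select machine instructions
            . cmmToCmm                      -- machine-independent optimisations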
The machine-dependent bits break down as follows:

  * ["MachRegs"]  Everything about the target platform's machine
    registers (and immediate operands, and addresses, which tend to
    intermingle/interact with registers).

  * ["MachInstrs"]  Includes the 'Instr' datatype (possibly should
    have a module of its own), plus a miscellany of other things
    (e.g., 'targetDoubleSize', 'smStablePtrTable', ...)

  * ["MachCodeGen"]  is where 'Cmm' stuff turns into
    machine instructions.

  * ["PprMach"]  'pprInstr' turns an 'Instr' into text (well, really
    a 'Doc').

  * ["RegAllocInfo"]  In the register allocator, we manipulate
    'MRegsState's, which are 'BitSet's, one bit per machine register.
    When we want to say something about a specific machine register
    (e.g., ``it gets clobbered by this instruction''), we set/unset
    its bit.  Obviously, we do this 'BitSet' thing for efficiency
    reasons.

    The 'RegAllocInfo' module collects together the machine-specific
    info needed to do register allocation.

  * ["RegisterAlloc"]  The (machine-independent) register allocator.
-}
-- -----------------------------------------------------------------------------
-- Top-level of the native codegen
nativeCodeGen :: DynFlags -> Handle -> UniqSupply -> [RawCmm] -> IO ()
nativeCodeGen dflags h us cmms
 = do
        let split_cmms  = concat $ map add_split cmms

        (imports, prof)
                <- cmmNativeGens dflags h us split_cmms [] []

        let (native, colorStats, linearStats)
                = unzip3 prof

        dumpIfSet_dyn dflags
                Opt_D_dump_asm "Asm code"
                (vcat $ map (docToSDoc . pprNatCmmTop) $ concat native)
        -- dump global NCG stats for graph coloring allocator
        (case concat $ catMaybes colorStats of
          []    -> return ()
          stats -> do
                -- build the global register conflict graph
                let graphGlobal
                        = foldl Color.union Color.initGraph
                        $ [ Color.raGraph stat
                                | stat@Color.RegAllocStatsStart{} <- stats]

                dumpSDoc dflags Opt_D_dump_asm_stats "NCG stats"
                        $ Color.pprStats stats graphGlobal

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_conflicts "Register conflict graph"
                        $ Color.dotGraph Color.regDotColor trivColorable
                        $ graphGlobal)
        -- dump global NCG stats for linear allocator
        (case concat $ catMaybes linearStats of
          []    -> return ()
          stats -> dumpSDoc dflags Opt_D_dump_asm_stats "NCG stats"
                        $ Linear.pprStats (concat native) stats)

        -- write out the imports
        Pretty.printDoc Pretty.LeftMode h
                $ makeImportsDoc (concat imports)

        return ()
 where  add_split (Cmm tops)
                | dopt Opt_SplitObjs dflags = split_marker : tops
                | otherwise                 = tops

        split_marker = CmmProc [] mkSplitMarkerLabel [] []
-- | Do native code generation on all these cmms.
--
cmmNativeGens :: DynFlags -> Handle -> UniqSupply -> [RawCmmTop]
              -> [[CLabel]]
              -> [([NatCmmTop], Maybe [Color.RegAllocStats], Maybe [Linear.RegAllocStats])]
              -> IO ( [[CLabel]]
                    , [([NatCmmTop], Maybe [Color.RegAllocStats], Maybe [Linear.RegAllocStats])] )

cmmNativeGens dflags h us [] impAcc profAcc
        = return (reverse impAcc, reverse profAcc)
cmmNativeGens dflags h us (cmm : cmms) impAcc profAcc
 = do
        (us', native, imports, colorStats, linearStats)
                <- cmmNativeGen dflags us cmm

        Pretty.printDoc Pretty.LeftMode h
                $ {-# SCC "pprNativeCode" #-} Pretty.vcat $ map pprNatCmmTop native

        let lsPprNative =
                if  dopt Opt_D_dump_asm dflags
                 || dopt Opt_D_dump_asm_stats dflags
                        then native
                        else []

        -- force evaluation of imports and lsPprNative to avoid space leak
        seqString (showSDoc $ vcat $ map ppr imports) `seq` return ()
        lsPprNative `seq` return ()

        cmmNativeGens dflags h us' cmms
                (imports : impAcc)
                ((lsPprNative, colorStats, linearStats) : profAcc)

 where  seqString []     = ()
        seqString (x:xs) = x `seq` seqString xs `seq` ()
-- | Complete native code generation phase for a single top-level chunk of Cmm.
--      Dumping the output of each stage along the way.
--      Global conflict graph and NCG stats
cmmNativeGen
        :: DynFlags
        -> UniqSupply
        -> RawCmmTop
        -> IO   ( UniqSupply
                , [NatCmmTop]
                , [CLabel]
                , Maybe [Color.RegAllocStats]
                , Maybe [Linear.RegAllocStats])
cmmNativeGen dflags us cmm
 = do
        -- rewrite assignments to global regs
        let (fixed_cmm, usFix)  =
                {-# SCC "fixAssignsTop" #-}
                initUs us $ fixAssignsTop cmm

        -- cmm to cmm optimisations
        let (opt_cmm, imports) =
                {-# SCC "cmmToCmm" #-}
                cmmToCmm dflags fixed_cmm

        dumpIfSet_dyn dflags
                Opt_D_dump_opt_cmm "Optimised Cmm"
                (pprCmm $ Cmm [opt_cmm])
        -- generate native code from cmm
        let ((native, lastMinuteImports), usGen) =
                {-# SCC "genMachCode" #-}
                initUs usFix $ genMachCode dflags opt_cmm

        dumpIfSet_dyn dflags
                Opt_D_dump_asm_native "Native code"
                (vcat $ map (docToSDoc . pprNatCmmTop) native)

        -- tag instructions with register liveness information
        let (withLiveness, usLive) =
                {-# SCC "regLiveness" #-}
                initUs usGen $ mapUs regLiveness native

        dumpIfSet_dyn dflags
                Opt_D_dump_asm_liveness "Liveness annotations added"
                (vcat $ map ppr withLiveness)
        -- allocate registers
        (alloced, usAlloc, ppr_raStatsColor, ppr_raStatsLinear) <-
         if dopt Opt_RegsGraph dflags
          then do
                -- the regs usable for allocation
                let alloc_regs
                        = foldr (\r -> plusUFM_C unionUniqSets
                                        $ unitUFM (regClass r) (unitUniqSet r))
                                emptyUFM
                        $ map RealReg allocatableRegs

                -- aggressively coalesce moves between virtual regs
                let (coalesced, usCoalesce)
                        = {-# SCC "regCoalesce" #-}
                          initUs usLive $ regCoalesce withLiveness

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_coalesce "Reg-Reg moves coalesced"
                        (vcat $ map ppr coalesced)

                -- if any of these dump flags are turned on we want to hang on to
                -- intermediate structures in the allocator - otherwise tell the
                -- allocator to ditch them early so we don't end up creating space leaks.
                let generateRegAllocStats = or
                        [ dopt Opt_D_dump_asm_regalloc_stages dflags
                        , dopt Opt_D_dump_asm_stats dflags
                        , dopt Opt_D_dump_asm_conflicts dflags ]
                -- graph coloring register allocation
                let ((alloced, regAllocStats), usAlloc)
                        = {-# SCC "regAlloc(color)" #-}
                          initUs usCoalesce
                          $ Color.regAlloc
                                generateRegAllocStats
                                alloc_regs
                                (mkUniqSet [0..maxSpillSlots])
                                coalesced

                -- dump out what happened during register allocation
                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc "Registers allocated"
                        (vcat $ map (docToSDoc . pprNatCmmTop) alloced)

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc_stages "Build/spill stages"
                        (vcat   $ map (\(stage, stats)
                                        -> text "-- Stage " <> int stage
                                        $$ ppr stats)
                                $ zip [0..] regAllocStats)

                let mPprStats =
                        if dopt Opt_D_dump_asm_stats dflags
                         then Just regAllocStats else Nothing

                -- force evaluation of the Maybe to avoid space leak
                mPprStats `seq` return ()

                return  ( alloced, usAlloc
                        , mPprStats
                        , Nothing)
          else do
                -- do linear register allocation
                let ((alloced, regAllocStats), usAlloc)
                        = {-# SCC "regAlloc(linear)" #-}
                          initUs usLive
                          $ liftM unzip
                          $ mapUs Linear.regAlloc withLiveness

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc "Registers allocated"
                        (vcat $ map (docToSDoc . pprNatCmmTop) alloced)

                let mPprStats =
                        if dopt Opt_D_dump_asm_stats dflags
                         then Just (catMaybes regAllocStats) else Nothing

                -- force evaluation of the Maybe to avoid space leak
                mPprStats `seq` return ()

                return  ( alloced, usAlloc
                        , Nothing
                        , mPprStats)
        ---- shortcut branches
        let shorted     =
                {-# SCC "shortcutBranches" #-}
                shortcutBranches dflags alloced

        ---- sequence blocks
        let sequenced   =
                {-# SCC "sequenceBlocks" #-}
                map sequenceTop shorted

        ---- x86fp_kludge
        let final_mach_code =
#if i386_TARGET_ARCH
                {-# SCC "x86fp_kludge" #-}
                map x86fp_kludge sequenced
#else
                sequenced
#endif

        return  ( usAlloc
                , final_mach_code
                , lastMinuteImports ++ imports
                , ppr_raStatsColor
                , ppr_raStatsLinear)
#if i386_TARGET_ARCH
x86fp_kludge :: NatCmmTop -> NatCmmTop
x86fp_kludge top@(CmmData _ _) = top
x86fp_kludge top@(CmmProc info lbl params code) =
        CmmProc info lbl params (map bb_i386_insert_ffrees code)
        where
                bb_i386_insert_ffrees (BasicBlock id instrs) =
                        BasicBlock id (i386_insert_ffrees instrs)
#endif
-- | Build a doc for all the imports.
--
makeImportsDoc :: [CLabel] -> Pretty.Doc
makeImportsDoc imports
 = dyld_stubs imports
#if HAVE_SUBSECTIONS_VIA_SYMBOLS
                -- On recent versions of Darwin, the linker supports
                -- dead-stripping of code and data on a per-symbol basis.
                -- There's a hack to make this work in PprMach.pprNatCmmTop.
            Pretty.$$ Pretty.text ".subsections_via_symbols"
#endif

#if HAVE_GNU_NONEXEC_STACK
                -- On recent GNU ELF systems one can mark an object file
                -- as not requiring an executable stack. If all objects
                -- linked into a program have this note then the program
                -- will not use an executable stack, which is good for
                -- security. GHC generated code does not need an executable
                -- stack so add the note in:
            Pretty.$$ Pretty.text ".section .note.GNU-stack,\"\",@progbits"
#endif

#if !defined(darwin_TARGET_OS)
                -- And just because every other compiler does, let's stick in
                -- an identifier directive: .ident "GHC x.y.z"
            Pretty.$$ let compilerIdent = Pretty.text "GHC" Pretty.<+>
                                          Pretty.text cProjectVersion
                       in Pretty.text ".ident" Pretty.<+>
                          Pretty.doubleQuotes compilerIdent
#endif
 where
        -- Generate "symbol stubs" for all external symbols that might
        -- come from a dynamic library.
        dyld_stubs :: [CLabel] -> Pretty.Doc
{-      dyld_stubs imps = Pretty.vcat $ map pprDyldSymbolStub $
                                    map head $ group $ sort imps -}

        -- (Hack) sometimes two Labels pretty-print the same, but have
        -- different uniques; so we compare their text versions...
        dyld_stubs imps
                | needImportedSymbols
                = Pretty.vcat $
                        (pprGotDeclaration :) $
                        map (pprImportedSymbol . fst . head) $
                        groupBy (\(_,a) (_,b) -> a == b) $
                        sortBy (\(_,a) (_,b) -> compare a b) $
                        map doPpr $ imps
                | otherwise
                = Pretty.empty

        doPpr lbl = (lbl, Pretty.render $ pprCLabel lbl astyle)
        astyle = mkCodeStyle AsmStyle
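
-- For example (an illustrative sketch): if the import list renders as
--      ["printf", "fprintf", "printf"]
-- then sorting and grouping by the rendered text yields one symbol stub
-- each for "fprintf" and "printf", even when the two printf CLabels
-- carried different uniques.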
-- -----------------------------------------------------------------------------
-- Sequencing the basic blocks

-- Cmm BasicBlocks are self-contained entities: they always end in a
-- jump, either non-local or to another basic block in the same proc.
-- In this phase, we attempt to place the basic blocks in a sequence
-- such that as many of the local jumps as possible turn into
-- fallthroughs.
sequenceTop :: NatCmmTop -> NatCmmTop
sequenceTop top@(CmmData _ _) = top
sequenceTop (CmmProc info lbl params blocks) =
  CmmProc info lbl params (makeFarBranches $ sequenceBlocks blocks)
-- The algorithm is very simple (and stupid): we make a graph out of
-- the blocks where there is an edge from one block to another iff the
-- first block ends by jumping to the second.  Then we topologically
-- sort this graph.  Then traverse the list: for each block, we first
-- output the block, then if it has an out edge, we move the
-- destination of the out edge to the front of the list, and continue.
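--
-- For example (an illustrative sketch): given three blocks
--      A: ...; jmp C       B: ...; jmp out      C: ...; jmp B
-- the graph has edges A->C and C->B, and the traversal emits A, C, B,
-- so that both local jumps can be dropped in favour of fallthrough.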
sequenceBlocks :: [NatBasicBlock] -> [NatBasicBlock]
sequenceBlocks [] = []
sequenceBlocks (entry:blocks) =
  seqBlocks (mkNode entry : reverse (flattenSCCs (sccBlocks blocks)))
  -- the first block is the entry point ==> it must remain at the start.

sccBlocks :: [NatBasicBlock] -> [SCC (NatBasicBlock,Unique,[Unique])]
sccBlocks blocks = stronglyConnCompR (map mkNode blocks)

getOutEdges :: [Instr] -> [Unique]
getOutEdges instrs = case jumpDests (last instrs) [] of
                        [one] -> [getUnique one]
                        _many -> []
                -- we're only interested in the last instruction of
                -- the block, and only if it has a single destination.

mkNode block@(BasicBlock id instrs) = (block, getUnique id, getOutEdges instrs)
seqBlocks [] = []
seqBlocks ((block,_,[]) : rest)
  = block : seqBlocks rest
seqBlocks ((block@(BasicBlock id instrs),_,[next]) : rest)
  | can_fallthrough = BasicBlock id (init instrs) : seqBlocks rest'
  | otherwise       = block : seqBlocks rest'
  where
        (can_fallthrough, rest') = reorder next [] rest
        -- TODO: we should do a better job for cycles; try to maximise the
        -- fallthroughs within a loop.
seqBlocks _ = panic "AsmCodegen:seqBlocks"

reorder id accum [] = (False, reverse accum)
reorder id accum (b@(block,id',out) : rest)
  | id == id'  = (True, (block,id,out) : reverse accum ++ rest)
  | otherwise  = reorder id (b:accum) rest
-- -----------------------------------------------------------------------------
-- Making far branches

-- Conditional branches on PowerPC are limited to +-32KB; if our Procs get too
-- big, we have to work around this limitation.
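--
-- The standard workaround, sketched here for illustration, is to invert
-- the condition and branch over an unconditional jump, which has a much
-- larger range:
--      beq  cr7, far_away      ==>     bne  cr7, l
--                                      b    far_away
--                                    l:
-- In the code below, out-of-range conditional branches are rewritten to
-- a FAR variant of the instruction, which the pretty-printer can expand
-- into such a sequence.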
makeFarBranches :: [NatBasicBlock] -> [NatBasicBlock]

#if powerpc_TARGET_ARCH
makeFarBranches blocks
    | last blockAddresses < nearLimit = blocks
    | otherwise = zipWith handleBlock blockAddresses blocks
    where
        blockAddresses = scanl (+) 0 $ map blockLen blocks
        blockLen (BasicBlock _ instrs) = length instrs

        handleBlock addr (BasicBlock id instrs)
                = BasicBlock id (zipWith makeFar [addr..] instrs)

        makeFar addr (BCC ALWAYS tgt) = BCC ALWAYS tgt
        makeFar addr (BCC cond tgt)
            | abs (addr - targetAddr) >= nearLimit
            = BCCFAR cond tgt
            | otherwise
            = BCC cond tgt
            where Just targetAddr = lookupUFM blockAddressMap tgt
        makeFar addr other = other

        nearLimit = 7000 -- 8192 instructions are allowed; let's keep some
                         -- distance, as we have a few pseudo-insns that are
                         -- pretty-printed as multiple instructions,
                         -- and it's just not worth the effort to calculate
                         -- things exactly

        blockAddressMap = listToUFM $ zip (map blockId blocks) blockAddresses
#else
makeFarBranches = id
#endif
-- -----------------------------------------------------------------------------
-- Shortcutting branches
shortcutBranches :: DynFlags -> [NatCmmTop] -> [NatCmmTop]
shortcutBranches dflags tops
  | optLevel dflags < 1 = tops    -- only with -O or higher
  | otherwise           = map (apply_mapping mapping) tops'
  where
    (tops', mappings) = mapAndUnzip build_mapping tops
    mapping = foldr plusUFM emptyUFM mappings

build_mapping top@(CmmData _ _) = (top, emptyUFM)
build_mapping (CmmProc info lbl params [])
  = (CmmProc info lbl params [], emptyUFM)
build_mapping (CmmProc info lbl params (head:blocks))
  = (CmmProc info lbl params (head:others), mapping)
        -- drop the shorted blocks, but don't ever drop the first one,
        -- because it is pointed to by a global label.
  where
    -- find all the blocks that just consist of a jump that can be
    -- shorted.
    (shortcut_blocks, others) = partitionWith split blocks
    split (BasicBlock id [insn]) | Just dest <- canShortcut insn
                                 = Left (id,dest)
    split other = Right other

    -- build a mapping from BlockId to JumpDest for shorting branches
    mapping = foldl add emptyUFM shortcut_blocks
    add ufm (id,dest) = addToUFM ufm id dest
apply_mapping ufm (CmmData sec statics)
  = CmmData sec (map (shortcutStatic (lookupUFM ufm)) statics)
  -- we need to get the jump tables, so apply the mapping to the entries
  -- of a CmmData too.
apply_mapping ufm (CmmProc info lbl params blocks)
  = CmmProc info lbl params (map short_bb blocks)
  where
    short_bb (BasicBlock id insns) = BasicBlock id $! map short_insn insns
    short_insn i = shortcutJump (lookupUFM ufm) i
                 -- shortcutJump should apply the mapping repeatedly,
                 -- just in case we can short multiple branches.
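
-- For example (an illustrative sketch): given
--      b1: ...; jmp b2
--      b2: jmp b3
--      b3: ...
-- b2 can be shorted: the mapping sends b2 to b3, b2 is dropped from the
-- proc, and the jump in b1 (and any jump-table entry naming b2) is
-- rewritten to target b3 directly.  Applying the mapping repeatedly
-- handles chains like b1 -> b2 -> b3 -> b4.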
-- -----------------------------------------------------------------------------
-- Instruction selection

-- Native code instruction selection for a chunk of Cmm code.  For
-- this part of the computation, we switch from the UniqSM monad to
-- the NatM monad.  The latter carries not only a Unique, but also an
-- Int denoting the current C stack pointer offset in the generated
-- code; this is needed for creating correct spill offsets on
-- architectures which don't offer, or for which it would be
-- prohibitively expensive to employ, a frame pointer register.  Viz,
-- x86.

-- The offset is measured in bytes, and indicates the difference
-- between the current (simulated) C stack-ptr and the value it was at
-- the beginning of the block.  For stacks which grow down, this value
-- should be either zero or negative.

-- Switching between the two monads whilst carrying along the same
-- Unique supply breaks abstraction.  Is that bad?
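
-- For example (an illustrative sketch, in x86 terms): after emitting
--      subl $8, %esp           -- delta goes from 0 to -8
-- a spill slot that sits at offset -4 from the stack pointer as it was
-- on block entry is now addressed as 4(%esp).  The delta must be back
-- to zero by the end of each chunk; genMachCode checks this below.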
genMachCode :: DynFlags -> RawCmmTop -> UniqSM ([NatCmmTop], [CLabel])

genMachCode dflags cmm_top
  = do  { initial_us <- getUs
        ; let initial_st           = mkNatM_State initial_us 0 dflags
              (new_tops, final_st) = initNat initial_st (cmmTopCodeGen cmm_top)
              final_delta          = natm_delta final_st
              final_imports        = natm_imports final_st
        ; if final_delta == 0
          then return (new_tops, final_imports)
          else pprPanic "genMachCode: nonzero final delta" (int final_delta)
        }
-- -----------------------------------------------------------------------------
-- Fixup assignments to global registers so that they assign to
-- locations within the RegTable, if appropriate.

-- Note that we currently don't fixup reads here: they're done by
-- the generic optimiser below, to avoid having two separate passes
-- over the Cmm.
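
-- For example (an illustrative sketch; the actual address comes from
-- get_GlobalReg_reg_or_addr): on a target where Hp is not pinned to a
-- machine register, the assignment
--      CmmAssign (CmmGlobal Hp) src
-- turns into a store into Hp's slot in the register table:
--      CmmStore baseRegAddrForHp src
-- where baseRegAddrForHp is a hypothetical name for the BaseReg-relative
-- address of that slot.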
fixAssignsTop :: RawCmmTop -> UniqSM RawCmmTop
fixAssignsTop top@(CmmData _ _) = returnUs top
fixAssignsTop (CmmProc info lbl params blocks) =
  mapUs fixAssignsBlock blocks `thenUs` \ blocks' ->
  returnUs (CmmProc info lbl params blocks')

fixAssignsBlock :: CmmBasicBlock -> UniqSM CmmBasicBlock
fixAssignsBlock (BasicBlock id stmts) =
  fixAssigns stmts `thenUs` \ stmts' ->
  returnUs (BasicBlock id stmts')

fixAssigns :: [CmmStmt] -> UniqSM [CmmStmt]
fixAssigns stmts =
  mapUs fixAssign stmts `thenUs` \ stmtss ->
  returnUs (concat stmtss)
fixAssign :: CmmStmt -> UniqSM [CmmStmt]
fixAssign (CmmAssign (CmmGlobal reg) src)
  | Left  realreg <- reg_or_addr
  = returnUs [CmmAssign (CmmGlobal reg) src]
  | Right baseRegAddr <- reg_or_addr
  = returnUs [CmmStore baseRegAddr src]
        -- Replace register leaves with appropriate StixTrees for
        -- the given target. GlobalRegs which map to a reg on this
        -- arch are left unchanged. Assigning to BaseReg is always
        -- illegal, so we check for that.
  where
        reg_or_addr = get_GlobalReg_reg_or_addr reg

fixAssign other_stmt = returnUs [other_stmt]
-- -----------------------------------------------------------------------------
-- Generic Cmm optimiser

{-
Here we do:

  (a) Constant folding
  (b) Simple inlining: a temporary which is assigned to and then
      used, once, can be shorted.
  (c) Replacement of references to GlobalRegs which do not have
      machine registers by the appropriate memory load (eg.
      Hp ==> *(BaseReg + 34) ).
  (d) Position independent code and dynamic linking
        (i)  introduce the appropriate indirections
             and position independent refs
        (ii) compile a list of imported symbols

Ideas for other things we could do (ToDo):

  - shortcut jumps-to-jumps
  - eliminate dead code blocks
  - simple CSE: if an expr is assigned to a temp, then replace later occs of
    that expr with the temp, until the expr is no longer valid (can push through
    temp assignments, and certain assigns to mem...)
-}
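
{- For example (an illustrative sketch of (a) and (b) working together):

        _c1 = R1 + 0;       -- (a) folds the addition:   _c1 = R1;
        I32[Sp] = _c1;      -- (b) inlines the one use:  I32[Sp] = R1;

   With (c), on a target where Hp has no machine register, reads of Hp
   then become loads from its slot in the register table, as in the
   Hp ==> *(BaseReg + 34) example above.
-}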
cmmToCmm :: DynFlags -> RawCmmTop -> (RawCmmTop, [CLabel])
cmmToCmm _ top@(CmmData _ _) = (top, [])
cmmToCmm dflags (CmmProc info lbl params blocks) = runCmmOpt dflags $ do
  blocks' <- mapM cmmBlockConFold (cmmMiniInline blocks)
  return $ CmmProc info lbl params blocks'
newtype CmmOptM a = CmmOptM (([CLabel], DynFlags) -> (# a, [CLabel] #))

instance Monad CmmOptM where
  return x = CmmOptM $ \(imports, _) -> (# x,imports #)
  (CmmOptM f) >>= g =
    CmmOptM $ \(imports, dflags) ->
        case f (imports, dflags) of
          (# x, imports' #) ->
            case g x of
              CmmOptM g' -> g' (imports', dflags)

addImportCmmOpt :: CLabel -> CmmOptM ()
addImportCmmOpt lbl = CmmOptM $ \(imports, dflags) -> (# (), lbl:imports #)

getDynFlagsCmmOpt :: CmmOptM DynFlags
getDynFlagsCmmOpt = CmmOptM $ \(imports, dflags) -> (# dflags, imports #)

runCmmOpt :: DynFlags -> CmmOptM a -> (a, [CLabel])
runCmmOpt dflags (CmmOptM f) = case f ([], dflags) of
                        (# result, imports #) -> (result, imports)
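
-- For example (an illustrative sketch; 'pass' is hypothetical, not part
-- of this module): a tiny pass that records an import and consults the
-- flags would be written
--      pass :: CLabel -> CmmOptM Bool
--      pass lbl = do
--          addImportCmmOpt lbl           -- remember the imported symbol
--          dflags <- getDynFlagsCmmOpt   -- read the ambient DynFlags
--          return (optLevel dflags > 0)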
cmmBlockConFold :: CmmBasicBlock -> CmmOptM CmmBasicBlock
cmmBlockConFold (BasicBlock id stmts) = do
  stmts' <- mapM cmmStmtConFold stmts
  return $ BasicBlock id stmts'

cmmStmtConFold :: CmmStmt -> CmmOptM CmmStmt
cmmStmtConFold stmt
   = case stmt of
        CmmAssign reg src
           -> do src' <- cmmExprConFold DataReference src
                 return $ case src' of
                   CmmReg reg' | reg == reg' -> CmmNop
                   new_src -> CmmAssign reg new_src

        CmmStore addr src
           -> do addr' <- cmmExprConFold DataReference addr
                 src'  <- cmmExprConFold DataReference src
                 return $ CmmStore addr' src'

        CmmJump addr regs
           -> do addr' <- cmmExprConFold JumpReference addr
                 return $ CmmJump addr' regs
        CmmCall target regs args srt returns
           -> do target' <- case target of
                              CmmCallee e conv -> do
                                e' <- cmmExprConFold CallReference e
                                return $ CmmCallee e' conv
                              other -> return other
                 args' <- mapM (\(arg, hint) -> do
                                  arg' <- cmmExprConFold DataReference arg
                                  return (arg', hint)) args
                 return $ CmmCall target' regs args' srt returns

        CmmCondBranch test dest
           -> do test' <- cmmExprConFold DataReference test
                 return $ case test' of
                   CmmLit (CmmInt 0 _) ->
                     CmmComment (mkFastString ("deleted: " ++
                                        showSDoc (pprStmt stmt)))

                   CmmLit (CmmInt n _) -> CmmBranch dest
                   other -> CmmCondBranch test' dest

        CmmSwitch expr ids
           -> do expr' <- cmmExprConFold DataReference expr
                 return $ CmmSwitch expr' ids

        other
           -> return other
cmmExprConFold :: ReferenceKind -> CmmExpr -> CmmOptM CmmExpr
cmmExprConFold referenceKind expr
   = case expr of
        CmmLoad addr rep
           -> do addr' <- cmmExprConFold DataReference addr
                 return $ CmmLoad addr' rep

        CmmMachOp mop args
           -- For MachOps, we first optimize the children, and then we try
           -- our hand at some constant-folding.
           -> do args' <- mapM (cmmExprConFold DataReference) args
                 return $ cmmMachOpFold mop args'

        CmmLit (CmmLabel lbl)
           -> do
                dflags <- getDynFlagsCmmOpt
                cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
        CmmLit (CmmLabelOff lbl off)
           -> do
                dflags <- getDynFlagsCmmOpt
                dynRef <- cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
                return $ cmmMachOpFold (MO_Add wordRep) [
                    dynRef,
                    (CmmLit $ CmmInt (fromIntegral off) wordRep)
                  ]
#if powerpc_TARGET_ARCH
        -- On powerpc (non-PIC), it's easier to jump directly to a label than
        -- to use the register table, so we replace these registers
        -- with the corresponding labels:
        CmmReg (CmmGlobal GCEnter1)
          | not opt_PIC
          -> cmmExprConFold referenceKind $
             CmmLit (CmmLabel (mkRtsCodeLabel SLIT("__stg_gc_enter_1")))
        CmmReg (CmmGlobal GCFun)
          | not opt_PIC
          -> cmmExprConFold referenceKind $
             CmmLit (CmmLabel (mkRtsCodeLabel SLIT("__stg_gc_fun")))
#endif
        CmmReg (CmmGlobal mid)
           -- Replace register leaves with appropriate StixTrees for
           -- the given target. MagicIds which map to a reg on this
           -- arch are left unchanged. For the rest, BaseReg is taken
           -- to mean the address of the reg table in MainCapability,
           -- and for all others we generate an indirection to its
           -- location in the register table.
           -> case get_GlobalReg_reg_or_addr mid of
                Left  realreg -> return expr
                Right baseRegAddr
                   -> case mid of
                        BaseReg -> cmmExprConFold DataReference baseRegAddr
                        other   -> cmmExprConFold DataReference
                                      (CmmLoad baseRegAddr (globalRegRep mid))

        -- eliminate zero offsets
        CmmRegOff reg 0
           -> cmmExprConFold referenceKind (CmmReg reg)
        CmmRegOff (CmmGlobal mid) offset
           -- RegOff leaves are just a shorthand form. If the reg maps
           -- to a real reg, we keep the shorthand, otherwise, we just
           -- expand it and defer to the above code.
           -> case get_GlobalReg_reg_or_addr mid of
                Left  realreg -> return expr
                Right baseRegAddr
                   -> cmmExprConFold DataReference (CmmMachOp (MO_Add wordRep) [
                          CmmReg (CmmGlobal mid),
                          CmmLit (CmmInt (fromIntegral offset) wordRep)
                        ])

        other
           -> return other

-- -----------------------------------------------------------------------------