-- -----------------------------------------------------------------------------
--
-- (c) The University of Glasgow 1993-2004
--
-- This is the top-level module in the native code generator.
--
-- -----------------------------------------------------------------------------

{-# OPTIONS_GHC -w #-}
-- The above warning suppression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and fix
-- any warnings in the module. See
--     http://hackage.haskell.org/trac/ghc/wiki/WorkingConventions#Warnings

module AsmCodeGen ( nativeCodeGen ) where

#include "HsVersions.h"
#include "nativeGen/NCG.h"
import PositionIndependentCode

import qualified RegAllocLinear as Linear
import qualified RegAllocColor  as Color
import qualified RegAllocStats  as Color
import qualified GraphColor     as Color

import CmmOpt           ( cmmMiniInline, cmmMachOpFold )
import PprCmm           ( pprStmt, pprCmms, pprCmm )

import Unique           ( Unique, getUnique )

import List             ( groupBy, sortBy )
import ErrUtils         ( dumpIfSet_dyn )

import StaticFlags      ( opt_Static, opt_PIC )

import Config           ( cProjectVersion )

import qualified Pretty
{-
The native-code generator has machine-independent and
machine-dependent modules.

This module ("AsmCodeGen") is the top-level machine-independent
module.  Before entering machine-dependent land, we do some
machine-independent optimisations (defined below) on the
'CmmStmts's.

We convert to the machine-specific 'Instr' datatype with
'cmmCodeGen', assuming an infinite supply of registers.  We then use
a machine-independent register allocator ('regAlloc') to rejoin
reality.  Obviously, 'regAlloc' has machine-specific helper
functions (see about "RegAllocInfo" below).

Finally, we order the basic blocks of the function so as to minimise
the number of jumps between blocks, by utilising fallthrough wherever
possible.

The machine-dependent bits break down as follows:

  * ["MachRegs"]  Everything about the target platform's machine
    registers (and immediate operands, and addresses, which tend to
    intermingle/interact with registers).

  * ["MachInstrs"]  Includes the 'Instr' datatype (possibly should
    have a module of its own), plus a miscellany of other things
    (e.g., 'targetDoubleSize', 'smStablePtrTable', ...)

  * ["MachCodeGen"]  is where 'Cmm' stuff turns into
    machine instructions.

  * ["PprMach"]  'pprInstr' turns an 'Instr' into text (well, really
    a 'Doc').

  * ["RegAllocInfo"]  In the register allocator, we manipulate
    'MRegsState's, which are 'BitSet's, one bit per machine register.
    When we want to say something about a specific machine register
    (e.g., ``it gets clobbered by this instruction''), we set/unset
    its bit.  Obviously, we do this 'BitSet' thing for efficiency
    reasons.

    The 'RegAllocInfo' module collects together the machine-specific
    info needed to do register allocation.

  * ["RegisterAlloc"]  The (machine-independent) register allocator.
-}
-- -----------------------------------------------------------------------------
-- Top-level of the native codegen

nativeCodeGen :: DynFlags -> Handle -> UniqSupply -> [RawCmm] -> IO ()
nativeCodeGen dflags h us cmms
 = do
        let split_cmms  = concat $ map add_split cmms

        (imports, prof)
                <- cmmNativeGens dflags h us split_cmms [] []

        let (native, colorStats, linearStats)
                = unzip3 prof

        -- dump native code
        dumpIfSet_dyn dflags
                Opt_D_dump_asm "Asm code"
                (vcat $ map (docToSDoc . pprNatCmmTop) $ concat native)

        -- dump global NCG stats for graph coloring allocator
        (case concat $ catMaybes colorStats of
          []    -> return ()
          stats -> do
                -- build the global register conflict graph
                let graphGlobal
                        = foldl Color.union Color.initGraph
                        $ [ Color.raGraph stat
                                | stat@Color.RegAllocStatsStart{} <- stats]

                dumpSDoc dflags Opt_D_dump_asm_stats "NCG stats"
                        $ Color.pprStats stats graphGlobal

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_conflicts "Register conflict graph"
                        $ Color.dotGraph Color.regDotColor trivColorable
                        $ graphGlobal)

        -- dump global NCG stats for linear allocator
        (case concat $ catMaybes linearStats of
                []    -> return ()
                stats -> dumpSDoc dflags Opt_D_dump_asm_stats "NCG stats"
                                $ Linear.pprStats (concat native) stats)

        -- write out the imports
        Pretty.printDoc Pretty.LeftMode h
                $ makeImportsDoc (concat imports)

        return ()

 where  add_split (Cmm tops)
                | dopt Opt_SplitObjs dflags = split_marker : tops
                | otherwise                 = tops

        split_marker = CmmProc [] mkSplitMarkerLabel [] []
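-- Note: with -split-objs the assembly output is later cut into separate
-- chunks at these marker procs, so that each chunk can be placed in its
-- own object file.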
-- | Do native code generation on all these cmms.
--
cmmNativeGens dflags h us [] impAcc profAcc
        = return (reverse impAcc, reverse profAcc)

cmmNativeGens dflags h us (cmm : cmms) impAcc profAcc
 = do
        (us', native, imports, colorStats, linearStats)
                <- cmmNativeGen dflags us cmm

        Pretty.printDoc Pretty.LeftMode h
                $ {-# SCC "pprNativeCode" #-} Pretty.vcat $ map pprNatCmmTop native

        let lsPprNative =
                if dopt Opt_D_dump_asm dflags
                 || dopt Opt_D_dump_asm_stats dflags
                        then native
                        else []

        -- force evaluation of imports and lsPprNative to avoid space leak
        seqString (showSDoc $ vcat $ map ppr imports)
                `seq` lsPprNative
                `seq` cmmNativeGens dflags h us' cmms
                        (imports : impAcc)
                        ((lsPprNative, colorStats, linearStats) : profAcc)

 where  seqString []     = ()
        seqString (x:xs) = x `seq` seqString xs `seq` ()
-- | Complete native code generation phase for a single top-level chunk of Cmm,
--      dumping the output of each stage along the way.
--      Also returns the global conflict graph and NCG stats.
cmmNativeGen :: DynFlags -> UniqSupply -> RawCmmTop
        -> IO ( UniqSupply
              , [NatCmmTop]
              , [CLabel]
              , Maybe [Color.RegAllocStats]
              , Maybe [Linear.RegAllocStats])
cmmNativeGen dflags us cmm
 = do
        -- rewrite assignments to global regs
        let (fixed_cmm, usFix) =
                {-# SCC "fixAssignsTop" #-}
                initUs us $ fixAssignsTop cmm

        -- cmm to cmm optimisations
        let (opt_cmm, imports) =
                {-# SCC "cmmToCmm" #-}
                cmmToCmm dflags fixed_cmm

        dumpIfSet_dyn dflags
                Opt_D_dump_opt_cmm "Optimised Cmm"
                (pprCmm $ Cmm [opt_cmm])

        -- generate native code from cmm
        let ((native, lastMinuteImports), usGen) =
                {-# SCC "genMachCode" #-}
                initUs usFix $ genMachCode dflags opt_cmm

        dumpIfSet_dyn dflags
                Opt_D_dump_asm_native "Native code"
                (vcat $ map (docToSDoc . pprNatCmmTop) native)

        -- tag instructions with register liveness information
        let (withLiveness, usLive) =
                {-# SCC "regLiveness" #-}
                initUs usGen $ mapUs regLiveness native

        dumpIfSet_dyn dflags
                Opt_D_dump_asm_liveness "Liveness annotations added"
                (vcat $ map ppr withLiveness)
        -- allocate registers
        (alloced, usAlloc, ppr_raStatsColor, ppr_raStatsLinear) <-
         if dopt Opt_RegsGraph dflags
          then do
                -- the regs usable for allocation
                let alloc_regs
                        = foldr (\r -> plusUFM_C unionUniqSets
                                        $ unitUFM (regClass r) (unitUniqSet r))
                                emptyUFM
                        $ map RealReg allocatableRegs

                -- aggressively coalesce moves between virtual regs
                let (coalesced, usCoalesce)
                        = {-# SCC "regCoalesce" #-}
                          initUs usLive $ regCoalesce withLiveness

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_coalesce "Reg-Reg moves coalesced"
                        (vcat $ map ppr coalesced)

                -- if any of these dump flags are turned on we want to hang on to
                --      intermediate structures in the allocator - otherwise tell the
                --      allocator to ditch them early so we don't end up creating space leaks.
                let generateRegAllocStats = or
                        [ dopt Opt_D_dump_asm_regalloc_stages dflags
                        , dopt Opt_D_dump_asm_stats dflags
                        , dopt Opt_D_dump_asm_conflicts dflags ]

                -- graph coloring register allocation
                let ((alloced, regAllocStats), usAlloc)
                        = {-# SCC "regAlloc(color)" #-}
                          initUs usCoalesce
                          $ Color.regAlloc
                                generateRegAllocStats
                                alloc_regs
                                (mkUniqSet [0..maxSpillSlots])
                                coalesced

                -- dump out what happened during register allocation
                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc "Registers allocated"
                        (vcat $ map (docToSDoc . pprNatCmmTop) alloced)

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc_stages "Build/spill stages"
                        (vcat   $ map (\(stage, stats)
                                        -> text "-- Stage " <> int stage
                                        $$ ppr stats)
                                $ zip [0..] regAllocStats)

                let mPprStats =
                        if dopt Opt_D_dump_asm_stats dflags
                         then Just regAllocStats else Nothing

                -- force evaluation of the Maybe to avoid space leak
                mPprStats
                 `seq`  return  ( alloced, usAlloc
                                , mPprStats
                                , Nothing)

          else do
                -- do linear register allocation
                let ((alloced, regAllocStats), usAlloc)
                        = {-# SCC "regAlloc(linear)" #-}
                          initUs usLive $ liftM unzip
                          $ mapUs Linear.regAlloc withLiveness

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc "Registers allocated"
                        (vcat $ map (docToSDoc . pprNatCmmTop) alloced)

                let mPprStats =
                        if dopt Opt_D_dump_asm_stats dflags
                         then Just (catMaybes regAllocStats) else Nothing

                -- force evaluation of the Maybe to avoid space leak
                mPprStats
                 `seq`  return  ( alloced, usAlloc
                                , Nothing
                                , mPprStats)

        ---- shortcut branches
        let shorted     =
                {-# SCC "shortcutBranches" #-}
                shortcutBranches dflags alloced

        ---- sequence blocks
        let sequenced   =
                {-# SCC "sequenceBlocks" #-}
                map sequenceTop shorted

        ---- x86fp_kludge
        let final_mach_code =
#if i386_TARGET_ARCH
                {-# SCC "x86fp_kludge" #-}
                map x86fp_kludge sequenced
#else
                sequenced
#endif

        return  ( usAlloc
                , final_mach_code
                , lastMinuteImports ++ imports
                , ppr_raStatsColor
                , ppr_raStatsLinear)
#if i386_TARGET_ARCH
x86fp_kludge :: NatCmmTop -> NatCmmTop
x86fp_kludge top@(CmmData _ _) = top
x86fp_kludge top@(CmmProc info lbl params code) =
        CmmProc info lbl params (map bb_i386_insert_ffrees code)
 where
        bb_i386_insert_ffrees (BasicBlock id instrs) =
                BasicBlock id (i386_insert_ffrees instrs)
#endif
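-- The kludge is needed because the x86 floating-point registers form a
-- stack, and whatever is left on that stack must be freed before a
-- non-local jump; i386_insert_ffrees inserts the ffree instructions
-- that do this.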
-- | Build a doc for all the imports.
--
makeImportsDoc :: [CLabel] -> Pretty.Doc
makeImportsDoc imports
 = dyld_stubs imports

#if HAVE_SUBSECTIONS_VIA_SYMBOLS
            -- On recent versions of Darwin, the linker supports
            -- dead-stripping of code and data on a per-symbol basis.
            -- There's a hack to make this work in PprMach.pprNatCmmTop.
            Pretty.$$ Pretty.text ".subsections_via_symbols"
#endif

#if HAVE_GNU_NONEXEC_STACK
            -- On recent GNU ELF systems one can mark an object file
            -- as not requiring an executable stack. If all objects
            -- linked into a program have this note then the program
            -- will not use an executable stack, which is good for
            -- security. GHC generated code does not need an executable
            -- stack so add the note in:
            Pretty.$$ Pretty.text ".section .note.GNU-stack,\"\",@progbits"
#endif

#if !defined(darwin_TARGET_OS)
            -- And just because every other compiler does, let's stick in
            -- an identifier directive: .ident "GHC x.y.z"
            Pretty.$$ let compilerIdent = Pretty.text "GHC" Pretty.<+>
                                          Pretty.text cProjectVersion
                       in Pretty.text ".ident" Pretty.<+>
                          Pretty.doubleQuotes compilerIdent
#endif
 where
        -- Generate "symbol stubs" for all external symbols that might
        -- come from a dynamic library.
        dyld_stubs :: [CLabel] -> Pretty.Doc
{-      dyld_stubs imps = Pretty.vcat $ map pprDyldSymbolStub $
                                        map head $ group $ sort imps -}

        -- (Hack) sometimes two Labels pretty-print the same, but have
        -- different uniques; so we compare their text versions...
        dyld_stubs imps
                | needImportedSymbols
                = Pretty.vcat $
                        (pprGotDeclaration :) $
                        map (pprImportedSymbol . fst . head) $
                        groupBy (\(_,a) (_,b) -> a == b) $
                        sortBy  (\(_,a) (_,b) -> compare a b) $
                        map doPpr $
                        imps
                | otherwise
                = Pretty.empty

        doPpr lbl = (lbl, Pretty.render $ pprCLabel lbl astyle)
        astyle = mkCodeStyle AsmStyle
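-- The sort/group dedup used above, shown on plain strings (a minimal,
-- self-contained sketch: rendering each label to text stands in for
-- pprCLabel + Pretty.render):

dedupByText :: [String] -> [String]
dedupByText = map head . groupBy (==) . sortBy compare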
-- -----------------------------------------------------------------------------
-- Sequencing the basic blocks

-- Cmm BasicBlocks are self-contained entities: they always end in a
-- jump, either non-local or to another basic block in the same proc.
-- In this phase, we attempt to place the basic blocks in a sequence
-- such that as many of the local jumps as possible turn into
-- fallthroughs.

sequenceTop :: NatCmmTop -> NatCmmTop
sequenceTop top@(CmmData _ _) = top
sequenceTop (CmmProc info lbl params blocks) =
        CmmProc info lbl params (makeFarBranches $ sequenceBlocks blocks)

-- The algorithm is very simple (and stupid): we make a graph out of
-- the blocks where there is an edge from one block to another iff the
-- first block ends by jumping to the second.  Then we topologically
-- sort this graph.  Then traverse the list: for each block, we first
-- output the block, then if it has an out edge, we move the
-- destination of the out edge to the front of the list, and continue.
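-- A self-contained sketch of the fallthrough payoff on a toy block type
-- (names here are illustrative, not part of the NCG): once blocks are
-- ordered so that a block's jump target is placed directly after it,
-- that final jump can simply be dropped.

data ToyBlock = ToyBlock { toyId :: Int, toyJump :: Maybe Int }

dropFallthroughJumps :: [ToyBlock] -> [ToyBlock]
dropFallthroughJumps (b : rest@(next : _))
        | toyJump b == Just (toyId next)        -- jump to the next block
        = b { toyJump = Nothing } : dropFallthroughJumps rest
dropFallthroughJumps (b : rest) = b : dropFallthroughJumps rest
dropFallthroughJumps []         = []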
sequenceBlocks :: [NatBasicBlock] -> [NatBasicBlock]
sequenceBlocks [] = []
sequenceBlocks (entry:blocks) =
  seqBlocks (mkNode entry : reverse (flattenSCCs (sccBlocks blocks)))
  -- the first block is the entry point ==> it must remain at the start.

sccBlocks :: [NatBasicBlock] -> [SCC (NatBasicBlock,Unique,[Unique])]
sccBlocks blocks = stronglyConnCompR (map mkNode blocks)

getOutEdges :: [Instr] -> [Unique]
getOutEdges instrs = case jumpDests (last instrs) [] of
                        [one] -> [getUnique one]
                        _many -> []
                -- we're only interested in the last instruction of
                -- the block, and only if it has a single destination.

mkNode block@(BasicBlock id instrs) = (block, getUnique id, getOutEdges instrs)

seqBlocks [] = []
seqBlocks ((block,_,[]) : rest)
  = block : seqBlocks rest
seqBlocks ((block@(BasicBlock id instrs),_,[next]) : rest)
  | can_fallthrough = BasicBlock id (init instrs) : seqBlocks rest'
  | otherwise       = block : seqBlocks rest'
  where
        (can_fallthrough, rest') = reorder next [] rest
        -- TODO: we should do a better job for cycles; try to maximise the
        -- fallthroughs within a loop.
seqBlocks _ = panic "AsmCodegen:seqBlocks"

reorder id accum [] = (False, reverse accum)
reorder id accum (b@(block,id',out) : rest)
  | id == id'  = (True, (block,id,out) : reverse accum ++ rest)
  | otherwise  = reorder id (b:accum) rest
-- -----------------------------------------------------------------------------
-- Making far branches

-- Conditional branches on PowerPC are limited to +-32KB; if our Procs get too
-- big, we have to work around this limitation.
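-- A self-contained sketch of the workaround on toy types (illustrative
-- only; the real code works on 'Instr'): a conditional branch whose
-- target is out of range becomes a branch on the negated condition over
-- an unconditional, unlimited-range jump.

data ToyCond  = ToyEQ | ToyNE
data ToyInstr = ToyBCC ToyCond Int      -- conditional branch, limited range
              | ToyB Int                -- unconditional branch, unlimited range
              | ToyLabel Int            -- local label

negToyCond :: ToyCond -> ToyCond
negToyCond ToyEQ = ToyNE
negToyCond ToyNE = ToyEQ

-- 'fresh' stands for a freshly-invented label id.
makeToyFar :: Int -> Int -> ToyInstr -> [ToyInstr]
makeToyFar fresh distance (ToyBCC cond tgt)
        | distance >= 8192                      -- +-32KB at four bytes per insn
        = [ ToyBCC (negToyCond cond) fresh      -- hop over the long jump
          , ToyB tgt                            -- far jump to the real target
          , ToyLabel fresh ]
makeToyFar _ _ other = [other]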
makeFarBranches :: [NatBasicBlock] -> [NatBasicBlock]

#if powerpc_TARGET_ARCH
makeFarBranches blocks
    | last blockAddresses < nearLimit = blocks
    | otherwise = zipWith handleBlock blockAddresses blocks
    where
        blockAddresses = scanl (+) 0 $ map blockLen blocks
        blockLen (BasicBlock _ instrs) = length instrs

        handleBlock addr (BasicBlock id instrs)
                = BasicBlock id (zipWith makeFar [addr..] instrs)

        makeFar addr (BCC ALWAYS tgt) = BCC ALWAYS tgt
        makeFar addr (BCC cond tgt)
            | abs (addr - targetAddr) >= nearLimit
            = BCCFAR cond tgt
            | otherwise
            = BCC cond tgt
            where Just targetAddr = lookupUFM blockAddressMap tgt
        makeFar addr other = other

        nearLimit = 7000 -- 8192 instructions are allowed; let's keep some
                         -- distance, as we have a few pseudo-insns that are
                         -- pretty-printed as multiple instructions,
                         -- and it's just not worth the effort to calculate
                         -- things exactly

        blockAddressMap = listToUFM $ zip (map blockId blocks) blockAddresses
#else
makeFarBranches = id
#endif
-- -----------------------------------------------------------------------------
-- Shortcut branches

shortcutBranches :: DynFlags -> [NatCmmTop] -> [NatCmmTop]
shortcutBranches dflags tops
  | optLevel dflags < 1 = tops    -- only with -O or higher
  | otherwise           = map (apply_mapping mapping) tops'
  where
    (tops', mappings) = mapAndUnzip build_mapping tops
    mapping = foldr plusUFM emptyUFM mappings

    build_mapping top@(CmmData _ _) = (top, emptyUFM)
    build_mapping (CmmProc info lbl params [])
      = (CmmProc info lbl params [], emptyUFM)
    build_mapping (CmmProc info lbl params (head:blocks))
      = (CmmProc info lbl params (head:others), mapping)
        -- drop the shorted blocks, but don't ever drop the first one,
        -- because it is pointed to by a global label.
      where
        -- find all the blocks that just consist of a jump that can be
        -- shorted.
        (shortcut_blocks, others) = partitionWith split blocks
        split (BasicBlock id [insn]) | Just dest <- canShortcut insn
                                     = Left (id, dest)
        split other = Right other

        -- build a mapping from BlockId to JumpDest for shorting branches
        mapping = foldl add emptyUFM shortcut_blocks
        add ufm (id,dest) = addToUFM ufm id dest

    apply_mapping ufm (CmmData sec statics)
      = CmmData sec (map (shortcutStatic (lookupUFM ufm)) statics)
      -- we need to get the jump tables, so apply the mapping to the entries
      -- of a CmmData too.
    apply_mapping ufm (CmmProc info lbl params blocks)
      = CmmProc info lbl params (map short_bb blocks)
      where
        short_bb (BasicBlock id insns) = BasicBlock id $! map short_insn insns
        short_insn i = shortcutJump (lookupUFM ufm) i
                -- shortcutJump should apply the mapping repeatedly,
                -- just in case we can short multiple branches.
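-- The repeated application can be pictured on its own (a sketch: an
-- association list stands in for the UniqFM, and Ints for BlockIds).
-- It terminates provided the mapping is acyclic; a cycle of shorted
-- blocks would amount to an empty infinite loop in the program.

followShortcuts :: [(Int, Int)] -> Int -> Int
followShortcuts env dest
        = case lookup dest env of
                Just dest' -> followShortcuts env dest'
                Nothing    -> dest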
-- -----------------------------------------------------------------------------
-- Instruction selection

-- Native code instruction selection for a chunk of Cmm code.  For
-- this part of the computation, we switch from the UniqSM monad to
-- the NatM monad.  The latter carries not only a Unique, but also an
-- Int denoting the current C stack pointer offset in the generated
-- code; this is needed for creating correct spill offsets on
-- architectures which don't offer, or for which it would be
-- prohibitively expensive to employ, a frame pointer register.  Viz,
-- x86.

-- The offset is measured in bytes, and indicates the difference
-- between the current (simulated) C stack-ptr and the value it was at
-- the beginning of the block.  For stacks which grow down, this value
-- should be either zero or negative.

-- Switching between the two monads whilst carrying along the same
-- Unique supply breaks abstraction.  Is that bad?
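-- A toy illustration of that bookkeeping (names illustrative, not the
-- real NatM state): pushes and pops move the simulated stack pointer,
-- and addressing a slot that is fixed relative to the entry-time stack
-- pointer must compensate by the current delta.

type ToyDelta = Int     -- current SP minus SP at block entry;
                        -- zero or negative when the stack grows down

pushBytes, popBytes :: Int -> ToyDelta -> ToyDelta
pushBytes n delta = delta - n
popBytes  n delta = delta + n

-- offset of an entry-relative slot, as seen from the current SP:
slotFromSP :: Int -> ToyDelta -> Int
slotFromSP slot delta = slot - delta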
genMachCode :: DynFlags -> RawCmmTop -> UniqSM ([NatCmmTop], [CLabel])

genMachCode dflags cmm_top
  = do  { initial_us <- getUs
        ; let initial_st           = mkNatM_State initial_us 0 dflags
              (new_tops, final_st) = initNat initial_st (cmmTopCodeGen cmm_top)
              final_delta          = natm_delta final_st
              final_imports        = natm_imports final_st
        ; if   final_delta == 0
          then return (new_tops, final_imports)
          else pprPanic "genMachCode: nonzero final delta" (int final_delta)
        }
-- -----------------------------------------------------------------------------
-- Fixup assignments to global registers so that they assign to
-- locations within the RegTable, if appropriate.

-- Note that we currently don't fixup reads here: they're done by
-- the generic optimiser below, to avoid having two separate passes
-- over the Cmm.

fixAssignsTop :: RawCmmTop -> UniqSM RawCmmTop
fixAssignsTop top@(CmmData _ _) = returnUs top
fixAssignsTop (CmmProc info lbl params blocks) =
  mapUs fixAssignsBlock blocks `thenUs` \ blocks' ->
  returnUs (CmmProc info lbl params blocks')

fixAssignsBlock :: CmmBasicBlock -> UniqSM CmmBasicBlock
fixAssignsBlock (BasicBlock id stmts) =
  fixAssigns stmts `thenUs` \ stmts' ->
  returnUs (BasicBlock id stmts')

fixAssigns :: [CmmStmt] -> UniqSM [CmmStmt]
fixAssigns stmts =
  mapUs fixAssign stmts `thenUs` \ stmtss ->
  returnUs (concat stmtss)
fixAssign :: CmmStmt -> UniqSM [CmmStmt]
fixAssign (CmmAssign (CmmGlobal reg) src)
  | Left  realreg <- reg_or_addr
  = returnUs [CmmAssign (CmmGlobal reg) src]
  | Right baseRegAddr <- reg_or_addr
  = returnUs [CmmStore baseRegAddr src]
  -- Replace register leaves with appropriate StixTrees for
  -- the given target.  GlobalRegs which map to a reg on this
  -- arch are left unchanged.  Assigning to BaseReg is always
  -- illegal, so we check for that.
  where
        reg_or_addr = get_GlobalReg_reg_or_addr reg

fixAssign other_stmt = returnUs [other_stmt]
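-- For example (offsets illustrative): on a target where Hp does not
-- live in a machine register,
--      CmmAssign (CmmGlobal Hp) src  ==>  CmmStore (BaseReg + 34) src
-- while an assignment to a register that is pinned on this arch passes
-- through unchanged.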
-- -----------------------------------------------------------------------------
-- Generic Cmm optimiser

{-
Here we do:

  (a) Constant folding
  (b) Simple inlining: a temporary which is assigned to and then
      used, once, can be shorted.
  (c) Replacement of references to GlobalRegs which do not have
      machine registers by the appropriate memory load (eg.
      Hp ==> *(BaseReg + 34) ).
  (d) Position independent code and dynamic linking
        (i)  introduce the appropriate indirections
             and position independent refs
        (ii) compile a list of imported symbols

Ideas for other things we could do (ToDo):

  - shortcut jumps-to-jumps
  - eliminate dead code blocks
  - simple CSE: if an expr is assigned to a temp, then replace later occs of
    that expr with the temp, until the expr is no longer valid (can push through
    temp assignments, and certain assigns to mem...)
-}
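-- A minimal constant-folding sketch in the spirit of (a), on a toy
-- expression type (illustrative only; the real work happens in
-- cmmMachOpFold):

data ToyExpr = ToyLit Integer | ToyAdd ToyExpr ToyExpr

foldToyExpr :: ToyExpr -> ToyExpr
foldToyExpr (ToyAdd a b)
  = case (foldToyExpr a, foldToyExpr b) of
        (ToyLit x, ToyLit y) -> ToyLit (x + y)  -- fold two constants
        (ToyLit 0, b')       -> b'              -- 0 + e  ==>  e
        (a', ToyLit 0)       -> a'              -- e + 0  ==>  e
        (a', b')             -> ToyAdd a' b'
foldToyExpr lit = lit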
cmmToCmm :: DynFlags -> RawCmmTop -> (RawCmmTop, [CLabel])
cmmToCmm _ top@(CmmData _ _) = (top, [])
cmmToCmm dflags (CmmProc info lbl params blocks) = runCmmOpt dflags $ do
  blocks' <- mapM cmmBlockConFold (cmmMiniInline blocks)
  return $ CmmProc info lbl params blocks'
newtype CmmOptM a = CmmOptM (([CLabel], DynFlags) -> (# a, [CLabel] #))

instance Monad CmmOptM where
  return x = CmmOptM $ \(imports, _) -> (# x, imports #)
  (CmmOptM f) >>= g =
    CmmOptM $ \(imports, dflags) ->
        case f (imports, dflags) of
          (# x, imports' #) ->
            case g x of
              CmmOptM g' -> g' (imports', dflags)

addImportCmmOpt :: CLabel -> CmmOptM ()
addImportCmmOpt lbl = CmmOptM $ \(imports, dflags) -> (# (), lbl:imports #)

getDynFlagsCmmOpt :: CmmOptM DynFlags
getDynFlagsCmmOpt = CmmOptM $ \(imports, dflags) -> (# dflags, imports #)

runCmmOpt :: DynFlags -> CmmOptM a -> (a, [CLabel])
runCmmOpt dflags (CmmOptM f) = case f ([], dflags) of
                        (# result, imports #) -> (result, imports)
cmmBlockConFold :: CmmBasicBlock -> CmmOptM CmmBasicBlock
cmmBlockConFold (BasicBlock id stmts) = do
  stmts' <- mapM cmmStmtConFold stmts
  return $ BasicBlock id stmts'

cmmStmtConFold stmt
   = case stmt of
        CmmAssign reg src
           -> do src' <- cmmExprConFold DataReference src
                 return $ case src' of
                   CmmReg reg' | reg == reg' -> CmmNop
                   new_src -> CmmAssign reg new_src

        CmmStore addr src
           -> do addr' <- cmmExprConFold DataReference addr
                 src'  <- cmmExprConFold DataReference src
                 return $ CmmStore addr' src'

        CmmJump addr regs
           -> do addr' <- cmmExprConFold JumpReference addr
                 return $ CmmJump addr' regs

        CmmCall target regs args srt returns
           -> do target' <- case target of
                              CmmCallee e conv -> do
                                e' <- cmmExprConFold CallReference e
                                return $ CmmCallee e' conv
                              other -> return other
                 args' <- mapM (\(arg, hint) -> do
                                  arg' <- cmmExprConFold DataReference arg
                                  return (arg', hint)) args
                 return $ CmmCall target' regs args' srt returns

        CmmCondBranch test dest
           -> do test' <- cmmExprConFold DataReference test
                 return $ case test' of
                   CmmLit (CmmInt 0 _) ->
                     CmmComment (mkFastString ("deleted: " ++
                                        showSDoc (pprStmt stmt)))

                   CmmLit (CmmInt n _) -> CmmBranch dest
                   other -> CmmCondBranch test' dest

        CmmSwitch expr ids
           -> do expr' <- cmmExprConFold DataReference expr
                 return $ CmmSwitch expr' ids

        other
           -> return other
cmmExprConFold :: ReferenceKind -> CmmExpr -> CmmOptM CmmExpr
cmmExprConFold referenceKind expr
   = case expr of
        CmmLoad addr rep
           -> do addr' <- cmmExprConFold DataReference addr
                 return $ CmmLoad addr' rep

        CmmMachOp mop args
           -- For MachOps, we first optimize the children, and then we try
           -- our hand at some constant-folding.
           -> do args' <- mapM (cmmExprConFold DataReference) args
                 return $ cmmMachOpFold mop args'

        CmmLit (CmmLabel lbl)
           -> do
                dflags <- getDynFlagsCmmOpt
                cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
        CmmLit (CmmLabelOff lbl off)
           -> do
                dflags <- getDynFlagsCmmOpt
                dynRef <- cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
                return $ cmmMachOpFold (MO_Add wordRep) [
                    dynRef,
                    (CmmLit $ CmmInt (fromIntegral off) wordRep)
                  ]
#if powerpc_TARGET_ARCH
        -- On powerpc (non-PIC), it's easier to jump directly to a label than
        -- to use the register table, so we replace these registers
        -- with the corresponding labels:
        CmmReg (CmmGlobal GCEnter1)
          | not opt_PIC
          -> cmmExprConFold referenceKind $
             CmmLit (CmmLabel (mkRtsCodeLabel SLIT( "__stg_gc_enter_1")))
        CmmReg (CmmGlobal GCFun)
          | not opt_PIC
          -> cmmExprConFold referenceKind $
             CmmLit (CmmLabel (mkRtsCodeLabel SLIT( "__stg_gc_fun")))
#endif
        CmmReg (CmmGlobal mid)
           -- Replace register leaves with appropriate StixTrees for
           -- the given target.  MagicIds which map to a reg on this
           -- arch are left unchanged.  For the rest, BaseReg is taken
           -- to mean the address of the reg table in MainCapability,
           -- and for all others we generate an indirection to its
           -- location in the register table.
           -> case get_GlobalReg_reg_or_addr mid of
                 Left  realreg -> return expr
                 Right baseRegAddr
                    -> case mid of
                          BaseReg -> cmmExprConFold DataReference baseRegAddr
                          other   -> cmmExprConFold DataReference
                                        (CmmLoad baseRegAddr (globalRegRep mid))
           -- eliminate zero offsets
        CmmRegOff reg 0
           -> cmmExprConFold referenceKind (CmmReg reg)

        CmmRegOff (CmmGlobal mid) offset
           -- RegOff leaves are just a shorthand form.  If the reg maps
           -- to a real reg, we keep the shorthand, otherwise, we just
           -- expand it and defer to the above code.
           -> case get_GlobalReg_reg_or_addr mid of
                Left  realreg -> return expr
                Right baseRegAddr
                   -> cmmExprConFold DataReference (CmmMachOp (MO_Add wordRep) [
                        CmmReg (CmmGlobal mid),
                        CmmLit (CmmInt (fromIntegral offset)
                                       wordRep)])
        other
           -> return other

-- -----------------------------------------------------------------------------