-- -----------------------------------------------------------------------------
-- (c) The University of Glasgow 1993-2004
-- This is the top-level module in the native code generator.
-- -----------------------------------------------------------------------------
-- The above warning suppression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and fix
-- any warnings in the module. See
--     http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#Warnings
module AsmCodeGen ( nativeCodeGen ) where

#include "HsVersions.h"
#include "nativeGen/NCG.h"

import PositionIndependentCode
import qualified RegAllocLinear as Linear
import qualified RegAllocColor  as Color
import qualified RegAllocStats  as Color
import qualified GraphColor     as Color
import CmmOpt           ( cmmMiniInline, cmmMachOpFold )
import Unique           ( Unique, getUnique )
import List             ( groupBy, sortBy )

#if powerpc_TARGET_ARCH
import StaticFlags      ( opt_Static, opt_PIC )

import Config           ( cProjectVersion )
import qualified Pretty
{-
The native-code generator has machine-independent and
machine-dependent modules.

This module ("AsmCodeGen") is the top-level machine-independent
module.  Before entering machine-dependent land, we do some
machine-independent optimisations (defined below) on the Cmm code.

We convert to the machine-specific 'Instr' datatype with
'cmmCodeGen', assuming an infinite supply of registers.  We then use
a machine-independent register allocator ('regAlloc') to rejoin
reality.  Obviously, 'regAlloc' has machine-specific helper
functions (see about "RegAllocInfo" below).

Finally, we order the basic blocks of the function so as to minimise
the number of jumps between blocks, by utilising fallthrough wherever
possible.

The machine-dependent bits break down as follows:

  * ["MachRegs"]  Everything about the target platform's machine
    registers (and immediate operands, and addresses, which tend to
    intermingle/interact with registers).

  * ["MachInstrs"]  Includes the 'Instr' datatype (possibly should
    have a module of its own), plus a miscellany of other things
    (e.g., 'targetDoubleSize', 'smStablePtrTable', ...)

  * ["MachCodeGen"]  is where 'Cmm' stuff turns into
    machine instructions.

  * ["PprMach"]  'pprInstr' turns an 'Instr' into text (well, really
    a 'Doc').

  * ["RegAllocInfo"]  In the register allocator, we manipulate
    'MRegsState's, which are 'BitSet's, one bit per machine register.
    When we want to say something about a specific machine register
    (e.g., ``it gets clobbered by this instruction''), we set/unset
    its bit.  Obviously, we do this 'BitSet' thing for efficiency
    reasons.

    The 'RegAllocInfo' module collects together the machine-specific
    info needed to do register allocation.

  * ["RegisterAlloc"]  The (machine-independent) register allocator.
-}
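-- As a rough roadmap, each top-level Cmm chunk is pushed through the passes
-- defined in this module in the following order (see 'cmmNativeGen' below):
--
--      fixAssignsTop  ->  cmmToCmm  ->  genMachCode  ->  regLiveness
--          ->  regAlloc (graph colouring or linear)  ->  shortcutBranches
--          ->  sequenceTop  ->  pprNatCmmTop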
-- -----------------------------------------------------------------------------
-- Top-level of the native codegen

nativeCodeGen :: DynFlags -> Handle -> UniqSupply -> [RawCmm] -> IO ()
nativeCodeGen dflags h us cmms

        let split_cmms  = concat $ map add_split cmms

                <- cmmNativeGens dflags h us split_cmms [] [] 0

        let (native, colorStats, linearStats)

                Opt_D_dump_asm "Asm code"
                (vcat $ map (docToSDoc . pprNatCmmTop) $ concat native)

        -- dump global NCG stats for graph coloring allocator
        (case concat $ catMaybes colorStats of

                -- build the global register conflict graph
                        = foldl Color.union Color.initGraph
                        $ [ Color.raGraph stat
                                | stat@Color.RegAllocStatsStart{} <- stats]

                dumpSDoc dflags Opt_D_dump_asm_stats "NCG stats"
                        $ Color.pprStats stats graphGlobal

                        Opt_D_dump_asm_conflicts "Register conflict graph"
                        $ Color.dotGraph Color.regDotColor trivColorable

        -- dump global NCG stats for linear allocator
        (case concat $ catMaybes linearStats of
                stats -> dumpSDoc dflags Opt_D_dump_asm_stats "NCG stats"
                        $ Linear.pprStats (concat native) stats)

        -- write out the imports
        Pretty.printDoc Pretty.LeftMode h
                $ makeImportsDoc (concat imports)

 where  add_split (Cmm tops)
                | dopt Opt_SplitObjs dflags = split_marker : tops

        split_marker = CmmProc [] mkSplitMarkerLabel [] (ListGraph [])
-- | Do native code generation on all these cmms.
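--      We emit the native code for each chunk as soon as it has been
--      generated, and force the accumulated statistics (see the 'seqString'
--      forcing below), so the output for the whole program is never all
--      retained in memory at once.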
cmmNativeGens dflags h us [] impAcc profAcc count
        = return (reverse impAcc, reverse profAcc)

cmmNativeGens dflags h us (cmm : cmms) impAcc profAcc count

        (us', native, imports, colorStats, linearStats)
                <- cmmNativeGen dflags us cmm count

        Pretty.printDoc Pretty.LeftMode h
                $ {-# SCC "pprNativeCode" #-} Pretty.vcat $ map pprNatCmmTop native

                if dopt Opt_D_dump_asm dflags
                        || dopt Opt_D_dump_asm_stats dflags

        let count'      = count + 1;

        -- force evaluation of all this stuff to avoid space leaks
        seqString (showSDoc $ vcat $ map ppr imports) `seq` return ()
        lsPprNative `seq` return ()
        count' `seq` return ()

        cmmNativeGens dflags h us' cmms
                ((lsPprNative, colorStats, linearStats) : profAcc)

 where  seqString []            = ()
        seqString (x:xs)        = x `seq` seqString xs `seq` ()
-- | Complete native code generation phase for a single top-level chunk of Cmm.
--      Dumps the output of each stage along the way.
--      Also returns the global conflict graph and NCG stats.
cmmNativeGen
        :: DynFlags
        -> UniqSupply
        -> RawCmmTop                            -- ^ the cmm to generate code for
        -> Int                                  -- ^ sequence number of this top thing
        -> IO ( UniqSupply
              , [NatCmmTop]                     -- native code
              , [CLabel]                        -- things imported by this cmm
              , Maybe [Color.RegAllocStats]     -- stats for the coloring register allocator
              , Maybe [Linear.RegAllocStats])   -- stats for the linear register allocator
cmmNativeGen dflags us cmm count

        -- rewrite assignments to global regs
        let (fixed_cmm, usFix)  =
                {-# SCC "fixAssignsTop" #-}
                initUs us $ fixAssignsTop cmm

        -- cmm to cmm optimisations
        let (opt_cmm, imports) =
                {-# SCC "cmmToCmm" #-}
                cmmToCmm dflags fixed_cmm

                Opt_D_dump_opt_cmm "Optimised Cmm"
                (pprCmm $ Cmm [opt_cmm])

        -- generate native code from cmm
        let ((native, lastMinuteImports), usGen) =
                {-# SCC "genMachCode" #-}
                initUs usFix $ genMachCode dflags opt_cmm

                Opt_D_dump_asm_native "Native code"
                (vcat $ map (docToSDoc . pprNatCmmTop) native)

        -- tag instructions with register liveness information
        let (withLiveness, usLive) =
                {-# SCC "regLiveness" #-}
                initUs usGen $ mapUs regLiveness native

                Opt_D_dump_asm_liveness "Liveness annotations added"
                (vcat $ map ppr withLiveness)
        -- allocate registers
        (alloced, usAlloc, ppr_raStatsColor, ppr_raStatsLinear) <-
         if ( dopt Opt_RegsGraph dflags
           || dopt Opt_RegsIterative dflags)

                -- the regs usable for allocation
                        = foldr (\r -> plusUFM_C unionUniqSets
                                        $ unitUFM (regClass r) (unitUniqSet r))
                                $ map RealReg allocatableRegs

                -- graph coloring register allocation
                let ((alloced, regAllocStats), usAlloc)
                        = {-# SCC "RegAlloc" #-}
                                (mkUniqSet [0..maxSpillSlots])

                -- dump out what happened during register allocation
                        Opt_D_dump_asm_regalloc "Registers allocated"
                        (vcat $ map (docToSDoc . pprNatCmmTop) alloced)

                        Opt_D_dump_asm_regalloc_stages "Build/spill stages"
                        (vcat $ map (\(stage, stats)
                                        -> text "# --------------------------"
                                        $$ text "# cmm " <> int count <> text " Stage " <> int stage
                                $ zip [0..] regAllocStats)

                        if dopt Opt_D_dump_asm_stats dflags
                         then Just regAllocStats else Nothing

                -- force evaluation of the Maybe to avoid space leak
                mPprStats `seq` return ()

                return  ( alloced, usAlloc

                -- do linear register allocation
                let ((alloced, regAllocStats), usAlloc)
                        = {-# SCC "RegAlloc" #-}
                                $ mapUs Linear.regAlloc withLiveness

                        Opt_D_dump_asm_regalloc "Registers allocated"
                        (vcat $ map (docToSDoc . pprNatCmmTop) alloced)

                        if dopt Opt_D_dump_asm_stats dflags
                         then Just (catMaybes regAllocStats) else Nothing

                -- force evaluation of the Maybe to avoid space leak
                mPprStats `seq` return ()

                return  ( alloced, usAlloc
        ---- shortcut branches
                {-# SCC "shortcutBranches" #-}
                shortcutBranches dflags alloced

                {-# SCC "sequenceBlocks" #-}
                map sequenceTop shorted

        let final_mach_code =
                {-# SCC "x86fp_kludge" #-}
                map x86fp_kludge sequenced

                , lastMinuteImports ++ imports


x86fp_kludge :: NatCmmTop -> NatCmmTop
x86fp_kludge top@(CmmData _ _) = top
x86fp_kludge top@(CmmProc info lbl params (ListGraph code)) =
        CmmProc info lbl params (ListGraph $ i386_insert_ffrees code)
-- | Build a doc for all the imports.
makeImportsDoc :: [CLabel] -> Pretty.Doc
makeImportsDoc imports

#if HAVE_SUBSECTIONS_VIA_SYMBOLS
                -- On recent versions of Darwin, the linker supports
                -- dead-stripping of code and data on a per-symbol basis.
                -- There's a hack to make this work in PprMach.pprNatCmmTop.
            Pretty.$$ Pretty.text ".subsections_via_symbols"

#if HAVE_GNU_NONEXEC_STACK
                -- On recent GNU ELF systems one can mark an object file
                -- as not requiring an executable stack. If all objects
                -- linked into a program have this note then the program
                -- will not use an executable stack, which is good for
                -- security. GHC generated code does not need an executable
                -- stack so add the note in:
            Pretty.$$ Pretty.text ".section .note.GNU-stack,\"\",@progbits"

#if !defined(darwin_TARGET_OS)
                -- And just because every other compiler does, let's stick in
                -- an identifier directive: .ident "GHC x.y.z"
            Pretty.$$ let compilerIdent = Pretty.text "GHC" Pretty.<+>
                                          Pretty.text cProjectVersion
                       in Pretty.text ".ident" Pretty.<+>
                          Pretty.doubleQuotes compilerIdent

        -- Generate "symbol stubs" for all external symbols that might
        -- come from a dynamic library.
        dyld_stubs :: [CLabel] -> Pretty.Doc
{-      dyld_stubs imps = Pretty.vcat $ map pprDyldSymbolStub $
                                        map head $ group $ sort imps -}

        -- (Hack) sometimes two Labels pretty-print the same, but have
        -- different uniques; so we compare their text versions...
                | needImportedSymbols
                        (pprGotDeclaration :) $
                        map (pprImportedSymbol . fst . head) $
                        groupBy (\(_,a) (_,b) -> a == b) $
                        sortBy  (\(_,a) (_,b) -> compare a b) $

        doPpr lbl = (lbl, Pretty.render $ pprCLabel lbl astyle)
        astyle = mkCodeStyle AsmStyle
-- -----------------------------------------------------------------------------
-- Sequencing the basic blocks

-- Cmm BasicBlocks are self-contained entities: they always end in a
-- jump, either non-local or to another basic block in the same proc.
-- In this phase, we attempt to place the basic blocks in a sequence
-- such that as many of the local jumps as possible turn into
-- fallthroughs.

sequenceTop :: NatCmmTop -> NatCmmTop
sequenceTop top@(CmmData _ _) = top
sequenceTop (CmmProc info lbl params (ListGraph blocks)) =
  CmmProc info lbl params (ListGraph $ makeFarBranches $ sequenceBlocks blocks)

-- The algorithm is very simple (and stupid): we make a graph out of
-- the blocks where there is an edge from one block to another iff the
-- first block ends by jumping to the second.  Then we topologically
-- sort this graph.  Then traverse the list: for each block, we first
-- output the block, then if it has an out edge, we move the
-- destination of the out edge to the front of the list, and continue.

-- FYI, the classic layout for basic blocks uses postorder DFS; this
-- algorithm is implemented in cmm/ZipCfg.hs (NR 6 Sep 2007).
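-- For example, given blocks A (which jumps to C), B (which jumps to D), C and
-- D, we emit A, then pull C to the front of the remaining list so the A->C
-- jump becomes a fallthrough, and similarly for B->D, giving the final order
-- A, C, B, D with both local jumps eliminated.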
sequenceBlocks :: [NatBasicBlock] -> [NatBasicBlock]
sequenceBlocks [] = []
sequenceBlocks (entry:blocks) =
  seqBlocks (mkNode entry : reverse (flattenSCCs (sccBlocks blocks)))
  -- the first block is the entry point ==> it must remain at the start.

sccBlocks :: [NatBasicBlock] -> [SCC (NatBasicBlock,Unique,[Unique])]
sccBlocks blocks = stronglyConnCompFromEdgedVerticesR (map mkNode blocks)

getOutEdges :: [Instr] -> [Unique]
getOutEdges instrs = case jumpDests (last instrs) [] of
                       [one] -> [getUnique one]
                       _     -> []
        -- we're only interested in the last instruction of
        -- the block, and only if it has a single destination.

mkNode block@(BasicBlock id instrs) = (block, getUnique id, getOutEdges instrs)

seqBlocks ((block,_,[]) : rest)
  = block : seqBlocks rest
seqBlocks ((block@(BasicBlock id instrs),_,[next]) : rest)
  | can_fallthrough = BasicBlock id (init instrs) : seqBlocks rest'
  | otherwise       = block : seqBlocks rest'
  where
    (can_fallthrough, rest') = reorder next [] rest
    -- TODO: we should do a better job for cycles; try to maximise the
    -- fallthroughs within a loop.
seqBlocks _ = panic "AsmCodegen:seqBlocks"
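-- 'reorder id accum blocks' searches 'blocks' for the block labelled 'id'.
-- If it is found, it is moved to the front of the remaining list (so the
-- caller can fall through into it) and we report success; otherwise the
-- list is returned unchanged and the jump has to stay.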
reorder id accum [] = (False, reverse accum)
reorder id accum (b@(block,id',out) : rest)
  | id == id'  = (True, (block,id,out) : reverse accum ++ rest)
  | otherwise  = reorder id (b:accum) rest
-- -----------------------------------------------------------------------------
-- Making far branches

-- Conditional branches on PowerPC are limited to +-32KB; if our Procs get too
-- big, we have to work around this limitation.
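-- PowerPC instructions are 4 bytes each, so the +-32KB conditional-branch
-- range covers at most 8192 instructions in either direction; 'nearLimit'
-- below stays well under that, to leave slack for pseudo-instructions that
-- expand to several real instructions.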
makeFarBranches :: [NatBasicBlock] -> [NatBasicBlock]

#if powerpc_TARGET_ARCH
makeFarBranches blocks
    | last blockAddresses < nearLimit = blocks
    | otherwise = zipWith handleBlock blockAddresses blocks
    where
        blockAddresses = scanl (+) 0 $ map blockLen blocks
        blockLen (BasicBlock _ instrs) = length instrs

        handleBlock addr (BasicBlock id instrs)
                = BasicBlock id (zipWith makeFar [addr..] instrs)

        makeFar addr (BCC ALWAYS tgt) = BCC ALWAYS tgt
        makeFar addr (BCC cond tgt)
            | abs (addr - targetAddr) >= nearLimit

            where Just targetAddr = lookupUFM blockAddressMap tgt
        makeFar addr other = other

        nearLimit = 7000 -- 8192 instructions are allowed; let's keep some
                         -- distance, as we have a few pseudo-insns that are
                         -- pretty-printed as multiple instructions,
                         -- and it's just not worth the effort to calculate
                         -- things exactly.

        blockAddressMap = listToUFM $ zip (map blockId blocks) blockAddresses
-- -----------------------------------------------------------------------------
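-- Shortcutting branches
--
-- A block that consists of nothing but a jump can be dropped, and every
-- branch to it redirected straight to its destination.  For example, if a
-- block L2 contains only a jump to L3, branches to L2 elsewhere in the proc
-- are rewritten to branch to L3 and L2 itself is discarded (unless it is the
-- entry block, which must stay because a global label points to it).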
shortcutBranches :: DynFlags -> [NatCmmTop] -> [NatCmmTop]
shortcutBranches dflags tops
  | optLevel dflags < 1 = tops    -- only with -O or higher
  | otherwise           = map (apply_mapping mapping) tops'
  where
    (tops', mappings) = mapAndUnzip build_mapping tops
    mapping = foldr plusUFM emptyUFM mappings

    build_mapping top@(CmmData _ _) = (top, emptyUFM)
    build_mapping (CmmProc info lbl params (ListGraph []))
        = (CmmProc info lbl params (ListGraph []), emptyUFM)
    build_mapping (CmmProc info lbl params (ListGraph (head:blocks)))
        = (CmmProc info lbl params (ListGraph (head:others)), mapping)
        -- drop the shorted blocks, but don't ever drop the first one,
        -- because it is pointed to by a global label.
        where
          -- find all the blocks that just consist of a jump that can be
          -- shorted.
          (shortcut_blocks, others) = partitionWith split blocks
          split (BasicBlock id [insn]) | Just dest <- canShortcut insn
                                       = Left (id, dest)
          split other = Right other

          -- build a mapping from BlockId to JumpDest for shorting branches
          mapping = foldl add emptyUFM shortcut_blocks
          add ufm (id,dest) = addToUFM ufm id dest

    apply_mapping ufm (CmmData sec statics)
        = CmmData sec (map (shortcutStatic (lookupUFM ufm)) statics)
        -- we need to get the jump tables, so apply the mapping to the entries
    apply_mapping ufm (CmmProc info lbl params (ListGraph blocks))
        = CmmProc info lbl params (ListGraph $ map short_bb blocks)
        where
          short_bb (BasicBlock id insns) = BasicBlock id $! map short_insn insns
          short_insn i = shortcutJump (lookupUFM ufm) i
                 -- shortcutJump should apply the mapping repeatedly,
                 -- just in case we can short multiple branches.
-- -----------------------------------------------------------------------------
-- Instruction selection

-- Native code instruction selection for a chunk of stix code.  For
-- this part of the computation, we switch from the UniqSM monad to
-- the NatM monad.  The latter carries not only a Unique, but also an
-- Int denoting the current C stack pointer offset in the generated
-- code; this is needed for creating correct spill offsets on
-- architectures which don't offer, or for which it would be
-- prohibitively expensive to employ, a frame pointer register.  Viz,
-- x86.

-- The offset is measured in bytes, and indicates the difference
-- between the current (simulated) C stack-ptr and the value it was at
-- the beginning of the block.  For stacks which grow down, this value
-- should be either zero or negative.

-- Switching between the two monads whilst carrying along the same
-- Unique supply breaks abstraction.  Is that bad?
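-- For example, on x86 a sequence that pushes two 4-byte call arguments runs
-- at a simulated delta of -8 until the stack pointer is restored, and any
-- spill-slot addresses generated in between must account for that offset;
-- 'genMachCode' below then checks that the delta is back to zero once the
-- whole chunk has been generated.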
genMachCode :: DynFlags -> RawCmmTop -> UniqSM ([NatCmmTop], [CLabel])

genMachCode dflags cmm_top
  = do  { initial_us <- getUs
        ; let initial_st           = mkNatM_State initial_us 0 dflags
              (new_tops, final_st) = initNat initial_st (cmmTopCodeGen cmm_top)
              final_delta          = natm_delta final_st
              final_imports        = natm_imports final_st
        ; if   final_delta == 0
          then return (new_tops, final_imports)
          else pprPanic "genMachCode: nonzero final delta" (int final_delta)
        }
-- -----------------------------------------------------------------------------
-- Fixup assignments to global registers so that they assign to
-- locations within the RegTable, if appropriate.

-- Note that we currently don't fixup reads here: they're done by
-- the generic optimiser below, to avoid having two separate passes
-- over the Cmm.
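-- For example, on a target where the global register Hp is not kept in a
-- machine register, an assignment  Hp = e  is rewritten here into a store
-- into Hp's slot in the register table, i.e.  *(BaseReg + offset of Hp) = e,
-- while globals that do live in machine registers are left untouched.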
fixAssignsTop :: RawCmmTop -> UniqSM RawCmmTop
fixAssignsTop top@(CmmData _ _) = returnUs top
fixAssignsTop (CmmProc info lbl params (ListGraph blocks)) =
  mapUs fixAssignsBlock blocks `thenUs` \ blocks' ->
  returnUs (CmmProc info lbl params (ListGraph blocks'))

fixAssignsBlock :: CmmBasicBlock -> UniqSM CmmBasicBlock
fixAssignsBlock (BasicBlock id stmts) =
  fixAssigns stmts `thenUs` \ stmts' ->
  returnUs (BasicBlock id stmts')

fixAssigns :: [CmmStmt] -> UniqSM [CmmStmt]
fixAssigns stmts =
  mapUs fixAssign stmts `thenUs` \ stmtss ->
  returnUs (concat stmtss)

fixAssign :: CmmStmt -> UniqSM [CmmStmt]
fixAssign (CmmAssign (CmmGlobal reg) src)
  | Left  realreg     <- reg_or_addr
  = returnUs [CmmAssign (CmmGlobal reg) src]
  | Right baseRegAddr <- reg_or_addr
  = returnUs [CmmStore baseRegAddr src]
  -- Replace register leaves with appropriate StixTrees for
  -- the given target.  GlobalRegs which map to a reg on this
  -- arch are left unchanged.  Assigning to BaseReg is always
  -- illegal, so we check for that.
  where
    reg_or_addr = get_GlobalReg_reg_or_addr reg

fixAssign other_stmt = returnUs [other_stmt]
-- -----------------------------------------------------------------------------
-- Generic Cmm optimiser

{-
Here we do:

  (a) Constant folding
  (b) Simple inlining: a temporary which is assigned to and then
      used, once, can be shorted.
  (c) Replacement of references to GlobalRegs which do not have
      machine registers by the appropriate memory load (eg.
      Hp ==> *(BaseReg + 34) ).
  (d) Position independent code and dynamic linking
        (i)  introduce the appropriate indirections
             and position independent refs
        (ii) compile a list of imported symbols

Ideas for other things we could do (ToDo):

  - shortcut jumps-to-jumps
  - eliminate dead code blocks
  - simple CSE: if an expr is assigned to a temp, then replace later occs of
    that expr with the temp, until the expr is no longer valid (can push through
    temp assignments, and certain assigns to mem...)
-}
cmmToCmm :: DynFlags -> RawCmmTop -> (RawCmmTop, [CLabel])
cmmToCmm _ top@(CmmData _ _) = (top, [])
cmmToCmm dflags (CmmProc info lbl params (ListGraph blocks)) = runCmmOpt dflags $ do
  blocks' <- mapM cmmBlockConFold (cmmMiniInline blocks)
  return $ CmmProc info lbl params (ListGraph blocks')
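-- CmmOptM is a small combined reader/writer monad: it threads the DynFlags
-- through unchanged and accumulates the CLabels of any imported symbols that
-- the position-independent-code machinery reports via 'addImportCmmOpt'.
-- The unboxed-tuple result type presumably just avoids allocating a pair on
-- every bind.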
newtype CmmOptM a = CmmOptM (([CLabel], DynFlags) -> (# a, [CLabel] #))

instance Monad CmmOptM where
  return x = CmmOptM $ \(imports, _) -> (# x, imports #)
  (CmmOptM f) >>= g =
    CmmOptM $ \(imports, dflags) ->
      case f (imports, dflags) of
        (# x, imports' #) ->
          case g x of
            CmmOptM g' -> g' (imports', dflags)

addImportCmmOpt :: CLabel -> CmmOptM ()
addImportCmmOpt lbl = CmmOptM $ \(imports, dflags) -> (# (), lbl:imports #)

getDynFlagsCmmOpt :: CmmOptM DynFlags
getDynFlagsCmmOpt = CmmOptM $ \(imports, dflags) -> (# dflags, imports #)

runCmmOpt :: DynFlags -> CmmOptM a -> (a, [CLabel])
runCmmOpt dflags (CmmOptM f) = case f ([], dflags) of
                                 (# result, imports #) -> (result, imports)
cmmBlockConFold :: CmmBasicBlock -> CmmOptM CmmBasicBlock
cmmBlockConFold (BasicBlock id stmts) = do
  stmts' <- mapM cmmStmtConFold stmts
  return $ BasicBlock id stmts'

cmmStmtConFold stmt
   = case stmt of
        CmmAssign reg src
           -> do src' <- cmmExprConFold DataReference src
                 return $ case src' of
                   CmmReg reg' | reg == reg' -> CmmNop
                   new_src -> CmmAssign reg new_src

        CmmStore addr src
           -> do addr' <- cmmExprConFold DataReference addr
                 src'  <- cmmExprConFold DataReference src
                 return $ CmmStore addr' src'

        CmmJump addr regs
           -> do addr' <- cmmExprConFold JumpReference addr
                 return $ CmmJump addr' regs

        CmmCall target regs args srt returns
           -> do target' <- case target of
                              CmmCallee e conv -> do
                                e' <- cmmExprConFold CallReference e
                                return $ CmmCallee e' conv
                              other -> return other
                 args' <- mapM (\(CmmHinted arg hint) -> do
                                  arg' <- cmmExprConFold DataReference arg
                                  return (CmmHinted arg' hint)) args
                 return $ CmmCall target' regs args' srt returns

        CmmCondBranch test dest
           -> do test' <- cmmExprConFold DataReference test
                 return $ case test' of
                   CmmLit (CmmInt 0 _) ->
                     CmmComment (mkFastString ("deleted: " ++
                                        showSDoc (pprStmt stmt)))

                   CmmLit (CmmInt n _) -> CmmBranch dest
                   other -> CmmCondBranch test' dest

        CmmSwitch expr ids
           -> do expr' <- cmmExprConFold DataReference expr
                 return $ CmmSwitch expr' ids
cmmExprConFold referenceKind expr
   = case expr of
        CmmLoad addr rep
           -> do addr' <- cmmExprConFold DataReference addr
                 return $ CmmLoad addr' rep

        CmmMachOp mop args
           -- For MachOps, we first optimize the children, and then we try
           -- our hand at some constant-folding.
           -> do args' <- mapM (cmmExprConFold DataReference) args
                 return $ cmmMachOpFold mop args'

        CmmLit (CmmLabel lbl)
           -> do
                dflags <- getDynFlagsCmmOpt
                cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
        CmmLit (CmmLabelOff lbl off)
           -> do
                dflags <- getDynFlagsCmmOpt
                dynRef <- cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
                return $ cmmMachOpFold (MO_Add wordWidth) [
                    dynRef,
                    (CmmLit $ CmmInt (fromIntegral off) wordWidth)
                  ]
#if powerpc_TARGET_ARCH
        -- On powerpc (non-PIC), it's easier to jump directly to a label than
        -- to use the register table, so we replace these registers
        -- with the corresponding labels:
        CmmReg (CmmGlobal EagerBlackholeInfo)
           -> cmmExprConFold referenceKind $
              CmmLit (CmmLabel (mkRtsCodeLabel (sLit "__stg_EAGER_BLACKHOLE_INFO")))
        CmmReg (CmmGlobal GCEnter1)
           -> cmmExprConFold referenceKind $
              CmmLit (CmmLabel (mkRtsCodeLabel (sLit "__stg_gc_enter_1")))
        CmmReg (CmmGlobal GCFun)
           -> cmmExprConFold referenceKind $
              CmmLit (CmmLabel (mkRtsCodeLabel (sLit "__stg_gc_fun")))

        CmmReg (CmmGlobal mid)
           -- Replace register leaves with appropriate StixTrees for
           -- the given target.  MagicIds which map to a reg on this
           -- arch are left unchanged.  For the rest, BaseReg is taken
           -- to mean the address of the reg table in MainCapability,
           -- and for all others we generate an indirection to its
           -- location in the register table.
           -> case get_GlobalReg_reg_or_addr mid of
                 Left  realreg -> return expr
                 Right baseRegAddr
                    -> case mid of
                          BaseReg -> cmmExprConFold DataReference baseRegAddr
                          other   -> cmmExprConFold DataReference
                                        (CmmLoad baseRegAddr (globalRegType mid))
        -- eliminate zero offsets
        CmmRegOff reg 0
           -> cmmExprConFold referenceKind (CmmReg reg)

        CmmRegOff (CmmGlobal mid) offset
           -- CmmRegOff leaves are just a shorthand form.  If the reg maps
           -- to a real reg, we keep the shorthand, otherwise, we just
           -- expand it and defer to the above code.
           -> case get_GlobalReg_reg_or_addr mid of
                Left  realreg -> return expr
                Right baseRegAddr
                   -> cmmExprConFold DataReference (CmmMachOp (MO_Add wordWidth) [
                          CmmReg (CmmGlobal mid),
                          CmmLit (CmmInt (fromIntegral offset)
                                         wordWidth)])

-- -----------------------------------------------------------------------------