-- -----------------------------------------------------------------------------
--
-- (c) The University of Glasgow 1993-2004
--
-- This is the top-level module in the native code generator.
--
-- -----------------------------------------------------------------------------
module AsmCodeGen ( nativeCodeGen ) where

#include "HsVersions.h"
#include "nativeGen/NCG.h"
import PositionIndependentCode
import CmmOpt          ( cmmMiniInline, cmmMachOpFold )
import PprCmm          ( pprStmt, pprCmms )
import Unique          ( Unique, getUnique )
import List            ( groupBy, sortBy, intersperse )
import ErrUtils        ( dumpIfSet_dyn )
import StaticFlags     ( opt_Static, opt_PIC )
import Config          ( cProjectVersion )
import qualified Pretty
{-
The native-code generator has machine-independent and
machine-dependent modules.

This module ("AsmCodeGen") is the top-level machine-independent
module.  Before entering machine-dependent land, we do some
machine-independent optimisations (defined below) on the Cmm code.

We convert to the machine-specific 'Instr' datatype with
'cmmCodeGen', assuming an infinite supply of registers.  We then use
a machine-independent register allocator ('regAlloc') to rejoin
reality.  Obviously, 'regAlloc' has machine-specific helper
functions (see about "RegAllocInfo" below).

Finally, we order the basic blocks of the function so as to minimise
the number of jumps between blocks, by utilising fallthrough wherever
possible.

The machine-dependent bits break down as follows:

  * ["MachRegs"]  Everything about the target platform's machine
    registers (and immediate operands, and addresses, which tend to
    intermingle/interact with registers).

  * ["MachInstrs"]  Includes the 'Instr' datatype (possibly should
    have a module of its own), plus a miscellany of other things
    (e.g., 'targetDoubleSize', 'smStablePtrTable', ...)

  * ["MachCodeGen"]  is where 'Cmm' stuff turns into machine
    instructions.

  * ["PprMach"]  'pprInstr' turns an 'Instr' into text (well, really
    a 'Doc').

  * ["RegAllocInfo"]  In the register allocator, we manipulate
    'MRegsState's, which are 'BitSet's, one bit per machine register.
    When we want to say something about a specific machine register
    (e.g., ``it gets clobbered by this instruction''), we set/unset
    its bit.  Obviously, we do this 'BitSet' thing for efficiency
    reasons.

    The 'RegAllocInfo' module collects together the machine-specific
    info needed to do register allocation.

  * ["RegisterAlloc"]  The (machine-independent) register allocator.
-}
-- -----------------------------------------------------------------------------
-- Top-level of the native codegen

-- NB. We *lazily* compile each block of code for space reasons.
nativeCodeGen :: DynFlags -> [RawCmm] -> UniqSupply -> IO Pretty.Doc
nativeCodeGen dflags cmms us
  = let (res, _) = initUs us $
           cgCmm (concat (map add_split cmms))

        cgCmm :: [RawCmmTop] -> UniqSM (RawCmm, Pretty.Doc, [CLabel])
        cgCmm tops =
           lazyMapUs (cmmNativeGen dflags) tops  `thenUs` \ results ->
           case unzip3 results of { (cmms,docs,imps) ->
           returnUs (Cmm cmms, my_vcat docs, concat imps) }
    in
    case res of { (ppr_cmms, insn_sdoc, imports) -> do
    dumpIfSet_dyn dflags Opt_D_dump_opt_cmm "Optimised Cmm" (pprCmms [ppr_cmms])
    return (insn_sdoc Pretty.$$ dyld_stubs imports
#if HAVE_SUBSECTIONS_VIA_SYMBOLS
                -- On recent versions of Darwin, the linker supports
                -- dead-stripping of code and data on a per-symbol basis.
                -- There's a hack to make this work in PprMach.pprNatCmmTop.
            Pretty.$$ Pretty.text ".subsections_via_symbols"
#endif
#if HAVE_GNU_NONEXEC_STACK
                -- On recent GNU ELF systems one can mark an object file
                -- as not requiring an executable stack. If all objects
                -- linked into a program have this note then the program
                -- will not use an executable stack, which is good for
                -- security. GHC-generated code does not need an executable
                -- stack, so add the note in:
            Pretty.$$ Pretty.text ".section .note.GNU-stack,\"\",@progbits"
#endif
#if !defined(darwin_TARGET_OS)
                -- And just because every other compiler does, let's stick in
                -- an identifier directive: .ident "GHC x.y.z"
            Pretty.$$ let compilerIdent = Pretty.text "GHC" Pretty.<+>
                                          Pretty.text cProjectVersion
                       in Pretty.text ".ident" Pretty.<+>
                          Pretty.doubleQuotes compilerIdent
#endif
            )
   }

  where
    add_split (Cmm tops)
        | dopt Opt_SplitObjs dflags = split_marker : tops
        | otherwise                 = tops

    split_marker = CmmProc [] mkSplitMarkerLabel [] []
    -- Generate "symbol stubs" for all external symbols that might
    -- come from a dynamic library.
{-  dyld_stubs imps = Pretty.vcat $ map pprDyldSymbolStub $
                                    map head $ group $ sort imps -}

    -- (Hack) sometimes two Labels pretty-print the same, but have
    -- different uniques; so we compare their text versions...
    dyld_stubs imps
        | needImportedSymbols
          = Pretty.vcat $
            (pprGotDeclaration :) $
            map (pprImportedSymbol . fst . head) $
            groupBy (\(_,a) (_,b) -> a == b) $
            sortBy (\(_,a) (_,b) -> compare a b) $
            map doPpr $
            imps
        | otherwise
          = Pretty.empty

        where doPpr lbl = (lbl, Pretty.render $ pprCLabel lbl astyle)
              astyle = mkCodeStyle AsmStyle
#ifndef NCG_DEBUG
    my_vcat sds = Pretty.vcat sds
#else
    my_vcat sds = Pretty.vcat (intersperse marker sds)
      where marker = Pretty.char ' '
                     Pretty.$$ Pretty.ptext SLIT("# ___ncg_debug_marker")
                     Pretty.$$ Pretty.char ' '
#endif
-- -----------------------------------------------------------------------------
-- Complete native code generation phase for a single top-level chunk
-- of Cmm.

cmmNativeGen :: DynFlags -> RawCmmTop -> UniqSM (RawCmmTop, Pretty.Doc, [CLabel])
cmmNativeGen dflags cmm
   = {-# SCC "fixAssigns"       #-}
        fixAssignsTop cmm            `thenUs` \ fixed_cmm ->
     {-# SCC "genericOpt"       #-}
        cmmToCmm dflags fixed_cmm    `bind`   \ (cmm, imports) ->
     (if dopt Opt_D_dump_opt_cmm dflags  -- space leak avoidance
        then cmm
        else CmmData Text [])            `bind`   \ ppr_cmm ->
     {-# SCC "genMachCode"      #-}
        genMachCode dflags cmm       `thenUs` \ (pre_regalloc, lastMinuteImports) ->
     {-# SCC "regAlloc"         #-}
        mapUs regAlloc pre_regalloc  `thenUs` \ with_regs ->
     {-# SCC "shortcutBranches" #-}
        shortcutBranches dflags with_regs `bind` \ shorted ->
     {-# SCC "sequenceBlocks"   #-}
        map sequenceTop shorted      `bind`   \ sequenced ->
     {-# SCC "x86fp_kludge"     #-}
        map x86fp_kludge sequenced   `bind`   \ final_mach_code ->
     Pretty.vcat (map pprNatCmmTop final_mach_code) `bind` \ final_sdoc ->

     returnUs (ppr_cmm, final_sdoc Pretty.$$ Pretty.text "", lastMinuteImports ++ imports)
     where
        x86fp_kludge :: NatCmmTop -> NatCmmTop
        x86fp_kludge top@(CmmData _ _) = top
#if i386_TARGET_ARCH
        x86fp_kludge top@(CmmProc info lbl params code) =
                CmmProc info lbl params (map bb_i386_insert_ffrees code)
                where
                        bb_i386_insert_ffrees (BasicBlock id instrs) =
                                BasicBlock id (i386_insert_ffrees instrs)
#else
        x86fp_kludge top = top
#endif
-- -----------------------------------------------------------------------------
-- Sequencing the basic blocks

-- Cmm BasicBlocks are self-contained entities: they always end in a
-- jump, either non-local or to another basic block in the same proc.
-- In this phase, we attempt to place the basic blocks in a sequence
-- such that as many of the local jumps as possible turn into
-- fallthroughs.

sequenceTop :: NatCmmTop -> NatCmmTop
sequenceTop top@(CmmData _ _) = top
sequenceTop (CmmProc info lbl params blocks) =
  CmmProc info lbl params (makeFarBranches $ sequenceBlocks blocks)
-- The algorithm is very simple (and stupid): we make a graph out of
-- the blocks where there is an edge from one block to another iff the
-- first block ends by jumping to the second.  Then we topologically
-- sort this graph.  Then traverse the list: for each block, we first
-- output the block, then if it has an out edge, we move the
-- destination of the out edge to the front of the list, and continue.
sequenceBlocks :: [NatBasicBlock] -> [NatBasicBlock]
sequenceBlocks [] = []
sequenceBlocks (entry:blocks) =
  seqBlocks (mkNode entry : reverse (flattenSCCs (sccBlocks blocks)))
  -- the first block is the entry point ==> it must remain at the start.

sccBlocks :: [NatBasicBlock] -> [SCC (NatBasicBlock,Unique,[Unique])]
sccBlocks blocks = stronglyConnCompR (map mkNode blocks)

getOutEdges :: [Instr] -> [Unique]
getOutEdges instrs = case jumpDests (last instrs) [] of
                       [one] -> [getUnique one]
                       _many -> []
        -- we're only interested in the last instruction of
        -- the block, and only if it has a single destination.

mkNode block@(BasicBlock id instrs) = (block, getUnique id, getOutEdges instrs)
seqBlocks [] = []
seqBlocks ((block,_,[]) : rest)
  = block : seqBlocks rest
seqBlocks ((block@(BasicBlock id instrs),_,[next]) : rest)
  | can_fallthrough = BasicBlock id (init instrs) : seqBlocks rest'
  | otherwise       = block : seqBlocks rest'
  where
        (can_fallthrough, rest') = reorder next [] rest
        -- TODO: we should do a better job for cycles; try to maximise the
        -- fallthroughs within a loop.
seqBlocks _ = panic "AsmCodegen:seqBlocks"

reorder id accum [] = (False, reverse accum)
reorder id accum (b@(block,id',out) : rest)
  | id == id'  = (True, (block,id,out) : reverse accum ++ rest)
  | otherwise  = reorder id (b:accum) rest
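
-- The following is purely illustrative and not used by the compiler: it
-- restates the "move the out-edge's destination to the front" traversal
-- described above over a toy association list of (block id, out edges).
-- 'demoSeq' is a made-up name for this sketch.
--
--   demoSeq [("A",["C"]), ("B",["D"]), ("C",["B"]), ("D",[])]
--     == ["A","C","B","D"]   -- every local jump becomes a fallthrough
--
demoSeq :: Eq id => [(id, [id])] -> [id]
demoSeq [] = []
demoSeq ((i, outs) : rest) = i : demoSeq (pull outs rest)
  where
    -- if the block has exactly one out edge, move its target (when it
    -- is still in the list) to the front of the remaining blocks
    pull [next] bs = case break ((== next) . fst) bs of
                       (before, target : after) -> target : before ++ after
                       _                        -> bs
    pull _      bs = bs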
-- -----------------------------------------------------------------------------
-- Making far branches

-- Conditional branches on PowerPC are limited to +-32KB; if our Procs get too
-- big, we have to work around this limitation.
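-- (A PowerPC conditional branch can only reach targets within +-32KB of
--  itself; at 4 bytes per instruction that is 32768 / 4 = 8192 instructions
--  in either direction, which is why the block "addresses" computed below
--  are simply instruction counts, compared against the more conservative
--  'nearLimit'.)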
makeFarBranches :: [NatBasicBlock] -> [NatBasicBlock]
#if powerpc_TARGET_ARCH
makeFarBranches blocks
    | last blockAddresses < nearLimit = blocks
    | otherwise = zipWith handleBlock blockAddresses blocks
    where
        blockAddresses = scanl (+) 0 $ map blockLen blocks
        blockLen (BasicBlock _ instrs) = length instrs

        handleBlock addr (BasicBlock id instrs)
                = BasicBlock id (zipWith makeFar [addr..] instrs)

        makeFar addr (BCC ALWAYS tgt) = BCC ALWAYS tgt
        makeFar addr (BCC cond tgt)
            | abs (addr - targetAddr) >= nearLimit
            = BCCFAR cond tgt
            | otherwise
            = BCC cond tgt
            where Just targetAddr = lookupUFM blockAddressMap tgt
        makeFar addr other = other

        nearLimit = 7000   -- 8192 instructions are allowed; let's keep some
                           -- distance, as we have a few pseudo-insns that are
                           -- pretty-printed as multiple instructions,
                           -- and it's just not worth the effort to calculate
                           -- things exactly

        blockAddressMap = listToUFM $ zip (map blockId blocks) blockAddresses
#else
makeFarBranches = id
#endif
-- -----------------------------------------------------------------------------
-- Shortcutting branches

shortcutBranches :: DynFlags -> [NatCmmTop] -> [NatCmmTop]
shortcutBranches dflags tops
  | optLevel dflags < 1 = tops    -- only with -O or higher
  | otherwise           = map (apply_mapping mapping) tops'
  where
    (tops', mappings) = mapAndUnzip build_mapping tops
    mapping = foldr plusUFM emptyUFM mappings
build_mapping top@(CmmData _ _) = (top, emptyUFM)
build_mapping (CmmProc info lbl params [])
  = (CmmProc info lbl params [], emptyUFM)
build_mapping (CmmProc info lbl params (head:blocks))
  = (CmmProc info lbl params (head:others), mapping)
        -- drop the shorted blocks, but don't ever drop the first one,
        -- because it is pointed to by a global label.
  where
    -- find all the blocks that just consist of a jump that can be
    -- shorted.
    (shortcut_blocks, others) = partitionWith split blocks
    split (BasicBlock id [insn]) | Just dest <- canShortcut insn
                                 = Left (id, dest)
    split other = Right other

    -- build a mapping from BlockId to JumpDest for shorting branches
    mapping = foldl add emptyUFM shortcut_blocks
    add ufm (id,dest) = addToUFM ufm id dest
apply_mapping ufm (CmmData sec statics)
  = CmmData sec (map (shortcutStatic (lookupUFM ufm)) statics)
  -- we need to get the jump tables, so apply the mapping to the entries
  -- of a CmmData too.
apply_mapping ufm (CmmProc info lbl params blocks)
  = CmmProc info lbl params (map short_bb blocks)
  where
    short_bb (BasicBlock id insns) = BasicBlock id $! map short_insn insns
    short_insn i = shortcutJump (lookupUFM ufm) i
        -- shortcutJump should apply the mapping repeatedly,
        -- just in case we can short multiple branches.
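
-- For example (schematically): if block L1 consists of nothing but a jump
-- to L2, build_mapping records L1 :-> L2 and drops L1 from the proc;
-- apply_mapping then rewrites every branch and jump-table entry that
-- targeted L1 so that it goes straight to L2 (shortcutJump and
-- shortcutStatic chase chains of such mappings).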
-- -----------------------------------------------------------------------------
-- Instruction selection

-- Native code instruction selection for a chunk of Cmm code.  For
-- this part of the computation, we switch from the UniqSM monad to
-- the NatM monad.  The latter carries not only a Unique, but also an
-- Int denoting the current C stack pointer offset in the generated
-- code; this is needed for creating correct spill offsets on
-- architectures which don't offer, or for which it would be
-- prohibitively expensive to employ, a frame pointer register.  Viz,
-- x86.

-- The offset is measured in bytes, and indicates the difference
-- between the current (simulated) C stack-ptr and the value it was at
-- the beginning of the block.  For stacks which grow down, this value
-- should be either zero or negative.

-- Switching between the two monads whilst carrying along the same
-- Unique supply breaks abstraction.  Is that bad?
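-- (Roughly: if the generated code has pushed, say, 8 bytes of outgoing
--  arguments onto the C stack, the simulated offset is -8, and any
--  stack-relative addresses emitted while those bytes are live have to be
--  adjusted by that amount.  The "8 bytes" is only an illustrative figure.)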
genMachCode :: DynFlags -> RawCmmTop -> UniqSM ([NatCmmTop], [CLabel])

genMachCode dflags cmm_top
  = do { initial_us <- getUs
       ; let initial_st            = mkNatM_State initial_us 0 dflags
             (new_tops, final_st)  = initNat initial_st (cmmTopCodeGen cmm_top)
             final_delta           = natm_delta final_st
             final_imports         = natm_imports final_st
       ; if final_delta == 0
         then return (new_tops, final_imports)
         else pprPanic "genMachCode: nonzero final delta" (int final_delta)
       }
-- -----------------------------------------------------------------------------
-- Fixup assignments to global registers so that they assign to
-- locations within the RegTable, if appropriate.

-- Note that we currently don't fixup reads here: they're done by
-- the generic optimiser below, to avoid having two separate passes
-- over the Cmm.

fixAssignsTop :: RawCmmTop -> UniqSM RawCmmTop
fixAssignsTop top@(CmmData _ _) = returnUs top
fixAssignsTop (CmmProc info lbl params blocks) =
  mapUs fixAssignsBlock blocks `thenUs` \ blocks' ->
  returnUs (CmmProc info lbl params blocks')
fixAssignsBlock :: CmmBasicBlock -> UniqSM CmmBasicBlock
fixAssignsBlock (BasicBlock id stmts) =
  fixAssigns stmts `thenUs` \ stmts' ->
  returnUs (BasicBlock id stmts')

fixAssigns :: [CmmStmt] -> UniqSM [CmmStmt]
fixAssigns stmts =
  mapUs fixAssign stmts `thenUs` \ stmtss ->
  returnUs (concat stmtss)
fixAssign :: CmmStmt -> UniqSM [CmmStmt]
fixAssign (CmmAssign (CmmGlobal reg) src)
  | Left  realreg <- reg_or_addr
  = returnUs [CmmAssign (CmmGlobal reg) src]
  | Right baseRegAddr <- reg_or_addr
  = returnUs [CmmStore baseRegAddr src]
        -- Replace register leaves with appropriate StixTrees for
        -- the given target. GlobalRegs which map to a reg on this
        -- arch are left unchanged. Assigning to BaseReg is always
        -- illegal, so we check for that.
  where
        reg_or_addr = get_GlobalReg_reg_or_addr reg

fixAssign other_stmt = returnUs [other_stmt]
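
-- For example (schematically, reusing the "Hp ==> *(BaseReg + 34)"
-- illustration from the optimiser notes below): on an architecture where
-- Hp does not live in a machine register,
--      Hp = Hp + 8
-- has its left-hand side rewritten to a store,
--      *(BaseReg + 34) = Hp + 8
-- while the read of Hp on the right is left for cmmToCmm to fix up, per
-- the note above.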
-- -----------------------------------------------------------------------------
-- Generic Cmm optimiser

{-
Here we do:

  (a) Constant folding
  (b) Simple inlining: a temporary which is assigned to and then
      used, once, can be shorted.
  (c) Replacement of references to GlobalRegs which do not have
      machine registers by the appropriate memory load (eg.
      Hp ==> *(BaseReg + 34) ).
  (d) Position independent code and dynamic linking
        (i)  introduce the appropriate indirections
             and position independent refs
        (ii) compile a list of imported symbols

Ideas for other things we could do (ToDo):

  - shortcut jumps-to-jumps
  - eliminate dead code blocks
  - simple CSE: if an expr is assigned to a temp, then replace later occs of
    that expr with the temp, until the expr is no longer valid (can push through
    temp assignments, and certain assigns to mem...)
-}
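-- A couple of schematic examples of the above (made-up Cmm fragments):
--
--   (a)+(b)   x = 3 + 4; y = x      ==>   y = 7
--             (cmmMiniInline shorts the single-use temporary,
--              cmmMachOpFold does the constant folding)
--
--   (c)       Hp                    ==>   *(BaseReg + 34)
--             (when Hp does not live in a machine register)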
cmmToCmm :: DynFlags -> RawCmmTop -> (RawCmmTop, [CLabel])
cmmToCmm _ top@(CmmData _ _) = (top, [])
cmmToCmm dflags (CmmProc info lbl params blocks) = runCmmOpt dflags $ do
  blocks' <- mapM cmmBlockConFold (cmmMiniInline blocks)
  return $ CmmProc info lbl params blocks'
newtype CmmOptM a = CmmOptM (([CLabel], DynFlags) -> (# a, [CLabel] #))

instance Monad CmmOptM where
  return x = CmmOptM $ \(imports, _) -> (# x, imports #)
  (CmmOptM f) >>= g =
    CmmOptM $ \(imports, dflags) ->
      case f (imports, dflags) of
        (# x, imports' #) ->
          case g x of
            CmmOptM g' -> g' (imports', dflags)

addImportCmmOpt :: CLabel -> CmmOptM ()
addImportCmmOpt lbl = CmmOptM $ \(imports, dflags) -> (# (), lbl:imports #)

getDynFlagsCmmOpt :: CmmOptM DynFlags
getDynFlagsCmmOpt = CmmOptM $ \(imports, dflags) -> (# dflags, imports #)

runCmmOpt :: DynFlags -> CmmOptM a -> (a, [CLabel])
runCmmOpt dflags (CmmOptM f) = case f ([], dflags) of
                                 (# result, imports #) -> (result, imports)
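
-- Usage sketch (illustrative only): a CmmOptM action can read the DynFlags
-- and record imported labels as a side effect; runCmmOpt returns the
-- result together with everything added via addImportCmmOpt, e.g.
--
--   runCmmOpt dflags (addImportCmmOpt lbl >> getDynFlagsCmmOpt)
--     == (dflags, [lbl])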
cmmBlockConFold :: CmmBasicBlock -> CmmOptM CmmBasicBlock
cmmBlockConFold (BasicBlock id stmts) = do
  stmts' <- mapM cmmStmtConFold stmts
  return $ BasicBlock id stmts'
cmmStmtConFold :: CmmStmt -> CmmOptM CmmStmt
cmmStmtConFold stmt
   = case stmt of
        CmmAssign reg src
           -> do src' <- cmmExprConFold DataReference src
                 return $ case src' of
                   CmmReg reg' | reg == reg' -> CmmNop
                   new_src -> CmmAssign reg new_src

        CmmStore addr src
           -> do addr' <- cmmExprConFold DataReference addr
                 src'  <- cmmExprConFold DataReference src
                 return $ CmmStore addr' src'

        CmmJump addr regs
           -> do addr' <- cmmExprConFold JumpReference addr
                 return $ CmmJump addr' regs

        CmmCall target regs args srt
           -> do target' <- case target of
                              CmmCallee e conv -> do
                                e' <- cmmExprConFold CallReference e
                                return $ CmmCallee e' conv
                              other -> return other
                 args' <- mapM (\(arg, hint) -> do
                                  arg' <- cmmExprConFold DataReference arg
                                  return (arg', hint)) args
                 return $ CmmCall target' regs args' srt

        CmmCondBranch test dest
           -> do test' <- cmmExprConFold DataReference test
                 return $ case test' of
                   CmmLit (CmmInt 0 _) ->
                     CmmComment (mkFastString ("deleted: " ++
                                        showSDoc (pprStmt stmt)))

                   CmmLit (CmmInt n _) -> CmmBranch dest
                   other -> CmmCondBranch test' dest

        CmmSwitch expr ids
           -> do expr' <- cmmExprConFold DataReference expr
                 return $ CmmSwitch expr' ids

        other
           -> return other
cmmExprConFold referenceKind expr
   = case expr of
        CmmLoad addr rep
           -> do addr' <- cmmExprConFold DataReference addr
                 return $ CmmLoad addr' rep

        CmmMachOp mop args
           -- For MachOps, we first optimize the children, and then we try
           -- our hand at some constant-folding.
           -> do args' <- mapM (cmmExprConFold DataReference) args
                 return $ cmmMachOpFold mop args'

        CmmLit (CmmLabel lbl)
           -> do
                dflags <- getDynFlagsCmmOpt
                cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
        CmmLit (CmmLabelOff lbl off)
           -> do
                dflags <- getDynFlagsCmmOpt
                dynRef <- cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
                return $ cmmMachOpFold (MO_Add wordRep) [
                    dynRef,
                    (CmmLit $ CmmInt (fromIntegral off) wordRep)
                  ]
#if powerpc_TARGET_ARCH
        -- On powerpc (non-PIC), it's easier to jump directly to a label than
        -- to use the register table, so we replace these registers
        -- with the corresponding labels:
        CmmReg (CmmGlobal GCEnter1)
          | not opt_PIC
          -> cmmExprConFold referenceKind $
             CmmLit (CmmLabel (mkRtsCodeLabel SLIT("__stg_gc_enter_1")))
        CmmReg (CmmGlobal GCFun)
          | not opt_PIC
          -> cmmExprConFold referenceKind $
             CmmLit (CmmLabel (mkRtsCodeLabel SLIT("__stg_gc_fun")))
#endif
        CmmReg (CmmGlobal mid)
           -- Replace register leaves with appropriate StixTrees for
           -- the given target. MagicIds which map to a reg on this
           -- arch are left unchanged. For the rest, BaseReg is taken
           -- to mean the address of the reg table in MainCapability,
           -- and for all others we generate an indirection to its
           -- location in the register table.
           -> case get_GlobalReg_reg_or_addr mid of
                Left  realreg -> return expr
                Right baseRegAddr
                   -> case mid of
                        BaseReg -> cmmExprConFold DataReference baseRegAddr
                        other   -> cmmExprConFold DataReference
                                       (CmmLoad baseRegAddr (globalRegRep mid))
        -- eliminate zero offsets
        CmmRegOff reg 0
           -> cmmExprConFold referenceKind (CmmReg reg)

        CmmRegOff (CmmGlobal mid) offset
           -- RegOff leaves are just a shorthand form. If the reg maps
           -- to a real reg, we keep the shorthand, otherwise, we just
           -- expand it and defer to the above code.
           -> case get_GlobalReg_reg_or_addr mid of
                Left  realreg -> return expr
                Right baseRegAddr
                   -> cmmExprConFold DataReference (CmmMachOp (MO_Add wordRep) [
                          CmmReg (CmmGlobal mid),
                          CmmLit (CmmInt (fromIntegral offset) wordRep)])

        other
           -> return other

-- -----------------------------------------------------------------------------