1 -----------------------------------------------------------------------------
3 -- Generating machine code (instruction selection)
5 -- (c) The University of Glasgow 1996-2004
7 -----------------------------------------------------------------------------
9 -- This is a big module, but, if you pay attention to
10 -- (a) the sectioning, (b) the type signatures, and
11 -- (c) the #if blah_TARGET_ARCH things, the
12 -- structure should not be too overwhelming.
14 module MachCodeGen ( cmmTopCodeGen, InstrBlock ) where
16 #include "HsVersions.h"
17 #include "nativeGen/NCG.h"
24 import PositionIndependentCode ( cmmMakeDynamicReference, initializePicBase )
26 -- Our intermediate code:
27 import PprCmm ( pprExpr )
33 import StaticFlags ( opt_PIC )
34 import ForeignCall ( CCallConv(..) )
38 import qualified Outputable
40 import FastTypes ( isFastTrue )
41 import Constants ( wORD_SIZE )
44 import Outputable ( assertPanic )
45 import TRACE ( trace )
48 import Control.Monad ( mapAndUnzipM )
49 import Maybe ( fromJust )
53 -- -----------------------------------------------------------------------------
54 -- Top-level of the instruction selector
56 -- | 'InstrBlock's are the insn sequences generated by the insn selectors.
57 -- They are really trees of insns to facilitate fast appending, where a
58 -- left-to-right traversal (pre-order?) yields the insns in the correct order.
-- An InstrBlock is an OrdList of instructions: a tree structure
-- supporting cheap append (see the comment above), flattened
-- left-to-right to recover the final instruction sequence.
61 type InstrBlock = OrdList Instr
63 cmmTopCodeGen :: CmmTop -> NatM [NatCmmTop]
-- Code-gen one top-level Cmm entity.  For a procedure, generate
-- native code for every basic block, then, if a PIC base register is
-- in use on this platform, insert its initialization.
64 cmmTopCodeGen (CmmProc info lab params blocks) = do
65 (nat_blocks,statics) <- mapAndUnzipM basicBlockCodeGen blocks
66 picBaseMb <- getPicBaseMaybeNat
67 let proc = CmmProc info lab params (concat nat_blocks)
68 tops = proc : concat statics
-- NOTE(review): the 'case picBaseMb of' scrutinee line appears to be
-- elided from this extract; these are its two alternatives.
70 Just picBase -> initializePicBase picBase tops
71 Nothing -> return tops
-- Data sections pass through untranslated: CmmStatic is used as-is.
73 cmmTopCodeGen (CmmData sec dat) = do
74 return [CmmData sec dat] -- no translation, we just use CmmStatic
76 basicBlockCodeGen :: CmmBasicBlock -> NatM ([NatBasicBlock],[NatCmmTop])
-- Generate code for one basic block, then re-split the resulting
-- instruction stream wherever code generation introduced a new block
-- boundary (NEWBLOCK) or an out-of-line data chunk (LDATA).
77 basicBlockCodeGen (BasicBlock id stmts) = do
78 instrs <- stmtsToInstrs stmts
79 -- code generation may introduce new basic block boundaries, which
80 -- are indicated by the NEWBLOCK instruction. We must split up the
81 -- instruction stream into basic blocks again. Also, we extract
-- NOTE(review): the tail of the comment above and the keyword
-- introducing the bindings below appear to be elided from this
-- extract.
84 (top,other_blocks,statics) = foldrOL mkBlocks ([],[],[]) instrs
-- Right fold over the OrdList, so instructions are consumed
-- back-to-front; the first tuple component accumulates the
-- instructions of the block currently being built.
86 mkBlocks (NEWBLOCK id) (instrs,blocks,statics)
87 = ([], BasicBlock id instrs : blocks, statics)
88 mkBlocks (LDATA sec dat) (instrs,blocks,statics)
89 = (instrs, blocks, CmmData sec dat:statics)
90 mkBlocks instr (instrs,blocks,statics)
91 = (instr:instrs, blocks, statics)
93 return (BasicBlock id top : other_blocks, statics)
95 stmtsToInstrs :: [CmmStmt] -> NatM InstrBlock
-- Concatenate the code generated for each statement, in order.
-- NOTE(review): the equation head ('stmtsToInstrs stmts') appears to
-- be elided from this extract.
97 = do instrss <- mapM stmtToInstrs stmts
98 return (concatOL instrss)
100 stmtToInstrs :: CmmStmt -> NatM InstrBlock
-- Translate one Cmm statement into native instructions.
101 stmtToInstrs stmt = case stmt of
102 CmmNop -> return nilOL
103 CmmComment s -> return (unitOL (COMMENT s))
-- Register assignment: dispatch on the rep of the target register
-- (float / 64-bit-on-32-bit / plain integer).
-- NOTE(review): the 'CmmAssign reg src' case head appears to be
-- elided from this extract; these are its guards.
106 | isFloatingRep kind -> assignReg_FltCode kind reg src
107 #if WORD_SIZE_IN_BITS==32
108 | kind == I64 -> assignReg_I64Code reg src
110 | otherwise -> assignReg_IntCode kind reg src
111 where kind = cmmRegRep reg
-- Memory store: same dispatch, keyed on the rep of the source value.
-- NOTE(review): the 'CmmStore addr src' case head appears to be
-- elided from this extract; these are its guards.
114 | isFloatingRep kind -> assignMem_FltCode kind addr src
115 #if WORD_SIZE_IN_BITS==32
116 | kind == I64 -> assignMem_I64Code addr src
118 | otherwise -> assignMem_IntCode kind addr src
119 where kind = cmmExprRep src
121 CmmCall target result_regs args vols
122 -> genCCall target result_regs args vols
124 CmmBranch id -> genBranch id
125 CmmCondBranch arg id -> genCondJump id arg
126 CmmSwitch arg ids -> genSwitch arg ids
-- 'params' is unused here: only the jump target matters at this stage.
127 CmmJump arg params -> genJump arg
129 -- -----------------------------------------------------------------------------
130 -- General things for putting together code sequences
132 -- Expand CmmRegOff. ToDo: should we do it this way around, or convert
133 -- CmmExprs into CmmRegOff?
-- | Rewrite a 'CmmRegOff' node as the equivalent explicit addition,
-- @reg + off@, performed at the register's machine representation.
mangleIndexTree :: CmmExpr -> CmmExpr
mangleIndexTree (CmmRegOff reg off) =
    let rep = cmmRegRep reg
    in  CmmMachOp (MO_Add rep)
                  [CmmReg reg, CmmLit (CmmInt (fromIntegral off) rep)]
139 -- -----------------------------------------------------------------------------
140 -- Code gen for 64-bit arithmetic on 32-bit platforms
143 Simple support for generating 64-bit code (ie, 64 bit values and 64
144 bit assignments) on 32-bit platforms. Unlike the main code generator
145 we merely shoot for generating working code as simply as possible, and
146 pay little attention to code quality. Specifically, there is no
147 attempt to deal cleverly with the fixed-vs-floating register
148 distinction; all values are generated into (pairs of) floating
149 registers, even if this would mean some redundant reg-reg moves as a
150 result. Only one of the VRegUniques is returned, since it will be
151 of the VRegUniqueLo form, and the upper-half VReg can be determined
152 by applying getHiVRegFromLo to it.
155 data ChildCode64 -- a.k.a "Register64"
-- NOTE(review): the constructor line (carrying the InstrBlock of
-- generated code as its first field) appears to be elided from this
-- extract; the Reg field below is its second field.
158 Reg -- the lower 32-bit temporary which contains the
159 -- result; use getHiVRegFromLo to find the other
160 -- VRegUnique. Rules of this simplified insn
161 -- selection game are therefore that the returned
162 -- Reg may be modified
164 #if WORD_SIZE_IN_BITS==32
165 assignMem_I64Code :: CmmExpr -> CmmExpr -> NatM InstrBlock
166 assignReg_I64Code :: CmmReg -> CmmExpr -> NatM InstrBlock
169 #ifndef x86_64_TARGET_ARCH
170 iselExpr64 :: CmmExpr -> NatM ChildCode64
173 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-- x86: store a 64-bit value on a 32-bit target -- evaluate it into a
-- low/high register pair, then emit two 32-bit stores.
177 assignMem_I64Code addrTree valueTree = do
178 Amode addr addr_code <- getAmode addrTree
179 ChildCode64 vcode rlo <- iselExpr64 valueTree
-- rhi is the implicit partner of rlo (see ChildCode64 above).
-- NOTE(review): the 'let' introducing these bindings appears to be
-- elided from this extract.
181 rhi = getHiVRegFromLo rlo
183 -- Little-endian store
184 mov_lo = MOV I32 (OpReg rlo) (OpAddr addr)
-- fromJust: assumes the amode can always be offset by 4 bytes --
-- TODO(review) confirm addrOffset cannot fail for amodes built here.
185 mov_hi = MOV I32 (OpReg rhi) (OpAddr (fromJust (addrOffset addr 4)))
187 return (vcode `appOL` addr_code `snocOL` mov_lo `snocOL` mov_hi)
-- x86: assign a 64-bit value to a local register pair -- two
-- reg-to-reg moves, low half then high half.
190 assignReg_I64Code (CmmLocal (LocalReg u_dst pk)) valueTree = do
191 ChildCode64 vcode r_src_lo <- iselExpr64 valueTree
-- NOTE(review): the 'let' introducing these bindings appears to be
-- elided from this extract.
193 r_dst_lo = mkVReg u_dst I32
194 r_dst_hi = getHiVRegFromLo r_dst_lo
195 r_src_hi = getHiVRegFromLo r_src_lo
196 mov_lo = MOV I32 (OpReg r_src_lo) (OpReg r_dst_lo)
197 mov_hi = MOV I32 (OpReg r_src_hi) (OpReg r_dst_hi)
200 vcode `snocOL` mov_lo `snocOL` mov_hi
-- Only local registers can hold an I64 on a 32-bit target.
203 assignReg_I64Code lvalue valueTree
204 = panic "assignReg_I64Code(i386): invalid lvalue"
-- x86: 64-bit literal -- materialize the two 32-bit halves with
-- immediate moves.  r is the low word (truncation to Word32),
-- q the high word (shift right by 32 then truncate).
208 iselExpr64 (CmmLit (CmmInt i _)) = do
209 (rlo,rhi) <- getNewRegPairNat I32
-- NOTE(review): the 'let' and the 'code = toOL [' lines appear to be
-- elided from this extract.
211 r = fromIntegral (fromIntegral i :: Word32)
212 q = fromIntegral ((fromIntegral i `shiftR` 32) :: Word32)
214 MOV I32 (OpImm (ImmInteger r)) (OpReg rlo),
215 MOV I32 (OpImm (ImmInteger q)) (OpReg rhi)
218 return (ChildCode64 code rlo)
-- x86: 64-bit load -- two 32-bit loads from addr and addr+4.
220 iselExpr64 (CmmLoad addrTree I64) = do
221 Amode addr addr_code <- getAmode addrTree
222 (rlo,rhi) <- getNewRegPairNat I32
-- NOTE(review): the 'let' introducing these bindings appears to be
-- elided from this extract.
224 mov_lo = MOV I32 (OpAddr addr) (OpReg rlo)
-- fromJust: assumes the amode can be offset by 4 -- TODO(review)
-- confirm, as in assignMem_I64Code above.
225 mov_hi = MOV I32 (OpAddr (fromJust (addrOffset addr 4))) (OpReg rhi)
228 ChildCode64 (addr_code `snocOL` mov_lo `snocOL` mov_hi)
-- A local I64 register needs no code: just name its low half; the
-- high half is recoverable via getHiVRegFromLo.
232 iselExpr64 (CmmReg (CmmLocal (LocalReg vu I64)))
233 = return (ChildCode64 nilOL (mkVReg vu I32))
235 -- we handle addition, but rather badly
-- x86: add a 64-bit literal -- ADD the low words, then ADC
-- (add-with-carry) the high words so the carry propagates.
236 iselExpr64 (CmmMachOp (MO_Add _) [e1, CmmLit (CmmInt i _)]) = do
237 ChildCode64 code1 r1lo <- iselExpr64 e1
238 (rlo,rhi) <- getNewRegPairNat I32
-- NOTE(review): the 'let' introducing these bindings appears to be
-- elided from this extract.
240 r = fromIntegral (fromIntegral i :: Word32)
241 q = fromIntegral ((fromIntegral i `shiftR` 32) :: Word32)
242 r1hi = getHiVRegFromLo r1lo
244 toOL [ MOV I32 (OpReg r1lo) (OpReg rlo),
245 ADD I32 (OpImm (ImmInteger r)) (OpReg rlo),
246 MOV I32 (OpReg r1hi) (OpReg rhi),
247 ADC I32 (OpImm (ImmInteger q)) (OpReg rhi) ]
249 return (ChildCode64 code rlo)
-- x86: general 64-bit add -- ADD the low words, ADC the high words.
251 iselExpr64 (CmmMachOp (MO_Add _) [e1,e2]) = do
252 ChildCode64 code1 r1lo <- iselExpr64 e1
253 ChildCode64 code2 r2lo <- iselExpr64 e2
254 (rlo,rhi) <- getNewRegPairNat I32
-- NOTE(review): the 'let' introducing these bindings appears to be
-- elided from this extract.
256 r1hi = getHiVRegFromLo r1lo
257 r2hi = getHiVRegFromLo r2lo
260 toOL [ MOV I32 (OpReg r1lo) (OpReg rlo),
261 ADD I32 (OpReg r2lo) (OpReg rlo),
262 MOV I32 (OpReg r1hi) (OpReg rhi),
263 ADC I32 (OpReg r2hi) (OpReg rhi) ]
265 return (ChildCode64 code rlo)
-- Catch-all: any other 64-bit expression is unimplemented.
-- NOTE(review): the equation head ('iselExpr64 expr') appears to be
-- elided from this extract.
268 = pprPanic "iselExpr64(i386)" (ppr expr)
270 #endif /* i386_TARGET_ARCH */
272 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
274 #if sparc_TARGET_ARCH
-- SPARC: store a 64-bit value as two 32-bit stores, with the
-- most-significant word at offset 0 and the low word at offset 4.
-- NOTE(review): this section uses pre-Cmm (Stix-era) helpers such as
-- registerCode/registerName -- it looks bitrotted relative to the
-- i386/powerpc versions; confirm before resurrecting.
276 assignMem_I64Code addrTree valueTree
277 = iselExpr64 valueTree `thenNat` \ (ChildCode64 vcode vrlo) ->
278 getRegister addrTree `thenNat` \ register_addr ->
279 getNewRegNat IntRep `thenNat` \ t_addr ->
280 let rlo = VirtualRegI vrlo
281 rhi = getHiVRegFromLo rlo
282 code_addr = registerCode register_addr t_addr
283 reg_addr = registerName register_addr t_addr
285 mov_hi = ST W rhi (AddrRegImm reg_addr (ImmInt 0))
286 mov_lo = ST W rlo (AddrRegImm reg_addr (ImmInt 4))
288 return (vcode `appOL` code_addr `snocOL` mov_hi `snocOL` mov_lo)
-- SPARC: assign a 64-bit value to a register pair via two synthetic
-- moves (OR with the zero register %g0).
-- NOTE(review): still pattern-matches the old Stix form
-- (StixTemp/StixVReg) -- bitrotted relative to the Cmm-based
-- i386/powerpc versions above; confirm before use.
291 assignReg_I64Code (StixTemp (StixVReg u_dst pk)) valueTree
292 = iselExpr64 valueTree `thenNat` \ (ChildCode64 vcode vr_src_lo) ->
294 r_dst_lo = mkVReg u_dst IntRep
295 r_src_lo = VirtualRegI vr_src_lo
296 r_dst_hi = getHiVRegFromLo r_dst_lo
297 r_src_hi = getHiVRegFromLo r_src_lo
298 mov_lo = mkMOV r_src_lo r_dst_lo
299 mov_hi = mkMOV r_src_hi r_dst_hi
-- a register-to-register move is OR src with %g0
300 mkMOV sreg dreg = OR False g0 (RIReg sreg) dreg
303 vcode `snocOL` mov_hi `snocOL` mov_lo
305 assignReg_I64Code lvalue valueTree
306 = pprPanic "assignReg_I64Code(sparc): invalid lvalue"
310 -- Don't delete this -- it's very handy for debugging.
312 -- | trace ("iselExpr64: " ++ showSDoc (pprCmmExpr expr)) False
313 -- = panic "iselExpr64(???)"
-- SPARC: load a 64-bit value -- two 32-bit loads, most-significant
-- word at offset 0 and low word at offset 4.
--
-- BUG FIX: the pattern was written @CmmLoad I64 addrTree@, but
-- 'CmmLoad' takes the address expression first and the rep second
-- (cf. the i386 and powerpc versions of this function), so the
-- equation could never match.  Corrected to @CmmLoad addrTree I64@.
iselExpr64 (CmmLoad addrTree I64)
  = getRegister addrTree              `thenNat` \ register_addr ->
    getNewRegNat IntRep               `thenNat` \ t_addr ->
    getNewRegNat IntRep               `thenNat` \ rlo ->
    let rhi       = getHiVRegFromLo rlo
        code_addr = registerCode register_addr t_addr
        reg_addr  = registerName register_addr t_addr
        mov_hi    = LD W (AddrRegImm reg_addr (ImmInt 0)) rhi
        mov_lo    = LD W (AddrRegImm reg_addr (ImmInt 4)) rlo
    in
    return (
       ChildCode64 (code_addr `snocOL` mov_hi `snocOL` mov_lo)
                   (getVRegUnique rlo)
    )
-- SPARC: read a local 64-bit register -- copy both halves of the
-- source virtual-register pair into a fresh destination pair using
-- synthetic moves (OR with the zero register %g0).
--
-- BUG FIX: the body referred to 'vu', which is not bound anywhere in
-- this equation; the pattern binds the register's unique as 'uq'.
-- Renamed so the equation compiles.
iselExpr64 (CmmReg (CmmLocal (LocalReg uq I64)))
  = getNewRegNat IntRep               `thenNat` \ r_dst_lo ->
    let r_dst_hi = getHiVRegFromLo r_dst_lo
        r_src_lo = mkVReg uq IntRep
        r_src_hi = getHiVRegFromLo r_src_lo
        mov_lo   = mkMOV r_src_lo r_dst_lo
        mov_hi   = mkMOV r_src_hi r_dst_hi
        -- a register-to-register move is OR src with %g0
        mkMOV sreg dreg = OR False g0 (RIReg sreg) dreg
    in
    return (
       ChildCode64 (toOL [mov_hi, mov_lo]) (getVRegUnique r_dst_lo)
    )
-- SPARC: 64-bit result of a C call -- copy %o0 into the low half and
-- %o1 into the high half of a fresh register pair.
-- NOTE(review): still uses the old Stix form (StCall), and passes
-- 'kind', which is not bound by this pattern -- bitrotted; confirm
-- before resurrecting this code path.
343 iselExpr64 (StCall fn cconv I64 args)
344 = genCCall fn cconv kind args `thenNat` \ call ->
345 getNewRegNat IntRep `thenNat` \ r_dst_lo ->
346 let r_dst_hi = getHiVRegFromLo r_dst_lo
347 mov_lo = mkMOV o0 r_dst_lo
348 mov_hi = mkMOV o1 r_dst_hi
-- a register-to-register move is OR src with %g0
349 mkMOV sreg dreg = OR False g0 (RIReg sreg) dreg
352 ChildCode64 (call `snocOL` mov_hi `snocOL` mov_lo)
353 (getVRegUnique r_dst_lo)
-- NOTE(review): the catch-all equation head ('iselExpr64 expr')
-- appears to be elided from this extract.
357 = pprPanic "iselExpr64(sparc)" (pprCmmExpr expr)
359 #endif /* sparc_TARGET_ARCH */
359 #endif /* sparc_TARGET_ARCH */
361 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
363 #if powerpc_TARGET_ARCH
365 getI64Amodes :: CmmExpr -> NatM (AddrMode, AddrMode, InstrBlock)
-- PowerPC: compute addressing modes for both halves of a 64-bit
-- memory operand -- the word at the base address and the word 4
-- bytes on.  If the amode cannot be offset by 4, fall back to
-- loading the address into a register and using reg+0 / reg+4.
366 getI64Amodes addrTree = do
367 Amode hi_addr addr_code <- getAmode addrTree
368 case addrOffset hi_addr 4 of
369 Just lo_addr -> return (hi_addr, lo_addr, addr_code)
370 Nothing -> do (hi_ptr, code) <- getSomeReg addrTree
371 return (AddrRegImm hi_ptr (ImmInt 0),
372 AddrRegImm hi_ptr (ImmInt 4),
-- NOTE(review): the closing of this tuple (presumably 'code)')
-- appears to be elided from this extract.
-- PowerPC: store a 64-bit value -- high word at the base address,
-- low word 4 bytes on.
375 assignMem_I64Code addrTree valueTree = do
376 (hi_addr, lo_addr, addr_code) <- getI64Amodes addrTree
377 ChildCode64 vcode rlo <- iselExpr64 valueTree
-- NOTE(review): the 'let' introducing these bindings appears to be
-- elided from this extract.
379 rhi = getHiVRegFromLo rlo
382 mov_hi = ST I32 rhi hi_addr
383 mov_lo = ST I32 rlo lo_addr
385 return (vcode `appOL` addr_code `snocOL` mov_lo `snocOL` mov_hi)
-- PowerPC: assign a 64-bit value to a local register pair using two
-- MR (move-register) instructions.
387 assignReg_I64Code (CmmLocal (LocalReg u_dst pk)) valueTree = do
388 ChildCode64 vcode r_src_lo <- iselExpr64 valueTree
-- NOTE(review): the 'let' introducing these bindings appears to be
-- elided from this extract.
390 r_dst_lo = mkVReg u_dst I32
391 r_dst_hi = getHiVRegFromLo r_dst_lo
392 r_src_hi = getHiVRegFromLo r_src_lo
393 mov_lo = MR r_dst_lo r_src_lo
394 mov_hi = MR r_dst_hi r_src_hi
397 vcode `snocOL` mov_lo `snocOL` mov_hi
-- Only local registers can hold an I64 on a 32-bit target.
400 assignReg_I64Code lvalue valueTree
401 = panic "assignReg_I64Code(powerpc): invalid lvalue"
404 -- Don't delete this -- it's very handy for debugging.
406 -- | trace ("iselExpr64: " ++ showSDoc (pprCmmExpr expr)) False
407 -- = panic "iselExpr64(???)"
-- PowerPC: 64-bit load as two 32-bit loads (high word at the base
-- address, low word 4 bytes on).
409 iselExpr64 (CmmLoad addrTree I64) = do
410 (hi_addr, lo_addr, addr_code) <- getI64Amodes addrTree
411 (rlo, rhi) <- getNewRegPairNat I32
412 let mov_hi = LD I32 rhi hi_addr
413 mov_lo = LD I32 rlo lo_addr
414 return $ ChildCode64 (addr_code `snocOL` mov_lo `snocOL` mov_hi)
-- NOTE(review): the second ChildCode64 argument (presumably 'rlo')
-- appears to be elided from this extract.
-- A local I64 register needs no code: just name its low half.
417 iselExpr64 (CmmReg (CmmLocal (LocalReg vu I64)))
418 = return (ChildCode64 nilOL (mkVReg vu I32))
-- PowerPC: materialize a 64-bit literal.  Each 32-bit half is built
-- with LIS (load the 16-bit immediate into the upper half-word,
-- shifted) followed by OR-ing in the lower 16 bits.
--
-- BUG FIX: the final instruction read @OR rlo rlo ... half2@, which
-- redundantly clobbered the low word and left rhi holding only the
-- LIS'd half3 with zeroes in its lower 16 bits; half2 belongs in the
-- high register, so it must be @OR rhi rhi ... half2@.
iselExpr64 (CmmLit (CmmInt i _)) = do
  (rlo,rhi) <- getNewRegPairNat I32
  let
        half0 = fromIntegral (fromIntegral i :: Word16)
        half1 = fromIntegral ((fromIntegral i `shiftR` 16) :: Word16)
        half2 = fromIntegral ((fromIntegral i `shiftR` 32) :: Word16)
        half3 = fromIntegral ((fromIntegral i `shiftR` 48) :: Word16)
        code = toOL [
                LIS rlo (ImmInt half1),
                OR rlo rlo (RIImm $ ImmInt half0),
                LIS rhi (ImmInt half3),
                OR rhi rhi (RIImm $ ImmInt half2)
                ]
  return (ChildCode64 code rlo)
-- PowerPC: general 64-bit add using carry-propagating adds (ADDC
-- adds the low words and sets the carry).
437 iselExpr64 (CmmMachOp (MO_Add _) [e1,e2]) = do
438 ChildCode64 code1 r1lo <- iselExpr64 e1
439 ChildCode64 code2 r2lo <- iselExpr64 e2
440 (rlo,rhi) <- getNewRegPairNat I32
-- NOTE(review): the 'let' introducing these bindings appears to be
-- elided from this extract.
442 r1hi = getHiVRegFromLo r1lo
443 r2hi = getHiVRegFromLo r2lo
446 toOL [ ADDC rlo r1lo r2lo,
-- NOTE(review): the high-word add-with-carry (presumably
-- ADDE rhi r1hi r2hi) and the closing of this list appear to be
-- elided from this extract.
449 return (ChildCode64 code rlo)
-- NOTE(review): the catch-all equation head ('iselExpr64 expr')
-- appears to be elided from this extract.
452 = pprPanic "iselExpr64(powerpc)" (ppr expr)
454 #endif /* powerpc_TARGET_ARCH */
454 #endif /* powerpc_TARGET_ARCH */
457 -- -----------------------------------------------------------------------------
458 -- The 'Register' type
460 -- 'Register's passed up the tree. If the stix code forces the register
461 -- to live in a pre-decided machine register, it comes out as @Fixed@;
462 -- otherwise, it comes out as @Any@, and the parent can decide which
463 -- register to put it in.
466 = Fixed MachRep Reg InstrBlock
467 | Any MachRep (Reg -> InstrBlock)
-- | Replace the 'MachRep' carried by a 'Register' without touching
-- the register itself or the code that computes it.
swizzleRegisterRep :: Register -> MachRep -> Register
swizzleRegisterRep r rep = case r of
    Fixed _ reg code -> Fixed rep reg code
    Any _ codefn     -> Any rep codefn
474 -- -----------------------------------------------------------------------------
475 -- Utils based on getRegister, below
477 -- The dual to getAnyReg: compute an expression into a register, but
478 -- we don't mind which one it is.
479 getSomeReg :: CmmExpr -> NatM (Reg, InstrBlock)
-- NOTE(review): the equation head and the 'Fixed' alternative of the
-- case appear to be elided from this extract; what remains is the
-- 'Any' path, which allocates a fresh register and instantiates the
-- code function with it.
481 r <- getRegister expr
484 tmp <- getNewRegNat rep
485 return (tmp, code tmp)
489 -- -----------------------------------------------------------------------------
490 -- Grab the Reg for a CmmReg
492 getRegisterReg :: CmmReg -> Reg
-- Map a CmmReg directly to a machine Reg; no code is generated.
-- NOTE(review): the right-hand side of the local-register case
-- (presumably '= mkVReg u pk') appears to be elided from this
-- extract.
494 getRegisterReg (CmmLocal (LocalReg u pk))
-- Global registers must map to a real machine register on this
-- platform; anything else is a bug at this stage (see below).
497 getRegisterReg (CmmGlobal mid)
498 = case get_GlobalReg_reg_or_addr mid of
499 Left (RealReg rrno) -> RealReg rrno
500 _other -> pprPanic "getRegisterReg-memory" (ppr $ CmmGlobal mid)
501 -- By this stage, the only MagicIds remaining should be the
502 -- ones which map to a real machine register on this
503 -- platform. Hence ...
506 -- -----------------------------------------------------------------------------
507 -- Generate code to get a subtree into a Register
509 -- Don't delete this -- it's very handy for debugging.
511 -- | trace ("getRegister: " ++ showSDoc (pprCmmExpr expr)) False
512 -- = panic "getRegister(???)"
514 getRegister :: CmmExpr -> NatM Register
-- Machine-independent equations come first; the architecture-
-- specific cases follow inside the #if sections below.
516 getRegister (CmmReg reg)
517 = return (Fixed (cmmRegRep reg) (getRegisterReg reg) nilOL)
-- CmmRegOff is normalised to an explicit addition first.
519 getRegister tree@(CmmRegOff _ _)
520 = getRegister (mangleIndexTree tree)
522 getRegister CmmPicBaseReg
-- NOTE(review): the '= do' line appears to be elided from this
-- extract.
524 reg <- getPicBaseNat wordRep
525 return (Fixed wordRep reg nilOL)
527 -- end of machine-"independent" bit; here we go on the rest...
529 #if alpha_TARGET_ARCH
531 getRegister (StDouble d)
532 = getBlockIdNat `thenNat` \ lbl ->
533 getNewRegNat PtrRep `thenNat` \ tmp ->
534 let code dst = mkSeqInstrs [
535 LDATA RoDataSegment lbl [
536 DATA TF [ImmLab (rational d)]
538 LDA tmp (AddrImm (ImmCLbl lbl)),
539 LD TF dst (AddrReg tmp)]
541 return (Any F64 code)
543 getRegister (StPrim primop [x]) -- unary PrimOps
545 IntNegOp -> trivialUCode (NEG Q False) x
547 NotOp -> trivialUCode NOT x
549 FloatNegOp -> trivialUFCode FloatRep (FNEG TF) x
550 DoubleNegOp -> trivialUFCode F64 (FNEG TF) x
552 OrdOp -> coerceIntCode IntRep x
555 Float2IntOp -> coerceFP2Int x
556 Int2FloatOp -> coerceInt2FP pr x
557 Double2IntOp -> coerceFP2Int x
558 Int2DoubleOp -> coerceInt2FP pr x
560 Double2FloatOp -> coerceFltCode x
561 Float2DoubleOp -> coerceFltCode x
563 other_op -> getRegister (StCall fn CCallConv F64 [x])
565 fn = case other_op of
566 FloatExpOp -> FSLIT("exp")
567 FloatLogOp -> FSLIT("log")
568 FloatSqrtOp -> FSLIT("sqrt")
569 FloatSinOp -> FSLIT("sin")
570 FloatCosOp -> FSLIT("cos")
571 FloatTanOp -> FSLIT("tan")
572 FloatAsinOp -> FSLIT("asin")
573 FloatAcosOp -> FSLIT("acos")
574 FloatAtanOp -> FSLIT("atan")
575 FloatSinhOp -> FSLIT("sinh")
576 FloatCoshOp -> FSLIT("cosh")
577 FloatTanhOp -> FSLIT("tanh")
578 DoubleExpOp -> FSLIT("exp")
579 DoubleLogOp -> FSLIT("log")
580 DoubleSqrtOp -> FSLIT("sqrt")
581 DoubleSinOp -> FSLIT("sin")
582 DoubleCosOp -> FSLIT("cos")
583 DoubleTanOp -> FSLIT("tan")
584 DoubleAsinOp -> FSLIT("asin")
585 DoubleAcosOp -> FSLIT("acos")
586 DoubleAtanOp -> FSLIT("atan")
587 DoubleSinhOp -> FSLIT("sinh")
588 DoubleCoshOp -> FSLIT("cosh")
589 DoubleTanhOp -> FSLIT("tanh")
591 pr = panic "MachCode.getRegister: no primrep needed for Alpha"
593 getRegister (StPrim primop [x, y]) -- dyadic PrimOps
595 CharGtOp -> trivialCode (CMP LTT) y x
596 CharGeOp -> trivialCode (CMP LE) y x
597 CharEqOp -> trivialCode (CMP EQQ) x y
598 CharNeOp -> int_NE_code x y
599 CharLtOp -> trivialCode (CMP LTT) x y
600 CharLeOp -> trivialCode (CMP LE) x y
602 IntGtOp -> trivialCode (CMP LTT) y x
603 IntGeOp -> trivialCode (CMP LE) y x
604 IntEqOp -> trivialCode (CMP EQQ) x y
605 IntNeOp -> int_NE_code x y
606 IntLtOp -> trivialCode (CMP LTT) x y
607 IntLeOp -> trivialCode (CMP LE) x y
609 WordGtOp -> trivialCode (CMP ULT) y x
610 WordGeOp -> trivialCode (CMP ULE) x y
611 WordEqOp -> trivialCode (CMP EQQ) x y
612 WordNeOp -> int_NE_code x y
613 WordLtOp -> trivialCode (CMP ULT) x y
614 WordLeOp -> trivialCode (CMP ULE) x y
616 AddrGtOp -> trivialCode (CMP ULT) y x
617 AddrGeOp -> trivialCode (CMP ULE) y x
618 AddrEqOp -> trivialCode (CMP EQQ) x y
619 AddrNeOp -> int_NE_code x y
620 AddrLtOp -> trivialCode (CMP ULT) x y
621 AddrLeOp -> trivialCode (CMP ULE) x y
623 FloatGtOp -> cmpF_code (FCMP TF LE) EQQ x y
624 FloatGeOp -> cmpF_code (FCMP TF LTT) EQQ x y
625 FloatEqOp -> cmpF_code (FCMP TF EQQ) NE x y
626 FloatNeOp -> cmpF_code (FCMP TF EQQ) EQQ x y
627 FloatLtOp -> cmpF_code (FCMP TF LTT) NE x y
628 FloatLeOp -> cmpF_code (FCMP TF LE) NE x y
630 DoubleGtOp -> cmpF_code (FCMP TF LE) EQQ x y
631 DoubleGeOp -> cmpF_code (FCMP TF LTT) EQQ x y
632 DoubleEqOp -> cmpF_code (FCMP TF EQQ) NE x y
633 DoubleNeOp -> cmpF_code (FCMP TF EQQ) EQQ x y
634 DoubleLtOp -> cmpF_code (FCMP TF LTT) NE x y
635 DoubleLeOp -> cmpF_code (FCMP TF LE) NE x y
637 IntAddOp -> trivialCode (ADD Q False) x y
638 IntSubOp -> trivialCode (SUB Q False) x y
639 IntMulOp -> trivialCode (MUL Q False) x y
640 IntQuotOp -> trivialCode (DIV Q False) x y
641 IntRemOp -> trivialCode (REM Q False) x y
643 WordAddOp -> trivialCode (ADD Q False) x y
644 WordSubOp -> trivialCode (SUB Q False) x y
645 WordMulOp -> trivialCode (MUL Q False) x y
646 WordQuotOp -> trivialCode (DIV Q True) x y
647 WordRemOp -> trivialCode (REM Q True) x y
649 FloatAddOp -> trivialFCode FloatRep (FADD TF) x y
650 FloatSubOp -> trivialFCode FloatRep (FSUB TF) x y
651 FloatMulOp -> trivialFCode FloatRep (FMUL TF) x y
652 FloatDivOp -> trivialFCode FloatRep (FDIV TF) x y
654 DoubleAddOp -> trivialFCode F64 (FADD TF) x y
655 DoubleSubOp -> trivialFCode F64 (FSUB TF) x y
656 DoubleMulOp -> trivialFCode F64 (FMUL TF) x y
657 DoubleDivOp -> trivialFCode F64 (FDIV TF) x y
659 AddrAddOp -> trivialCode (ADD Q False) x y
660 AddrSubOp -> trivialCode (SUB Q False) x y
661 AddrRemOp -> trivialCode (REM Q True) x y
663 AndOp -> trivialCode AND x y
664 OrOp -> trivialCode OR x y
665 XorOp -> trivialCode XOR x y
666 SllOp -> trivialCode SLL x y
667 SrlOp -> trivialCode SRL x y
669 ISllOp -> trivialCode SLL x y -- was: panic "AlphaGen:isll"
670 ISraOp -> trivialCode SRA x y -- was: panic "AlphaGen:isra"
671 ISrlOp -> trivialCode SRL x y -- was: panic "AlphaGen:isrl"
673 FloatPowerOp -> getRegister (StCall FSLIT("pow") CCallConv F64 [x,y])
674 DoublePowerOp -> getRegister (StCall FSLIT("pow") CCallConv F64 [x,y])
676 {- ------------------------------------------------------------
677 Some bizarre special code for getting condition codes into
678 registers. Integer non-equality is a test for equality
679 followed by an XOR with 1. (Integer comparisons always set
680 the result register to 0 or 1.) Floating point comparisons of
681 any kind leave the result in a floating point register, so we
682 need to wrangle an integer register out of things.
684 int_NE_code :: StixTree -> StixTree -> NatM Register
687 = trivialCode (CMP EQQ) x y `thenNat` \ register ->
688 getNewRegNat IntRep `thenNat` \ tmp ->
690 code = registerCode register tmp
691 src = registerName register tmp
692 code__2 dst = code . mkSeqInstr (XOR src (RIImm (ImmInt 1)) dst)
694 return (Any IntRep code__2)
696 {- ------------------------------------------------------------
697 Comments for int_NE_code also apply to cmpF_code
700 :: (Reg -> Reg -> Reg -> Instr)
702 -> StixTree -> StixTree
705 cmpF_code instr cond x y
706 = trivialFCode pr instr x y `thenNat` \ register ->
707 getNewRegNat F64 `thenNat` \ tmp ->
708 getBlockIdNat `thenNat` \ lbl ->
710 code = registerCode register tmp
711 result = registerName register tmp
713 code__2 dst = code . mkSeqInstrs [
714 OR zeroh (RIImm (ImmInt 1)) dst,
715 BF cond result (ImmCLbl lbl),
716 OR zeroh (RIReg zeroh) dst,
719 return (Any IntRep code__2)
721 pr = panic "trivialU?FCode: does not use PrimRep on Alpha"
722 ------------------------------------------------------------
724 getRegister (CmmLoad pk mem)
725 = getAmode mem `thenNat` \ amode ->
727 code = amodeCode amode
728 src = amodeAddr amode
729 size = primRepToSize pk
730 code__2 dst = code . mkSeqInstr (LD size dst src)
732 return (Any pk code__2)
734 getRegister (StInt i)
737 code dst = mkSeqInstr (OR zeroh (RIImm src) dst)
739 return (Any IntRep code)
742 code dst = mkSeqInstr (LDI Q dst src)
744 return (Any IntRep code)
746 src = ImmInt (fromInteger i)
751 code dst = mkSeqInstr (LDA dst (AddrImm imm__2))
753 return (Any PtrRep code)
756 imm__2 = case imm of Just x -> x
758 #endif /* alpha_TARGET_ARCH */
760 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
764 getRegister (CmmLit (CmmFloat f F32)) = do
765 lbl <- getNewLabelNat
766 let code dst = toOL [
769 CmmStaticLit (CmmFloat f F32)],
770 GLD F32 (ImmAddr (ImmCLbl lbl) 0) dst
773 return (Any F32 code)
776 getRegister (CmmLit (CmmFloat d F64))
778 = let code dst = unitOL (GLDZ dst)
779 in return (Any F64 code)
782 = let code dst = unitOL (GLD1 dst)
783 in return (Any F64 code)
786 lbl <- getNewLabelNat
787 let code dst = toOL [
790 CmmStaticLit (CmmFloat d F64)],
791 GLD F64 (ImmAddr (ImmCLbl lbl) 0) dst
794 return (Any F64 code)
796 #endif /* i386_TARGET_ARCH */
798 #if x86_64_TARGET_ARCH
800 getRegister (CmmLit (CmmFloat 0.0 rep)) = do
801 let code dst = unitOL (XOR rep (OpReg dst) (OpReg dst))
802 -- I don't know why there are xorpd, xorps, and pxor instructions.
803 -- They all appear to do the same thing --SDM
804 return (Any rep code)
806 getRegister (CmmLit (CmmFloat f rep)) = do
807 lbl <- getNewLabelNat
808 let code dst = toOL [
811 CmmStaticLit (CmmFloat f rep)],
812 MOV rep (OpAddr (ripRel (ImmCLbl lbl))) (OpReg dst)
815 return (Any rep code)
817 #endif /* x86_64_TARGET_ARCH */
819 #if i386_TARGET_ARCH || x86_64_TARGET_ARCH
821 -- catch simple cases of zero- or sign-extended load
822 getRegister (CmmMachOp (MO_U_Conv I8 I32) [CmmLoad addr _]) = do
823 code <- intLoadCode (MOVZxL I8) addr
824 return (Any I32 code)
826 getRegister (CmmMachOp (MO_S_Conv I8 I32) [CmmLoad addr _]) = do
827 code <- intLoadCode (MOVSxL I8) addr
828 return (Any I32 code)
830 getRegister (CmmMachOp (MO_U_Conv I16 I32) [CmmLoad addr _]) = do
831 code <- intLoadCode (MOVZxL I16) addr
832 return (Any I32 code)
834 getRegister (CmmMachOp (MO_S_Conv I16 I32) [CmmLoad addr _]) = do
835 code <- intLoadCode (MOVSxL I16) addr
836 return (Any I32 code)
840 #if x86_64_TARGET_ARCH
842 -- catch simple cases of zero- or sign-extended load
843 getRegister (CmmMachOp (MO_U_Conv I8 I64) [CmmLoad addr _]) = do
844 code <- intLoadCode (MOVZxL I8) addr
845 return (Any I64 code)
847 getRegister (CmmMachOp (MO_S_Conv I8 I64) [CmmLoad addr _]) = do
848 code <- intLoadCode (MOVSxL I8) addr
849 return (Any I64 code)
851 getRegister (CmmMachOp (MO_U_Conv I16 I64) [CmmLoad addr _]) = do
852 code <- intLoadCode (MOVZxL I16) addr
853 return (Any I64 code)
855 getRegister (CmmMachOp (MO_S_Conv I16 I64) [CmmLoad addr _]) = do
856 code <- intLoadCode (MOVSxL I16) addr
857 return (Any I64 code)
859 getRegister (CmmMachOp (MO_U_Conv I32 I64) [CmmLoad addr _]) = do
860 code <- intLoadCode (MOV I32) addr -- 32-bit loads zero-extend
861 return (Any I64 code)
863 getRegister (CmmMachOp (MO_S_Conv I32 I64) [CmmLoad addr _]) = do
864 code <- intLoadCode (MOVSxL I32) addr
865 return (Any I64 code)
869 #if x86_64_TARGET_ARCH
870 getRegister (CmmMachOp (MO_S_Neg F32) [x]) = do
871 x_code <- getAnyReg x
872 lbl <- getNewLabelNat
874 code dst = x_code dst `appOL` toOL [
875 -- This is how gcc does it, so it can't be that bad:
876 LDATA ReadOnlyData16 [
879 CmmStaticLit (CmmInt 0x80000000 I32),
880 CmmStaticLit (CmmInt 0 I32),
881 CmmStaticLit (CmmInt 0 I32),
882 CmmStaticLit (CmmInt 0 I32)
884 XOR F32 (OpAddr (ripRel (ImmCLbl lbl))) (OpReg dst)
885 -- xorps, so we need the 128-bit constant
886 -- ToDo: rip-relative
889 return (Any F32 code)
891 getRegister (CmmMachOp (MO_S_Neg F64) [x]) = do
892 x_code <- getAnyReg x
893 lbl <- getNewLabelNat
895 -- This is how gcc does it, so it can't be that bad:
896 code dst = x_code dst `appOL` toOL [
897 LDATA ReadOnlyData16 [
900 CmmStaticLit (CmmInt 0x8000000000000000 I64),
901 CmmStaticLit (CmmInt 0 I64)
903 -- gcc puts an unpck here. Wonder if we need it.
904 XOR F64 (OpAddr (ripRel (ImmCLbl lbl))) (OpReg dst)
905 -- xorpd, so we need the 128-bit constant
908 return (Any F64 code)
911 #if i386_TARGET_ARCH || x86_64_TARGET_ARCH
913 getRegister (CmmMachOp mop [x]) -- unary MachOps
916 MO_S_Neg F32 -> trivialUFCode F32 (GNEG F32) x
917 MO_S_Neg F64 -> trivialUFCode F64 (GNEG F64) x
920 MO_S_Neg rep -> trivialUCode rep (NEGI rep) x
921 MO_Not rep -> trivialUCode rep (NOT rep) x
924 -- TODO: these are only nops if the arg is not a fixed register that
925 -- can't be byte-addressed.
926 MO_U_Conv I32 I8 -> conversionNop I32 x
927 MO_S_Conv I32 I8 -> conversionNop I32 x
928 MO_U_Conv I16 I8 -> conversionNop I16 x
929 MO_S_Conv I16 I8 -> conversionNop I16 x
930 MO_U_Conv I32 I16 -> conversionNop I32 x
931 MO_S_Conv I32 I16 -> conversionNop I32 x
932 #if x86_64_TARGET_ARCH
933 MO_U_Conv I64 I32 -> conversionNop I64 x
934 MO_S_Conv I64 I32 -> conversionNop I64 x
935 MO_U_Conv I64 I16 -> conversionNop I64 x
936 MO_S_Conv I64 I16 -> conversionNop I64 x
937 MO_U_Conv I64 I8 -> conversionNop I64 x
938 MO_S_Conv I64 I8 -> conversionNop I64 x
941 MO_U_Conv rep1 rep2 | rep1 == rep2 -> conversionNop rep1 x
942 MO_S_Conv rep1 rep2 | rep1 == rep2 -> conversionNop rep1 x
945 MO_U_Conv I8 I32 -> integerExtend I8 I32 MOVZxL x
946 MO_U_Conv I16 I32 -> integerExtend I16 I32 MOVZxL x
947 MO_U_Conv I8 I16 -> integerExtend I8 I16 MOVZxL x
949 MO_S_Conv I8 I32 -> integerExtend I8 I32 MOVSxL x
950 MO_S_Conv I16 I32 -> integerExtend I16 I32 MOVSxL x
951 MO_S_Conv I8 I16 -> integerExtend I8 I16 MOVSxL x
953 #if x86_64_TARGET_ARCH
954 MO_U_Conv I8 I64 -> integerExtend I8 I64 MOVZxL x
955 MO_U_Conv I16 I64 -> integerExtend I16 I64 MOVZxL x
956 MO_U_Conv I32 I64 -> integerExtend I32 I64 MOVZxL x
957 MO_S_Conv I8 I64 -> integerExtend I8 I64 MOVSxL x
958 MO_S_Conv I16 I64 -> integerExtend I16 I64 MOVSxL x
959 MO_S_Conv I32 I64 -> integerExtend I32 I64 MOVSxL x
960 -- for 32-to-64 bit zero extension, amd64 uses an ordinary movl.
961 -- However, we don't want the register allocator to throw it
962 -- away as an unnecessary reg-to-reg move, so we keep it in
963 -- the form of a movzl and print it as a movl later.
967 MO_S_Conv F32 F64 -> conversionNop F64 x
968 MO_S_Conv F64 F32 -> conversionNop F32 x
970 MO_S_Conv F32 F64 -> coerceFP2FP F64 x
971 MO_S_Conv F64 F32 -> coerceFP2FP F32 x
975 | isFloatingRep from -> coerceFP2Int from to x
976 | isFloatingRep to -> coerceInt2FP from to x
978 other -> pprPanic "getRegister" (pprMachOp mop)
980 -- signed or unsigned extension.
981 integerExtend from to instr expr = do
982 (reg,e_code) <- if from == I8 then getByteReg expr
987 instr from (OpReg reg) (OpReg dst)
990 conversionNop new_rep expr
991 = do e_code <- getRegister expr
992 return (swizzleRegisterRep e_code new_rep)
995 getRegister e@(CmmMachOp mop [x, y]) -- dyadic MachOps
996 = ASSERT2(cmmExprRep x /= I8, pprExpr e)
998 MO_Eq F32 -> condFltReg EQQ x y
999 MO_Ne F32 -> condFltReg NE x y
1000 MO_S_Gt F32 -> condFltReg GTT x y
1001 MO_S_Ge F32 -> condFltReg GE x y
1002 MO_S_Lt F32 -> condFltReg LTT x y
1003 MO_S_Le F32 -> condFltReg LE x y
1005 MO_Eq F64 -> condFltReg EQQ x y
1006 MO_Ne F64 -> condFltReg NE x y
1007 MO_S_Gt F64 -> condFltReg GTT x y
1008 MO_S_Ge F64 -> condFltReg GE x y
1009 MO_S_Lt F64 -> condFltReg LTT x y
1010 MO_S_Le F64 -> condFltReg LE x y
1012 MO_Eq rep -> condIntReg EQQ x y
1013 MO_Ne rep -> condIntReg NE x y
1015 MO_S_Gt rep -> condIntReg GTT x y
1016 MO_S_Ge rep -> condIntReg GE x y
1017 MO_S_Lt rep -> condIntReg LTT x y
1018 MO_S_Le rep -> condIntReg LE x y
1020 MO_U_Gt rep -> condIntReg GU x y
1021 MO_U_Ge rep -> condIntReg GEU x y
1022 MO_U_Lt rep -> condIntReg LU x y
1023 MO_U_Le rep -> condIntReg LEU x y
1025 #if i386_TARGET_ARCH
1026 MO_Add F32 -> trivialFCode F32 GADD x y
1027 MO_Sub F32 -> trivialFCode F32 GSUB x y
1029 MO_Add F64 -> trivialFCode F64 GADD x y
1030 MO_Sub F64 -> trivialFCode F64 GSUB x y
1032 MO_S_Quot F32 -> trivialFCode F32 GDIV x y
1033 MO_S_Quot F64 -> trivialFCode F64 GDIV x y
1036 #if x86_64_TARGET_ARCH
1037 MO_Add F32 -> trivialFCode F32 ADD x y
1038 MO_Sub F32 -> trivialFCode F32 SUB x y
1040 MO_Add F64 -> trivialFCode F64 ADD x y
1041 MO_Sub F64 -> trivialFCode F64 SUB x y
1043 MO_S_Quot F32 -> trivialFCode F32 FDIV x y
1044 MO_S_Quot F64 -> trivialFCode F64 FDIV x y
1047 MO_Add rep -> add_code rep x y
1048 MO_Sub rep -> sub_code rep x y
1050 MO_S_Quot rep -> div_code rep True True x y
1051 MO_S_Rem rep -> div_code rep True False x y
1052 MO_U_Quot rep -> div_code rep False True x y
1053 MO_U_Rem rep -> div_code rep False False x y
1055 #if i386_TARGET_ARCH
1056 MO_Mul F32 -> trivialFCode F32 GMUL x y
1057 MO_Mul F64 -> trivialFCode F64 GMUL x y
1060 #if x86_64_TARGET_ARCH
1061 MO_Mul F32 -> trivialFCode F32 MUL x y
1062 MO_Mul F64 -> trivialFCode F64 MUL x y
1065 MO_Mul rep -> let op = IMUL rep in
1066 trivialCode rep op (Just op) x y
1068 MO_S_MulMayOflo rep -> imulMayOflo rep x y
1070 MO_And rep -> let op = AND rep in
1071 trivialCode rep op (Just op) x y
1072 MO_Or rep -> let op = OR rep in
1073 trivialCode rep op (Just op) x y
1074 MO_Xor rep -> let op = XOR rep in
1075 trivialCode rep op (Just op) x y
1077 {- Shift ops on x86s have constraints on their source, it
1078 either has to be Imm, CL or 1
1079 => trivialCode is not restrictive enough (sigh.)
1081 MO_Shl rep -> shift_code rep (SHL rep) x y {-False-}
1082 MO_U_Shr rep -> shift_code rep (SHR rep) x y {-False-}
1083 MO_S_Shr rep -> shift_code rep (SAR rep) x y {-False-}
1085 other -> pprPanic "getRegister(x86) - binary CmmMachOp (1)" (pprMachOp mop)
1087 --------------------
1088 imulMayOflo :: MachRep -> CmmExpr -> CmmExpr -> NatM Register
1089 imulMayOflo rep a b = do
1090 (a_reg, a_code) <- getNonClobberedReg a
1091 b_code <- getAnyReg b
1093 shift_amt = case rep of
1096 _ -> panic "shift_amt"
1098 code = a_code `appOL` b_code eax `appOL`
1100 IMUL2 rep (OpReg a_reg), -- result in %edx:%eax
1101 SAR rep (OpImm (ImmInt shift_amt)) (OpReg eax),
1102 -- sign extend lower part
1103 SUB rep (OpReg edx) (OpReg eax)
1104 -- compare against upper
1105 -- eax==0 if high part == sign extended low part
1108 return (Fixed rep eax code)
1110 --------------------
1111 shift_code :: MachRep
1112 -> (Operand -> Operand -> Instr)
1117 {- Case1: shift length as immediate -}
1118 shift_code rep instr x y@(CmmLit lit) = do
1119 x_code <- getAnyReg x
1122 = x_code dst `snocOL`
1123 instr (OpImm (litToImm lit)) (OpReg dst)
1125 return (Any rep code)
1127 {- Case2: shift length is complex (non-immediate) -}
1128 shift_code rep instr x y{-amount-} = do
1129 (x_reg, x_code) <- getNonClobberedReg x
1130 y_code <- getAnyReg y
1132 code = x_code `appOL`
1134 instr (OpReg ecx) (OpReg x_reg)
1136 return (Fixed rep x_reg code)
1138 --------------------
1139 add_code :: MachRep -> CmmExpr -> CmmExpr -> NatM Register
1140 add_code rep x (CmmLit (CmmInt y _)) = add_int rep x y
1141 add_code rep x y = trivialCode rep (ADD rep) (Just (ADD rep)) x y
1143 --------------------
1144 sub_code :: MachRep -> CmmExpr -> CmmExpr -> NatM Register
1145 sub_code rep x (CmmLit (CmmInt y _)) = add_int rep x (-y)
1146 sub_code rep x y = trivialCode rep (SUB rep) Nothing x y
1148 -- our three-operand add instruction:
1149 add_int rep x y = do
1150 (x_reg, x_code) <- getSomeReg x
1152 imm = ImmInt (fromInteger y)
1156 (OpAddr (AddrBaseIndex (EABaseReg x_reg) EAIndexNone imm))
1159 return (Any rep code)
1161 ----------------------
1162 div_code rep signed quotient x y = do
1163 (y_op, y_code) <- getRegOrMem y -- cannot be clobbered
1164 x_code <- getAnyReg x
1166 widen | signed = CLTD rep
1167 | otherwise = XOR rep (OpReg edx) (OpReg edx)
1169 instr | signed = IDIV
1172 code = y_code `appOL`
1174 toOL [widen, instr rep y_op]
1176 result | quotient = eax
1180 return (Fixed rep result code)
-- getRegister equations for memory loads and literals on x86/x86_64.
-- NOTE(review): interior lines appear elided in this view; code left untouched.
1183 getRegister (CmmLoad mem pk)
-- Compute the addressing mode first, then emit a single load into dst.
1186 Amode src mem_code <- getAmode mem
1188 code dst = mem_code `snocOL`
-- IF_ARCH_i386 selects the x87 load (GLD) on i386, a plain MOV elsewhere.
1189 IF_ARCH_i386(GLD pk src dst,
1190 MOV pk (OpAddr src) (OpReg dst))
1192 return (Any pk code)
1194 #if i386_TARGET_ARCH
1195 getRegister (CmmLoad mem pk)
1198 code <- intLoadCode (instr pk) mem
1199 return (Any pk code)
-- 8-bit loads are widened with MOVZxL; see rationale below.
1201 instr I8 = MOVZxL pk
1204 -- we always zero-extend 8-bit loads, if we
1205 -- can't think of anything better. This is because
1206 -- we can't guarantee access to an 8-bit variant of every register
1207 -- (esi and edi don't have 8-bit variants), so to make things
1208 -- simpler we do our 8-bit arithmetic with full 32-bit registers.
1211 #if x86_64_TARGET_ARCH
1212 -- Simpler memory load code on x86_64
1213 getRegister (CmmLoad mem pk)
1215 code <- intLoadCode (MOV pk) mem
1216 return (Any pk code)
-- Zero literal: synthesised with xor dst,dst rather than an immediate move.
1219 getRegister (CmmLit (CmmInt 0 rep))
1221 -- x86_64: 32-bit xor is one byte shorter, and zero-extends to 64 bits
1222 adj_rep = case rep of I64 -> I32; _ -> rep
1223 rep1 = IF_ARCH_i386( rep, adj_rep )
1225 = unitOL (XOR rep1 (OpReg dst) (OpReg dst))
1227 return (Any rep code)
1229 #if x86_64_TARGET_ARCH
1230 -- optimisation for loading small literals on x86_64: take advantage
1231 -- of the automatic zero-extension from 32 to 64 bits, because the 32-bit
1232 -- instruction forms are shorter.
1233 getRegister (CmmLit lit)
1234 | I64 <- cmmLitRep lit, not (isBigLit lit)
1237 code dst = unitOL (MOV I32 (OpImm imm) (OpReg dst))
1239 return (Any I64 code)
-- "big" here means: does not fit in an unsigned 32-bit immediate.
1241 isBigLit (CmmInt i I64) = i < 0 || i > 0xffffffff
1243 -- note1: not the same as is64BitLit, because that checks for
1244 -- signed literals that fit in 32 bits, but we want unsigned
1246 -- note2: all labels are small, because we're assuming the
1247 -- small memory model (see gcc docs, -mcmodel=small).
-- Generic literal fallback: materialise via a MOV immediate.
1250 getRegister (CmmLit lit)
1254 code dst = unitOL (MOV rep (OpImm imm) (OpReg dst))
1256 return (Any rep code)
1258 getRegister other = pprPanic "getRegister(x86)" (ppr other)
-- intLoadCode: shared helper for integer loads. Given a load-style
-- instruction constructor and a memory expression, produce a
-- destination-parameterised instruction block: amode computation
-- followed by one 'instr (OpAddr src) (OpReg dst)'.
1261 intLoadCode :: (Operand -> Operand -> Instr) -> CmmExpr
1262 -> NatM (Reg -> InstrBlock)
1263 intLoadCode instr mem = do
1264 Amode src mem_code <- getAmode mem
1265 return (\dst -> mem_code `snocOL` instr (OpAddr src) (OpReg dst))
1267 -- Compute an expression into *any* register, adding the appropriate
1268 -- move instruction if necessary.
1269 getAnyReg :: CmmExpr -> NatM (Reg -> InstrBlock)
1271 r <- getRegister expr
-- anyReg: adapt a Register result to destination-parameterised form.
-- An 'Any' result already is one; a 'Fixed' result gets a trailing
-- reg-to-reg move from the fixed register into the requested dst.
1274 anyReg :: Register -> NatM (Reg -> InstrBlock)
1275 anyReg (Any _ code) = return code
1276 anyReg (Fixed rep reg fcode) = return (\dst -> fcode `snocOL` reg2reg rep reg dst)
1278 -- A bit like getSomeReg, but we want a reg that can be byte-addressed.
1279 -- Fixed registers might not be byte-addressable, so we make sure we've
1280 -- got a temporary, inserting an extra reg copy if necessary.
1281 getByteReg :: CmmExpr -> NatM (Reg, InstrBlock)
1282 #if x86_64_TARGET_ARCH
1283 getByteReg = getSomeReg -- all regs are byte-addressable on x86_64
-- i386 path (NOTE(review): #else/#endif lines elided in this view).
1285 getByteReg expr = do
1286 r <- getRegister expr
1289 tmp <- getNewRegNat rep
1290 return (tmp, code tmp)
-- Virtual registers are fine as-is; the allocator will pick a
-- byte-addressable real register for them.
1292 | isVirtualReg reg -> return (reg,code)
1294 tmp <- getNewRegNat rep
1295 return (tmp, code `snocOL` reg2reg rep reg tmp)
1296 -- ToDo: could optimise slightly by checking for byte-addressable
1297 -- real registers, but that will happen very rarely if at all.
1300 -- Another variant: this time we want the result in a register that cannot
1301 -- be modified by code to evaluate an arbitrary expression.
1302 getNonClobberedReg :: CmmExpr -> NatM (Reg, InstrBlock)
1303 getNonClobberedReg expr = do
1304 r <- getRegister expr
1307 tmp <- getNewRegNat rep
1308 return (tmp, code tmp)
1310 -- only free regs can be clobbered
-- A Fixed result in a free (allocatable) real register must be copied
-- out to a fresh temporary, since later code may overwrite it.
1311 | RealReg rr <- reg, isFastTrue (freeReg rr) -> do
1312 tmp <- getNewRegNat rep
1313 return (tmp, code `snocOL` reg2reg rep reg tmp)
-- reg2reg: the single instruction that copies src to dst at the given rep.
-- On i386, floating values live on the x87 stack and need GMOV; everything
-- else (including all x86_64 cases) is a plain MOV.
1317 reg2reg :: MachRep -> Reg -> Reg -> Instr
1319 #if i386_TARGET_ARCH
1320 | isFloatingRep rep = GMOV src dst
1322 | otherwise = MOV rep (OpReg src) (OpReg dst)
1324 #endif /* i386_TARGET_ARCH || x86_64_TARGET_ARCH */
1326 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1328 #if sparc_TARGET_ARCH
-- SPARC: float literals are placed in the data segment and loaded via a
-- SETHI/LD pair (HI/LO immediate synthesis).
-- NOTE(review): this section still uses pre-Cmm constructors (StFloat,
-- StInt, StCall) — it looks stale relative to the x86/ppc sections; confirm.
1330 getRegister (StFloat d)
1331 = getBlockIdNat `thenNat` \ lbl ->
1332 getNewRegNat PtrRep `thenNat` \ tmp ->
1333 let code dst = toOL [
1334 SEGMENT DataSegment,
1336 DATA F [ImmFloat d],
1337 SEGMENT TextSegment,
1338 SETHI (HI (ImmCLbl lbl)) tmp,
1339 LD F (AddrRegImm tmp (LO (ImmCLbl lbl))) dst]
1341 return (Any F32 code)
-- Same scheme for doubles, using the DF (double float) size.
1343 getRegister (StDouble d)
1344 = getBlockIdNat `thenNat` \ lbl ->
1345 getNewRegNat PtrRep `thenNat` \ tmp ->
1346 let code dst = toOL [
1347 SEGMENT DataSegment,
1349 DATA DF [ImmDouble d],
1350 SEGMENT TextSegment,
1351 SETHI (HI (ImmCLbl lbl)) tmp,
1352 LD DF (AddrRegImm tmp (LO (ImmCLbl lbl))) dst]
1354 return (Any F64 code)
1357 getRegister (CmmMachOp mop [x]) -- unary PrimOps
-- Integer negation is 0 - x; NOT is XNOR with %g0 (always-zero register).
1359 MO_NatS_Neg -> trivialUCode (SUB False False g0) x
1360 MO_Nat_Not -> trivialUCode (XNOR False g0) x
1361 MO_32U_to_8U -> trivialCode (AND False) x (StInt 255)
1363 MO_F32_Neg -> trivialUFCode F32 (FNEG F) x
1364 MO_F64_Neg -> trivialUFCode F64 (FNEG DF) x
1366 MO_F64_to_Flt -> coerceDbl2Flt x
1367 MO_F32_to_Dbl -> coerceFlt2Dbl x
1369 MO_F32_to_NatS -> coerceFP2Int F32 x
1370 MO_NatS_to_Flt -> coerceInt2FP F32 x
1371 MO_F64_to_NatS -> coerceFP2Int F64 x
1372 MO_NatS_to_Dbl -> coerceInt2FP F64 x
1374 -- Conversions which are a nop on sparc
1375 MO_32U_to_NatS -> conversionNop IntRep x
1376 MO_32S_to_NatS -> conversionNop IntRep x
1377 MO_NatS_to_32U -> conversionNop WordRep x
1378 MO_32U_to_NatU -> conversionNop WordRep x
1380 MO_NatU_to_NatS -> conversionNop IntRep x
1381 MO_NatS_to_NatU -> conversionNop WordRep x
1382 MO_NatP_to_NatU -> conversionNop WordRep x
1383 MO_NatU_to_NatP -> conversionNop PtrRep x
1384 MO_NatS_to_NatP -> conversionNop PtrRep x
1385 MO_NatP_to_NatS -> conversionNop IntRep x
1387 -- sign-extending widenings
-- NOTE(review): the False cases below are zero-extensions, so this
-- comment actually covers both zero- and sign-extending widenings.
1388 MO_8U_to_32U -> integerExtend False 24 x
1389 MO_8U_to_NatU -> integerExtend False 24 x
1390 MO_8S_to_NatS -> integerExtend True 24 x
1391 MO_16U_to_NatU -> integerExtend False 16 x
1392 MO_16S_to_NatS -> integerExtend True 16 x
-- Single-precision FP primops: promote to double and call out to libm.
1395 let fixed_x = if is_float_op -- promote to double
1396 then CmmMachOp MO_F32_to_Dbl [x]
1399 getRegister (StCall (Left fn) CCallConv F64 [fixed_x])
-- integerExtend: widen by shifting left then (arithmetic or logical)
-- right by the same amount; 'signed' selects Sar vs Shr.
1401 integerExtend signed nBits x
1403 CmmMachOp (if signed then MO_Nat_Sar else MO_Nat_Shr)
1404 [CmmMachOp MO_Nat_Shl [x, StInt nBits], StInt nBits]
-- conversionNop: no instructions, just relabel the result's rep.
1406 conversionNop new_rep expr
1407 = getRegister expr `thenNat` \ e_code ->
1408 return (swizzleRegisterRep e_code new_rep)
-- Table mapping FP primops to (needs-promotion?, libm function name).
1412 MO_F32_Exp -> (True, FSLIT("exp"))
1413 MO_F32_Log -> (True, FSLIT("log"))
1414 MO_F32_Sqrt -> (True, FSLIT("sqrt"))
1416 MO_F32_Sin -> (True, FSLIT("sin"))
1417 MO_F32_Cos -> (True, FSLIT("cos"))
1418 MO_F32_Tan -> (True, FSLIT("tan"))
1420 MO_F32_Asin -> (True, FSLIT("asin"))
1421 MO_F32_Acos -> (True, FSLIT("acos"))
1422 MO_F32_Atan -> (True, FSLIT("atan"))
1424 MO_F32_Sinh -> (True, FSLIT("sinh"))
1425 MO_F32_Cosh -> (True, FSLIT("cosh"))
1426 MO_F32_Tanh -> (True, FSLIT("tanh"))
1428 MO_F64_Exp -> (False, FSLIT("exp"))
1429 MO_F64_Log -> (False, FSLIT("log"))
1430 MO_F64_Sqrt -> (False, FSLIT("sqrt"))
1432 MO_F64_Sin -> (False, FSLIT("sin"))
1433 MO_F64_Cos -> (False, FSLIT("cos"))
1434 MO_F64_Tan -> (False, FSLIT("tan"))
1436 MO_F64_Asin -> (False, FSLIT("asin"))
1437 MO_F64_Acos -> (False, FSLIT("acos"))
1438 MO_F64_Atan -> (False, FSLIT("atan"))
1440 MO_F64_Sinh -> (False, FSLIT("sinh"))
1441 MO_F64_Cosh -> (False, FSLIT("cosh"))
1442 MO_F64_Tanh -> (False, FSLIT("tanh"))
1444 other -> pprPanic "getRegister(sparc) - binary CmmMachOp (2)"
-- SPARC dyadic primops: comparisons, arithmetic, bit-ops, shifts.
1448 getRegister (CmmMachOp mop [x, y]) -- dyadic PrimOps
1450 MO_32U_Gt -> condIntReg GTT x y
1451 MO_32U_Ge -> condIntReg GE x y
1452 MO_32U_Eq -> condIntReg EQQ x y
1453 MO_32U_Ne -> condIntReg NE x y
1454 MO_32U_Lt -> condIntReg LTT x y
1455 MO_32U_Le -> condIntReg LE x y
1457 MO_Nat_Eq -> condIntReg EQQ x y
1458 MO_Nat_Ne -> condIntReg NE x y
1460 MO_NatS_Gt -> condIntReg GTT x y
1461 MO_NatS_Ge -> condIntReg GE x y
1462 MO_NatS_Lt -> condIntReg LTT x y
1463 MO_NatS_Le -> condIntReg LE x y
-- Unsigned comparisons use the unsigned condition codes (GU/GEU/LU/LEU).
1465 MO_NatU_Gt -> condIntReg GU x y
1466 MO_NatU_Ge -> condIntReg GEU x y
1467 MO_NatU_Lt -> condIntReg LU x y
1468 MO_NatU_Le -> condIntReg LEU x y
1470 MO_F32_Gt -> condFltReg GTT x y
1471 MO_F32_Ge -> condFltReg GE x y
1472 MO_F32_Eq -> condFltReg EQQ x y
1473 MO_F32_Ne -> condFltReg NE x y
1474 MO_F32_Lt -> condFltReg LTT x y
1475 MO_F32_Le -> condFltReg LE x y
1477 MO_F64_Gt -> condFltReg GTT x y
1478 MO_F64_Ge -> condFltReg GE x y
1479 MO_F64_Eq -> condFltReg EQQ x y
1480 MO_F64_Ne -> condFltReg NE x y
1481 MO_F64_Lt -> condFltReg LTT x y
1482 MO_F64_Le -> condFltReg LE x y
1484 MO_Nat_Add -> trivialCode (ADD False False) x y
1485 MO_Nat_Sub -> trivialCode (SUB False False) x y
1487 MO_NatS_Mul -> trivialCode (SMUL False) x y
1488 MO_NatU_Mul -> trivialCode (UMUL False) x y
1489 MO_NatS_MulMayOflo -> imulMayOflo x y
1491 -- ToDo: teach about V8+ SPARC div instructions
-- Division/remainder go via millicode routines (.div/.rem/.udiv/.urem).
1492 MO_NatS_Quot -> idiv FSLIT(".div") x y
1493 MO_NatS_Rem -> idiv FSLIT(".rem") x y
1494 MO_NatU_Quot -> idiv FSLIT(".udiv") x y
1495 MO_NatU_Rem -> idiv FSLIT(".urem") x y
1497 MO_F32_Add -> trivialFCode F32 FADD x y
1498 MO_F32_Sub -> trivialFCode F32 FSUB x y
1499 MO_F32_Mul -> trivialFCode F32 FMUL x y
1500 MO_F32_Div -> trivialFCode F32 FDIV x y
1502 MO_F64_Add -> trivialFCode F64 FADD x y
1503 MO_F64_Sub -> trivialFCode F64 FSUB x y
1504 MO_F64_Mul -> trivialFCode F64 FMUL x y
1505 MO_F64_Div -> trivialFCode F64 FDIV x y
1507 MO_Nat_And -> trivialCode (AND False) x y
1508 MO_Nat_Or -> trivialCode (OR False) x y
1509 MO_Nat_Xor -> trivialCode (XOR False) x y
1511 MO_Nat_Shl -> trivialCode SLL x y
1512 MO_Nat_Shr -> trivialCode SRL x y
1513 MO_Nat_Sar -> trivialCode SRA x y
-- pow has no single-precision variant here: promote operands to double.
1515 MO_F32_Pwr -> getRegister (StCall (Left FSLIT("pow")) CCallConv F64
1516 [promote x, promote y])
1517 where promote x = CmmMachOp MO_F32_to_Dbl [x]
1518 MO_F64_Pwr -> getRegister (StCall (Left FSLIT("pow")) CCallConv F64
1521 other -> pprPanic "getRegister(sparc) - binary CmmMachOp (1)" (pprMachOp mop)
1523 idiv fn x y = getRegister (StCall (Left fn) CCallConv IntRep [x, y])
1525 --------------------
-- imulMayOflo (sparc): detect signed multiply overflow by comparing the
-- high word of the full product against the sign-extension of the low word.
1526 imulMayOflo :: CmmExpr -> CmmExpr -> NatM Register
1528 = getNewRegNat IntRep `thenNat` \ t1 ->
1529 getNewRegNat IntRep `thenNat` \ t2 ->
1530 getNewRegNat IntRep `thenNat` \ res_lo ->
1531 getNewRegNat IntRep `thenNat` \ res_hi ->
1532 getRegister a1 `thenNat` \ reg1 ->
1533 getRegister a2 `thenNat` \ reg2 ->
1534 let code1 = registerCode reg1 t1
1535 code2 = registerCode reg2 t2
1536 src1 = registerName reg1 t1
1537 src2 = registerName reg2 t2
1538 code dst = code1 `appOL` code2 `appOL`
1540 SMUL False src1 (RIReg src2) res_lo,
1542 SRA res_lo (RIImm (ImmInt 31)) res_lo,
1544 SUB False False res_lo (RIReg res_hi) dst
1546 return (Any IntRep code)
-- SPARC loads and integer/pointer literals.
-- NOTE(review): the pattern here is (CmmLoad pk mem), but the x86 and ppc
-- sections match (CmmLoad mem pk) — the argument order looks swapped in
-- one of them; confirm against the CmmLoad constructor's field order.
1548 getRegister (CmmLoad pk mem) = do
1549 Amode src code <- getAmode mem
1551 size = primRepToSize pk
1552 code__2 dst = code `snocOL` LD size src dst
1554 return (Any pk code__2)
-- Small integer literal: OR the immediate into dst via %g0.
1556 getRegister (StInt i)
1559 src = ImmInt (fromInteger i)
1560 code dst = unitOL (OR False g0 (RIImm src) dst)
1562 return (Any IntRep code)
-- SETHI/OR pair synthesises a full-width immediate (HI 22 bits + LO 10).
1568 SETHI (HI imm__2) dst,
1569 OR False dst (RIImm (LO imm__2)) dst]
1571 return (Any PtrRep code)
1573 = ncgPrimopMoan "getRegister(sparc)" (pprCmmExpr leaf)
-- NOTE(review): partial match — a Nothing here is a pattern-match failure
-- at runtime; presumably unreachable given the guard above, but confirm.
1576 imm__2 = case imm of Just x -> x
1578 #endif /* sparc_TARGET_ARCH */
1580 #if powerpc_TARGET_ARCH
-- PowerPC: plain load, with an assertion that FP values go to FP regs.
1581 getRegister (CmmLoad mem pk)
1584 Amode addr addr_code <- getAmode mem
1585 let code dst = ASSERT((regClass dst == RcDouble) == isFloatingRep pk)
1586 addr_code `snocOL` LD pk dst addr
1587 return (Any pk code)
1589 -- catch simple cases of zero- or sign-extended load
-- lbz zero-extends, so an 8-bit unsigned widening is just the load itself.
1590 getRegister (CmmMachOp (MO_U_Conv I8 I32) [CmmLoad mem _]) = do
1591 Amode addr addr_code <- getAmode mem
1592 return (Any I32 (\dst -> addr_code `snocOL` LD I8 dst addr))
1594 -- Note: there is no Load Byte Arithmetic instruction, so no signed case here
1596 getRegister (CmmMachOp (MO_U_Conv I16 I32) [CmmLoad mem _]) = do
1597 Amode addr addr_code <- getAmode mem
1598 return (Any I32 (\dst -> addr_code `snocOL` LD I16 dst addr))
-- LA is the load-algebraic (sign-extending) variant, used for signed I16.
1600 getRegister (CmmMachOp (MO_S_Conv I16 I32) [CmmLoad mem _]) = do
1601 Amode addr addr_code <- getAmode mem
1602 return (Any I32 (\dst -> addr_code `snocOL` LA I16 dst addr))
-- PowerPC unary MachOps: bitwise not, FP rounding, integer/FP conversions,
-- and negation.
1604 getRegister (CmmMachOp mop [x]) -- unary MachOps
1606 MO_Not rep -> trivialUCode rep NOT x
-- frsp rounds double to single; the reverse direction is a no-op since
-- singles are held in double-width FP registers.
1608 MO_S_Conv F64 F32 -> trivialUCode F32 FRSP x
1609 MO_S_Conv F32 F64 -> conversionNop F64 x
1612 | from == to -> conversionNop to x
1613 | isFloatingRep from -> coerceFP2Int from to x
1614 | isFloatingRep to -> coerceInt2FP from to x
1616 -- narrowing is a nop: we treat the high bits as undefined
1617 MO_S_Conv I32 to -> conversionNop to x
1618 MO_S_Conv I16 I8 -> conversionNop I8 x
-- Signed widening from small reps uses extsb/extsh (EXTS).
1619 MO_S_Conv I8 to -> trivialUCode to (EXTS I8) x
1620 MO_S_Conv I16 to -> trivialUCode to (EXTS I16) x
1623 | from == to -> conversionNop to x
1624 -- narrowing is a nop: we treat the high bits as undefined
1625 MO_U_Conv I32 to -> conversionNop to x
1626 MO_U_Conv I16 I8 -> conversionNop I8 x
-- Unsigned widening is a mask with 0xff / 0xffff.
1627 MO_U_Conv I8 to -> trivialCode to False AND x (CmmLit (CmmInt 255 I32))
1628 MO_U_Conv I16 to -> trivialCode to False AND x (CmmLit (CmmInt 65535 I32))
1630 MO_S_Neg F32 -> trivialUCode F32 FNEG x
1631 MO_S_Neg F64 -> trivialUCode F64 FNEG x
1632 MO_S_Neg rep -> trivialUCode rep NEG x
-- conversionNop: emit no code, just relabel the result's rep.
1635 conversionNop new_rep expr
1636 = do e_code <- getRegister expr
1637 return (swizzleRegisterRep e_code new_rep)
-- PowerPC dyadic MachOps. Sub-word integer operands are first widened to
-- I32 (extendSExpr/extendUExpr) because the comparisons and divides work
-- on full registers.
1639 getRegister (CmmMachOp mop [x, y]) -- dyadic PrimOps
1641 MO_Eq F32 -> condFltReg EQQ x y
1642 MO_Ne F32 -> condFltReg NE x y
1644 MO_S_Gt F32 -> condFltReg GTT x y
1645 MO_S_Ge F32 -> condFltReg GE x y
1646 MO_S_Lt F32 -> condFltReg LTT x y
1647 MO_S_Le F32 -> condFltReg LE x y
1649 MO_Eq F64 -> condFltReg EQQ x y
1650 MO_Ne F64 -> condFltReg NE x y
1652 MO_S_Gt F64 -> condFltReg GTT x y
1653 MO_S_Ge F64 -> condFltReg GE x y
1654 MO_S_Lt F64 -> condFltReg LTT x y
1655 MO_S_Le F64 -> condFltReg LE x y
-- Equality does not care about sign, so zero-extension suffices.
1657 MO_Eq rep -> condIntReg EQQ (extendUExpr rep x) (extendUExpr rep y)
1658 MO_Ne rep -> condIntReg NE (extendUExpr rep x) (extendUExpr rep y)
1660 MO_S_Gt rep -> condIntReg GTT (extendSExpr rep x) (extendSExpr rep y)
1661 MO_S_Ge rep -> condIntReg GE (extendSExpr rep x) (extendSExpr rep y)
1662 MO_S_Lt rep -> condIntReg LTT (extendSExpr rep x) (extendSExpr rep y)
1663 MO_S_Le rep -> condIntReg LE (extendSExpr rep x) (extendSExpr rep y)
1665 MO_U_Gt rep -> condIntReg GU (extendUExpr rep x) (extendUExpr rep y)
1666 MO_U_Ge rep -> condIntReg GEU (extendUExpr rep x) (extendUExpr rep y)
1667 MO_U_Lt rep -> condIntReg LU (extendUExpr rep x) (extendUExpr rep y)
1668 MO_U_Le rep -> condIntReg LEU (extendUExpr rep x) (extendUExpr rep y)
1670 MO_Add F32 -> trivialCodeNoImm F32 (FADD F32) x y
1671 MO_Sub F32 -> trivialCodeNoImm F32 (FSUB F32) x y
1672 MO_Mul F32 -> trivialCodeNoImm F32 (FMUL F32) x y
1673 MO_S_Quot F32 -> trivialCodeNoImm F32 (FDIV F32) x y
1675 MO_Add F64 -> trivialCodeNoImm F64 (FADD F64) x y
1676 MO_Sub F64 -> trivialCodeNoImm F64 (FSUB F64) x y
1677 MO_Mul F64 -> trivialCodeNoImm F64 (FMUL F64) x y
1678 MO_S_Quot F64 -> trivialCodeNoImm F64 (FDIV F64) x y
1680 -- optimize addition with 32-bit immediate
1684 CmmLit (CmmInt imm immrep) | Just _ <- makeImmediate I32 True (-imm)
1685 -> trivialCode I32 True ADD x (CmmLit $ CmmInt imm immrep)
-- Immediates that don't fit in 16 bits are added with an addis (high
-- half, HA-adjusted) followed by add of the low half.
1688 (src, srcCode) <- getSomeReg x
1689 let imm = litToImm lit
1690 code dst = srcCode `appOL` toOL [
1691 ADDIS dst src (HA imm),
1692 ADD dst dst (RIImm (LO imm))
1694 return (Any I32 code)
1695 _ -> trivialCode I32 True ADD x y
1697 MO_Add rep -> trivialCode rep True ADD x y
1699 case y of -- subfi ('subtract from' with immediate) doesn't exist
-- so x - imm is rewritten as x + (-imm) when -imm fits as an immediate.
1700 CmmLit (CmmInt imm immrep) | Just _ <- makeImmediate rep True (-imm)
1701 -> trivialCode rep True ADD x (CmmLit $ CmmInt (-imm) immrep)
1702 _ -> trivialCodeNoImm rep SUBF y x
1704 MO_Mul rep -> trivialCode rep True MULLW x y
1706 MO_S_MulMayOflo I32 -> trivialCodeNoImm I32 MULLW_MayOflo x y
1708 MO_S_MulMayOflo rep -> panic "S_MulMayOflo (rep /= I32): not implemented"
1709 MO_U_MulMayOflo rep -> panic "U_MulMayOflo: not implemented"
1711 MO_S_Quot rep -> trivialCodeNoImm rep DIVW (extendSExpr rep x) (extendSExpr rep y)
1712 MO_U_Quot rep -> trivialCodeNoImm rep DIVWU (extendUExpr rep x) (extendUExpr rep y)
1714 MO_S_Rem rep -> remainderCode rep DIVW (extendSExpr rep x) (extendSExpr rep y)
1715 MO_U_Rem rep -> remainderCode rep DIVWU (extendUExpr rep x) (extendUExpr rep y)
1717 MO_And rep -> trivialCode rep False AND x y
1718 MO_Or rep -> trivialCode rep False OR x y
1719 MO_Xor rep -> trivialCode rep False XOR x y
1721 MO_Shl rep -> trivialCode rep False SLW x y
-- Arithmetic right shift must see a sign-extended operand.
1722 MO_S_Shr rep -> trivialCode rep False SRAW (extendSExpr rep x) y
1723 MO_U_Shr rep -> trivialCode rep False SRW (extendUExpr rep x) y
-- PowerPC literals: small ints via li, floats via a read-only data label
-- loaded through an amode, everything else via lis/ori (HI/LO halves).
1725 getRegister (CmmLit (CmmInt i rep))
1726 | Just imm <- makeImmediate rep True i
1728 code dst = unitOL (LI dst imm)
1730 return (Any rep code)
1732 getRegister (CmmLit (CmmFloat f frep)) = do
1733 lbl <- getNewLabelNat
-- Float literal goes in ReadOnlyData; address may be PIC-relative.
1734 dynRef <- cmmMakeDynamicReference addImportNat False lbl
1735 Amode addr addr_code <- getAmode dynRef
1737 LDATA ReadOnlyData [CmmDataLabel lbl,
1738 CmmStaticLit (CmmFloat f frep)]
1739 `consOL` (addr_code `snocOL` LD frep dst addr)
1740 return (Any frep code)
1742 getRegister (CmmLit lit)
1743 = let rep = cmmLitRep lit
1747 OR dst dst (RIImm (LO imm))
1749 in return (Any rep code)
1751 getRegister other = pprPanic "getRegister(ppc)" (pprExpr other)
1753 -- extend?Rep: wrap integer expression of type rep
1754 -- in a conversion to I32
-- I32 is already full width, so no conversion is inserted for it.
1755 extendSExpr I32 x = x
1756 extendSExpr rep x = CmmMachOp (MO_S_Conv rep I32) [x]
1757 extendUExpr I32 x = x
1758 extendUExpr rep x = CmmMachOp (MO_U_Conv rep I32) [x]
1760 #endif /* powerpc_TARGET_ARCH */
1763 -- -----------------------------------------------------------------------------
1764 -- The 'Amode' type: Memory addressing modes passed up the tree.
-- An Amode pairs an addressing mode with the code that computes the
-- registers it mentions.
1766 data Amode = Amode AddrMode InstrBlock
1769 Now, given a tree (the argument to an CmmLoad) that references memory,
1770 produce a suitable addressing mode.
1772 A Rule of the Game (tm) for Amodes: use of the addr bit must
1773 immediately follow use of the code part, since the code part puts
1774 values in registers which the addr then refers to. So you can't put
1775 anything in between, lest it overwrite some of those registers. If
1776 you need to do some other computation between the code part and use of
1777 the addr bit, first store the effective address from the amode in a
1778 temporary, then do the other computation, and then use the temporary:
1782 ... other computation ...
-- Normalise CmmRegOff into the equivalent MachOp form first, so the
-- arch-specific equations below only see one shape.
1786 getAmode :: CmmExpr -> NatM Amode
1787 getAmode tree@(CmmRegOff _ _) = getAmode (mangleIndexTree tree)
1789 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1791 #if alpha_TARGET_ARCH
-- Alpha: reg +/- immediate amodes, then a register fallback.
1793 getAmode (StPrim IntSubOp [x, StInt i])
1794 = getNewRegNat PtrRep `thenNat` \ tmp ->
1795 getRegister x `thenNat` \ register ->
1797 code = registerCode register tmp
1798 reg = registerName register tmp
1799 off = ImmInt (-(fromInteger i))
1801 return (Amode (AddrRegImm reg off) code)
1803 getAmode (StPrim IntAddOp [x, StInt i])
1804 = getNewRegNat PtrRep `thenNat` \ tmp ->
1805 getRegister x `thenNat` \ register ->
1807 code = registerCode register tmp
1808 reg = registerName register tmp
1809 off = ImmInt (fromInteger i)
1811 return (Amode (AddrRegImm reg off) code)
1815 = return (Amode (AddrImm imm__2) id)
-- NOTE(review): partial match — Nothing would be a runtime pattern failure;
-- presumably guarded by the (elided) equation head. Confirm.
1818 imm__2 = case imm of Just x -> x
-- Fallback: evaluate the whole expression into a register.
1821 = getNewRegNat PtrRep `thenNat` \ tmp ->
1822 getRegister other `thenNat` \ register ->
1824 code = registerCode register tmp
1825 reg = registerName register tmp
1827 return (Amode (AddrReg reg) code)
1829 #endif /* alpha_TARGET_ARCH */
1831 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1833 #if i386_TARGET_ARCH || x86_64_TARGET_ARCH
1835 -- This is all just ridiculous, since it carefully undoes
1836 -- what mangleIndexTree has just done.
-- base + / - 32-bit displacement.
1837 getAmode (CmmMachOp (MO_Sub rep) [x, CmmLit lit@(CmmInt i _)])
1838 | not (is64BitLit lit)
1839 -- ASSERT(rep == I32)???
1840 = do (x_reg, x_code) <- getSomeReg x
1841 let off = ImmInt (-(fromInteger i))
1842 return (Amode (AddrBaseIndex (EABaseReg x_reg) EAIndexNone off) x_code)
1844 getAmode (CmmMachOp (MO_Add rep) [x, CmmLit lit@(CmmInt i _)])
1845 | not (is64BitLit lit)
1846 -- ASSERT(rep == I32)???
1847 = do (x_reg, x_code) <- getSomeReg x
1848 let off = ImmInt (fromInteger i)
1849 return (Amode (AddrBaseIndex (EABaseReg x_reg) EAIndexNone off) x_code)
1851 -- Turn (lit1 << n + lit2) into (lit2 + lit1 << n) so it will be
1852 -- recognised by the next rule.
1853 getAmode (CmmMachOp (MO_Add rep) [a@(CmmMachOp (MO_Shl _) _),
1855 = getAmode (CmmMachOp (MO_Add rep) [b,a])
-- base + index*scale, for scale factors 1/2/4/8.
1857 getAmode (CmmMachOp (MO_Add rep) [x, CmmMachOp (MO_Shl _)
1858 [y, CmmLit (CmmInt shift _)]])
1859 | shift == 0 || shift == 1 || shift == 2 || shift == 3
1860 = do (x_reg, x_code) <- getNonClobberedReg x
1861 -- x must be in a temp, because it has to stay live over y_code
1862 -- we could compare x_reg and y_reg and do something better here...
1864 (y_reg, y_code) <- getSomeReg y
1865 code = x_code `appOL` y_code
1866 base = case shift of 0 -> 1; 1 -> 2; 2 -> 4; 3 -> 8
1867 return (Amode (AddrBaseIndex (EABaseReg x_reg) (EAIndex y_reg base) (ImmInt 0))
-- Absolute address for (small) literals; fallback evaluates to a register.
1870 getAmode (CmmLit lit) | not (is64BitLit lit)
1871 = return (Amode (ImmAddr (litToImm lit) 0) nilOL)
1874 (reg,code) <- getSomeReg expr
1875 return (Amode (AddrBaseIndex (EABaseReg reg) EAIndexNone (ImmInt 0)) code)
1877 #endif /* i386_TARGET_ARCH || x86_64_TARGET_ARCH */
1879 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1881 #if sparc_TARGET_ARCH
-- SPARC amodes: reg+imm (both signs), reg+reg, SETHI-synthesised
-- immediates, then a reg+0 fallback.
1883 getAmode (CmmMachOp MO_Nat_Sub [x, StInt i])
1885 = getNewRegNat PtrRep `thenNat` \ tmp ->
1886 getRegister x `thenNat` \ register ->
1888 code = registerCode register tmp
1889 reg = registerName register tmp
1890 off = ImmInt (-(fromInteger i))
1892 return (Amode (AddrRegImm reg off) code)
1895 getAmode (CmmMachOp MO_Nat_Add [x, StInt i])
1897 = getNewRegNat PtrRep `thenNat` \ tmp ->
1898 getRegister x `thenNat` \ register ->
1900 code = registerCode register tmp
1901 reg = registerName register tmp
1902 off = ImmInt (fromInteger i)
1904 return (Amode (AddrRegImm reg off) code)
1906 getAmode (CmmMachOp MO_Nat_Add [x, y])
1907 = getNewRegNat PtrRep `thenNat` \ tmp1 ->
1908 getNewRegNat IntRep `thenNat` \ tmp2 ->
1909 getRegister x `thenNat` \ register1 ->
1910 getRegister y `thenNat` \ register2 ->
1912 code1 = registerCode register1 tmp1
1913 reg1 = registerName register1 tmp1
1914 code2 = registerCode register2 tmp2
1915 reg2 = registerName register2 tmp2
1916 code__2 = code1 `appOL` code2
1918 return (Amode (AddrRegReg reg1 reg2) code__2)
-- Large immediate: SETHI the high part, LO goes in the amode displacement.
1922 = getNewRegNat PtrRep `thenNat` \ tmp ->
1924 code = unitOL (SETHI (HI imm__2) tmp)
1926 return (Amode (AddrRegImm tmp (LO imm__2)) code)
-- NOTE(review): partial match on Just — same pattern as the alpha section;
-- presumably guarded by the (elided) equation head. Confirm.
1929 imm__2 = case imm of Just x -> x
1932 = getNewRegNat PtrRep `thenNat` \ tmp ->
1933 getRegister other `thenNat` \ register ->
1935 code = registerCode register tmp
1936 reg = registerName register tmp
1939 return (Amode (AddrRegImm reg off) code)
1941 #endif /* sparc_TARGET_ARCH */
1943 #ifdef powerpc_TARGET_ARCH
-- PowerPC amodes: reg +/- 16-bit immediate (via makeImmediate), addis for
-- wider displacements, lis for bare literals, reg+reg, then reg+0 fallback.
1944 getAmode (CmmMachOp (MO_Sub I32) [x, CmmLit (CmmInt i _)])
1945 | Just off <- makeImmediate I32 True (-i)
1947 (reg, code) <- getSomeReg x
1948 return (Amode (AddrRegImm reg off) code)
1951 getAmode (CmmMachOp (MO_Add I32) [x, CmmLit (CmmInt i _)])
1952 | Just off <- makeImmediate I32 True i
1954 (reg, code) <- getSomeReg x
1955 return (Amode (AddrRegImm reg off) code)
1957 -- optimize addition with 32-bit immediate
-- addis adds the HA-adjusted high half; LO rides in the displacement.
1959 getAmode (CmmMachOp (MO_Add I32) [x, CmmLit lit])
1961 tmp <- getNewRegNat I32
1962 (src, srcCode) <- getSomeReg x
1963 let imm = litToImm lit
1964 code = srcCode `snocOL` ADDIS tmp src (HA imm)
1965 return (Amode (AddrRegImm tmp (LO imm)) code)
1967 getAmode (CmmLit lit)
1969 tmp <- getNewRegNat I32
1970 let imm = litToImm lit
1971 code = unitOL (LIS tmp (HA imm))
1972 return (Amode (AddrRegImm tmp (LO imm)) code)
1974 getAmode (CmmMachOp (MO_Add I32) [x, y])
1976 (regX, codeX) <- getSomeReg x
1977 (regY, codeY) <- getSomeReg y
1978 return (Amode (AddrRegReg regX regY) (codeX `appOL` codeY))
1982 (reg, code) <- getSomeReg other
1985 return (Amode (AddrRegImm reg off) code)
1986 #endif /* powerpc_TARGET_ARCH */
1988 -- -----------------------------------------------------------------------------
1989 -- getOperand: sometimes any operand will do.
1991 -- getNonClobberedOperand: the value of the operand will remain valid across
1992 -- the computation of an arbitrary expression, unless the expression
1993 -- is computed directly into a register which the operand refers to
1994 -- (see trivialCode where this function is used for an example).
1996 #if i386_TARGET_ARCH || x86_64_TARGET_ARCH
1998 getNonClobberedOperand :: CmmExpr -> NatM (Operand, InstrBlock)
-- Nonzero FP literals can be referenced straight from a data label.
1999 getNonClobberedOperand (CmmLit lit)
2000 | isSuitableFloatingPointLit lit = do
2001 lbl <- getNewLabelNat
2002 let code = unitOL (LDATA ReadOnlyData [CmmDataLabel lbl,
2004 return (OpAddr (ripRel (ImmCLbl lbl)), code)
-- Small non-FP literals become immediate operands with no code.
2005 getNonClobberedOperand (CmmLit lit)
2006 | not (is64BitLit lit) && not (isFloatingRep (cmmLitRep lit)) =
2007 return (OpImm (litToImm lit), nilOL)
2008 getNonClobberedOperand (CmmLoad mem pk)
2009 | IF_ARCH_i386(not (isFloatingRep pk) && pk /= I64, True) = do
2010 Amode src mem_code <- getAmode mem
-- If the amode mentions clobberable real registers, snapshot the
-- effective address into a fresh temp with LEA first.
2012 if (amodeCouldBeClobbered src)
2014 tmp <- getNewRegNat wordRep
2015 return (AddrBaseIndex (EABaseReg tmp) EAIndexNone (ImmInt 0),
2016 unitOL (LEA I32 (OpAddr src) (OpReg tmp)))
2019 return (OpAddr src', save_code `appOL` mem_code)
2020 getNonClobberedOperand e = do
2021 (reg, code) <- getNonClobberedReg e
2022 return (OpReg reg, code)
-- True iff the amode refers to any free (hence clobberable) real register.
2024 amodeCouldBeClobbered :: AddrMode -> Bool
2025 amodeCouldBeClobbered amode = any regClobbered (addrModeRegs amode)
2027 regClobbered (RealReg rr) = isFastTrue (freeReg rr)
2028 regClobbered _ = False
2030 -- getOperand: the operand is not required to remain valid across the
2031 -- computation of an arbitrary expression.
2032 getOperand :: CmmExpr -> NatM (Operand, InstrBlock)
2033 getOperand (CmmLit lit)
2034 | isSuitableFloatingPointLit lit = do
2035 lbl <- getNewLabelNat
2036 let code = unitOL (LDATA ReadOnlyData [CmmDataLabel lbl,
2038 return (OpAddr (ripRel (ImmCLbl lbl)), code)
2039 getOperand (CmmLit lit)
2040 | not (is64BitLit lit) && not (isFloatingRep (cmmLitRep lit)) = do
2041 return (OpImm (litToImm lit), nilOL)
-- Loads can be used as memory operands directly (no clobber protection
-- needed here, unlike getNonClobberedOperand).
2042 getOperand (CmmLoad mem pk)
2043 | IF_ARCH_i386(not (isFloatingRep pk) && pk /= I64, True) = do
2044 Amode src mem_code <- getAmode mem
2045 return (OpAddr src, mem_code)
2047 (reg, code) <- getSomeReg e
2048 return (OpReg reg, code)
-- isOperand: predicate mirror of getOperand — True when the expression can
-- be used as an x86 operand without evaluating it into a register first.
2050 isOperand :: CmmExpr -> Bool
2051 isOperand (CmmLoad _ _) = True
2052 isOperand (CmmLit lit) = not (is64BitLit lit)
2053 || isSuitableFloatingPointLit lit
2056 -- if we want a floating-point literal as an operand, we can
2057 -- use it directly from memory. However, if the literal is
2058 -- zero, we're better off generating it into a register using
-- (a zero is cheaper to synthesise than to load, hence the f /= 0.0 test)
2060 isSuitableFloatingPointLit (CmmFloat f _) = f /= 0.0
2061 isSuitableFloatingPointLit _ = False
-- getRegOrMem: like getOperand but never yields an immediate — loads
-- become memory operands, everything else a non-clobberable register.
2063 getRegOrMem :: CmmExpr -> NatM (Operand, InstrBlock)
2064 getRegOrMem (CmmLoad mem pk)
2065 | IF_ARCH_i386(not (isFloatingRep pk) && pk /= I64, True) = do
2066 Amode src mem_code <- getAmode mem
2067 return (OpAddr src, mem_code)
2069 (reg, code) <- getNonClobberedReg e
2070 return (OpReg reg, code)
-- is64BitLit: True when a literal cannot be encoded as a signed 32-bit
-- immediate (x86_64 only; everything fits on 32-bit targets).
2072 #if x86_64_TARGET_ARCH
2073 is64BitLit (CmmInt i I64) = i > 0x7fffffff || i < -0x80000000
2074 -- assume that labels are in the range 0-2^31-1: this assumes the
2075 -- small memory model (see gcc docs, -mcmodel=small).
2077 is64BitLit x = False
2080 -- -----------------------------------------------------------------------------
2081 -- The 'CondCode' type: Condition codes passed up the tree.
-- The Bool records whether this is a floating-point comparison.
2083 data CondCode = CondCode Bool Cond InstrBlock
2085 -- Set up a condition code for a conditional branch.
2087 getCondCode :: CmmExpr -> NatM CondCode
2089 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2091 #if alpha_TARGET_ARCH
2092 getCondCode = panic "MachCode.getCondCode: not on Alphas"
2093 #endif /* alpha_TARGET_ARCH */
2095 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2097 #if i386_TARGET_ARCH || x86_64_TARGET_ARCH || sparc_TARGET_ARCH
2098 -- yes, they really do seem to want exactly the same!
-- Dispatch each comparison MachOp to condFltCode (F32/F64) or condIntCode,
-- picking signed vs unsigned condition codes as appropriate.
2100 getCondCode (CmmMachOp mop [x, y])
2101 = ASSERT (cmmExprRep x /= I8) -- tmp, not set up to handle 8-bit comparisons
2103 MO_Eq F32 -> condFltCode EQQ x y
2104 MO_Ne F32 -> condFltCode NE x y
2106 MO_S_Gt F32 -> condFltCode GTT x y
2107 MO_S_Ge F32 -> condFltCode GE x y
2108 MO_S_Lt F32 -> condFltCode LTT x y
2109 MO_S_Le F32 -> condFltCode LE x y
2111 MO_Eq F64 -> condFltCode EQQ x y
2112 MO_Ne F64 -> condFltCode NE x y
2114 MO_S_Gt F64 -> condFltCode GTT x y
2115 MO_S_Ge F64 -> condFltCode GE x y
2116 MO_S_Lt F64 -> condFltCode LTT x y
2117 MO_S_Le F64 -> condFltCode LE x y
2119 MO_Eq rep -> condIntCode EQQ x y
2120 MO_Ne rep -> condIntCode NE x y
2122 MO_S_Gt rep -> condIntCode GTT x y
2123 MO_S_Ge rep -> condIntCode GE x y
2124 MO_S_Lt rep -> condIntCode LTT x y
2125 MO_S_Le rep -> condIntCode LE x y
2127 MO_U_Gt rep -> condIntCode GU x y
2128 MO_U_Ge rep -> condIntCode GEU x y
2129 MO_U_Lt rep -> condIntCode LU x y
2130 MO_U_Le rep -> condIntCode LEU x y
2132 other -> pprPanic "getCondCode(x86,sparc)" (pprMachOp mop)
2134 getCondCode other = pprPanic "getCondCode(2)(x86,sparc)" (ppr other)
2136 #elif powerpc_TARGET_ARCH
2138 -- almost the same as everywhere else - but we need to
2139 -- extend small integers to 32 bit first
2141 getCondCode (CmmMachOp mop [x, y])
2143 MO_Eq F32 -> condFltCode EQQ x y
2144 MO_Ne F32 -> condFltCode NE x y
2146 MO_S_Gt F32 -> condFltCode GTT x y
2147 MO_S_Ge F32 -> condFltCode GE x y
2148 MO_S_Lt F32 -> condFltCode LTT x y
2149 MO_S_Le F32 -> condFltCode LE x y
2151 MO_Eq F64 -> condFltCode EQQ x y
2152 MO_Ne F64 -> condFltCode NE x y
2154 MO_S_Gt F64 -> condFltCode GTT x y
2155 MO_S_Ge F64 -> condFltCode GE x y
2156 MO_S_Lt F64 -> condFltCode LTT x y
2157 MO_S_Le F64 -> condFltCode LE x y
-- Equality is sign-agnostic, so zero-extension (extendUExpr) suffices.
2159 MO_Eq rep -> condIntCode EQQ (extendUExpr rep x) (extendUExpr rep y)
2160 MO_Ne rep -> condIntCode NE (extendUExpr rep x) (extendUExpr rep y)
2162 MO_S_Gt rep -> condIntCode GTT (extendSExpr rep x) (extendSExpr rep y)
2163 MO_S_Ge rep -> condIntCode GE (extendSExpr rep x) (extendSExpr rep y)
2164 MO_S_Lt rep -> condIntCode LTT (extendSExpr rep x) (extendSExpr rep y)
2165 MO_S_Le rep -> condIntCode LE (extendSExpr rep x) (extendSExpr rep y)
2167 MO_U_Gt rep -> condIntCode GU (extendUExpr rep x) (extendUExpr rep y)
2168 MO_U_Ge rep -> condIntCode GEU (extendUExpr rep x) (extendUExpr rep y)
2169 MO_U_Lt rep -> condIntCode LU (extendUExpr rep x) (extendUExpr rep y)
2170 MO_U_Le rep -> condIntCode LEU (extendUExpr rep x) (extendUExpr rep y)
2172 other -> pprPanic "getCondCode(powerpc)" (pprMachOp mop)
2174 getCondCode other = panic "getCondCode(2)(powerpc)"
2180 -- @cond(Int|Flt)Code@: Turn a boolean expression into a condition, to be
2181 -- passed back up the tree.
-- | condIntCode/condFltCode take a Cond plus the two operand expressions and
-- emit flag-setting code, returning a CondCode for the caller to branch on.
2183 condIntCode, condFltCode :: Cond -> CmmExpr -> CmmExpr -> NatM CondCode
-- Alpha: never used; comparisons are fused into branches (see genCondJump).
2185 #if alpha_TARGET_ARCH
2186 condIntCode = panic "MachCode.condIntCode: not on Alphas"
2187 condFltCode = panic "MachCode.condFltCode: not on Alphas"
2188 #endif /* alpha_TARGET_ARCH */
2190 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2191 #if i386_TARGET_ARCH || x86_64_TARGET_ARCH
-- x86/x86_64 condIntCode: successively more general clauses, each emitting a
-- CMP or TEST to set the flags.  All produce CondCode False (integer compare).
2193 -- memory vs immediate
2194 condIntCode cond (CmmLoad x pk) (CmmLit lit) | not (is64BitLit lit) = do
2195 Amode x_addr x_code <- getAmode x
2198 code = x_code `snocOL`
2199 CMP pk (OpImm imm) (OpAddr x_addr)
2201 return (CondCode False cond code)
-- comparison against zero: TEST reg,reg is shorter than CMP with 0
2204 condIntCode cond x (CmmLit (CmmInt 0 pk)) = do
2205 (x_reg, x_code) <- getSomeReg x
2207 code = x_code `snocOL`
2208 TEST pk (OpReg x_reg) (OpReg x_reg)
2210 return (CondCode False cond code)
2212 -- anything vs operand
2213 condIntCode cond x y | isOperand y = do
2214 (x_reg, x_code) <- getNonClobberedReg x
2215 (y_op, y_code) <- getOperand y
2217 code = x_code `appOL` y_code `snocOL`
2218 CMP (cmmExprRep x) y_op (OpReg x_reg)
2220 return (CondCode False cond code)
2222 -- anything vs anything
2223 condIntCode cond x y = do
2224 (y_reg, y_code) <- getNonClobberedReg y
2225 (x_op, x_code) <- getRegOrMem x
2227 code = y_code `appOL`
2229 CMP (cmmExprRep x) (OpReg y_reg) x_op
2231 return (CondCode False cond code)
2234 #if i386_TARGET_ARCH
-- i386 float compare goes through the x87 GCMP pseudo-instruction.
2235 condFltCode cond x y
2236 = ASSERT(cond `elem` ([EQQ, NE, LE, LTT, GE, GTT])) do
2237 (x_reg, x_code) <- getNonClobberedReg x
2238 (y_reg, y_code) <- getSomeReg y
2240 code = x_code `appOL` y_code `snocOL`
2241 GCMP cond x_reg y_reg
2242 -- The GCMP insn does the test and sets the zero flag if comparable
2243 -- and true. Hence we always supply EQQ as the condition to test.
2244 return (CondCode True EQQ code)
2245 #endif /* i386_TARGET_ARCH */
2247 #if x86_64_TARGET_ARCH
2248 -- in the SSE2 comparison ops (ucomiss, ucomisd) the left arg may be
2249 -- an operand, but the right must be a reg. We can probably do better
2250 -- than this general case...
2251 condFltCode cond x y = do
2252 (x_reg, x_code) <- getNonClobberedReg x
2253 (y_op, y_code) <- getOperand y
2255 code = x_code `appOL`
2257 CMP (cmmExprRep x) y_op (OpReg x_reg)
2258 -- NB(1): we need to use the unsigned comparison operators on the
2259 -- result of this comparison.
2261 return (CondCode True (condToUnsigned cond) code)
2264 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2266 #if sparc_TARGET_ARCH
-- SPARC: flags are set by a subtract-and-discard (SUB ... g0), the SPARC
-- idiom for compare.  Note these clauses still use the older StInt/Register
-- style API (getRegister/registerCode/registerName).
2268 condIntCode cond x (StInt y)
2270 = getRegister x `thenNat` \ register ->
2271 getNewRegNat IntRep `thenNat` \ tmp ->
2273 code = registerCode register tmp
2274 src1 = registerName register tmp
2275 src2 = ImmInt (fromInteger y)
2276 code__2 = code `snocOL` SUB False True src1 (RIImm src2) g0
2278 return (CondCode False cond code__2)
2280 condIntCode cond x y
2281 = getRegister x `thenNat` \ register1 ->
2282 getRegister y `thenNat` \ register2 ->
2283 getNewRegNat IntRep `thenNat` \ tmp1 ->
2284 getNewRegNat IntRep `thenNat` \ tmp2 ->
2286 code1 = registerCode register1 tmp1
2287 src1 = registerName register1 tmp1
2288 code2 = registerCode register2 tmp2
2289 src2 = registerName register2 tmp2
2290 code__2 = code1 `appOL` code2 `snocOL`
2291 SUB False True src1 (RIReg src2) g0
2293 return (CondCode False cond code__2)
-- Float compare: if the operand precisions differ, the F32 side is promoted
-- to F64 (FxTOy) so FCMP compares at a single precision.
2296 condFltCode cond x y
2297 = getRegister x `thenNat` \ register1 ->
2298 getRegister y `thenNat` \ register2 ->
2299 getNewRegNat (registerRep register1)
2301 getNewRegNat (registerRep register2)
2303 getNewRegNat F64 `thenNat` \ tmp ->
2305 promote x = FxTOy F DF x tmp
2307 pk1 = registerRep register1
2308 code1 = registerCode register1 tmp1
2309 src1 = registerName register1 tmp1
2311 pk2 = registerRep register2
2312 code2 = registerCode register2 tmp2
2313 src2 = registerName register2 tmp2
2317 code1 `appOL` code2 `snocOL`
2318 FCMP True (primRepToSize pk1) src1 src2
2319 else if pk1 == F32 then
2320 code1 `snocOL` promote src1 `appOL` code2 `snocOL`
2321 FCMP True DF tmp src2
2323 code1 `appOL` code2 `snocOL` promote src2 `snocOL`
2324 FCMP True DF src1 tmp
2326 return (CondCode True cond code__2)
2328 #endif /* sparc_TARGET_ARCH */
2330 #if powerpc_TARGET_ARCH
-- PowerPC: CMPL for unsigned conditions, CMP for signed, always at I32
-- (operands were widened in getCondCode above).
2331 -- ###FIXME: I16 and I8!
2332 condIntCode cond x (CmmLit (CmmInt y rep))
2333 | Just src2 <- makeImmediate rep (not $ condUnsigned cond) y
2335 (src1, code) <- getSomeReg x
2337 code' = code `snocOL`
2338 (if condUnsigned cond then CMPL else CMP) I32 src1 (RIImm src2)
2339 return (CondCode False cond code')
2341 condIntCode cond x y = do
2342 (src1, code1) <- getSomeReg x
2343 (src2, code2) <- getSomeReg y
2345 code' = code1 `appOL` code2 `snocOL`
2346 (if condUnsigned cond then CMPL else CMP) I32 src1 (RIReg src2)
2347 return (CondCode False cond code')
-- Float compare: for GE/LE an extra CRNOR folds the "unordered" CR bit in,
-- so NaN operands take the not-taken path.
2349 condFltCode cond x y = do
2350 (src1, code1) <- getSomeReg x
2351 (src2, code2) <- getSomeReg y
2353 code' = code1 `appOL` code2 `snocOL` FCMP src1 src2
2354 code'' = case cond of -- twiddle CR to handle unordered case
2355 GE -> code' `snocOL` CRNOR ltbit eqbit gtbit
2356 LE -> code' `snocOL` CRNOR gtbit eqbit ltbit
2359 ltbit = 0 ; eqbit = 2 ; gtbit = 1
2360 return (CondCode True cond code'')
2362 #endif /* powerpc_TARGET_ARCH */
2364 -- -----------------------------------------------------------------------------
2365 -- Generating assignments
2367 -- Assignments are really at the heart of the whole code generation
2368 -- business. Almost all top-level nodes of any real importance are
2369 -- assignments, which correspond to loads, stores, or register
2370 -- transfers. If we're really lucky, some of the register transfers
2371 -- will go away, because we can use the destination register to
2372 -- complete the code generation for the right hand side. This only
2373 -- fails when the right hand side is forced into a fixed register
2374 -- (e.g. the result of a call).
-- | Four entry points: Mem vs Reg destination, integer vs float value.
-- For assignMem_* the second argument is the address expression, the third
-- the value to store; the MachRep gives the width of the transfer.
2376 assignMem_IntCode :: MachRep -> CmmExpr -> CmmExpr -> NatM InstrBlock
2377 assignReg_IntCode :: MachRep -> CmmReg -> CmmExpr -> NatM InstrBlock
2379 assignMem_FltCode :: MachRep -> CmmExpr -> CmmExpr -> NatM InstrBlock
2380 assignReg_FltCode :: MachRep -> CmmReg -> CmmExpr -> NatM InstrBlock
2382 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2384 #if alpha_TARGET_ARCH
-- Alpha keeps a single combined assignIntCode (pre-dating the four-way
-- split above); store-to-memory clause first, then register destination.
2386 assignIntCode pk (CmmLoad dst _) src
2387 = getNewRegNat IntRep `thenNat` \ tmp ->
2388 getAmode dst `thenNat` \ amode ->
2389 getRegister src `thenNat` \ register ->
2391 code1 = amodeCode amode []
2392 dst__2 = amodeAddr amode
2393 code2 = registerCode register tmp []
2394 src__2 = registerName register tmp
2395 sz = primRepToSize pk
2396 code__2 = asmSeqThen [code1, code2] . mkSeqInstr (ST sz src__2 dst__2)
-- Register destination: a fixed source register needs an explicit move
-- (OR src,src,dst); otherwise the RHS is computed straight into dst.
2400 assignIntCode pk dst src
2401 = getRegister dst `thenNat` \ register1 ->
2402 getRegister src `thenNat` \ register2 ->
2404 dst__2 = registerName register1 zeroh
2405 code = registerCode register2 dst__2
2406 src__2 = registerName register2 dst__2
2407 code__2 = if isFixed register2
2408 then code . mkSeqInstr (OR src__2 (RIReg src__2) dst__2)
2413 #endif /* alpha_TARGET_ARCH */
2415 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2417 #if i386_TARGET_ARCH || x86_64_TARGET_ARCH
2419 -- integer assignment to memory
2420 assignMem_IntCode pk addr src = do
2421 Amode addr code_addr <- getAmode addr
2422 (code_src, op_src) <- get_op_RI src
2424 code = code_src `appOL`
2426 MOV pk op_src (OpAddr addr)
2427 -- NOTE: op_src is stable, so it will still be valid
2428 -- after code_addr. This may involve the introduction
2429 -- of an extra MOV to a temporary register, but we hope
2430 -- the register allocator will get rid of it.
-- | Helper: turn a source expression into a MOV operand — an immediate when
-- it is a (non-64-bit) literal, otherwise a non-clobbered register.
2434 get_op_RI :: CmmExpr -> NatM (InstrBlock,Operand) -- code, operator
2435 get_op_RI (CmmLit lit) | not (is64BitLit lit)
2436 = return (nilOL, OpImm (litToImm lit))
2438 = do (reg,code) <- getNonClobberedReg op
2439 return (code, OpReg reg)
2442 -- Assign; dst is a reg, rhs is mem
2443 assignReg_IntCode pk reg (CmmLoad src _) = do
2444 load_code <- intLoadCode (MOV pk) src
2445 return (load_code (getRegisterReg reg))
2447 -- dst is a reg, but src could be anything
2448 assignReg_IntCode pk reg src = do
2449 code <- getAnyReg src
2450 return (code (getRegisterReg reg))
2452 #endif /* i386_TARGET_ARCH */
2454 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2456 #if sparc_TARGET_ARCH
-- SPARC integer store: compute address and value, then one ST.
2458 assignMem_IntCode pk addr src
2459 = getNewRegNat IntRep `thenNat` \ tmp ->
2460 getAmode addr `thenNat` \ amode ->
2461 getRegister src `thenNat` \ register ->
2463 code1 = amodeCode amode
2464 dst__2 = amodeAddr amode
2465 code2 = registerCode register tmp
2466 src__2 = registerName register tmp
2467 sz = primRepToSize pk
2468 code__2 = code1 `appOL` code2 `snocOL` ST sz src__2 dst__2
-- Register destination: fixed RHS register needs an explicit move,
-- done here as OR with g0 (the SPARC register-move idiom).
2472 assignReg_IntCode pk reg src
2473 = getRegister src `thenNat` \ register2 ->
2474 getRegisterReg reg `thenNat` \ register1 ->
2475 getNewRegNat IntRep `thenNat` \ tmp ->
2477 dst__2 = registerName register1 tmp
2478 code = registerCode register2 dst__2
2479 src__2 = registerName register2 dst__2
2480 code__2 = if isFixed register2
2481 then code `snocOL` OR False g0 (RIReg src__2) dst__2
2486 #endif /* sparc_TARGET_ARCH */
2488 #if powerpc_TARGET_ARCH
-- PowerPC integer store: value first, then address, then ST.
2490 assignMem_IntCode pk addr src = do
2491 (srcReg, code) <- getSomeReg src
2492 Amode dstAddr addr_code <- getAmode addr
2493 return $ code `appOL` addr_code `snocOL` ST pk srcReg dstAddr
2495 -- dst is a reg, but src could be anything
-- Any-result RHS is computed directly into dst; a Fixed-register RHS
-- needs a trailing MR (register move).
2496 assignReg_IntCode pk reg src
2498 r <- getRegister src
2500 Any _ code -> code dst
2501 Fixed _ freg fcode -> fcode `snocOL` MR dst freg
2503 dst = getRegisterReg reg
2505 #endif /* powerpc_TARGET_ARCH */
2508 -- -----------------------------------------------------------------------------
2509 -- Floating-point assignments
2511 #if alpha_TARGET_ARCH
-- Alpha float assignment: store clause mirrors the integer version, using
-- the float-width ST selected by primRepToSize pk.
2513 assignFltCode pk (CmmLoad dst _) src
2514 = getNewRegNat pk `thenNat` \ tmp ->
2515 getAmode dst `thenNat` \ amode ->
2516 getRegister src `thenNat` \ register ->
2518 code1 = amodeCode amode []
2519 dst__2 = amodeAddr amode
2520 code2 = registerCode register tmp []
2521 src__2 = registerName register tmp
2522 sz = primRepToSize pk
2523 code__2 = asmSeqThen [code1, code2] . mkSeqInstr (ST sz src__2 dst__2)
-- Register destination: fixed source needs an explicit FMOV.
2527 assignFltCode pk dst src
2528 = getRegister dst `thenNat` \ register1 ->
2529 getRegister src `thenNat` \ register2 ->
2531 dst__2 = registerName register1 zeroh
2532 code = registerCode register2 dst__2
2533 src__2 = registerName register2 dst__2
2534 code__2 = if isFixed register2
2535 then code . mkSeqInstr (FMOV src__2 dst__2)
2540 #endif /* alpha_TARGET_ARCH */
2542 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2544 #if i386_TARGET_ARCH || x86_64_TARGET_ARCH
2546 -- Floating point assignment to memory
-- i386 stores via x87 (GST); x86_64 via a plain MOV from an SSE register.
2547 assignMem_FltCode pk addr src = do
2548 (src_reg, src_code) <- getNonClobberedReg src
2549 Amode addr addr_code <- getAmode addr
2551 code = src_code `appOL`
2553 IF_ARCH_i386(GST pk src_reg addr,
2554 MOV pk (OpReg src_reg) (OpAddr addr))
2557 -- Floating point assignment to a register/temporary
2558 assignReg_FltCode pk reg src = do
2559 src_code <- getAnyReg src
2560 return (src_code (getRegisterReg reg))
2562 #endif /* i386_TARGET_ARCH */
2564 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2566 #if sparc_TARGET_ARCH
2568 -- Floating point assignment to memory
-- If the value's precision differs from the store width, convert with
-- FxTOy into tmp1 before the ST.
2569 assignMem_FltCode pk addr src
2570 = getNewRegNat pk `thenNat` \ tmp1 ->
2571 getAmode addr `thenNat` \ amode ->
2572 getRegister src `thenNat` \ register ->
2574 sz = primRepToSize pk
2575 dst__2 = amodeAddr amode
2577 code1 = amodeCode amode
2578 code2 = registerCode register tmp1
2580 src__2 = registerName register tmp1
2581 pk__2 = registerRep register
2582 sz__2 = primRepToSize pk__2
2584 code__2 = code1 `appOL` code2 `appOL`
2586 then unitOL (ST sz src__2 dst__2)
2587 else toOL [FxTOy sz__2 sz src__2 tmp1, ST sz tmp1 dst__2]
2591 -- Floating point assignment to a register/temporary
2592 -- Why is this so bizarrely ugly?
-- Three cases: precision mismatch -> FxTOy convert; fixed source register
-- -> FMOV; otherwise compute the RHS directly into the destination.
2593 assignReg_FltCode pk reg src
2594 = getRegisterReg reg `thenNat` \ register1 ->
2595 getRegister src `thenNat` \ register2 ->
2597 pk__2 = registerRep register2
2598 sz__2 = primRepToSize pk__2
2600 getNewRegNat pk__2 `thenNat` \ tmp ->
2602 sz = primRepToSize pk
2603 dst__2 = registerName register1 g0 -- must be Fixed
2604 reg__2 = if pk /= pk__2 then tmp else dst__2
2605 code = registerCode register2 reg__2
2606 src__2 = registerName register2 reg__2
2609 code `snocOL` FxTOy sz__2 sz src__2 dst__2
2610 else if isFixed register2 then
2611 code `snocOL` FMOV sz src__2 dst__2
2617 #endif /* sparc_TARGET_ARCH */
2619 #if powerpc_TARGET_ARCH
-- PowerPC: float and integer assignment use identical instruction
-- selection, so just reuse the integer versions.
2622 assignMem_FltCode = assignMem_IntCode
2623 assignReg_FltCode = assignReg_IntCode
2625 #endif /* powerpc_TARGET_ARCH */
2628 -- -----------------------------------------------------------------------------
2629 -- Generating an non-local jump
2631 -- (If applicable) Do not fill the delay slots here; you will confuse the
2632 -- register allocator.
-- | Emit an unconditional jump to an arbitrary target expression (tail
-- calls / jumps out of this code block).
2634 genJump :: CmmExpr{-the branch target-} -> NatM InstrBlock
2636 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2638 #if alpha_TARGET_ARCH
-- Alpha: direct BR for local (assembler-temp) labels, otherwise load the
-- address into pv and JMP through it.
2640 genJump (CmmLabel lbl)
2641 | isAsmTemp lbl = returnInstr (BR target)
2642 | otherwise = returnInstrs [LDA pv (AddrImm target), JMP zeroh (AddrReg pv) 0]
2644 target = ImmCLbl lbl
2647 = getRegister tree `thenNat` \ register ->
2648 getNewRegNat PtrRep `thenNat` \ tmp ->
2650 dst = registerName register pv
2651 code = registerCode register pv
2652 target = registerName register pv
2654 if isFixed register then
2655 returnSeq code [OR dst (RIReg dst) pv, JMP zeroh (AddrReg pv) 0]
2657 return (code . mkSeqInstr (JMP zeroh (AddrReg pv) 0))
2659 #endif /* alpha_TARGET_ARCH */
2661 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2663 #if i386_TARGET_ARCH || x86_64_TARGET_ARCH
-- x86: JMP can take a memory operand, an immediate label, or a register.
2665 genJump (CmmLoad mem pk) = do
2666 Amode target code <- getAmode mem
2667 return (code `snocOL` JMP (OpAddr target))
2669 genJump (CmmLit lit) = do
2670 return (unitOL (JMP (OpImm (litToImm lit))))
2673 (reg,code) <- getSomeReg expr
2674 return (code `snocOL` JMP (OpReg reg))
2676 #endif /* i386_TARGET_ARCH */
2678 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2680 #if sparc_TARGET_ARCH
-- SPARC: CALL/JMP have a branch delay slot; a NOP is emitted to fill it
-- (slots are deliberately not filled here, per the note above).
2682 genJump (CmmLabel lbl)
2683 = return (toOL [CALL (Left target) 0 True, NOP])
2685 target = ImmCLbl lbl
2688 = getRegister tree `thenNat` \ register ->
2689 getNewRegNat PtrRep `thenNat` \ tmp ->
2691 code = registerCode register tmp
2692 target = registerName register tmp
2694 return (code `snocOL` JMP dsts (AddrRegReg target g0) `snocOL` NOP)
2696 #endif /* sparc_TARGET_ARCH */
2698 #if powerpc_TARGET_ARCH
-- PowerPC: direct branch for a label; otherwise move the target into the
-- count register (MTCTR) and branch through it (BCTR).
2699 genJump (CmmLit (CmmLabel lbl))
2700 = return (unitOL $ JMP lbl)
2704 (target,code) <- getSomeReg tree
2705 return (code `snocOL` MTCTR target `snocOL` BCTR [])
2706 #endif /* powerpc_TARGET_ARCH */
2709 -- -----------------------------------------------------------------------------
2710 -- Unconditional branches
-- | Branch to a local basic block: one always-taken branch instruction per
-- target (plus a delay-slot NOP on SPARC).
2712 genBranch :: BlockId -> NatM InstrBlock
2714 #if alpha_TARGET_ARCH
2715 genBranch id = return (unitOL (BR id))
2718 #if i386_TARGET_ARCH || x86_64_TARGET_ARCH
2719 genBranch id = return (unitOL (JXX ALWAYS id))
2722 #if sparc_TARGET_ARCH
2723 genBranch id = return (toOL [BI ALWAYS False id, NOP])
2726 #if powerpc_TARGET_ARCH
2727 genBranch id = return (unitOL (BCC ALWAYS id))
2731 -- -----------------------------------------------------------------------------
2732 -- Conditional jumps
2735 Conditional jumps are always to local labels, so we can use branch
2736 instructions. We peek at the arguments to decide what kind of
2739 ALPHA: For comparisons with 0, we're laughing, because we can just do
2740 the desired conditional branch.
2742 I386: First, we have to ensure that the condition
2743 codes are set according to the supplied comparison operation.
2745 SPARC: First, we have to ensure that the condition codes are set
2746 according to the supplied comparison operation. We generate slightly
2747 different code for floating point comparisons, because a floating
2748 point operation cannot directly precede a @BF@. We assume the worst
2749 and fill that slot with a @NOP@.
2751 SPARC: Do not fill the delay slots here; you will confuse the register
-- Type signature for genCondJump (the name line itself is not visible in
-- this excerpt): branch to the given block when the condition holds.
2757 :: BlockId -- the branch target
2758 -> CmmExpr -- the condition on which to branch
2761 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2763 #if alpha_TARGET_ARCH
-- Alpha genCondJump, still written against the old Stix AST (StPrim/StInt).
-- Three clause groups: compare-with-integer-zero (BI), compare-with-float-
-- zero (BF), and general two-operand comparisons, which map each PrimOp to
-- a (compare instruction, branch condition) pair.  The inverted pairs
-- (e.g. GtOp -> CMP LE with EQQ) branch on the comparison's failure bit,
-- which on Alpha encodes the original relation.
2765 genCondJump id (StPrim op [x, StInt 0])
2766 = getRegister x `thenNat` \ register ->
2767 getNewRegNat (registerRep register)
2770 code = registerCode register tmp
2771 value = registerName register tmp
2772 pk = registerRep register
2773 target = ImmCLbl lbl
2775 returnSeq code [BI (cmpOp op) value target]
2777 cmpOp CharGtOp = GTT
2779 cmpOp CharEqOp = EQQ
2781 cmpOp CharLtOp = LTT
-- NOTE(review): several cmpOp equations between the lines above and below
-- are not visible in this excerpt (the numbering jumps from 2781 to 2790).
2790 cmpOp WordGeOp = ALWAYS
2791 cmpOp WordEqOp = EQQ
2793 cmpOp WordLtOp = NEVER
2794 cmpOp WordLeOp = EQQ
2796 cmpOp AddrGeOp = ALWAYS
2797 cmpOp AddrEqOp = EQQ
2799 cmpOp AddrLtOp = NEVER
2800 cmpOp AddrLeOp = EQQ
-- Compare against floating-point zero: branch with BF on the FP value.
2802 genCondJump lbl (StPrim op [x, StDouble 0.0])
2803 = getRegister x `thenNat` \ register ->
2804 getNewRegNat (registerRep register)
2807 code = registerCode register tmp
2808 value = registerName register tmp
2809 pk = registerRep register
2810 target = ImmCLbl lbl
2812 return (code . mkSeqInstr (BF (cmpOp op) value target))
2814 cmpOp FloatGtOp = GTT
2815 cmpOp FloatGeOp = GE
2816 cmpOp FloatEqOp = EQQ
2817 cmpOp FloatNeOp = NE
2818 cmpOp FloatLtOp = LTT
2819 cmpOp FloatLeOp = LE
2820 cmpOp DoubleGtOp = GTT
2821 cmpOp DoubleGeOp = GE
2822 cmpOp DoubleEqOp = EQQ
2823 cmpOp DoubleNeOp = NE
2824 cmpOp DoubleLtOp = LTT
2825 cmpOp DoubleLeOp = LE
-- General float comparison: FCMP sets a result register, then BF branches
-- on it with the paired condition.
2827 genCondJump lbl (StPrim op [x, y])
2829 = trivialFCode pr instr x y `thenNat` \ register ->
2830 getNewRegNat F64 `thenNat` \ tmp ->
2832 code = registerCode register tmp
2833 result = registerName register tmp
2834 target = ImmCLbl lbl
2836 return (code . mkSeqInstr (BF cond result target))
2838 pr = panic "trivialU?FCode: does not use PrimRep on Alpha"
2840 fltCmpOp op = case op of
2854 (instr, cond) = case op of
2855 FloatGtOp -> (FCMP TF LE, EQQ)
2856 FloatGeOp -> (FCMP TF LTT, EQQ)
2857 FloatEqOp -> (FCMP TF EQQ, NE)
2858 FloatNeOp -> (FCMP TF EQQ, EQQ)
2859 FloatLtOp -> (FCMP TF LTT, NE)
2860 FloatLeOp -> (FCMP TF LE, NE)
2861 DoubleGtOp -> (FCMP TF LE, EQQ)
2862 DoubleGeOp -> (FCMP TF LTT, EQQ)
2863 DoubleEqOp -> (FCMP TF EQQ, NE)
2864 DoubleNeOp -> (FCMP TF EQQ, EQQ)
2865 DoubleLtOp -> (FCMP TF LTT, NE)
2866 DoubleLeOp -> (FCMP TF LE, NE)
-- General integer comparison: CMP into a result register, BI branches on it.
2868 genCondJump lbl (StPrim op [x, y])
2869 = trivialCode instr x y `thenNat` \ register ->
2870 getNewRegNat IntRep `thenNat` \ tmp ->
2872 code = registerCode register tmp
2873 result = registerName register tmp
2874 target = ImmCLbl lbl
2876 return (code . mkSeqInstr (BI cond result target))
2878 (instr, cond) = case op of
2879 CharGtOp -> (CMP LE, EQQ)
2880 CharGeOp -> (CMP LTT, EQQ)
2881 CharEqOp -> (CMP EQQ, NE)
2882 CharNeOp -> (CMP EQQ, EQQ)
2883 CharLtOp -> (CMP LTT, NE)
2884 CharLeOp -> (CMP LE, NE)
2885 IntGtOp -> (CMP LE, EQQ)
2886 IntGeOp -> (CMP LTT, EQQ)
2887 IntEqOp -> (CMP EQQ, NE)
2888 IntNeOp -> (CMP EQQ, EQQ)
2889 IntLtOp -> (CMP LTT, NE)
2890 IntLeOp -> (CMP LE, NE)
2891 WordGtOp -> (CMP ULE, EQQ)
2892 WordGeOp -> (CMP ULT, EQQ)
2893 WordEqOp -> (CMP EQQ, NE)
2894 WordNeOp -> (CMP EQQ, EQQ)
2895 WordLtOp -> (CMP ULT, NE)
2896 WordLeOp -> (CMP ULE, NE)
2897 AddrGtOp -> (CMP ULE, EQQ)
2898 AddrGeOp -> (CMP ULT, EQQ)
2899 AddrEqOp -> (CMP EQQ, NE)
2900 AddrNeOp -> (CMP EQQ, EQQ)
2901 AddrLtOp -> (CMP ULT, NE)
2902 AddrLeOp -> (CMP ULE, NE)
2904 #endif /* alpha_TARGET_ARCH */
2906 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2908 #if i386_TARGET_ARCH
-- i386: flags are set by getCondCode, then a single conditional jump.
2910 genCondJump id bool = do
2911 CondCode _ cond code <- getCondCode bool
2912 return (code `snocOL` JXX cond id)
2916 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2918 #if x86_64_TARGET_ARCH
-- x86_64: integer conditions use a plain JXX; float conditions need extra
-- branch sequences (or_unordered / and_ordered) because SSE comparisons
-- leave NaN as an "unordered" parity result (see comment with condFltReg).
2920 genCondJump id bool = do
2921 CondCode is_float cond cond_code <- getCondCode bool
2924 return (cond_code `snocOL` JXX cond id)
2926 lbl <- getBlockIdNat
2928 -- see comment with condFltReg
2929 let code = case cond of
2935 plain_test = unitOL (
2938 or_unordered = toOL [
2942 and_ordered = toOL [
2948 return (cond_code `appOL` code)
2952 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2954 #if sparc_TARGET_ARCH
-- SPARC: a float compare cannot immediately precede BF, so a NOP is
-- inserted before it; both forms carry a delay-slot NOP after the branch.
2956 genCondJump id bool = do
2957 CondCode is_float cond code <- getCondCode bool
2962 then [NOP, BF cond False id, NOP]
2963 else [BI cond False id, NOP]
2967 #endif /* sparc_TARGET_ARCH */
2970 #if powerpc_TARGET_ARCH
2972 genCondJump id bool = do
2973 CondCode is_float cond code <- getCondCode bool
2974 return (code `snocOL` BCC cond id)
2976 #endif /* powerpc_TARGET_ARCH */
2979 -- -----------------------------------------------------------------------------
2980 -- Generating C calls
2982 -- Now the biggest nightmare---calls. Most of the nastiness is buried in
2983 -- @get_arg@, which moves the arguments to the correct registers/stack
2984 -- locations. Apart from that, the code is easy.
2986 -- (If applicable) Do not fill the delay slots here; you will confuse the
2987 -- register allocator.
-- Type signature for genCCall (the name line itself is not visible in this
-- excerpt): emit a foreign call, placing arguments per the target's calling
-- convention and moving results into the given destination registers.
2990 :: CmmCallTarget -- function to call
2991 -> [(CmmReg,MachHint)] -- where to put the result
2992 -> [(CmmExpr,MachHint)] -- arguments (of mixed type)
2993 -> Maybe [GlobalReg] -- volatile regs to save
2996 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2998 #if alpha_TARGET_ARCH
-- Alpha calling sequence: marshal args with get_arg, then load the callee
-- address into pv, JSR through it, and reload gp afterwards.
3002 genCCall fn cconv result_regs args
3003 = mapAccumLNat get_arg (allArgRegs, eXTRA_STK_ARGS_HERE) args
3004 `thenNat` \ ((unused,_), argCode) ->
3006 nRegs = length allArgRegs - length unused
3007 code = asmSeqThen (map ($ []) argCode)
3010 LDA pv (AddrImm (ImmLab (ptext fn))),
3011 JSR ra (AddrReg pv) nRegs,
3012 LDGP gp (AddrReg ra)]
3014 ------------------------
3015 {- Try to get a value into a specific register (or registers) for
3016 a call. The first 6 arguments go into the appropriate
3017 argument register (separate registers for integer and floating
3018 point arguments, but used in lock-step), and the remaining
3019 arguments are dumped to the stack, beginning at 0(sp). Our
3020 first argument is a pair of the list of remaining argument
3021 registers to be assigned for this call and the next stack
3022 offset to use for overflowing arguments. This way,
3023 @get_Arg@ can be applied to all of a call's arguments using
3027 :: ([(Reg,Reg)], Int) -- Argument registers and stack offset (accumulator)
3028 -> StixTree -- Current argument
3029 -> NatM (([(Reg,Reg)],Int), InstrBlock) -- Updated accumulator and code
3031 -- We have to use up all of our argument registers first...
-- Register case: pick the int or float register of the current pair
-- depending on the argument's rep; fixed sources get an explicit move.
3033 get_arg ((iDst,fDst):dsts, offset) arg
3034 = getRegister arg `thenNat` \ register ->
3036 reg = if isFloatingRep pk then fDst else iDst
3037 code = registerCode register reg
3038 src = registerName register reg
3039 pk = registerRep register
3042 if isFloatingRep pk then
3043 ((dsts, offset), if isFixed register then
3044 code . mkSeqInstr (FMOV src fDst)
3047 ((dsts, offset), if isFixed register then
3048 code . mkSeqInstr (OR src (RIReg src) iDst)
3051 -- Once we have run out of argument registers, we move to the
-- Stack case: store at the current sp-relative offset, bump offset by 1.
3054 get_arg ([], offset) arg
3055 = getRegister arg `thenNat` \ register ->
3056 getNewRegNat (registerRep register)
3059 code = registerCode register tmp
3060 src = registerName register tmp
3061 pk = registerRep register
3062 sz = primRepToSize pk
3064 return (([], offset + 1), code . mkSeqInstr (ST sz src (spRel offset)))
3066 #endif /* alpha_TARGET_ARCH */
3068 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3070 #if i386_TARGET_ARCH
3072 -- we only cope with a single result for foreign calls
-- CmmPrim clause: sqrt/sin/cos/tan are inlined as x87 instructions;
-- everything else goes out of line via outOfLineFloatOp.
3073 genCCall (CmmPrim op) [(r,_)] args vols = do
3075 MO_F32_Sqrt -> actuallyInlineFloatOp F32 (GSQRT F32) args
3076 MO_F64_Sqrt -> actuallyInlineFloatOp F64 (GSQRT F64) args
3078 MO_F32_Sin -> actuallyInlineFloatOp F32 (GSIN F32) args
3079 MO_F64_Sin -> actuallyInlineFloatOp F64 (GSIN F64) args
3081 MO_F32_Cos -> actuallyInlineFloatOp F32 (GCOS F32) args
3082 MO_F64_Cos -> actuallyInlineFloatOp F64 (GCOS F64) args
3084 MO_F32_Tan -> actuallyInlineFloatOp F32 (GTAN F32) args
3085 MO_F64_Tan -> actuallyInlineFloatOp F64 (GTAN F64) args
3087 other_op -> outOfLineFloatOp op r args vols
3089 actuallyInlineFloatOp rep instr [(x,_)]
3090 = do res <- trivialUFCode rep instr x
3092 return (any (getRegisterReg r))
-- General i386 call: push args right-to-left (tracking the stack delta),
-- call (direct label or via register), then pop caller-cleaned args and
-- move results out of eax/edx/fake0 per their MachRep.
3094 genCCall target dest_regs args vols = do
3095 sizes_n_codes <- mapM push_arg (reverse args)
3096 delta <- getDeltaNat
3098 (sizes, push_codes) = unzip sizes_n_codes
3099 tot_arg_size = sum sizes
3101 -- deal with static vs dynamic call targets
3102 (callinsns,cconv) <-
3105 CmmForeignCall (CmmLit (CmmLabel lbl)) conv
3106 -> -- ToDo: stdcall arg sizes
3107 return (unitOL (CALL (Left fn_imm)), conv)
3108 where fn_imm = ImmCLbl lbl
3109 CmmForeignCall expr conv
3110 -> do (dyn_c, dyn_r, dyn_rep) <- get_op expr
3111 ASSERT(dyn_rep == I32)
3112 return (dyn_c `snocOL` CALL (Right dyn_r), conv)
3114 let push_code = concatOL push_codes
3115 call = callinsns `appOL`
3117 -- Deallocate parameters after call for ccall;
3118 -- but not for stdcall (callee does it)
3119 (if cconv == StdCallConv || tot_arg_size==0 then [] else
3120 [ADD I32 (OpImm (ImmInt tot_arg_size)) (OpReg esp)])
3122 [DELTA (delta + tot_arg_size)]
3125 setDeltaNat (delta + tot_arg_size)
3128 -- assign the results, if necessary
3129 assign_code [] = nilOL
3130 assign_code [(dest,_hint)] =
3132 I64 -> toOL [MOV I32 (OpReg eax) (OpReg r_dest),
3133 MOV I32 (OpReg edx) (OpReg r_dest_hi)]
3134 F32 -> unitOL (GMOV fake0 r_dest)
3135 F64 -> unitOL (GMOV fake0 r_dest)
3136 rep -> unitOL (MOV rep (OpReg eax) (OpReg r_dest))
3138 r_dest_hi = getHiVRegFromLo r_dest
3139 rep = cmmRegRep dest
3140 r_dest = getRegisterReg dest
3141 assign_code many = panic "genCCall.assign_code many"
3143 return (push_code `appOL`
3145 assign_code dest_regs)
-- push_arg: I64 args are pushed as two I32 halves (hi then lo); floats are
-- stored via GST into space reserved by SUB esp; everything else is one
-- PUSH.  Every stack movement is mirrored by a DELTA pseudo-instruction.
3152 push_arg :: (CmmExpr,MachHint){-current argument-}
3153 -> NatM (Int, InstrBlock) -- argsz, code
3155 push_arg (arg,_hint) -- we don't need the hints on x86
3156 | arg_rep == I64 = do
3157 ChildCode64 code r_lo <- iselExpr64 arg
3158 delta <- getDeltaNat
3159 setDeltaNat (delta - 8)
3161 r_hi = getHiVRegFromLo r_lo
3163 return (8, code `appOL`
3164 toOL [PUSH I32 (OpReg r_hi), DELTA (delta - 4),
3165 PUSH I32 (OpReg r_lo), DELTA (delta - 8),
3170 (code, reg, sz) <- get_op arg
3171 delta <- getDeltaNat
3172 let size = arg_size sz
3173 setDeltaNat (delta-size)
3174 if (case sz of F64 -> True; F32 -> True; _ -> False)
3177 toOL [SUB I32 (OpImm (ImmInt size)) (OpReg esp),
3179 GST sz reg (AddrBaseIndex (EABaseReg esp)
3185 PUSH I32 (OpReg reg) `snocOL`
3189 arg_rep = cmmExprRep arg
-- get_op: evaluate an argument into some register, reporting its rep.
3192 get_op :: CmmExpr -> NatM (InstrBlock, Reg, MachRep) -- code, reg, size
3194 (reg,code) <- getSomeReg op
3195 return (code, reg, cmmExprRep op)
3197 #endif /* i386_TARGET_ARCH */
3199 #if i386_TARGET_ARCH || x86_64_TARGET_ARCH
-- | Implement a floating-point CallishMachOp as a call to the matching
-- C math-library routine.  F64 results call directly; F32 operations are
-- promoted to F64 for the call and the result demoted back (the C library
-- is only used at double precision here).
3201 outOfLineFloatOp :: CallishMachOp -> CmmReg -> [(CmmExpr,MachHint)]
3202 -> Maybe [GlobalReg] -> NatM InstrBlock
3203 outOfLineFloatOp mop res args vols
3204 | cmmRegRep res == F64
3205 = stmtToInstrs (CmmCall target [(res,FloatHint)] args vols)
3208 = do uq <- getUniqueNat
3210 tmp = CmmLocal (LocalReg uq F64)
3212 code1 <- stmtToInstrs (CmmCall target [(tmp,FloatHint)] (map promote args) vols)
3213 code2 <- stmtToInstrs (CmmAssign res (demote (CmmReg tmp)))
3214 return (code1 `appOL` code2)
3216 promote (x,hint) = (CmmMachOp (MO_S_Conv F32 F64) [x], hint)
3217 demote x = CmmMachOp (MO_S_Conv F64 F32) [x]
3219 target = CmmForeignCall (CmmLit lbl) CCallConv
3220 lbl = CmmLabel (mkForeignLabel fn Nothing False)
-- Map each MachOp to its libm entry point; F32 and F64 share the same
-- double-precision routine because of the promote/demote above.
3223 MO_F32_Sqrt -> FSLIT("sqrt")
3224 MO_F32_Sin -> FSLIT("sin")
3225 MO_F32_Cos -> FSLIT("cos")
3226 MO_F32_Tan -> FSLIT("tan")
3227 MO_F32_Exp -> FSLIT("exp")
3228 MO_F32_Log -> FSLIT("log")
3230 MO_F32_Asin -> FSLIT("asin")
3231 MO_F32_Acos -> FSLIT("acos")
3232 MO_F32_Atan -> FSLIT("atan")
3234 MO_F32_Sinh -> FSLIT("sinh")
3235 MO_F32_Cosh -> FSLIT("cosh")
3236 MO_F32_Tanh -> FSLIT("tanh")
3237 MO_F32_Pwr -> FSLIT("pow")
3239 MO_F64_Sqrt -> FSLIT("sqrt")
3240 MO_F64_Sin -> FSLIT("sin")
3241 MO_F64_Cos -> FSLIT("cos")
3242 MO_F64_Tan -> FSLIT("tan")
3243 MO_F64_Exp -> FSLIT("exp")
3244 MO_F64_Log -> FSLIT("log")
3246 MO_F64_Asin -> FSLIT("asin")
3247 MO_F64_Acos -> FSLIT("acos")
3248 MO_F64_Atan -> FSLIT("atan")
3250 MO_F64_Sinh -> FSLIT("sinh")
3251 MO_F64_Cosh -> FSLIT("cosh")
3252 MO_F64_Tanh -> FSLIT("tanh")
3253 MO_F64_Pwr -> FSLIT("pow")
3255 other -> pprPanic "outOfLineFloatOp" (pprCallishMachOp mop)
3257 #endif /* i386_TARGET_ARCH || x86_64_TARGET_ARCH */
3259 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3261 #if x86_64_TARGET_ARCH
-- | x86_64 foreign-call code generation.
-- NOTE(review): lossy extract — original source line numbers are embedded at
-- the start of each line and some interior lines (e.g. the 'let', 'case target
-- of' headers) are missing; code is left byte-identical to the extract.
-- Visible structure: load register args, pad the stack to keep %rsp 16-byte
-- aligned at the call, push remaining args right-to-left, emit the CALL,
-- set %al for the varargs SSE-count convention, then move results out of
-- %rax / %xmm0.
3263 genCCall (CmmPrim op) [(r,_)] args vols =
3264 outOfLineFloatOp op r args vols
3266 genCCall target dest_regs args vols = do
3268 -- load up the register arguments
3269 (stack_args, sse_regs, load_args_code)
3270 <- load_args args allArgRegs allFPArgRegs 0 nilOL
3273 tot_arg_size = arg_size * length stack_args
3275 -- On entry to the called function, %rsp should be aligned
3276 -- on a 16-byte boundary +8 (i.e. the first stack arg after
3277 -- the return address is 16-byte aligned). In STG land
3278 -- %rsp is kept 16-byte aligned (see StgCRun.c), so we just
3279 -- need to make sure we push a multiple of 16-bytes of args,
3280 -- plus the return address, to get the correct alignment.
3281 -- Urg, this is hard. We need to feed the delta back into
3282 -- the arg pushing code.
3283 (real_size, adjust_rsp) <-
3284 if tot_arg_size `rem` 16 == 0
3285 then return (tot_arg_size, nilOL)
3286 else do -- we need to adjust...
3287 delta <- getDeltaNat
3288 setDeltaNat (delta-8)
3289 return (tot_arg_size+8, toOL [
3290 SUB I64 (OpImm (ImmInt 8)) (OpReg rsp),
3294 -- push the stack args, right to left
3295 push_code <- push_args (reverse stack_args) nilOL
3296 delta <- getDeltaNat
3298 -- deal with static vs dynamic call targets
3299 (callinsns,cconv) <-
3302 CmmForeignCall (CmmLit (CmmLabel lbl)) conv
3303 -> -- ToDo: stdcall arg sizes
3304 return (unitOL (CALL (Left fn_imm)), conv)
3305 where fn_imm = ImmCLbl lbl
3306 CmmForeignCall expr conv
3307 -> do (dyn_r, dyn_c) <- getSomeReg expr
3308 return (dyn_c `snocOL` CALL (Right dyn_r), conv)
3311 -- The x86_64 ABI requires us to set %al to the number of SSE
3312 -- registers that contain arguments, if the called routine
3313 -- is a varargs function. We don't know whether it's a
3314 -- varargs function or not, so we have to assume it is.
3316 -- It's not safe to omit this assignment, even if the number
3317 -- of SSE regs in use is zero. If %al is larger than 8
3318 -- on entry to a varargs function, seg faults ensue.
3319 assign_eax n = unitOL (MOV I32 (OpImm (ImmInt n)) (OpReg eax))
3321 let call = callinsns `appOL`
3323 -- Deallocate parameters after call for ccall;
3324 -- but not for stdcall (callee does it)
3325 (if cconv == StdCallConv || real_size==0 then [] else
3326 [ADD wordRep (OpImm (ImmInt real_size)) (OpReg esp)])
3328 [DELTA (delta + real_size)]
3331 setDeltaNat (delta + real_size)
3334 -- assign the results, if necessary
3335 assign_code [] = nilOL
3336 assign_code [(dest,_hint)] =
3338 F32 -> unitOL (MOV rep (OpReg xmm0) (OpReg r_dest))
3339 F64 -> unitOL (MOV rep (OpReg xmm0) (OpReg r_dest))
3340 rep -> unitOL (MOV rep (OpReg rax) (OpReg r_dest))
3342 rep = cmmRegRep dest
3343 r_dest = getRegisterReg dest
3344 assign_code many = panic "genCCall.assign_code many"
3346 return (load_args_code `appOL`
3349 assign_eax sse_regs `appOL`
3351 assign_code dest_regs)
3354 arg_size = 8 -- always, at the mo
-- load_args: peel args into the available int/FP argument registers,
-- counting SSE registers used; leftover args are returned for stack pushing.
3356 load_args :: [(CmmExpr,MachHint)]
3357 -> [Reg] -- int regs avail for args
3358 -> [Reg] -- FP regs avail for args
3359 -> Int -> InstrBlock
3360 -> NatM ([(CmmExpr,MachHint)],Int,InstrBlock)
3361 load_args args [] [] sse_regs code = return (args, sse_regs, code)
3362 -- no more regs to use
3363 load_args [] aregs fregs sse_regs code = return ([],sse_regs,code)
3364 -- no more args to push
3365 load_args ((arg,hint) : rest) aregs fregs sse_regs code
3366 | isFloatingRep arg_rep =
-- NOTE(review): the guards/case selecting register r from fregs/aregs are
-- among the lines lost in extraction — 'r' and 'rs' are bound there.
3370 arg_code <- getAnyReg arg
3371 load_args rest aregs rs (sse_regs+1) (code `appOL` arg_code r)
3376 arg_code <- getAnyReg arg
3377 load_args rest rs fregs sse_regs (code `appOL` arg_code r)
3379 arg_rep = cmmExprRep arg
3382 (args',sse',code') <- load_args rest aregs fregs sse_regs code
3383 return ((arg,hint):args', sse', code')
-- push_args: push the remaining (stack) args, maintaining the frame delta.
3385 push_args [] code = return code
3386 push_args ((arg,hint):rest) code
3387 | isFloatingRep arg_rep = do
3388 (arg_reg, arg_code) <- getSomeReg arg
3389 delta <- getDeltaNat
3390 setDeltaNat (delta-arg_size)
3391 let code' = code `appOL` toOL [
3392 MOV arg_rep (OpReg arg_reg) (OpAddr (spRel 0)),
3393 SUB wordRep (OpImm (ImmInt arg_size)) (OpReg rsp) ,
3394 DELTA (delta-arg_size)]
3395 push_args rest code'
3398 -- we only ever generate word-sized function arguments. Promotion
3399 -- has already happened: our Int8# type is kept sign-extended
3400 -- in an Int#, for example.
3401 ASSERT(arg_rep == I64) return ()
3402 (arg_op, arg_code) <- getOperand arg
3403 delta <- getDeltaNat
3404 setDeltaNat (delta-arg_size)
3405 let code' = code `appOL` toOL [PUSH I64 arg_op,
3406 DELTA (delta-arg_size)]
3407 push_args rest code'
3409 arg_rep = cmmExprRep arg
3412 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3414 #if sparc_TARGET_ARCH
3416 The SPARC calling convention is an absolute
3417 nightmare. The first 6x32 bits of arguments are mapped into
3418 %o0 through %o5, and the remaining arguments are dumped to the
3419 stack, beginning at [%sp+92]. (Note that %o6 == %sp.)
3421 If we have to put args on the stack, move %o6==%sp down by
3422 the number of words to go on the stack, to ensure there's enough space.
3424 According to Fraser and Hanson's lcc book, page 478, fig 17.2,
3425 16 words above the stack pointer is a word for the address of
3426 a structure return value. I use this as a temporary location
3427 for moving values from float to int regs. Certainly it isn't
3428 safe to put anything in the 16 words starting at %sp, since
3429 this area can get trashed at any time due to window overflows
3430 caused by signal handlers.
3432 A final complication (if the above isn't enough) is that
3433 we can't blithely calculate the arguments one by one into
3434 %o0 .. %o5. Consider the following nested calls:
3438 Naive code moves a into %o0, and (fff b c) into %o1. Unfortunately
3439 the inner call will itself use %o0, which trashes the value put there
3440 in preparation for the outer call. Upshot: we need to calculate the
3441 args into temporary regs, and move those to arg regs or onto the
3442 stack only immediately prior to the call proper. Sigh.
-- | SPARC foreign-call code generation (see calling-convention notes above).
-- NOTE(review): lossy extract — embedded original line numbers, some interior
-- lines missing (e.g. the 'case fn of' header and 'in' keywords); code left
-- byte-identical. Strategy: compute every arg into fresh int vregs first,
-- then move them into %o0..%o5 / the stack only just before the call, so the
-- inner calls cannot clobber outer-call argument registers.
3445 genCCall fn cconv kind args
3446 = mapNat arg_to_int_vregs args `thenNat` \ argcode_and_vregs ->
3448 (argcodes, vregss) = unzip argcode_and_vregs
3449 n_argRegs = length allArgRegs
3450 n_argRegs_used = min (length vregs) n_argRegs
3451 vregs = concat vregss
3453 -- deal with static vs dynamic call targets
3456 -> return (unitOL (CALL (Left fn__2) n_argRegs_used False))
3458 -> arg_to_int_vregs dyn `thenNat` \ (dyn_c, [dyn_r]) ->
3459 return (dyn_c `snocOL` CALL (Right dyn_r) n_argRegs_used False)
3461 `thenNat` \ callinsns ->
3463 argcode = concatOL argcodes
3464 (move_sp_down, move_sp_up)
3465 = let diff = length vregs - n_argRegs
3466 nn = if odd diff then diff + 1 else diff -- keep 8-byte alignment
3469 else (unitOL (moveSp (-1*nn)), unitOL (moveSp (1*nn)))
3471 = toOL (move_final vregs allArgRegs eXTRA_STK_ARGS_HERE)
3473 return (argcode `appOL`
3474 move_sp_down `appOL`
3475 transfer_code `appOL`
3480 -- function names that begin with '.' are assumed to be special
3481 -- internally generated names like '.mul,' which don't get an
3482 -- underscore prefix
3483 -- ToDo:needed (WDP 96/03) ???
3484 fn_static = unLeft fn
3485 fn__2 = case (headFS fn_static) of
3486 '.' -> ImmLit (ftext fn_static)
3487 _ -> ImmCLbl (mkForeignLabel fn_static False)
3489 -- move args from the integer vregs into which they have been
3490 -- marshalled, into %o0 .. %o5, and the rest onto the stack.
3491 move_final :: [Reg] -> [Reg] -> Int -> [Instr]
3493 move_final [] _ offset -- all args done
3496 move_final (v:vs) [] offset -- out of aregs; move to stack
3497 = ST W v (spRel offset)
3498 : move_final vs [] (offset+1)
3500 move_final (v:vs) (a:az) offset -- move into an arg (%o[0..5]) reg
3501 = OR False g0 (RIReg v) a
3502 : move_final vs az offset
3504 -- generate code to calculate an argument, and move it into one
3505 -- or two integer vregs.
3506 arg_to_int_vregs :: CmmExpr -> NatM (OrdList Instr, [Reg])
3507 arg_to_int_vregs arg
3508 | is64BitRep (repOfCmmExpr arg)
3509 = iselExpr64 arg `thenNat` \ (ChildCode64 code vr_lo) ->
3510 let r_lo = VirtualRegI vr_lo
3511 r_hi = getHiVRegFromLo r_lo
3512 in return (code, [r_hi, r_lo])
3514 = getRegister arg `thenNat` \ register ->
3515 getNewRegNat (registerRep register) `thenNat` \ tmp ->
3516 let code = registerCode register tmp
3517 src = registerName register tmp
3518 pk = registerRep register
3520 -- the value is in src. Get it into 1 or 2 int vregs.
-- NOTE(review): float values are bounced via the scratch slot at [%sp+16]
-- (see commentary above) to move them from FP to int registers; several of
-- the surrounding case-alternative lines are missing from the extract.
3523 getNewRegNat WordRep `thenNat` \ v1 ->
3524 getNewRegNat WordRep `thenNat` \ v2 ->
3527 FMOV DF src f0 `snocOL`
3528 ST F f0 (spRel 16) `snocOL`
3529 LD W (spRel 16) v1 `snocOL`
3530 ST F (fPair f0) (spRel 16) `snocOL`
3536 getNewRegNat WordRep `thenNat` \ v1 ->
3539 ST F src (spRel 16) `snocOL`
3545 getNewRegNat WordRep `thenNat` \ v1 ->
3547 code `snocOL` OR False g0 (RIReg src) v1
3551 #endif /* sparc_TARGET_ARCH */
3553 #if powerpc_TARGET_ARCH
3555 #if darwin_TARGET_OS || linux_TARGET_OS
3557 The PowerPC calling convention for Darwin/Mac OS X
3558 is described in Apple's document
3559 "Inside Mac OS X - Mach-O Runtime Architecture".
3561 PowerPC Linux uses the System V Release 4 Calling Convention
3562 for PowerPC. It is described in the
3563 "System V Application Binary Interface PowerPC Processor Supplement".
3565 Both conventions are similar:
3566 Parameters may be passed in general-purpose registers starting at r3, in
3567 floating point registers starting at f1, or on the stack.
3569 But there are substantial differences:
3570 * The number of registers used for parameter passing and the exact set of
3571 nonvolatile registers differs (see MachRegs.lhs).
3572 * On Darwin, stack space is always reserved for parameters, even if they are
3573 passed in registers. The called routine may choose to save parameters from
3574 registers to the corresponding space on the stack.
3575 * On Darwin, a corresponding number of GPRs is skipped when a floating point
3576 parameter is passed in an FPR.
3577 * SysV insists on either passing I64 arguments on the stack, or in two GPRs,
3578 starting with an odd-numbered GPR. It may skip a GPR to achieve this.
3579 Darwin just treats an I64 like two separate I32s (high word first).
3580 * I64 and F64 arguments are 8-byte aligned on the stack for SysV, but only
3581 4-byte aligned like everything else on Darwin.
3582 * The SysV spec claims that F32 is represented as F64 on the stack. GCC on
3583 PowerPC Linux does not agree, so neither do we.
3585 According to both conventions, the parameter area should be part of the
3586 caller's stack frame, allocated in the caller's prologue code (large enough
3587 to hold the parameter lists for all called routines). The NCG already
3588 uses the stack for register spilling, leaving 64 bytes free at the top.
3589 If we need a larger parameter area than that, we just allocate a new stack
3590 frame just before ccalling.
-- | PowerPC (Darwin / Linux SysV) foreign-call code generation.
-- NOTE(review): lossy extract — embedded original line numbers and missing
-- interior lines (e.g. the 'do', 'where', and some 'case' headers); code left
-- byte-identical. Structure: pass arguments per the ABI differences described
-- above, grow the stack frame if the 64-byte spill area is too small, emit
-- BL (static) or MTCTR/BCTRL (dynamic), then move results out of r3/r4/f1.
3593 genCCall target dest_regs argsAndHints vols
3594 = ASSERT (not $ any (`elem` [I8,I16]) argReps)
3595 -- we rely on argument promotion in the codeGen
3597 (finalStack,passArgumentsCode,usedRegs) <- passArguments
3599 allArgRegs allFPArgRegs
3603 (labelOrExpr, reduceToF32) <- case target of
3604 CmmForeignCall (CmmLit (CmmLabel lbl)) conv -> return (Left lbl, False)
3605 CmmForeignCall expr conv -> return (Right expr, False)
3606 CmmPrim mop -> outOfLineFloatOp mop
3608 let codeBefore = move_sp_down finalStack `appOL` passArgumentsCode
3609 codeAfter = move_sp_up finalStack `appOL` moveResult reduceToF32
3614 `snocOL` BL lbl usedRegs
3617 (dynReg, dynCode) <- getSomeReg dyn
3619 `snocOL` MTCTR dynReg
3621 `snocOL` BCTRL usedRegs
3624 #if darwin_TARGET_OS
3625 initialStackOffset = 24
3626 -- size of linkage area + size of arguments, in bytes
3627 stackDelta _finalStack = roundTo 16 $ (24 +) $ max 32 $ sum $
3628 map machRepByteWidth argReps
3629 #elif linux_TARGET_OS
3630 initialStackOffset = 8
3631 stackDelta finalStack = roundTo 16 finalStack
3633 args = map fst argsAndHints
3634 argReps = map cmmExprRep args
-- round x up to the next multiple of a
3636 roundTo a x | x `mod` a == 0 = x
3637 | otherwise = x + a - (x `mod` a)
3639 move_sp_down finalStack
3641 toOL [STU I32 sp (AddrRegImm sp (ImmInt (-delta))),
3644 where delta = stackDelta finalStack
3645 move_sp_up finalStack
3647 toOL [ADD sp sp (RIImm (ImmInt delta)),
3650 where delta = stackDelta finalStack
-- passArguments: thread (stackOffset, accumulated code, used regs) through
-- the argument list; I64 args get the OS-specific split-register treatment.
3653 passArguments [] _ _ stackOffset accumCode accumUsed = return (stackOffset, accumCode, accumUsed)
3654 passArguments ((arg,I64):args) gprs fprs stackOffset
3655 accumCode accumUsed =
3657 ChildCode64 code vr_lo <- iselExpr64 arg
3658 let vr_hi = getHiVRegFromLo vr_lo
3660 #if darwin_TARGET_OS
-- Darwin: an I64 is just two I32s, high word first, in the next two GPRs
-- (or their reserved stack slots).
3665 (accumCode `appOL` code
3666 `snocOL` storeWord vr_hi gprs stackOffset
3667 `snocOL` storeWord vr_lo (drop 1 gprs) (stackOffset+4))
3668 ((take 2 gprs) ++ accumUsed)
3670 storeWord vr (gpr:_) offset = MR gpr vr
3671 storeWord vr [] offset = ST I32 vr (AddrRegImm sp (ImmInt offset))
3673 #elif linux_TARGET_OS
-- SysV: I64 goes in an odd/even GPR pair (skipping one GPR if needed)
-- or 8-byte aligned on the stack.
3674 let stackOffset' = roundTo 8 stackOffset
3675 stackCode = accumCode `appOL` code
3676 `snocOL` ST I32 vr_hi (AddrRegImm sp (ImmInt stackOffset'))
3677 `snocOL` ST I32 vr_lo (AddrRegImm sp (ImmInt (stackOffset'+4)))
3678 regCode hireg loreg =
3679 accumCode `appOL` code
3680 `snocOL` MR hireg vr_hi
3681 `snocOL` MR loreg vr_lo
3684 hireg : loreg : regs | even (length gprs) ->
3685 passArguments args regs fprs stackOffset
3686 (regCode hireg loreg) (hireg : loreg : accumUsed)
3687 _skipped : hireg : loreg : regs ->
3688 passArguments args regs fprs stackOffset
3689 (regCode hireg loreg) (hireg : loreg : accumUsed)
3690 _ -> -- only one or no regs left
3691 passArguments args [] fprs (stackOffset'+8)
3695 passArguments ((arg,rep):args) gprs fprs stackOffset accumCode accumUsed
3696 | reg : _ <- regs = do
3697 register <- getRegister arg
3698 let code = case register of
3699 Fixed _ freg fcode -> fcode `snocOL` MR reg freg
3700 Any _ acode -> acode reg
3704 #if darwin_TARGET_OS
3705 -- The Darwin ABI requires that we reserve stack slots for register parameters
3706 (stackOffset + stackBytes)
3707 #elif linux_TARGET_OS
3708 -- ... the SysV ABI doesn't.
3711 (accumCode `appOL` code)
3714 (vr, code) <- getSomeReg arg
3718 (stackOffset' + stackBytes)
3719 (accumCode `appOL` code `snocOL` ST rep vr stackSlot)
3722 #if darwin_TARGET_OS
3723 -- stackOffset is at least 4-byte aligned
3724 -- The Darwin ABI is happy with that.
3725 stackOffset' = stackOffset
3727 -- ... the SysV ABI requires 8-byte alignment for doubles.
3728 stackOffset' | rep == F64 = roundTo 8 stackOffset
3729 | otherwise = stackOffset
3731 stackSlot = AddrRegImm sp (ImmInt stackOffset')
3732 (nGprs, nFprs, stackBytes, regs) = case rep of
3733 I32 -> (1, 0, 4, gprs)
3734 #if darwin_TARGET_OS
3735 -- The Darwin ABI requires that we skip a corresponding number of GPRs when
3737 F32 -> (1, 1, 4, fprs)
3738 F64 -> (2, 1, 8, fprs)
3739 #elif linux_TARGET_OS
3740 -- ... the SysV ABI doesn't.
3741 F32 -> (0, 1, 4, fprs)
3742 F64 -> (0, 1, 8, fprs)
-- moveResult: copy call results from ABI return registers (f1 for floats,
-- r3/r4 for ints) into the destination; FRSP demotes F64->F32 when the
-- out-of-line helper computed in double precision.
3745 moveResult reduceToF32 =
3749 | reduceToF32 && rep == F32 -> unitOL (FRSP r_dest f1)
3750 | rep == F32 || rep == F64 -> unitOL (MR r_dest f1)
3751 | rep == I64 -> toOL [MR (getHiVRegFromLo r_dest) r3,
3753 | otherwise -> unitOL (MR r_dest r3)
3754 where rep = cmmRegRep dest
3755 r_dest = getRegisterReg dest
-- | PowerPC: map a floating-point CallishMachOp to an out-of-line libm call.
-- Returns the call target (label or PIC-indirected expression) and a Bool
-- telling the caller whether the F32 result must be reduced from the F64
-- the double-precision libm routine produces (True for all MO_F32_* ops).
-- NOTE(review): lossy extract — embedded original line numbers, a couple of
-- interior lines missing; code left byte-identical.
3757 outOfLineFloatOp mop =
3759 mopExpr <- cmmMakeDynamicReference addImportNat True $
3760 mkForeignLabel functionName Nothing True
3761 let mopLabelOrExpr = case mopExpr of
3762 CmmLit (CmmLabel lbl) -> Left lbl
3764 return (mopLabelOrExpr, reduce)
3766 (functionName, reduce) = case mop of
3767 MO_F32_Exp -> (FSLIT("exp"), True)
3768 MO_F32_Log -> (FSLIT("log"), True)
3769 MO_F32_Sqrt -> (FSLIT("sqrt"), True)
3771 MO_F32_Sin -> (FSLIT("sin"), True)
3772 MO_F32_Cos -> (FSLIT("cos"), True)
3773 MO_F32_Tan -> (FSLIT("tan"), True)
3775 MO_F32_Asin -> (FSLIT("asin"), True)
3776 MO_F32_Acos -> (FSLIT("acos"), True)
3777 MO_F32_Atan -> (FSLIT("atan"), True)
3779 MO_F32_Sinh -> (FSLIT("sinh"), True)
3780 MO_F32_Cosh -> (FSLIT("cosh"), True)
3781 MO_F32_Tanh -> (FSLIT("tanh"), True)
3782 MO_F32_Pwr -> (FSLIT("pow"), True)
3784 MO_F64_Exp -> (FSLIT("exp"), False)
3785 MO_F64_Log -> (FSLIT("log"), False)
3786 MO_F64_Sqrt -> (FSLIT("sqrt"), False)
3788 MO_F64_Sin -> (FSLIT("sin"), False)
3789 MO_F64_Cos -> (FSLIT("cos"), False)
3790 MO_F64_Tan -> (FSLIT("tan"), False)
3792 MO_F64_Asin -> (FSLIT("asin"), False)
3793 MO_F64_Acos -> (FSLIT("acos"), False)
3794 MO_F64_Atan -> (FSLIT("atan"), False)
3796 MO_F64_Sinh -> (FSLIT("sinh"), False)
3797 MO_F64_Cosh -> (FSLIT("cosh"), False)
3798 MO_F64_Tanh -> (FSLIT("tanh"), False)
3799 MO_F64_Pwr -> (FSLIT("pow"), False)
3800 other -> pprPanic "genCCall(ppc): unknown callish op"
3801 (pprCallishMachOp other)
3803 #endif /* darwin_TARGET_OS || linux_TARGET_OS */
3805 #endif /* powerpc_TARGET_ARCH */
3808 -- -----------------------------------------------------------------------------
3809 -- Generating a table-branch
-- | Generate a table-based switch: emit a read-only jump table and an
-- indexed jump through it. On i386/x86_64 the table holds absolute labels;
-- on PowerPC a PIC variant uses label-difference entries added to the table
-- base, and a non-PIC variant uses absolute HA/LO addressing. Nothing
-- entries become 0 words (unreachable cases).
-- NOTE(review): lossy extract — embedded original line numbers, some interior
-- lines missing (e.g. 'let' headers and trailing brackets); byte-identical.
3811 genSwitch :: CmmExpr -> [Maybe BlockId] -> NatM InstrBlock
3813 #if i386_TARGET_ARCH || x86_64_TARGET_ARCH
3814 genSwitch expr ids = do
3815 (reg,e_code) <- getSomeReg expr
3816 lbl <- getNewLabelNat
3818 jumpTable = map jumpTableEntry ids
3819 op = OpAddr (AddrBaseIndex EABaseNone (EAIndex reg wORD_SIZE) (ImmCLbl lbl))
3820 code = e_code `appOL` toOL [
3821 LDATA ReadOnlyData (CmmDataLabel lbl : jumpTable),
3822 JMP_TBL op [ id | Just id <- ids ]
3826 #elif powerpc_TARGET_ARCH
-- PIC case: table entries are (block - table) offsets, resolved at runtime
-- by adding the dynamically-obtained table address.
3830 (reg,e_code) <- getSomeReg expr
3831 tmp <- getNewRegNat I32
3832 lbl <- getNewLabelNat
3833 dynRef <- cmmMakeDynamicReference addImportNat False lbl
3834 (tableReg,t_code) <- getSomeReg $ dynRef
3836 jumpTable = map jumpTableEntryRel ids
3838 jumpTableEntryRel Nothing
3839 = CmmStaticLit (CmmInt 0 wordRep)
3840 jumpTableEntryRel (Just (BlockId id))
3841 = CmmStaticLit (CmmLabelDiffOff blockLabel lbl 0)
3842 where blockLabel = mkAsmTempLabel id
3844 code = e_code `appOL` t_code `appOL` toOL [
3845 LDATA ReadOnlyData (CmmDataLabel lbl : jumpTable),
3846 SLW tmp reg (RIImm (ImmInt 2)),
3847 LD I32 tmp (AddrRegReg tableReg tmp),
3848 ADD tmp tmp (RIReg tableReg),
3850 BCTR [ id | Just id <- ids ]
-- non-PIC case: absolute addresses loaded via HA/LO of the table label.
3855 (reg,e_code) <- getSomeReg expr
3856 tmp <- getNewRegNat I32
3857 lbl <- getNewLabelNat
3859 jumpTable = map jumpTableEntry ids
3861 code = e_code `appOL` toOL [
3862 LDATA ReadOnlyData (CmmDataLabel lbl : jumpTable),
3863 SLW tmp reg (RIImm (ImmInt 2)),
3864 ADDIS tmp tmp (HA (ImmCLbl lbl)),
3865 LD I32 tmp (AddrRegImm tmp (LO (ImmCLbl lbl))),
3867 BCTR [ id | Just id <- ids ]
3871 genSwitch expr ids = panic "ToDo: genSwitch"
-- Shared helper: an absolute jump-table entry (0 word for a missing case).
3874 jumpTableEntry Nothing = CmmStaticLit (CmmInt 0 wordRep)
3875 jumpTableEntry (Just (BlockId id)) = CmmStaticLit (CmmLabel blockLabel)
3876 where blockLabel = mkAsmTempLabel id
3878 -- -----------------------------------------------------------------------------
3880 -- -----------------------------------------------------------------------------
3883 -- -----------------------------------------------------------------------------
3884 -- 'condIntReg' and 'condFltReg': condition codes into registers
3886 -- Turn those condition codes into integers now (when they appear on
3887 -- the right hand side of an assignment).
3889 -- (If applicable) Do not fill the delay slots here; you will confuse the
3890 -- register allocator.
-- | Materialise a comparison result (condition codes) into a register.
-- NOTE(review): lossy extract — embedded original line numbers, a few interior
-- lines missing (e.g. 'let' headers, closing brackets); byte-identical.
3892 condIntReg, condFltReg :: Cond -> CmmExpr -> CmmExpr -> NatM Register
3894 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3896 #if alpha_TARGET_ARCH
3897 condIntReg = panic "MachCode.condIntReg (not on Alpha)"
3898 condFltReg = panic "MachCode.condFltReg (not on Alpha)"
3899 #endif /* alpha_TARGET_ARCH */
3901 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3903 #if i386_TARGET_ARCH || x86_64_TARGET_ARCH
-- x86: SETcc into a byte temp, then zero-extend into the destination.
3905 condIntReg cond x y = do
3906 CondCode _ cond cond_code <- condIntCode cond x y
3907 tmp <- getNewRegNat I8
3909 code dst = cond_code `appOL` toOL [
3910 SETCC cond (OpReg tmp),
3911 MOVZxL I8 (OpReg tmp) (OpReg dst)
3914 return (Any I32 code)
3916 condFltReg cond x y = do
3917 CondCode _ cond cond_code <- condFltCode cond x y
3918 tmp1 <- getNewRegNat wordRep
3919 tmp2 <- getNewRegNat wordRep
3921 -- We have to worry about unordered operands (eg. comparisons
3922 -- against NaN). If the operands are unordered, the comparison
3923 -- sets the parity flag, carry flag and zero flag.
3924 -- All comparisons are supposed to return false for unordered
3925 -- operands except for !=, which returns true.
3927 -- Optimisation: we don't have to test the parity flag if we
3928 -- know the test has already excluded the unordered case: eg >
3929 -- and >= test for a zero carry flag, which can only occur for
3930 -- ordered operands.
3932 -- ToDo: by reversing comparisons we could avoid testing the
3933 -- parity flag in more cases.
3938 NE -> or_unordered dst
3939 GU -> plain_test dst
3940 GEU -> plain_test dst
3941 _ -> and_ordered dst)
3943 plain_test dst = toOL [
3944 SETCC cond (OpReg tmp1),
3945 MOVZxL I8 (OpReg tmp1) (OpReg dst)
3947 or_unordered dst = toOL [
3948 SETCC cond (OpReg tmp1),
3949 SETCC PARITY (OpReg tmp2),
3950 OR I8 (OpReg tmp1) (OpReg tmp2),
3951 MOVZxL I8 (OpReg tmp2) (OpReg dst)
3953 and_ordered dst = toOL [
3954 SETCC cond (OpReg tmp1),
3955 SETCC NOTPARITY (OpReg tmp2),
3956 AND I8 (OpReg tmp1) (OpReg tmp2),
3957 MOVZxL I8 (OpReg tmp2) (OpReg dst)
3960 return (Any I32 code)
3963 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3965 #if sparc_TARGET_ARCH
-- | SPARC/PowerPC condition-into-register.
-- SPARC: special-cased EQQ/NE against zero and register-register forms use
-- carry arithmetic tricks; the general fallthroughs branch over 0/1 moves.
-- NOTE(review): lossy extract — embedded original line numbers; the guard
-- heads of several fallthrough equations are among the missing lines; code
-- left byte-identical.
3967 condIntReg EQQ x (StInt 0)
3968 = getRegister x `thenNat` \ register ->
3969 getNewRegNat IntRep `thenNat` \ tmp ->
3971 code = registerCode register tmp
3972 src = registerName register tmp
-- subcc src,g0 sets carry iff src /= 0; subx then yields 1 for ==0.
3973 code__2 dst = code `appOL` toOL [
3974 SUB False True g0 (RIReg src) g0,
3975 SUB True False g0 (RIImm (ImmInt (-1))) dst]
3977 return (Any IntRep code__2)
3980 = getRegister x `thenNat` \ register1 ->
3981 getRegister y `thenNat` \ register2 ->
3982 getNewRegNat IntRep `thenNat` \ tmp1 ->
3983 getNewRegNat IntRep `thenNat` \ tmp2 ->
3985 code1 = registerCode register1 tmp1
3986 src1 = registerName register1 tmp1
3987 code2 = registerCode register2 tmp2
3988 src2 = registerName register2 tmp2
3989 code__2 dst = code1 `appOL` code2 `appOL` toOL [
3990 XOR False src1 (RIReg src2) dst,
3991 SUB False True g0 (RIReg dst) g0,
3992 SUB True False g0 (RIImm (ImmInt (-1))) dst]
3994 return (Any IntRep code__2)
3996 condIntReg NE x (StInt 0)
3997 = getRegister x `thenNat` \ register ->
3998 getNewRegNat IntRep `thenNat` \ tmp ->
4000 code = registerCode register tmp
4001 src = registerName register tmp
4002 code__2 dst = code `appOL` toOL [
4003 SUB False True g0 (RIReg src) g0,
4004 ADD True False g0 (RIImm (ImmInt 0)) dst]
4006 return (Any IntRep code__2)
4009 = getRegister x `thenNat` \ register1 ->
4010 getRegister y `thenNat` \ register2 ->
4011 getNewRegNat IntRep `thenNat` \ tmp1 ->
4012 getNewRegNat IntRep `thenNat` \ tmp2 ->
4014 code1 = registerCode register1 tmp1
4015 src1 = registerName register1 tmp1
4016 code2 = registerCode register2 tmp2
4017 src2 = registerName register2 tmp2
4018 code__2 dst = code1 `appOL` code2 `appOL` toOL [
4019 XOR False src1 (RIReg src2) dst,
4020 SUB False True g0 (RIReg dst) g0,
4021 ADD True False g0 (RIImm (ImmInt 0)) dst]
4023 return (Any IntRep code__2)
-- General case: branch to lbl1/lbl2 and move 0 or 1 into dst (with NOPs
-- for the delay slots).
4026 = getBlockIdNat `thenNat` \ lbl1 ->
4027 getBlockIdNat `thenNat` \ lbl2 ->
4028 condIntCode cond x y `thenNat` \ condition ->
4030 code = condCode condition
4031 cond = condName condition
4032 code__2 dst = code `appOL` toOL [
4033 BI cond False (ImmCLbl lbl1), NOP,
4034 OR False g0 (RIImm (ImmInt 0)) dst,
4035 BI ALWAYS False (ImmCLbl lbl2), NOP,
4037 OR False g0 (RIImm (ImmInt 1)) dst,
4040 return (Any IntRep code__2)
4043 = getBlockIdNat `thenNat` \ lbl1 ->
4044 getBlockIdNat `thenNat` \ lbl2 ->
4045 condFltCode cond x y `thenNat` \ condition ->
4047 code = condCode condition
4048 cond = condName condition
4049 code__2 dst = code `appOL` toOL [
4051 BF cond False (ImmCLbl lbl1), NOP,
4052 OR False g0 (RIImm (ImmInt 0)) dst,
4053 BI ALWAYS False (ImmCLbl lbl2), NOP,
4055 OR False g0 (RIImm (ImmInt 1)) dst,
4058 return (Any IntRep code__2)
4060 #endif /* sparc_TARGET_ARCH */
4062 #if powerpc_TARGET_ARCH
-- PowerPC: branch-free — MFCR + RLWINM extracts the relevant CR bit,
-- with CRNOR to negate it when needed (per the bit/do_negate table).
4063 condReg getCond = do
4064 lbl1 <- getBlockIdNat
4065 lbl2 <- getBlockIdNat
4066 CondCode _ cond cond_code <- getCond
4068 {- code dst = cond_code `appOL` toOL [
4077 code dst = cond_code
4081 RLWINM dst dst (bit + 1) 31 31
4084 negate_code | do_negate = unitOL (CRNOR bit bit bit)
4087 (bit, do_negate) = case cond of
4101 return (Any I32 code)
4103 condIntReg cond x y = condReg (condIntCode cond x y)
4104 condFltReg cond x y = condReg (condFltCode cond x y)
4105 #endif /* powerpc_TARGET_ARCH */
4108 -- -----------------------------------------------------------------------------
4109 -- 'trivial*Code': deal with trivial instructions
4111 -- Trivial (dyadic: 'trivialCode', floating-point: 'trivialFCode',
4112 -- unary: 'trivialUCode', unary fl-pt:'trivialUFCode') instructions.
4113 -- Only look for constants on the right hand side, because that's
4114 -- where the generic optimizer will have put them.
4116 -- Similarly, for unary instructions, we don't have to worry about
4117 -- matching an StInt as the argument, because genericOpt will already
4118 -- have handled the constant-folding.
4122 -> IF_ARCH_alpha((Reg -> RI -> Reg -> Instr)
4123 ,IF_ARCH_i386 ((Operand -> Operand -> Instr)
4124 -> Maybe (Operand -> Operand -> Instr)
4125 ,IF_ARCH_x86_64 ((Operand -> Operand -> Instr)
4126 -> Maybe (Operand -> Operand -> Instr)
4127 ,IF_ARCH_sparc((Reg -> RI -> Reg -> Instr)
4128 ,IF_ARCH_powerpc(Bool -> (Reg -> Reg -> RI -> Instr)
4130 -> CmmExpr -> CmmExpr -- the two arguments
4133 #ifndef powerpc_TARGET_ARCH
4136 -> IF_ARCH_alpha((Reg -> Reg -> Reg -> Instr)
4137 ,IF_ARCH_sparc((Size -> Reg -> Reg -> Reg -> Instr)
4138 ,IF_ARCH_i386 ((MachRep -> Reg -> Reg -> Reg -> Instr)
4139 ,IF_ARCH_x86_64 ((MachRep -> Operand -> Operand -> Instr)
4141 -> CmmExpr -> CmmExpr -- the two arguments
4147 -> IF_ARCH_alpha((RI -> Reg -> Instr)
4148 ,IF_ARCH_i386 ((Operand -> Instr)
4149 ,IF_ARCH_x86_64 ((Operand -> Instr)
4150 ,IF_ARCH_sparc((RI -> Reg -> Instr)
4151 ,IF_ARCH_powerpc((Reg -> Reg -> Instr)
4153 -> CmmExpr -- the one argument
4156 #ifndef powerpc_TARGET_ARCH
4159 -> IF_ARCH_alpha((Reg -> Reg -> Instr)
4160 ,IF_ARCH_i386 ((Reg -> Reg -> Instr)
4161 ,IF_ARCH_x86_64 ((Reg -> Reg -> Instr)
4162 ,IF_ARCH_sparc((Reg -> Reg -> Instr)
4164 -> CmmExpr -- the one argument
4168 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
4170 #if alpha_TARGET_ARCH
-- | Alpha versions of the trivial instruction selectors: dyadic int
-- ('trivialCode'), unary int ('trivialUCode'), dyadic FP ('trivialFCode'),
-- unary FP ('trivialUFCode'). These use the old mkSeqInstr/asmSeqThen style.
-- NOTE(review): lossy extract — embedded original line numbers and missing
-- 'let'/'in' lines; code left byte-identical.
4172 trivialCode instr x (StInt y)
4174 = getRegister x `thenNat` \ register ->
4175 getNewRegNat IntRep `thenNat` \ tmp ->
4177 code = registerCode register tmp
4178 src1 = registerName register tmp
4179 src2 = ImmInt (fromInteger y)
4180 code__2 dst = code . mkSeqInstr (instr src1 (RIImm src2) dst)
4182 return (Any IntRep code__2)
4184 trivialCode instr x y
4185 = getRegister x `thenNat` \ register1 ->
4186 getRegister y `thenNat` \ register2 ->
4187 getNewRegNat IntRep `thenNat` \ tmp1 ->
4188 getNewRegNat IntRep `thenNat` \ tmp2 ->
4190 code1 = registerCode register1 tmp1 []
4191 src1 = registerName register1 tmp1
4192 code2 = registerCode register2 tmp2 []
4193 src2 = registerName register2 tmp2
4194 code__2 dst = asmSeqThen [code1, code2] .
4195 mkSeqInstr (instr src1 (RIReg src2) dst)
4197 return (Any IntRep code__2)
4200 trivialUCode instr x
4201 = getRegister x `thenNat` \ register ->
4202 getNewRegNat IntRep `thenNat` \ tmp ->
4204 code = registerCode register tmp
4205 src = registerName register tmp
4206 code__2 dst = code . mkSeqInstr (instr (RIReg src) dst)
4208 return (Any IntRep code__2)
4211 trivialFCode _ instr x y
4212 = getRegister x `thenNat` \ register1 ->
4213 getRegister y `thenNat` \ register2 ->
4214 getNewRegNat F64 `thenNat` \ tmp1 ->
4215 getNewRegNat F64 `thenNat` \ tmp2 ->
4217 code1 = registerCode register1 tmp1
4218 src1 = registerName register1 tmp1
4220 code2 = registerCode register2 tmp2
4221 src2 = registerName register2 tmp2
4223 code__2 dst = asmSeqThen [code1 [], code2 []] .
4224 mkSeqInstr (instr src1 src2 dst)
4226 return (Any F64 code__2)
4228 trivialUFCode _ instr x
4229 = getRegister x `thenNat` \ register ->
4230 getNewRegNat F64 `thenNat` \ tmp ->
4232 code = registerCode register tmp
4233 src = registerName register tmp
4234 code__2 dst = code . mkSeqInstr (instr src dst)
4236 return (Any F64 code__2)
4238 #endif /* alpha_TARGET_ARCH */
4240 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
4242 #if i386_TARGET_ARCH || x86_64_TARGET_ARCH
4245 The Rules of the Game are:
4247 * You cannot assume anything about the destination register dst;
4248 it may be anything, including a fixed reg.
4250 * You may compute an operand into a fixed reg, but you may not
4251 subsequently change the contents of that fixed reg. If you
4252 want to do so, first copy the value either to a temporary
4253 or into dst. You are free to modify dst even if it happens
4254 to be a fixed reg -- that's not your problem.
4256 * You cannot assume that a fixed reg will stay live over an
4257 arbitrary computation. The same applies to the dst reg.
4259 * Temporary regs obtained from getNewRegNat are distinct from
4260 each other and from all other regs, and stay live over
4261 arbitrary computations.
4263 --------------------
4265 SDM's version of The Rules:
4267 * If getRegister returns Any, that means it can generate correct
4268 code which places the result in any register, period. Even if that
4269 register happens to be read during the computation.
4271 Corollary #1: this means that if you are generating code for an
4272 operation with two arbitrary operands, you cannot assign the result
4273 of the first operand into the destination register before computing
4274 the second operand. The second operand might require the old value
4275 of the destination register.
4277 Corollary #2: A function might be able to generate more efficient
4278 code if it knows the destination register is a new temporary (and
4279 therefore not read by any of the sub-computations).
4281 * If getRegister returns Any, then the code it generates may modify only:
4282 (a) fresh temporaries
4283 (b) the destination register
4284 (c) known registers (eg. %ecx is used by shifts)
4285 In particular, it may *not* modify global registers, unless the global
4286 register happens to be the destination register.
-- | i386/x86_64 trivial instruction selectors. Two-operand machine: the
-- instruction computes dst := dst `op` src, hence the clash handling below.
-- When a non-64-bit literal appears on the LEFT and a reversed-operand
-- instruction is available, use it directly instead of going the general way.
-- NOTE(review): lossy extract — embedded original line numbers and some
-- missing interior lines (e.g. the 'where code dst' headers); byte-identical.
4289 trivialCode rep instr (Just revinstr) (CmmLit lit_a) b
4290 | not (is64BitLit lit_a) = do
4291 b_code <- getAnyReg b
4294 = b_code dst `snocOL`
4295 revinstr (OpImm (litToImm lit_a)) (OpReg dst)
4297 return (Any rep code)
4299 trivialCode rep instr maybe_revinstr a b = genTrivialCode rep instr a b
4301 -- This is re-used for floating pt instructions too.
4302 genTrivialCode rep instr a b = do
4303 (b_op, b_code) <- getNonClobberedOperand b
4304 a_code <- getAnyReg a
4305 tmp <- getNewRegNat rep
4307 -- We want the value of b to stay alive across the computation of a.
4308 -- But, we want to calculate a straight into the destination register,
4309 -- because the instruction only has two operands (dst := dst `op` src).
4310 -- The troublesome case is when the result of b is in the same register
4311 -- as the destination reg. In this case, we have to save b in a
4312 -- new temporary across the computation of a.
4314 | dst `regClashesWithOp` b_op =
4316 unitOL (MOV rep b_op (OpReg tmp)) `appOL`
4318 instr (OpReg tmp) (OpReg dst)
4322 instr b_op (OpReg dst)
4324 return (Any rep code)
-- Does reg appear in the given operand (directly or in an address mode)?
4326 reg `regClashesWithOp` OpReg reg2 = reg == reg2
4327 reg `regClashesWithOp` OpAddr amode = any (==reg) (addrModeRegs amode)
4328 reg `regClashesWithOp` _ = False
4332 trivialUCode rep instr x = do
4333 x_code <- getAnyReg x
4339 return (Any rep code)
4343 #if i386_TARGET_ARCH
4345 trivialFCode pk instr x y = do
4346 (x_reg, x_code) <- getNonClobberedReg x -- these work for float regs too
4347 (y_reg, y_code) <- getSomeReg y
4352 instr pk x_reg y_reg dst
4354 return (Any pk code)
4358 #if x86_64_TARGET_ARCH
-- x86_64: SSE FP instructions are two-operand like the int ones, so the
-- generic two-operand machinery applies directly.
4360 trivialFCode pk instr x y = genTrivialCode pk (instr pk) x y
4366 trivialUFCode rep instr x = do
4367 (x_reg, x_code) <- getSomeReg x
4373 return (Any rep code)
4375 #endif /* i386_TARGET_ARCH */
4377 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
4379 #if sparc_TARGET_ARCH
4381 trivialCode instr x (StInt y)
4383 = getRegister x `thenNat` \ register ->
4384 getNewRegNat IntRep `thenNat` \ tmp ->
4386 code = registerCode register tmp
4387 src1 = registerName register tmp
4388 src2 = ImmInt (fromInteger y)
4389 code__2 dst = code `snocOL` instr src1 (RIImm src2) dst
4391 return (Any IntRep code__2)
4393 trivialCode instr x y
4394 = getRegister x `thenNat` \ register1 ->
4395 getRegister y `thenNat` \ register2 ->
4396 getNewRegNat IntRep `thenNat` \ tmp1 ->
4397 getNewRegNat IntRep `thenNat` \ tmp2 ->
4399 code1 = registerCode register1 tmp1
4400 src1 = registerName register1 tmp1
4401 code2 = registerCode register2 tmp2
4402 src2 = registerName register2 tmp2
4403 code__2 dst = code1 `appOL` code2 `snocOL`
4404 instr src1 (RIReg src2) dst
4406 return (Any IntRep code__2)
-- | trivialFCode (SPARC): binary floating-point operation.  When the two
-- operand reps differ, the F32 operand is promoted to F64 first (the
-- @promote@ helper emits @FxTOy F DF@ into the scratch reg @tmp@), and
-- the result rep is F64; when they agree, the op runs at the common rep.
-- NOTE(review): several interior lines are missing from this listing
-- (e.g. the `thenNat` continuations binding tmp1/tmp2 after 4412/4414,
-- the @let@/@in@ lines, and the opening @if pk1 == pk2 then@ of the
-- conditional whose branches appear at 4430-4437) -- TODO recover.
4409 trivialFCode pk instr x y
4410 = getRegister x `thenNat` \ register1 ->
4411 getRegister y `thenNat` \ register2 ->
4412 getNewRegNat (registerRep register1)
4414 getNewRegNat (registerRep register2)
4416 getNewRegNat F64 `thenNat` \ tmp ->
4418 promote x = FxTOy F DF x tmp
4420 pk1 = registerRep register1
4421 code1 = registerCode register1 tmp1
4422 src1 = registerName register1 tmp1
4424 pk2 = registerRep register2
4425 code2 = registerCode register2 tmp2
4426 src2 = registerName register2 tmp2
4430 code1 `appOL` code2 `snocOL`
4431 instr (primRepToSize pk) src1 src2 dst
4432 else if pk1 == F32 then
4433 code1 `snocOL` promote src1 `appOL` code2 `snocOL`
4434 instr DF tmp src2 dst
4436 code1 `appOL` code2 `snocOL` promote src2 `snocOL`
4437 instr DF src1 tmp dst
4439 return (Any (if pk1 == pk2 then pk1 else F64) code__2)
-- | trivialUCode (SPARC): unary integer operation of the form
-- @instr (RIReg src) dst@.
-- NOTE(review): the @let@/@in@ lines (4445, 4449) are missing from this
-- listing.
4442 trivialUCode instr x
4443 = getRegister x `thenNat` \ register ->
4444 getNewRegNat IntRep `thenNat` \ tmp ->
4446 code = registerCode register tmp
4447 src = registerName register tmp
4448 code__2 dst = code `snocOL` instr (RIReg src) dst
4450 return (Any IntRep code__2)
-- | trivialUFCode (SPARC): unary floating-point operation at rep @pk@,
-- of the form @instr src dst@.
-- NOTE(review): the @let@/@in@ lines (4456, 4460) are missing here too.
4453 trivialUFCode pk instr x
4454 = getRegister x `thenNat` \ register ->
4455 getNewRegNat pk `thenNat` \ tmp ->
4457 code = registerCode register tmp
4458 src = registerName register tmp
4459 code__2 dst = code `snocOL` instr src dst
4461 return (Any pk code__2)
4463 #endif /* sparc_TARGET_ARCH */
4465 #if powerpc_TARGET_ARCH
4468 Wolfgang's PowerPC version of The Rules:
4470 A slightly modified version of The Rules to take advantage of the fact
4471 that PowerPC instructions work on all registers and don't implicitly
4472 clobber any fixed registers.
4474 * The only expression for which getRegister returns Fixed is (CmmReg reg).
4476 * If getRegister returns Any, then the code it generates may modify only:
4477 (a) fresh temporaries
4478 (b) the destination register
4479 It may *not* modify global registers, unless the global
4480 register happens to be the destination register.
4481 It may not clobber any other registers. In fact, only ccalls clobber any
4483 Also, it may not modify the counter register (used by genCCall).
4485 Corollary: If a getRegister for a subexpression returns Fixed, you need
4486 not move it to a fresh temporary before evaluating the next subexpression.
4487 The Fixed register won't be modified.
4488 Therefore, we don't need a counterpart for the x86's getStableReg on PPC.
4490 * SDM's First Rule is valid for PowerPC, too: subexpressions can depend on
4491 the value of the destination register.
-- | trivialCode (PowerPC), immediate equation: when the literal fits in
-- an immediate field for this rep/signedness (makeImmediate succeeds),
-- fold it in as @RIImm@ and only evaluate the left operand to a register.
-- NOTE(review): numbering jumps 4495 -> 4497, so the @= do@ line
-- introducing the body is missing from this listing.
4494 trivialCode rep signed instr x (CmmLit (CmmInt y _))
4495 | Just imm <- makeImmediate rep signed y
4497 (src1, code1) <- getSomeReg x
4498 let code dst = code1 `snocOL` instr dst src1 (RIImm imm)
4499 return (Any rep code)
-- | trivialCode (PowerPC), general equation: both operands are
-- evaluated into registers and the operation is emitted as
-- @instr dst src1 (RIReg src2)@.  The @signed@ flag is not consulted
-- here; it only matters for the immediate equation above.
trivialCode rep signed instr x y = do
    (reg_x, nat_x) <- getSomeReg x
    (reg_y, nat_y) <- getSomeReg y
    let
        assemble dst = (nat_x `appOL` nat_y) `snocOL` instr dst reg_x (RIReg reg_y)
    return (Any rep assemble)
-- | Like 'trivialCode', but for instructions with no immediate form:
-- both arguments are unconditionally forced into registers and the
-- operation is emitted as @instr dst src1 src2@.
trivialCodeNoImm :: MachRep -> (Reg -> Reg -> Reg -> Instr)
                 -> CmmExpr -> CmmExpr -> NatM Register
trivialCodeNoImm rep instr x y = do
    (reg_x, nat_x) <- getSomeReg x
    (reg_y, nat_y) <- getSomeReg y
    let
        assemble dst = (nat_x `appOL` nat_y) `snocOL` instr dst reg_x reg_y
    return (Any rep assemble)
-- | trivialUCode (PowerPC): unary operation of the form @instr dst src@.
trivialUCode rep instr x = do
    (reg, nat) <- getSomeReg x
    let
        assemble dst = nat `snocOL` instr dst reg
    return (Any rep assemble)
4520 -- There is no "remainder" instruction on the PPC, so we have to do
4522 -- The "div" parameter is the division instruction to use (DIVW or DIVWU)
-- | Compute an integer remainder on PowerPC, presumably as
-- @x - (x `div` y) * y@: divide, multiply back, subtract.
-- NOTE(review): the instruction list below is incomplete in this listing
-- (numbering jumps 4529 -> 4531 and 4531 -> 4534); the division
-- instruction itself and the final subtract appear to be missing.
-- Also note the parameter name @div@ shadows Prelude.div within the body.
4524 remainderCode :: MachRep -> (Reg -> Reg -> Reg -> Instr)
4525 -> CmmExpr -> CmmExpr -> NatM Register
4526 remainderCode rep div x y = do
4527 (src1, code1) <- getSomeReg x
4528 (src2, code2) <- getSomeReg y
4529 let code dst = code1 `appOL` code2 `appOL` toOL [
4531 MULLW dst dst (RIReg src2),
4534 return (Any rep code)
4536 #endif /* powerpc_TARGET_ARCH */
4539 -- -----------------------------------------------------------------------------
4540 -- Coercing to/from integer/floating-point...
4542 -- @coerce(Int2FP|FP2Int)@ are more complicated integer/float
4543 -- conversions. We have to store temporaries in memory to move
4544 -- between the integer and the floating point register sets.
4546 -- @coerceDbl2Flt@ and @coerceFlt2Dbl@ are done this way because we
4547 -- pretend, on sparc at least, that double and float regs are separate
4548 -- kinds, so the value has to be computed into one kind before being
4549 -- explicitly "converted" to live in the other kind.
-- Type signatures for the integer<->float coercion generators defined in
-- the per-architecture sections below.  SPARC additionally gets explicit
-- float<->double conversions, since that backend treats F32 and F64
-- registers as separate kinds (see the comment above).
4551 coerceInt2FP :: MachRep -> MachRep -> CmmExpr -> NatM Register
4552 coerceFP2Int :: MachRep -> MachRep -> CmmExpr -> NatM Register
4554 #if sparc_TARGET_ARCH
4555 coerceDbl2Flt :: CmmExpr -> NatM Register
4556 coerceFlt2Dbl :: CmmExpr -> NatM Register
4559 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
4561 #if alpha_TARGET_ARCH
-- Alpha coercions: both directions bounce the value through memory
-- (spRel 0) to move between the integer and floating-point register
-- files -- ST to the stack then LD back into the other register class.
-- NOTE(review): the function-head lines (e.g. 4562-4563) and several
-- instructions inside the mkSeqInstrs lists are missing from this
-- listing; what follows are the visible remnants of coerceInt2FP
-- (returns F64) and coerceFP2Int (returns IntRep).
4564 = getRegister x `thenNat` \ register ->
4565 getNewRegNat IntRep `thenNat` \ reg ->
4567 code = registerCode register reg
4568 src = registerName register reg
4570 code__2 dst = code . mkSeqInstrs [
4572 LD TF dst (spRel 0),
4575 return (Any F64 code__2)
4579 = getRegister x `thenNat` \ register ->
4580 getNewRegNat F64 `thenNat` \ tmp ->
4582 code = registerCode register tmp
4583 src = registerName register tmp
4585 code__2 dst = code . mkSeqInstrs [
4587 ST TF tmp (spRel 0),
4590 return (Any IntRep code__2)
4592 #endif /* alpha_TARGET_ARCH */
4594 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
4596 #if i386_TARGET_ARCH
-- i386 coercions use the synthetic x87-style G* instructions:
-- GITOF/GITOD for int->float/double, GFTOI/GDTOI for the reverse.
-- NOTE(review): the @let@ lines introducing the opc/code bindings
-- (4600, 4611) are missing from this listing.
4598 coerceInt2FP from to x = do
4599 (x_reg, x_code) <- getSomeReg x
4601 opc = case to of F32 -> GITOF; F64 -> GITOD
4602 code dst = x_code `snocOL` opc x_reg dst
4603 -- ToDo: works for non-I32 reps?
4605 return (Any to code)
-- Float -> int, selecting on the *source* rep.
4609 coerceFP2Int from to x = do
4610 (x_reg, x_code) <- getSomeReg x
4612 opc = case from of F32 -> GFTOI; F64 -> GDTOI
4613 code dst = x_code `snocOL` opc x_reg dst
4614 -- ToDo: works for non-I32 reps?
4616 return (Any to code)
4618 #endif /* i386_TARGET_ARCH */
4620 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
4622 #if x86_64_TARGET_ARCH
-- x86-64 coercions via SSE: CVTSS2SI/CVTSD2SI (float->int),
-- CVTSI2SS/CVTSI2SD (int->float), CVTSD2SS/CVTSS2SD (float<->double).
-- NOTE(review): the @let@ lines introducing opc/code (4626, 4634, 4643)
-- are missing from this listing.
4624 coerceFP2Int from to x = do
4625 (x_op, x_code) <- getOperand x -- ToDo: could be a safe operand
4627 opc = case from of F32 -> CVTSS2SI; F64 -> CVTSD2SI
4628 code dst = x_code `snocOL` opc x_op dst
4630 return (Any to code) -- works even if the destination rep is <I32
4632 coerceInt2FP from to x = do
4633 (x_op, x_code) <- getOperand x -- ToDo: could be a safe operand
4635 opc = case to of F32 -> CVTSI2SS; F64 -> CVTSI2SD
4636 code dst = x_code `snocOL` opc x_op dst
4638 return (Any to code) -- works even if the destination rep is <I32
-- Float<->double conversion, selecting the opcode on the *target* rep.
4640 coerceFP2FP :: MachRep -> CmmExpr -> NatM Register
4641 coerceFP2FP to x = do
4642 (x_reg, x_code) <- getSomeReg x
4644 opc = case to of F32 -> CVTSD2SS; F64 -> CVTSS2SD
4645 code dst = x_code `snocOL` opc x_reg dst
4647 return (Any to code)
4651 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
4653 #if sparc_TARGET_ARCH
-- SPARC coercions: int<->float moves go through a stack slot
-- (spRel (-2)) because SPARC cannot move directly between integer and
-- FP register files; FxTOy then converts within the FP file.
-- NOTE(review): the function-head line for coerceInt2FP (circa 4655) and
-- the @let@/@in@ lines are missing from this listing.
4656 = getRegister x `thenNat` \ register ->
4657 getNewRegNat IntRep `thenNat` \ reg ->
4659 code = registerCode register reg
4660 src = registerName register reg
4662 code__2 dst = code `appOL` toOL [
4663 ST W src (spRel (-2)),
4664 LD W (spRel (-2)) dst,
4665 FxTOy W (primRepToSize pk) dst dst]
4667 return (Any pk code__2)
-- Float -> int: convert to a word-sized value in an FP scratch reg,
-- then bounce it through the stack slot into an integer register.
4670 coerceFP2Int fprep x
4671 = ASSERT(fprep == F64 || fprep == F32)
4672 getRegister x `thenNat` \ register ->
4673 getNewRegNat fprep `thenNat` \ reg ->
4674 getNewRegNat F32 `thenNat` \ tmp ->
4676 code = registerCode register reg
4677 src = registerName register reg
4678 code__2 dst = code `appOL` toOL [
4679 FxTOy (primRepToSize fprep) W src tmp,
4680 ST W tmp (spRel (-2)),
4681 LD W (spRel (-2)) dst]
4683 return (Any IntRep code__2)
-- coerceDbl2Flt / coerceFlt2Dbl: single FxTOy conversion instructions.
-- NOTE(review): their head lines and the @in return (Any ...)@ wrappers
-- around the lambdas at 4693/4703 are missing from this listing.
4687 = getRegister x `thenNat` \ register ->
4688 getNewRegNat F64 `thenNat` \ tmp ->
4689 let code = registerCode register tmp
4690 src = registerName register tmp
4693 (\dst -> code `snocOL` FxTOy DF F src dst))
4697 = getRegister x `thenNat` \ register ->
4698 getNewRegNat F32 `thenNat` \ tmp ->
4699 let code = registerCode register tmp
4700 src = registerName register tmp
4703 (\dst -> code `snocOL` FxTOy F DF src dst))
4705 #endif /* sparc_TARGET_ARCH */
4707 #if powerpc_TARGET_ARCH
-- | coerceInt2FP (PowerPC): the classic bit-twiddling int->double
-- sequence.  The XORIS with 0x8000 flips the sign bit and LIS 0x4330
-- builds the 0x43300000 high word, so the stack slot holds the double
-- 2^52 + (x xor 2^31); subtracting the magic constant (loaded via the
-- data-section label referenced through dynRef/addr) recovers x as a
-- double -- TODO confirm, since the lines between 4716 and 4719 (the
-- LDATA emitting the constant) and line 4727 (the FP load of it) are
-- missing from this listing.  maybe_exts sign-extends sub-word sources
-- first; maybe_frsp rounds to single precision when the target is F32.
4708 coerceInt2FP fromRep toRep x = do
4709 (src, code) <- getSomeReg x
4710 lbl <- getNewLabelNat
4711 itmp <- getNewRegNat I32
4712 ftmp <- getNewRegNat F64
4713 dynRef <- cmmMakeDynamicReference addImportNat False lbl
4714 Amode addr addr_code <- getAmode dynRef
4716 code' dst = code `appOL` maybe_exts `appOL` toOL [
4719 CmmStaticLit (CmmInt 0x43300000 I32),
4720 CmmStaticLit (CmmInt 0x80000000 I32)],
4721 XORIS itmp src (ImmInt 0x8000),
4722 ST I32 itmp (spRel 3),
4723 LIS itmp (ImmInt 0x4330),
4724 ST I32 itmp (spRel 2),
4725 LD F64 ftmp (spRel 2)
4726 ] `appOL` addr_code `appOL` toOL [
4728 FSUB F64 dst ftmp dst
4729 ] `appOL` maybe_frsp dst
4731 maybe_exts = case fromRep of
4732 I8 -> unitOL $ EXTS I8 src src
4733 I16 -> unitOL $ EXTS I16 src src
4735 maybe_frsp dst = case toRep of
4736 F32 -> unitOL $ FRSP dst dst
4738 return (Any toRep code')
-- | coerceFP2Int (PowerPC): convert in an FP register, spill the 64-bit
-- result to the stack, and load the low word into an integer register.
-- NOTE(review): the line at 4747 (the actual convert-to-int
-- instruction, per the comment above it) is missing from this listing.
4740 coerceFP2Int fromRep toRep x = do
4741 -- the reps don't really matter: F*->F64 and I32->I* are no-ops
4742 (src, code) <- getSomeReg x
4743 tmp <- getNewRegNat F64
4745 code' dst = code `appOL` toOL [
4746 -- convert to int in FP reg
4748 -- store value (64bit) from FP to stack
4749 ST F64 tmp (spRel 2),
4750 -- read low word of value (high word is undefined)
4751 LD I32 dst (spRel 3)]
4752 return (Any toRep code')
4753 #endif /* powerpc_TARGET_ARCH */
4756 -- -----------------------------------------------------------------------------
4757 -- eXTRA_STK_ARGS_HERE
4759 -- We (allegedly) put the first six C-call arguments in registers;
4760 -- where do we start putting the rest of them?
4762 -- Moved from MachInstrs (SDM):
-- | Stack offset (in words) at which C-call arguments beyond the
-- register-passed ones are placed: 0 on Alpha, 23 on SPARC.
-- NOTE(review): the defining LHS line (4766, @eXTRA_STK_ARGS_HERE@) and
-- the closing #endif are missing from this listing.
4764 #if alpha_TARGET_ARCH || sparc_TARGET_ARCH
4765 eXTRA_STK_ARGS_HERE :: Int
4767 = IF_ARCH_alpha(0, IF_ARCH_sparc(23, ???))