1 -----------------------------------------------------------------------------
3 -- Generating machine code (instruction selection)
5 -- (c) The University of Glasgow 1996-2004
7 -----------------------------------------------------------------------------
9 -- This is a big module, but, if you pay attention to
10 -- (a) the sectioning, (b) the type signatures, and
11 -- (c) the #if blah_TARGET_ARCH things, the
12 -- structure should not be too overwhelming.
14 module MachCodeGen ( cmmTopCodeGen, InstrBlock ) where
16 #include "HsVersions.h"
17 #include "nativeGen/NCG.h"
24 import PositionIndependentCode ( cmmMakeDynamicReference, initializePicBase )
26 -- Our intermediate code:
27 import PprCmm ( pprExpr )
33 import StaticFlags ( opt_PIC )
34 import ForeignCall ( CCallConv(..) )
38 import qualified Outputable
40 import FastTypes ( isFastTrue )
41 import Constants ( wORD_SIZE )
44 import Outputable ( assertPanic )
45 import TRACE ( trace )
48 import Control.Monad ( mapAndUnzipM )
49 import Maybe ( fromJust )
53 -- -----------------------------------------------------------------------------
54 -- Top-level of the instruction selector
56 -- | 'InstrBlock's are the insn sequences generated by the insn selectors.
57 -- They are really trees of insns to facilitate fast appending, where a
58 -- left-to-right traversal (pre-order?) yields the insns in the correct
61 type InstrBlock = OrdList Instr
-- | Instruction-select one top-level Cmm entity.
-- A CmmProc is code-generated block by block; statics emitted by the
-- blocks (via LDATA) are floated out alongside the resulting proc.
63 cmmTopCodeGen :: CmmTop -> NatM [NatCmmTop]
64 cmmTopCodeGen (CmmProc info lab params blocks) = do
65 (nat_blocks,statics) <- mapAndUnzipM basicBlockCodeGen blocks
66 picBaseMb <- getPicBaseMaybeNat
67 let proc = CmmProc info lab params (concat nat_blocks)
68 tops = proc : concat statics
-- NOTE(review): the scrutinee line ("case picBaseMb of") is elided from
-- this listing; the two alternatives below clearly belong to it.  If a
-- PIC base register was requested, prepend its initialization code.
70 Just picBase -> initializePicBase picBase tops
71 Nothing -> return tops
-- Data sections need no instruction selection; CmmStatic is reused as-is.
73 cmmTopCodeGen (CmmData sec dat) = do
74 return [CmmData sec dat] -- no translation, we just use CmmStatic
-- | Generate instructions for one basic block.  Instruction selection may
-- introduce fresh block boundaries (NEWBLOCK pseudo-instructions) and
-- local data (LDATA pseudo-instructions); both are split back out here.
76 basicBlockCodeGen :: CmmBasicBlock -> NatM ([NatBasicBlock],[NatCmmTop])
77 basicBlockCodeGen (BasicBlock id stmts) = do
78 instrs <- stmtsToInstrs stmts
79 -- code generation may introduce new basic block boundaries, which
80 -- are indicated by the NEWBLOCK instruction. We must split up the
81 -- instruction stream into basic blocks again. Also, we extract
-- (continuation of the comment above, and the "let" keyword introducing
-- the bindings below, are elided from this listing)
84 (top,other_blocks,statics) = foldrOL mkBlocks ([],[],[]) instrs
-- NEWBLOCK closes the instructions accumulated so far into a named
-- block and starts a fresh accumulator.
86 mkBlocks (NEWBLOCK id) (instrs,blocks,statics)
87 = ([], BasicBlock id instrs : blocks, statics)
-- LDATA is floated out of the instruction stream into the statics list.
88 mkBlocks (LDATA sec dat) (instrs,blocks,statics)
89 = (instrs, blocks, CmmData sec dat:statics)
-- Ordinary instructions are prepended to the current block.
90 mkBlocks instr (instrs,blocks,statics)
91 = (instr:instrs, blocks, statics)
93 return (BasicBlock id top : other_blocks, statics)
-- | Flatten a statement list into a single instruction stream.
95 stmtsToInstrs :: [CmmStmt] -> NatM InstrBlock
-- NOTE(review): the equation's left-hand side ("stmtsToInstrs stmts") is
-- elided from this listing; the body selects each statement and
-- concatenates the resulting OrdLists.
97 = do instrss <- mapM stmtToInstrs stmts
98 return (concatOL instrss)
-- | Instruction-select a single Cmm statement, dispatching on its
-- constructor.  NOTE(review): the alternative heads for the assignment
-- forms (presumably "CmmAssign reg src" before line 106 and
-- "CmmStore addr src" before line 114), and the closing #endif lines of
-- the CPP blocks, are elided from this listing.
100 stmtToInstrs :: CmmStmt -> NatM InstrBlock
101 stmtToInstrs stmt = case stmt of
102 CmmNop -> return nilOL
103 CmmComment s -> return (unitOL (COMMENT s))
-- Register assignment: dispatch on the representation of the target.
106 | isFloatingRep kind -> assignReg_FltCode kind reg src
107 #if WORD_SIZE_IN_BITS==32
-- 64-bit values need the special pair-of-registers path on 32-bit hosts.
108 | kind == I64 -> assignReg_I64Code reg src
110 | otherwise -> assignReg_IntCode kind reg src
111 where kind = cmmRegRep reg
-- Memory store: same dispatch, keyed on the representation of the source.
114 | isFloatingRep kind -> assignMem_FltCode kind addr src
115 #if WORD_SIZE_IN_BITS==32
116 | kind == I64 -> assignMem_I64Code addr src
118 | otherwise -> assignMem_IntCode kind addr src
119 where kind = cmmExprRep src
121 CmmCall target result_regs args vols
122 -> genCCall target result_regs args vols
124 CmmBranch id -> genBranch id
125 CmmCondBranch arg id -> genCondJump id arg
126 CmmSwitch arg ids -> genSwitch arg ids
-- The jump's params are not needed for code generation here.
127 CmmJump arg params -> genJump arg
129 -- -----------------------------------------------------------------------------
130 -- General things for putting together code sequences
132 -- Expand CmmRegOff. ToDo: should we do it this way around, or convert
133 -- CmmExprs into CmmRegOff?
-- | Rewrite a CmmRegOff node into the equivalent explicit addition, so
-- the rest of the instruction selector only has to handle CmmMachOp.
134 mangleIndexTree :: CmmExpr -> CmmExpr
135 mangleIndexTree (CmmRegOff reg off)
136 = CmmMachOp (MO_Add rep) [CmmReg reg, CmmLit (CmmInt (fromIntegral off) rep)]
-- The addition is performed at the register's own representation.
137 where rep = cmmRegRep reg
139 -- -----------------------------------------------------------------------------
140 -- Code gen for 64-bit arithmetic on 32-bit platforms
143 Simple support for generating 64-bit code (ie, 64 bit values and 64
144 bit assignments) on 32-bit platforms. Unlike the main code generator
145 we merely shoot for generating working code as simply as possible, and
146 pay little attention to code quality. Specifically, there is no
147 attempt to deal cleverly with the fixed-vs-floating register
148 distinction; all values are generated into (pairs of) floating
149 registers, even if this would mean some redundant reg-reg moves as a
150 result. Only one of the VRegUniques is returned, since it will be
151 of the VRegUniqueLo form, and the upper-half VReg can be determined
152 by applying getHiVRegFromLo to it.
155 data ChildCode64 -- a.k.a "Register64"
158 Reg -- the lower 32-bit temporary which contains the
159 -- result; use getHiVRegFromLo to find the other
160 -- VRegUnique. Rules of this simplified insn
161 -- selection game are therefore that the returned
162 -- Reg may be modified
164 #if WORD_SIZE_IN_BITS==32
165 assignMem_I64Code :: CmmExpr -> CmmExpr -> NatM InstrBlock
166 assignReg_I64Code :: CmmReg -> CmmExpr -> NatM InstrBlock
169 #ifndef x86_64_TARGET_ARCH
170 iselExpr64 :: CmmExpr -> NatM ChildCode64
173 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-- | (i386) Store a 64-bit value to memory as two 32-bit MOVs.
177 assignMem_I64Code addrTree valueTree = do
178 Amode addr addr_code <- getAmode addrTree
179 ChildCode64 vcode rlo <- iselExpr64 valueTree
-- The high half lives in the paired virtual register.
181 rhi = getHiVRegFromLo rlo
183 -- Little-endian store
184 mov_lo = MOV I32 (OpReg rlo) (OpAddr addr)
-- NOTE(review): fromJust is partial -- this panics if the amode cannot
-- be offset by 4 (addrOffset returning Nothing); presumably that cannot
-- happen for amodes produced by getAmode here -- confirm.
185 mov_hi = MOV I32 (OpReg rhi) (OpAddr (fromJust (addrOffset addr 4)))
187 return (vcode `appOL` addr_code `snocOL` mov_lo `snocOL` mov_hi)
-- | (i386) Assign a 64-bit value to a local register pair: two reg-reg
-- MOVs from the source pair to the destination pair.
190 assignReg_I64Code (CmmLocal (LocalReg u_dst pk)) valueTree = do
191 ChildCode64 vcode r_src_lo <- iselExpr64 valueTree
-- Destination low half is the virtual reg for the unique; both high
-- halves are derived with getHiVRegFromLo (see ChildCode64 above).
193 r_dst_lo = mkVReg u_dst I32
194 r_dst_hi = getHiVRegFromLo r_dst_lo
195 r_src_hi = getHiVRegFromLo r_src_lo
196 mov_lo = MOV I32 (OpReg r_src_lo) (OpReg r_dst_lo)
197 mov_hi = MOV I32 (OpReg r_src_hi) (OpReg r_dst_hi)
200 vcode `snocOL` mov_lo `snocOL` mov_hi
-- Only local registers can hold I64 on 32-bit targets; anything else is
-- a compiler bug.
203 assignReg_I64Code lvalue valueTree
204 = panic "assignReg_I64Code(i386): invalid lvalue"
-- | (i386) 64-bit literal: split into low/high 32-bit immediates and
-- load each half into the fresh register pair.
208 iselExpr64 (CmmLit (CmmInt i _)) = do
209 (rlo,rhi) <- getNewRegPairNat I32
-- r = low 32 bits, q = high 32 bits of the literal.
211 r = fromIntegral (fromIntegral i :: Word32)
212 q = fromIntegral ((fromIntegral i `shiftR` 32) :: Word32)
214 MOV I32 (OpImm (ImmInteger r)) (OpReg rlo),
215 MOV I32 (OpImm (ImmInteger q)) (OpReg rhi)
218 return (ChildCode64 code rlo)
-- (i386) 64-bit load: two 32-bit loads, little-endian (low word at the
-- address, high word at address+4).
220 iselExpr64 (CmmLoad addrTree I64) = do
221 Amode addr addr_code <- getAmode addrTree
222 (rlo,rhi) <- getNewRegPairNat I32
224 mov_lo = MOV I32 (OpAddr addr) (OpReg rlo)
-- NOTE(review): fromJust is partial if addrOffset fails -- presumably
-- safe for getAmode results -- confirm.
225 mov_hi = MOV I32 (OpAddr (fromJust (addrOffset addr 4))) (OpReg rhi)
228 ChildCode64 (addr_code `snocOL` mov_lo `snocOL` mov_hi)
-- (i386) A local 64-bit register needs no code: return its low vreg; the
-- caller recovers the high half with getHiVRegFromLo.
232 iselExpr64 (CmmReg (CmmLocal (LocalReg vu I64)))
233 = return (ChildCode64 nilOL (mkVReg vu I32))
235 -- we handle addition, but rather badly
-- | (i386) Add a 64-bit constant: ADD the low halves, then ADC the high
-- halves so the carry propagates.
236 iselExpr64 (CmmMachOp (MO_Add _) [e1, CmmLit (CmmInt i _)]) = do
237 ChildCode64 code1 r1lo <- iselExpr64 e1
238 (rlo,rhi) <- getNewRegPairNat I32
-- r = low 32 bits of the constant, q = high 32 bits.
240 r = fromIntegral (fromIntegral i :: Word32)
241 q = fromIntegral ((fromIntegral i `shiftR` 32) :: Word32)
242 r1hi = getHiVRegFromLo r1lo
-- NOTE(review): the binding of `code` (presumably code1 `appOL` the
-- instruction list below) is elided from this listing.
244 toOL [ MOV I32 (OpReg r1lo) (OpReg rlo),
245 ADD I32 (OpImm (ImmInteger r)) (OpReg rlo),
246 MOV I32 (OpReg r1hi) (OpReg rhi),
247 ADC I32 (OpImm (ImmInteger q)) (OpReg rhi) ]
249 return (ChildCode64 code rlo)
-- (i386) General 64-bit addition: same ADD/ADC pattern with both
-- operands in register pairs.
251 iselExpr64 (CmmMachOp (MO_Add _) [e1,e2]) = do
252 ChildCode64 code1 r1lo <- iselExpr64 e1
253 ChildCode64 code2 r2lo <- iselExpr64 e2
254 (rlo,rhi) <- getNewRegPairNat I32
256 r1hi = getHiVRegFromLo r1lo
257 r2hi = getHiVRegFromLo r2lo
260 toOL [ MOV I32 (OpReg r1lo) (OpReg rlo),
261 ADD I32 (OpReg r2lo) (OpReg rlo),
262 MOV I32 (OpReg r1hi) (OpReg rhi),
263 ADC I32 (OpReg r2hi) (OpReg rhi) ]
265 return (ChildCode64 code rlo)
-- Catch-all (the "iselExpr64 expr" head is elided from this listing):
-- any other 64-bit expression shape is a compiler bug on i386.
268 = pprPanic "iselExpr64(i386)" (ppr expr)
270 #endif /* i386_TARGET_ARCH */
272 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
274 #if sparc_TARGET_ARCH
276 assignMem_I64Code addrTree valueTree
277 = iselExpr64 valueTree `thenNat` \ (ChildCode64 vcode vrlo) ->
278 getRegister addrTree `thenNat` \ register_addr ->
279 getNewRegNat IntRep `thenNat` \ t_addr ->
280 let rlo = VirtualRegI vrlo
281 rhi = getHiVRegFromLo rlo
282 code_addr = registerCode register_addr t_addr
283 reg_addr = registerName register_addr t_addr
285 mov_hi = ST W rhi (AddrRegImm reg_addr (ImmInt 0))
286 mov_lo = ST W rlo (AddrRegImm reg_addr (ImmInt 4))
288 return (vcode `appOL` code_addr `snocOL` mov_hi `snocOL` mov_lo)
291 assignReg_I64Code (StixTemp (StixVReg u_dst pk)) valueTree
292 = iselExpr64 valueTree `thenNat` \ (ChildCode64 vcode vr_src_lo) ->
294 r_dst_lo = mkVReg u_dst IntRep
295 r_src_lo = VirtualRegI vr_src_lo
296 r_dst_hi = getHiVRegFromLo r_dst_lo
297 r_src_hi = getHiVRegFromLo r_src_lo
298 mov_lo = mkMOV r_src_lo r_dst_lo
299 mov_hi = mkMOV r_src_hi r_dst_hi
300 mkMOV sreg dreg = OR False g0 (RIReg sreg) dreg
303 vcode `snocOL` mov_hi `snocOL` mov_lo
305 assignReg_I64Code lvalue valueTree
306 = pprPanic "assignReg_I64Code(sparc): invalid lvalue"
310 -- Don't delete this -- it's very handy for debugging.
312 -- | trace ("iselExpr64: " ++ showSDoc (pprCmmExpr expr)) False
313 -- = panic "iselExpr64(???)"
315 iselExpr64 (CmmLoad I64 addrTree)
316 = getRegister addrTree `thenNat` \ register_addr ->
317 getNewRegNat IntRep `thenNat` \ t_addr ->
318 getNewRegNat IntRep `thenNat` \ rlo ->
319 let rhi = getHiVRegFromLo rlo
320 code_addr = registerCode register_addr t_addr
321 reg_addr = registerName register_addr t_addr
322 mov_hi = LD W (AddrRegImm reg_addr (ImmInt 0)) rhi
323 mov_lo = LD W (AddrRegImm reg_addr (ImmInt 4)) rlo
326 ChildCode64 (code_addr `snocOL` mov_hi `snocOL` mov_lo)
-- | (sparc) Read a local 64-bit register: copy both halves into a fresh
-- pair using OR-with-%g0 as a register move.
-- NOTE(review): the pattern binds `uq` but the body uses `vu` (line 333),
-- which is not in scope here -- this cannot compile as written; it should
-- presumably read `mkVReg uq IntRep`.  Confirm against the sparc build.
330 iselExpr64 (CmmReg (CmmLocal (LocalReg uq I64)))
331 = getNewRegNat IntRep `thenNat` \ r_dst_lo ->
332 let r_dst_hi = getHiVRegFromLo r_dst_lo
333 r_src_lo = mkVReg vu IntRep
334 r_src_hi = getHiVRegFromLo r_src_lo
335 mov_lo = mkMOV r_src_lo r_dst_lo
336 mov_hi = mkMOV r_src_hi r_dst_hi
-- OR rd, %g0, rs is the canonical SPARC register-to-register move.
337 mkMOV sreg dreg = OR False g0 (RIReg sreg) dreg
340 ChildCode64 (toOL [mov_hi, mov_lo]) (getVRegUnique r_dst_lo)
-- | (sparc) 64-bit-returning call: the result arrives split across two
-- registers (presumably the SPARC out registers %o0/%o1 -- confirm) and
-- is copied into a fresh virtual pair.
-- NOTE(review): `kind` on line 344 is not bound by the visible pattern
-- (which matches the literal I64); it should presumably be the call's
-- result rep -- confirm, this looks like it cannot compile as written.
343 iselExpr64 (StCall fn cconv I64 args)
344 = genCCall fn cconv kind args `thenNat` \ call ->
345 getNewRegNat IntRep `thenNat` \ r_dst_lo ->
346 let r_dst_hi = getHiVRegFromLo r_dst_lo
347 mov_lo = mkMOV o0 r_dst_lo
348 mov_hi = mkMOV o1 r_dst_hi
-- OR rd, %g0, rs is the canonical SPARC register-to-register move.
349 mkMOV sreg dreg = OR False g0 (RIReg sreg) dreg
352 ChildCode64 (call `snocOL` mov_hi `snocOL` mov_lo)
353 (getVRegUnique r_dst_lo)
359 #endif /* sparc_TARGET_ARCH */
361 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
363 #if powerpc_TARGET_ARCH
-- | (powerpc) Compute addressing modes for the two halves of a 64-bit
-- memory operand: the given address and the address offset by 4.  If the
-- offset amode is not representable, fall back to materializing the
-- address in a register and using reg+0 / reg+4.
365 getI64Amodes :: CmmExpr -> NatM (AddrMode, AddrMode, InstrBlock)
366 getI64Amodes addrTree = do
367 Amode hi_addr addr_code <- getAmode addrTree
368 case addrOffset hi_addr 4 of
369 Just lo_addr -> return (hi_addr, lo_addr, addr_code)
370 Nothing -> do (hi_ptr, code) <- getSomeReg addrTree
371 return (AddrRegImm hi_ptr (ImmInt 0),
372 AddrRegImm hi_ptr (ImmInt 4),
-- NOTE(review): the third tuple component (presumably `code)`) is elided
-- from this listing.
375 assignMem_I64Code addrTree valueTree = do
376 (hi_addr, lo_addr, addr_code) <- getI64Amodes addrTree
377 ChildCode64 vcode rlo <- iselExpr64 valueTree
379 rhi = getHiVRegFromLo rlo
382 mov_hi = ST I32 rhi hi_addr
383 mov_lo = ST I32 rlo lo_addr
385 return (vcode `appOL` addr_code `snocOL` mov_lo `snocOL` mov_hi)
387 assignReg_I64Code (CmmLocal (LocalReg u_dst pk)) valueTree = do
388 ChildCode64 vcode r_src_lo <- iselExpr64 valueTree
390 r_dst_lo = mkVReg u_dst I32
391 r_dst_hi = getHiVRegFromLo r_dst_lo
392 r_src_hi = getHiVRegFromLo r_src_lo
393 mov_lo = MR r_dst_lo r_src_lo
394 mov_hi = MR r_dst_hi r_src_hi
397 vcode `snocOL` mov_lo `snocOL` mov_hi
400 assignReg_I64Code lvalue valueTree
401 = panic "assignReg_I64Code(powerpc): invalid lvalue"
404 -- Don't delete this -- it's very handy for debugging.
406 -- | trace ("iselExpr64: " ++ showSDoc (pprCmmExpr expr)) False
407 -- = panic "iselExpr64(???)"
409 iselExpr64 (CmmLoad addrTree I64) = do
410 (hi_addr, lo_addr, addr_code) <- getI64Amodes addrTree
411 (rlo, rhi) <- getNewRegPairNat I32
412 let mov_hi = LD I32 rhi hi_addr
413 mov_lo = LD I32 rlo lo_addr
414 return $ ChildCode64 (addr_code `snocOL` mov_lo `snocOL` mov_hi)
417 iselExpr64 (CmmReg (CmmLocal (LocalReg vu I64)))
418 = return (ChildCode64 nilOL (mkVReg vu I32))
-- | (powerpc) Build a 64-bit literal in a register pair from four 16-bit
-- halves: LIS loads a shifted-left-16 immediate, OR merges the low 16.
420 iselExpr64 (CmmLit (CmmInt i _)) = do
421 (rlo,rhi) <- getNewRegPairNat I32
-- half0..half3: bits 0-15, 16-31, 32-47, 48-63 of the literal.
423 half0 = fromIntegral (fromIntegral i :: Word16)
424 half1 = fromIntegral ((fromIntegral i `shiftR` 16) :: Word16)
425 half2 = fromIntegral ((fromIntegral i `shiftR` 32) :: Word16)
426 half3 = fromIntegral ((fromIntegral i `shiftR` 48) :: Word16)
429 LIS rlo (ImmInt half1),
430 OR rlo rlo (RIImm $ ImmInt half0),
431 LIS rhi (ImmInt half3),
-- NOTE(review): BUG -- this ORs half2 into rlo, clobbering the low word
-- just assembled on lines 429-430.  By symmetry with the rlo pair it
-- should be `OR rhi rhi (RIImm $ ImmInt half2)`.
432 OR rlo rlo (RIImm $ ImmInt half2)
435 return (ChildCode64 code rlo)
437 iselExpr64 (CmmMachOp (MO_Add _) [e1,e2]) = do
438 ChildCode64 code1 r1lo <- iselExpr64 e1
439 ChildCode64 code2 r2lo <- iselExpr64 e2
440 (rlo,rhi) <- getNewRegPairNat I32
442 r1hi = getHiVRegFromLo r1lo
443 r2hi = getHiVRegFromLo r2lo
446 toOL [ ADDC rlo r1lo r2lo,
449 return (ChildCode64 code rlo)
452 = pprPanic "iselExpr64(powerpc)" (ppr expr)
454 #endif /* powerpc_TARGET_ARCH */
457 -- -----------------------------------------------------------------------------
458 -- The 'Register' type
460 -- 'Register's passed up the tree. If the stix code forces the register
461 -- to live in a pre-decided machine register, it comes out as @Fixed@;
462 -- otherwise, it comes out as @Any@, and the parent can decide which
463 -- register to put it in.
466 = Fixed MachRep Reg InstrBlock
467 | Any MachRep (Reg -> InstrBlock)
-- | Replace the MachRep recorded in a Register without touching the
-- underlying register or code; used for conversions that are no-ops at
-- the machine level.
469 swizzleRegisterRep :: Register -> MachRep -> Register
470 swizzleRegisterRep (Fixed _ reg code) rep = Fixed rep reg code
471 swizzleRegisterRep (Any _ codefn) rep = Any rep codefn
474 -- -----------------------------------------------------------------------------
475 -- Utils based on getRegister, below
477 -- The dual to getAnyReg: compute an expression into a register, but
478 -- we don't mind which one it is.
479 getSomeReg :: CmmExpr -> NatM (Reg, InstrBlock)
-- NOTE(review): the equation head and the case analysis on `r` are elided
-- from this listing; the visible lines handle the `Any rep code` shape by
-- allocating a fresh register of that rep and instantiating the code with
-- it.  The `Fixed` alternative is presumably handled in the elided lines.
481 r <- getRegister expr
484 tmp <- getNewRegNat rep
485 return (tmp, code tmp)
489 -- -----------------------------------------------------------------------------
490 -- Grab the Reg for a CmmReg
-- | Map a CmmReg to the Reg that holds it.
492 getRegisterReg :: CmmReg -> Reg
-- NOTE(review): the body of the local-register equation is elided from
-- this listing (presumably `= mkVReg u pk` -- confirm).
494 getRegisterReg (CmmLocal (LocalReg u pk))
-- Global (magic) registers must map to a real machine register on this
-- platform; a memory-mapped one here is a compiler bug.
497 getRegisterReg (CmmGlobal mid)
498 = case get_GlobalReg_reg_or_addr mid of
499 Left (RealReg rrno) -> RealReg rrno
500 _other -> pprPanic "getRegisterReg-memory" (ppr $ CmmGlobal mid)
501 -- By this stage, the only MagicIds remaining should be the
502 -- ones which map to a real machine register on this
503 -- platform. Hence ...
506 -- -----------------------------------------------------------------------------
507 -- Generate code to get a subtree into a Register
509 -- Don't delete this -- it's very handy for debugging.
511 -- | trace ("getRegister: " ++ showSDoc (pprCmmExpr expr)) False
512 -- = panic "getRegister(???)"
514 getRegister :: CmmExpr -> NatM Register
-- Reading a register needs no code: the result is Fixed in that register.
516 getRegister (CmmReg reg)
517 = return (Fixed (cmmRegRep reg) (getRegisterReg reg) nilOL)
-- Reg+offset nodes are normalized to an explicit addition first.
519 getRegister tree@(CmmRegOff _ _)
520 = getRegister (mangleIndexTree tree)
-- The PIC base: fetched via the monad so all uses share one register.
-- NOTE(review): the equation's `= do` line is elided from this listing.
522 getRegister CmmPicBaseReg
524 reg <- getPicBaseNat wordRep
525 return (Fixed wordRep reg nilOL)
527 -- end of machine-"independent" bit; here we go on the rest...
529 #if alpha_TARGET_ARCH
531 getRegister (StDouble d)
532 = getBlockIdNat `thenNat` \ lbl ->
533 getNewRegNat PtrRep `thenNat` \ tmp ->
534 let code dst = mkSeqInstrs [
535 LDATA RoDataSegment lbl [
536 DATA TF [ImmLab (rational d)]
538 LDA tmp (AddrImm (ImmCLbl lbl)),
539 LD TF dst (AddrReg tmp)]
541 return (Any F64 code)
543 getRegister (StPrim primop [x]) -- unary PrimOps
545 IntNegOp -> trivialUCode (NEG Q False) x
547 NotOp -> trivialUCode NOT x
549 FloatNegOp -> trivialUFCode FloatRep (FNEG TF) x
550 DoubleNegOp -> trivialUFCode F64 (FNEG TF) x
552 OrdOp -> coerceIntCode IntRep x
555 Float2IntOp -> coerceFP2Int x
556 Int2FloatOp -> coerceInt2FP pr x
557 Double2IntOp -> coerceFP2Int x
558 Int2DoubleOp -> coerceInt2FP pr x
560 Double2FloatOp -> coerceFltCode x
561 Float2DoubleOp -> coerceFltCode x
563 other_op -> getRegister (StCall fn CCallConv F64 [x])
565 fn = case other_op of
566 FloatExpOp -> FSLIT("exp")
567 FloatLogOp -> FSLIT("log")
568 FloatSqrtOp -> FSLIT("sqrt")
569 FloatSinOp -> FSLIT("sin")
570 FloatCosOp -> FSLIT("cos")
571 FloatTanOp -> FSLIT("tan")
572 FloatAsinOp -> FSLIT("asin")
573 FloatAcosOp -> FSLIT("acos")
574 FloatAtanOp -> FSLIT("atan")
575 FloatSinhOp -> FSLIT("sinh")
576 FloatCoshOp -> FSLIT("cosh")
577 FloatTanhOp -> FSLIT("tanh")
578 DoubleExpOp -> FSLIT("exp")
579 DoubleLogOp -> FSLIT("log")
580 DoubleSqrtOp -> FSLIT("sqrt")
581 DoubleSinOp -> FSLIT("sin")
582 DoubleCosOp -> FSLIT("cos")
583 DoubleTanOp -> FSLIT("tan")
584 DoubleAsinOp -> FSLIT("asin")
585 DoubleAcosOp -> FSLIT("acos")
586 DoubleAtanOp -> FSLIT("atan")
587 DoubleSinhOp -> FSLIT("sinh")
588 DoubleCoshOp -> FSLIT("cosh")
589 DoubleTanhOp -> FSLIT("tanh")
591 pr = panic "MachCode.getRegister: no primrep needed for Alpha"
593 getRegister (StPrim primop [x, y]) -- dyadic PrimOps
595 CharGtOp -> trivialCode (CMP LTT) y x
596 CharGeOp -> trivialCode (CMP LE) y x
597 CharEqOp -> trivialCode (CMP EQQ) x y
598 CharNeOp -> int_NE_code x y
599 CharLtOp -> trivialCode (CMP LTT) x y
600 CharLeOp -> trivialCode (CMP LE) x y
602 IntGtOp -> trivialCode (CMP LTT) y x
603 IntGeOp -> trivialCode (CMP LE) y x
604 IntEqOp -> trivialCode (CMP EQQ) x y
605 IntNeOp -> int_NE_code x y
606 IntLtOp -> trivialCode (CMP LTT) x y
607 IntLeOp -> trivialCode (CMP LE) x y
-- Unsigned Word comparisons, expressed via ULT/ULE with operand order
-- chosen to realize Gt/Ge.
609 WordGtOp -> trivialCode (CMP ULT) y x
-- NOTE(review): WordGeOp passes `x y`, unlike IntGeOp (line 603) and
-- AddrGeOp (line 617) which pass `y x` to realize >= via <= -- this
-- looks like an operand-order bug; confirm before relying on it.
610 WordGeOp -> trivialCode (CMP ULE) x y
611 WordEqOp -> trivialCode (CMP EQQ) x y
612 WordNeOp -> int_NE_code x y
613 WordLtOp -> trivialCode (CMP ULT) x y
614 WordLeOp -> trivialCode (CMP ULE) x y
616 AddrGtOp -> trivialCode (CMP ULT) y x
617 AddrGeOp -> trivialCode (CMP ULE) y x
618 AddrEqOp -> trivialCode (CMP EQQ) x y
619 AddrNeOp -> int_NE_code x y
620 AddrLtOp -> trivialCode (CMP ULT) x y
621 AddrLeOp -> trivialCode (CMP ULE) x y
623 FloatGtOp -> cmpF_code (FCMP TF LE) EQQ x y
624 FloatGeOp -> cmpF_code (FCMP TF LTT) EQQ x y
625 FloatEqOp -> cmpF_code (FCMP TF EQQ) NE x y
626 FloatNeOp -> cmpF_code (FCMP TF EQQ) EQQ x y
627 FloatLtOp -> cmpF_code (FCMP TF LTT) NE x y
628 FloatLeOp -> cmpF_code (FCMP TF LE) NE x y
630 DoubleGtOp -> cmpF_code (FCMP TF LE) EQQ x y
631 DoubleGeOp -> cmpF_code (FCMP TF LTT) EQQ x y
632 DoubleEqOp -> cmpF_code (FCMP TF EQQ) NE x y
633 DoubleNeOp -> cmpF_code (FCMP TF EQQ) EQQ x y
634 DoubleLtOp -> cmpF_code (FCMP TF LTT) NE x y
635 DoubleLeOp -> cmpF_code (FCMP TF LE) NE x y
637 IntAddOp -> trivialCode (ADD Q False) x y
638 IntSubOp -> trivialCode (SUB Q False) x y
639 IntMulOp -> trivialCode (MUL Q False) x y
640 IntQuotOp -> trivialCode (DIV Q False) x y
641 IntRemOp -> trivialCode (REM Q False) x y
643 WordAddOp -> trivialCode (ADD Q False) x y
644 WordSubOp -> trivialCode (SUB Q False) x y
645 WordMulOp -> trivialCode (MUL Q False) x y
646 WordQuotOp -> trivialCode (DIV Q True) x y
647 WordRemOp -> trivialCode (REM Q True) x y
649 FloatAddOp -> trivialFCode FloatRep (FADD TF) x y
650 FloatSubOp -> trivialFCode FloatRep (FSUB TF) x y
651 FloatMulOp -> trivialFCode FloatRep (FMUL TF) x y
652 FloatDivOp -> trivialFCode FloatRep (FDIV TF) x y
654 DoubleAddOp -> trivialFCode F64 (FADD TF) x y
655 DoubleSubOp -> trivialFCode F64 (FSUB TF) x y
656 DoubleMulOp -> trivialFCode F64 (FMUL TF) x y
657 DoubleDivOp -> trivialFCode F64 (FDIV TF) x y
659 AddrAddOp -> trivialCode (ADD Q False) x y
660 AddrSubOp -> trivialCode (SUB Q False) x y
661 AddrRemOp -> trivialCode (REM Q True) x y
663 AndOp -> trivialCode AND x y
664 OrOp -> trivialCode OR x y
665 XorOp -> trivialCode XOR x y
666 SllOp -> trivialCode SLL x y
667 SrlOp -> trivialCode SRL x y
669 ISllOp -> trivialCode SLL x y -- was: panic "AlphaGen:isll"
670 ISraOp -> trivialCode SRA x y -- was: panic "AlphaGen:isra"
671 ISrlOp -> trivialCode SRL x y -- was: panic "AlphaGen:isrl"
673 FloatPowerOp -> getRegister (StCall FSLIT("pow") CCallConv F64 [x,y])
674 DoublePowerOp -> getRegister (StCall FSLIT("pow") CCallConv F64 [x,y])
676 {- ------------------------------------------------------------
677 Some bizarre special code for getting condition codes into
678 registers. Integer non-equality is a test for equality
679 followed by an XOR with 1. (Integer comparisons always set
680 the result register to 0 or 1.) Floating point comparisons of
681 any kind leave the result in a floating point register, so we
682 need to wrangle an integer register out of things.
684 int_NE_code :: StixTree -> StixTree -> NatM Register
687 = trivialCode (CMP EQQ) x y `thenNat` \ register ->
688 getNewRegNat IntRep `thenNat` \ tmp ->
690 code = registerCode register tmp
691 src = registerName register tmp
692 code__2 dst = code . mkSeqInstr (XOR src (RIImm (ImmInt 1)) dst)
694 return (Any IntRep code__2)
696 {- ------------------------------------------------------------
697 Comments for int_NE_code also apply to cmpF_code
700 :: (Reg -> Reg -> Reg -> Instr)
702 -> StixTree -> StixTree
705 cmpF_code instr cond x y
706 = trivialFCode pr instr x y `thenNat` \ register ->
707 getNewRegNat F64 `thenNat` \ tmp ->
708 getBlockIdNat `thenNat` \ lbl ->
710 code = registerCode register tmp
711 result = registerName register tmp
713 code__2 dst = code . mkSeqInstrs [
714 OR zeroh (RIImm (ImmInt 1)) dst,
715 BF cond result (ImmCLbl lbl),
716 OR zeroh (RIReg zeroh) dst,
719 return (Any IntRep code__2)
721 pr = panic "trivialU?FCode: does not use PrimRep on Alpha"
722 ------------------------------------------------------------
724 getRegister (CmmLoad pk mem)
725 = getAmode mem `thenNat` \ amode ->
727 code = amodeCode amode
728 src = amodeAddr amode
729 size = primRepToSize pk
730 code__2 dst = code . mkSeqInstr (LD size dst src)
732 return (Any pk code__2)
734 getRegister (StInt i)
737 code dst = mkSeqInstr (OR zeroh (RIImm src) dst)
739 return (Any IntRep code)
742 code dst = mkSeqInstr (LDI Q dst src)
744 return (Any IntRep code)
746 src = ImmInt (fromInteger i)
751 code dst = mkSeqInstr (LDA dst (AddrImm imm__2))
753 return (Any PtrRep code)
756 imm__2 = case imm of Just x -> x
758 #endif /* alpha_TARGET_ARCH */
760 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
764 getRegister (CmmLit (CmmFloat f F32)) = do
765 lbl <- getNewLabelNat
766 let code dst = toOL [
769 CmmStaticLit (CmmFloat f F32)],
770 GLD F32 (ImmAddr (ImmCLbl lbl) 0) dst
773 return (Any F32 code)
776 getRegister (CmmLit (CmmFloat d F64))
778 = let code dst = unitOL (GLDZ dst)
779 in return (Any F64 code)
782 = let code dst = unitOL (GLD1 dst)
783 in return (Any F64 code)
786 lbl <- getNewLabelNat
787 let code dst = toOL [
790 CmmStaticLit (CmmFloat d F64)],
791 GLD F64 (ImmAddr (ImmCLbl lbl) 0) dst
794 return (Any F64 code)
796 #endif /* i386_TARGET_ARCH */
798 #if x86_64_TARGET_ARCH
800 getRegister (CmmLit (CmmFloat 0.0 rep)) = do
801 let code dst = unitOL (XOR rep (OpReg dst) (OpReg dst))
802 -- I don't know why there are xorpd, xorps, and pxor instructions.
803 -- They all appear to do the same thing --SDM
804 return (Any rep code)
806 getRegister (CmmLit (CmmFloat f rep)) = do
807 lbl <- getNewLabelNat
808 let code dst = toOL [
811 CmmStaticLit (CmmFloat f rep)],
812 MOV rep (OpAddr (ImmAddr (ImmCLbl lbl) 0)) (OpReg dst)
813 -- ToDo: should use %rip-relative
816 return (Any rep code)
818 #endif /* x86_64_TARGET_ARCH */
820 #if i386_TARGET_ARCH || x86_64_TARGET_ARCH
822 -- catch simple cases of zero- or sign-extended load
823 getRegister (CmmMachOp (MO_U_Conv I8 I32) [CmmLoad addr _]) = do
824 code <- intLoadCode (MOVZxL I8) addr
825 return (Any I32 code)
827 getRegister (CmmMachOp (MO_S_Conv I8 I32) [CmmLoad addr _]) = do
828 code <- intLoadCode (MOVSxL I8) addr
829 return (Any I32 code)
831 getRegister (CmmMachOp (MO_U_Conv I16 I32) [CmmLoad addr _]) = do
832 code <- intLoadCode (MOVZxL I16) addr
833 return (Any I32 code)
835 getRegister (CmmMachOp (MO_S_Conv I16 I32) [CmmLoad addr _]) = do
836 code <- intLoadCode (MOVSxL I16) addr
837 return (Any I32 code)
841 #if x86_64_TARGET_ARCH
843 -- catch simple cases of zero- or sign-extended load
844 getRegister (CmmMachOp (MO_U_Conv I8 I64) [CmmLoad addr _]) = do
845 code <- intLoadCode (MOVZxL I8) addr
846 return (Any I64 code)
848 getRegister (CmmMachOp (MO_S_Conv I8 I64) [CmmLoad addr _]) = do
849 code <- intLoadCode (MOVSxL I8) addr
850 return (Any I64 code)
852 getRegister (CmmMachOp (MO_U_Conv I16 I64) [CmmLoad addr _]) = do
853 code <- intLoadCode (MOVZxL I16) addr
854 return (Any I64 code)
856 getRegister (CmmMachOp (MO_S_Conv I16 I64) [CmmLoad addr _]) = do
857 code <- intLoadCode (MOVSxL I16) addr
858 return (Any I64 code)
860 getRegister (CmmMachOp (MO_U_Conv I32 I64) [CmmLoad addr _]) = do
861 code <- intLoadCode (MOV I32) addr -- 32-bit loads zero-extend
862 return (Any I64 code)
864 getRegister (CmmMachOp (MO_S_Conv I32 I64) [CmmLoad addr _]) = do
865 code <- intLoadCode (MOVSxL I32) addr
866 return (Any I64 code)
870 #if x86_64_TARGET_ARCH
871 getRegister (CmmMachOp (MO_S_Neg F32) [x]) = do
872 lbl <- getNewLabelNat
875 -- This is how gcc does it, so it can't be that bad:
876 LDATA ReadOnlyData16 [
879 CmmStaticLit (CmmInt 0x80000000 I32),
880 CmmStaticLit (CmmInt 0 I32),
881 CmmStaticLit (CmmInt 0 I32),
882 CmmStaticLit (CmmInt 0 I32)
884 XOR F32 (OpAddr (ImmAddr (ImmCLbl lbl) 0)) (OpReg dst)
885 -- xorps, so we need the 128-bit constant
886 -- ToDo: rip-relative
889 return (Any F32 code)
891 getRegister (CmmMachOp (MO_S_Neg F64) [x]) = do
892 lbl <- getNewLabelNat
894 -- This is how gcc does it, so it can't be that bad:
896 LDATA ReadOnlyData16 [
899 CmmStaticLit (CmmInt 0x8000000000000000 I64),
900 CmmStaticLit (CmmInt 0 I64)
902 -- gcc puts an unpck here. Wonder if we need it.
903 XOR F64 (OpAddr (ImmAddr (ImmCLbl lbl) 0)) (OpReg dst)
904 -- xorpd, so we need the 128-bit constant
905 -- ToDo: rip-relative
908 return (Any F64 code)
911 #if i386_TARGET_ARCH || x86_64_TARGET_ARCH
913 getRegister (CmmMachOp mop [x]) -- unary MachOps
916 MO_S_Neg F32 -> trivialUFCode F32 (GNEG F32) x
917 MO_S_Neg F64 -> trivialUFCode F64 (GNEG F64) x
920 MO_S_Neg rep -> trivialUCode rep (NEGI rep) x
921 MO_Not rep -> trivialUCode rep (NOT rep) x
924 -- TODO: these are only nops if the arg is not a fixed register that
925 -- can't be byte-addressed.
926 MO_U_Conv I32 I8 -> conversionNop I32 x
927 MO_S_Conv I32 I8 -> conversionNop I32 x
928 MO_U_Conv I16 I8 -> conversionNop I16 x
929 MO_S_Conv I16 I8 -> conversionNop I16 x
930 MO_U_Conv I32 I16 -> conversionNop I32 x
931 MO_S_Conv I32 I16 -> conversionNop I32 x
932 #if x86_64_TARGET_ARCH
933 MO_U_Conv I64 I32 -> conversionNop I64 x
934 MO_S_Conv I64 I32 -> conversionNop I64 x
935 MO_U_Conv I64 I16 -> conversionNop I64 x
936 MO_S_Conv I64 I16 -> conversionNop I64 x
937 MO_U_Conv I64 I8 -> conversionNop I64 x
938 MO_S_Conv I64 I8 -> conversionNop I64 x
941 MO_U_Conv rep1 rep2 | rep1 == rep2 -> conversionNop rep1 x
942 MO_S_Conv rep1 rep2 | rep1 == rep2 -> conversionNop rep1 x
945 MO_U_Conv I8 I32 -> integerExtend I8 I32 MOVZxL x
946 MO_U_Conv I16 I32 -> integerExtend I16 I32 MOVZxL x
947 MO_U_Conv I8 I16 -> integerExtend I8 I16 MOVZxL x
949 MO_S_Conv I8 I32 -> integerExtend I8 I32 MOVSxL x
950 MO_S_Conv I16 I32 -> integerExtend I16 I32 MOVSxL x
951 MO_S_Conv I8 I16 -> integerExtend I8 I16 MOVSxL x
953 #if x86_64_TARGET_ARCH
954 MO_U_Conv I8 I64 -> integerExtend I8 I64 MOVZxL x
955 MO_U_Conv I16 I64 -> integerExtend I16 I64 MOVZxL x
956 MO_U_Conv I32 I64 -> integerExtend I32 I64 MOVZxL x
957 MO_S_Conv I8 I64 -> integerExtend I8 I64 MOVSxL x
958 MO_S_Conv I16 I64 -> integerExtend I16 I64 MOVSxL x
959 MO_S_Conv I32 I64 -> integerExtend I32 I64 MOVSxL x
960 -- for 32-to-64 bit zero extension, amd64 uses an ordinary movl.
961 -- However, we don't want the register allocator to throw it
962 -- away as an unnecessary reg-to-reg move, so we keep it in
963 -- the form of a movzl and print it as a movl later.
967 MO_S_Conv F32 F64 -> conversionNop F64 x
968 MO_S_Conv F64 F32 -> conversionNop F32 x
970 MO_S_Conv F32 F64 -> coerceFP2FP F64 x
971 MO_S_Conv F64 F32 -> coerceFP2FP F32 x
975 | isFloatingRep from -> coerceFP2Int from to x
976 | isFloatingRep to -> coerceInt2FP from to x
978 other -> pprPanic "getRegister" (pprMachOp mop)
980 -- signed or unsigned extension.
981 integerExtend from to instr expr = do
982 (reg,e_code) <- if from == I8 then getByteReg expr
987 instr from (OpReg reg) (OpReg dst)
990 conversionNop new_rep expr
991 = do e_code <- getRegister expr
992 return (swizzleRegisterRep e_code new_rep)
995 getRegister e@(CmmMachOp mop [x, y]) -- dyadic MachOps
996 = ASSERT2(cmmExprRep x /= I8, pprExpr e)
998 MO_Eq F32 -> condFltReg EQQ x y
999 MO_Ne F32 -> condFltReg NE x y
1000 MO_S_Gt F32 -> condFltReg GTT x y
1001 MO_S_Ge F32 -> condFltReg GE x y
1002 MO_S_Lt F32 -> condFltReg LTT x y
1003 MO_S_Le F32 -> condFltReg LE x y
1005 MO_Eq F64 -> condFltReg EQQ x y
1006 MO_Ne F64 -> condFltReg NE x y
1007 MO_S_Gt F64 -> condFltReg GTT x y
1008 MO_S_Ge F64 -> condFltReg GE x y
1009 MO_S_Lt F64 -> condFltReg LTT x y
1010 MO_S_Le F64 -> condFltReg LE x y
1012 MO_Eq rep -> condIntReg EQQ x y
1013 MO_Ne rep -> condIntReg NE x y
1015 MO_S_Gt rep -> condIntReg GTT x y
1016 MO_S_Ge rep -> condIntReg GE x y
1017 MO_S_Lt rep -> condIntReg LTT x y
1018 MO_S_Le rep -> condIntReg LE x y
1020 MO_U_Gt rep -> condIntReg GU x y
1021 MO_U_Ge rep -> condIntReg GEU x y
1022 MO_U_Lt rep -> condIntReg LU x y
1023 MO_U_Le rep -> condIntReg LEU x y
1025 #if i386_TARGET_ARCH
1026 MO_Add F32 -> trivialFCode F32 GADD x y
1027 MO_Sub F32 -> trivialFCode F32 GSUB x y
1029 MO_Add F64 -> trivialFCode F64 GADD x y
1030 MO_Sub F64 -> trivialFCode F64 GSUB x y
1032 MO_S_Quot F32 -> trivialFCode F32 GDIV x y
1033 MO_S_Quot F64 -> trivialFCode F64 GDIV x y
1036 #if x86_64_TARGET_ARCH
1037 MO_Add F32 -> trivialFCode F32 ADD x y
1038 MO_Sub F32 -> trivialFCode F32 SUB x y
1040 MO_Add F64 -> trivialFCode F64 ADD x y
1041 MO_Sub F64 -> trivialFCode F64 SUB x y
1043 MO_S_Quot F32 -> trivialFCode F32 FDIV x y
1044 MO_S_Quot F64 -> trivialFCode F64 FDIV x y
1047 MO_Add rep -> add_code rep x y
1048 MO_Sub rep -> sub_code rep x y
1050 MO_S_Quot rep -> div_code rep True True x y
1051 MO_S_Rem rep -> div_code rep True False x y
1052 MO_U_Quot rep -> div_code rep False True x y
1053 MO_U_Rem rep -> div_code rep False False x y
1055 #if i386_TARGET_ARCH
1056 MO_Mul F32 -> trivialFCode F32 GMUL x y
1057 MO_Mul F64 -> trivialFCode F64 GMUL x y
1060 #if x86_64_TARGET_ARCH
1061 MO_Mul F32 -> trivialFCode F32 MUL x y
1062 MO_Mul F64 -> trivialFCode F64 MUL x y
1065 MO_Mul rep -> let op = IMUL rep in
1066 trivialCode rep op (Just op) x y
1068 MO_S_MulMayOflo rep -> imulMayOflo rep x y
1070 MO_And rep -> let op = AND rep in
1071 trivialCode rep op (Just op) x y
1072 MO_Or rep -> let op = OR rep in
1073 trivialCode rep op (Just op) x y
1074 MO_Xor rep -> let op = XOR rep in
1075 trivialCode rep op (Just op) x y
1077 {- Shift ops on x86s have constraints on their source, it
1078 either has to be Imm, CL or 1
1079 => trivialCode is not restrictive enough (sigh.)
1081 MO_Shl rep -> shift_code rep (SHL rep) x y {-False-}
1082 MO_U_Shr rep -> shift_code rep (SHR rep) x y {-False-}
1083 MO_S_Shr rep -> shift_code rep (SAR rep) x y {-False-}
1085 other -> pprPanic "getRegister(x86) - binary CmmMachOp (1)" (pprMachOp mop)
1087 --------------------
-- | x86: signed multiply set up so the caller can test for overflow.
-- After IMUL2 the double-width product sits in %edx:%eax; the code
-- subtracts the sign-extension of the low word from the high word, so
-- the result (Fixed in %eax) is zero iff the multiply did not overflow.
1088 imulMayOflo :: MachRep -> CmmExpr -> CmmExpr -> NatM Register
1089 imulMayOflo rep a b = do
-- a must stay live across the evaluation of b, hence non-clobbered
1090 (a_reg, a_code) <- getNonClobberedReg a
1091 b_code <- getAnyReg b
-- shift_amt: presumably rep's bit width minus one (the per-rep case
-- alternatives are elided in this view) -- confirm against full source
1093 shift_amt = case rep of
1096 _ -> panic "shift_amt"
1098 code = a_code `appOL` b_code eax `appOL`
1100 IMUL2 rep (OpReg a_reg), -- result in %edx:%eax
1101 SAR rep (OpImm (ImmInt shift_amt)) (OpReg eax),
1102 -- sign extend lower part
1103 SUB rep (OpReg edx) (OpReg eax)
1104 -- compare against upper
1105 -- eax==0 if high part == sign extended low part
1108 return (Fixed rep eax code)
1110 --------------------
-- | x86 shift helper.  Shift instructions only accept an immediate,
-- %cl, or 1 as the count, so trivialCode cannot be used directly
-- (see the comment above the dispatch table).
1111 shift_code :: MachRep
1112 -> (Operand -> Operand -> Instr)
1117 {- Case1: shift length as immediate -}
1118 shift_code rep instr x y@(CmmLit lit) = do
1119 x_code <- getAnyReg x
-- shift-by-immediate operates directly on the destination register
1122 = x_code dst `snocOL`
1123 instr (OpImm (litToImm lit)) (OpReg dst)
1125 return (Any rep code)
1127 {- Case2: shift length is complex (non-immediate) -}
-- the count is computed into %ecx (hardware reads it from %cl), so x
-- must be in a register that survives the evaluation of y
1128 shift_code rep instr x y{-amount-} = do
1129 (x_reg, x_code) <- getNonClobberedReg x
1130 y_code <- getAnyReg y
1132 code = x_code `appOL`
1134 instr (OpReg ecx) (OpReg x_reg)
-- result is pinned to x_reg because %ecx is occupied by the count
1136 return (Fixed rep x_reg code)
1138 --------------------
-- | Addition: a literal addend goes through 'add_int' (a
-- non-destructive three-operand form); anything else uses two-operand
-- ADD via trivialCode, with the same ADD supplied as the swapped
-- variant since addition is commutative.
1139 add_code :: MachRep -> CmmExpr -> CmmExpr -> NatM Register
1140 add_code rep x (CmmLit (CmmInt y _)) = add_int rep x y
1141 add_code rep x y = trivialCode rep (ADD rep) (Just (ADD rep)) x y
1143 --------------------
-- | Subtraction: a literal subtrahend is rewritten as addition of its
-- negation; otherwise plain SUB.  Nothing is passed for the swapped
-- variant, presumably because SUB is not commutative.
1144 sub_code :: MachRep -> CmmExpr -> CmmExpr -> NatM Register
1145 sub_code rep x (CmmLit (CmmInt y _)) = add_int rep x (-y)
1146 sub_code rep x y = trivialCode rep (SUB rep) Nothing x y
1148 -- our three-operand add instruction:
-- Adds an integer literal without destroying the source register by
-- expressing the sum as an effective address (base reg + displacement);
-- presumably emitted with LEA -- the instruction line itself is not
-- visible in this chunk, confirm against the full source.
1149 add_int rep x y = do
1150 (x_reg, x_code) <- getSomeReg x
1152 imm = ImmInt (fromInteger y)
1156 (OpAddr (AddrBaseIndex (Just x_reg) Nothing imm))
1159 return (Any rep code)
1161 ----------------------
-- | x86 division helper shared by quot/rem, signed/unsigned.
-- IDIV/DIV divide the double-width value in %edx:%eax.
1162 div_code rep signed quotient x y = do
1163 (y_op, y_code) <- getRegOrMem y -- cannot be clobbered
1164 x_code <- getAnyReg x
-- widen the dividend into %edx: sign-extend (CLTD) when signed,
-- zero %edx via self-XOR when unsigned
1166 widen | signed = CLTD rep
1167 | otherwise = XOR rep (OpReg edx) (OpReg edx)
1169 instr | signed = IDIV
1172 code = y_code `appOL`
1174 toOL [widen, instr rep y_op]
-- quotient is delivered in %eax; the elided otherwise-branch
-- presumably picks %edx for the remainder -- confirm
1176 result | quotient = eax
1180 return (Fixed rep result code)
-- Floating-point load: i386 goes through the GLD pseudo-instruction
-- (x87), x86_64 uses a plain MOV from memory.
1183 getRegister (CmmLoad mem pk)
1186 Amode src mem_code <- getAmode mem
1188 code dst = mem_code `snocOL`
1189 IF_ARCH_i386(GLD pk src dst,
1190 MOV pk (OpAddr src) (OpReg dst))
1192 return (Any pk code)
1194 #if i386_TARGET_ARCH
-- Integer loads on i386: choose a width-appropriate move; 8-bit loads
-- are zero-extended (see the note below for why).
1195 getRegister (CmmLoad mem pk)
1198 code <- intLoadCode (instr pk) mem
1199 return (Any pk code)
1201 instr I8 = MOVZxL pk
1204 -- we always zero-extend 8-bit loads, if we
1205 -- can't think of anything better. This is because
1206 -- we can't guarantee access to an 8-bit variant of every register
1207 -- (esi and edi don't have 8-bit variants), so to make things
1208 -- simpler we do our 8-bit arithmetic with full 32-bit registers.
1211 #if x86_64_TARGET_ARCH
1212 -- Simpler memory load code on x86_64
1213 getRegister (CmmLoad mem pk)
1215 code <- intLoadCode (MOV pk) mem
1216 return (Any pk code)
-- Zero literal: xor the destination with itself rather than moving 0.
1219 getRegister (CmmLit (CmmInt 0 rep))
1221 -- x86_64: 32-bit xor is one byte shorter, and zero-extends to 64 bits
1222 adj_rep = case rep of I64 -> I32; _ -> rep
1223 rep1 = IF_ARCH_i386( rep, adj_rep )
1225 = unitOL (XOR rep1 (OpReg dst) (OpReg dst))
1227 return (Any rep code)
1229 #if x86_64_TARGET_ARCH
1230 -- optimisation for loading small literals on x86_64: take advantage
1231 -- of the automatic zero-extension from 32 to 64 bits, because the 32-bit
1232 -- instruction forms are shorter.
1233 getRegister (CmmLit lit)
1234 | I64 <- cmmLitRep lit, not (isBigLit lit)
1237 code dst = unitOL (MOV I32 (OpImm imm) (OpReg dst))
1239 return (Any I64 code)
1241 isBigLit (CmmInt i I64) = i < 0 || i > 0xffffffff
1243 -- note1: not the same as is64BitLit, because that checks for
1244 -- signed literals that fit in 32 bits, but we want unsigned
1246 -- note2: all labels are small, because we're assuming the
1247 -- small memory model (see gcc docs, -mcmodel=small).
-- General literal: a straight immediate move at the literal's width.
1250 getRegister (CmmLit lit)
1254 code dst = unitOL (MOV rep (OpImm imm) (OpReg dst))
1256 return (Any rep code)
-- Catch-all: anything unhandled above is a code generator bug.
1258 getRegister other = pprPanic "getRegister(x86)" (ppr other)
-- | Build load code for an integer-typed value: compute the addressing
-- mode for @mem@, then append the supplied load instruction from that
-- address into whichever destination register the caller picks.
intLoadCode :: (Operand -> Operand -> Instr) -> CmmExpr
            -> NatM (Reg -> InstrBlock)
intLoadCode mk_load mem = do
    Amode addr addr_code <- getAmode mem
    let load_into dst = addr_code `snocOL` mk_load (OpAddr addr) (OpReg dst)
    return load_into
1267 -- Compute an expression into *any* register, adding the appropriate
1268 -- move instruction if necessary.
1269 getAnyReg :: CmmExpr -> NatM (Reg -> InstrBlock)
-- delegates to 'anyReg' on the Register produced by getRegister
1271 r <- getRegister expr
-- | Turn a 'Register' into a destination-parameterised instruction
-- block.  An 'Any' register already abstracts over its destination;
-- a 'Fixed' register gets a trailing move from its fixed home into
-- the requested destination.
anyReg :: Register -> NatM (Reg -> InstrBlock)
anyReg r = case r of
    Any _ code        -> return code
    Fixed sz src more ->
        return (\dst -> more `snocOL` reg2reg sz src dst)
1278 -- A bit like getSomeReg, but we want a reg that can be byte-addressed.
1279 -- Fixed registers might not be byte-addressable, so we make sure we've
1280 -- got a temporary, inserting an extra reg copy if necessary.
1281 getByteReg :: CmmExpr -> NatM (Reg, InstrBlock)
1282 #if x86_64_TARGET_ARCH
1283 getByteReg = getSomeReg -- all regs are byte-addressable on x86_64
-- i386: an Any result is materialised in a fresh virtual register; a
-- Fixed real register is copied into a temporary first.
1285 getByteReg expr = do
1286 r <- getRegister expr
1289 tmp <- getNewRegNat rep
1290 return (tmp, code tmp)
-- virtual regs are fine: the allocator can pick a byte-addressable one
1292 | isVirtualReg reg -> return (reg,code)
1294 tmp <- getNewRegNat rep
1295 return (tmp, code `snocOL` reg2reg rep reg tmp)
1296 -- ToDo: could optimise slightly by checking for byte-addressable
1297 -- real registers, but that will happen very rarely if at all.
1300 -- Another variant: this time we want the result in a register that cannot
1301 -- be modified by code to evaluate an arbitrary expression.
1302 getNonClobberedReg :: CmmExpr -> NatM (Reg, InstrBlock)
1303 getNonClobberedReg expr = do
1304 r <- getRegister expr
-- Any result: materialise into a fresh virtual register, which no
-- unrelated code will write
1307 tmp <- getNewRegNat rep
1308 return (tmp, code tmp)
1310 -- only free regs can be clobbered
-- Fixed in a free real register: copy it out to a fresh temporary
1311 | RealReg rr <- reg, isFastTrue (freeReg rr) -> do
1312 tmp <- getNewRegNat rep
1313 return (tmp, code `snocOL` reg2reg rep reg tmp)
-- | Register-to-register move at the given width; on i386 floating
-- values use the GMOV pseudo-instruction, everything else a plain MOV.
1317 reg2reg :: MachRep -> Reg -> Reg -> Instr
1319 #if i386_TARGET_ARCH
1320 | isFloatingRep rep = GMOV src dst
1322 | otherwise = MOV rep (OpReg src) (OpReg dst)
1324 #endif /* i386_TARGET_ARCH || x86_64_TARGET_ARCH */
1326 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1328 #if sparc_TARGET_ARCH
1330 getRegister (StFloat d)
1331 = getBlockIdNat `thenNat` \ lbl ->
1332 getNewRegNat PtrRep `thenNat` \ tmp ->
1333 let code dst = toOL [
1334 SEGMENT DataSegment,
1336 DATA F [ImmFloat d],
1337 SEGMENT TextSegment,
1338 SETHI (HI (ImmCLbl lbl)) tmp,
1339 LD F (AddrRegImm tmp (LO (ImmCLbl lbl))) dst]
1341 return (Any F32 code)
1343 getRegister (StDouble d)
1344 = getBlockIdNat `thenNat` \ lbl ->
1345 getNewRegNat PtrRep `thenNat` \ tmp ->
1346 let code dst = toOL [
1347 SEGMENT DataSegment,
1349 DATA DF [ImmDouble d],
1350 SEGMENT TextSegment,
1351 SETHI (HI (ImmCLbl lbl)) tmp,
1352 LD DF (AddrRegImm tmp (LO (ImmCLbl lbl))) dst]
1354 return (Any F64 code)
1357 getRegister (CmmMachOp mop [x]) -- unary PrimOps
1359 MO_NatS_Neg -> trivialUCode (SUB False False g0) x
1360 MO_Nat_Not -> trivialUCode (XNOR False g0) x
1361 MO_32U_to_8U -> trivialCode (AND False) x (StInt 255)
1363 MO_F32_Neg -> trivialUFCode F32 (FNEG F) x
1364 MO_F64_Neg -> trivialUFCode F64 (FNEG DF) x
1366 MO_F64_to_Flt -> coerceDbl2Flt x
1367 MO_F32_to_Dbl -> coerceFlt2Dbl x
1369 MO_F32_to_NatS -> coerceFP2Int F32 x
1370 MO_NatS_to_Flt -> coerceInt2FP F32 x
1371 MO_F64_to_NatS -> coerceFP2Int F64 x
1372 MO_NatS_to_Dbl -> coerceInt2FP F64 x
1374 -- Conversions which are a nop on sparc
1375 MO_32U_to_NatS -> conversionNop IntRep x
1376 MO_32S_to_NatS -> conversionNop IntRep x
1377 MO_NatS_to_32U -> conversionNop WordRep x
1378 MO_32U_to_NatU -> conversionNop WordRep x
1380 MO_NatU_to_NatS -> conversionNop IntRep x
1381 MO_NatS_to_NatU -> conversionNop WordRep x
1382 MO_NatP_to_NatU -> conversionNop WordRep x
1383 MO_NatU_to_NatP -> conversionNop PtrRep x
1384 MO_NatS_to_NatP -> conversionNop PtrRep x
1385 MO_NatP_to_NatS -> conversionNop IntRep x
1387 -- sign-extending widenings
1388 MO_8U_to_32U -> integerExtend False 24 x
1389 MO_8U_to_NatU -> integerExtend False 24 x
1390 MO_8S_to_NatS -> integerExtend True 24 x
1391 MO_16U_to_NatU -> integerExtend False 16 x
1392 MO_16S_to_NatS -> integerExtend True 16 x
1395 let fixed_x = if is_float_op -- promote to double
1396 then CmmMachOp MO_F32_to_Dbl [x]
1399 getRegister (StCall (Left fn) CCallConv F64 [fixed_x])
1401 integerExtend signed nBits x
1403 CmmMachOp (if signed then MO_Nat_Sar else MO_Nat_Shr)
1404 [CmmMachOp MO_Nat_Shl [x, StInt nBits], StInt nBits]
1406 conversionNop new_rep expr
1407 = getRegister expr `thenNat` \ e_code ->
1408 return (swizzleRegisterRep e_code new_rep)
1412 MO_F32_Exp -> (True, FSLIT("exp"))
1413 MO_F32_Log -> (True, FSLIT("log"))
1414 MO_F32_Sqrt -> (True, FSLIT("sqrt"))
1416 MO_F32_Sin -> (True, FSLIT("sin"))
1417 MO_F32_Cos -> (True, FSLIT("cos"))
1418 MO_F32_Tan -> (True, FSLIT("tan"))
1420 MO_F32_Asin -> (True, FSLIT("asin"))
1421 MO_F32_Acos -> (True, FSLIT("acos"))
1422 MO_F32_Atan -> (True, FSLIT("atan"))
1424 MO_F32_Sinh -> (True, FSLIT("sinh"))
1425 MO_F32_Cosh -> (True, FSLIT("cosh"))
1426 MO_F32_Tanh -> (True, FSLIT("tanh"))
1428 MO_F64_Exp -> (False, FSLIT("exp"))
1429 MO_F64_Log -> (False, FSLIT("log"))
1430 MO_F64_Sqrt -> (False, FSLIT("sqrt"))
1432 MO_F64_Sin -> (False, FSLIT("sin"))
1433 MO_F64_Cos -> (False, FSLIT("cos"))
1434 MO_F64_Tan -> (False, FSLIT("tan"))
1436 MO_F64_Asin -> (False, FSLIT("asin"))
1437 MO_F64_Acos -> (False, FSLIT("acos"))
1438 MO_F64_Atan -> (False, FSLIT("atan"))
1440 MO_F64_Sinh -> (False, FSLIT("sinh"))
1441 MO_F64_Cosh -> (False, FSLIT("cosh"))
1442 MO_F64_Tanh -> (False, FSLIT("tanh"))
1444 other -> pprPanic "getRegister(sparc) - binary CmmMachOp (2)"
1448 getRegister (CmmMachOp mop [x, y]) -- dyadic PrimOps
1450 MO_32U_Gt -> condIntReg GTT x y
1451 MO_32U_Ge -> condIntReg GE x y
1452 MO_32U_Eq -> condIntReg EQQ x y
1453 MO_32U_Ne -> condIntReg NE x y
1454 MO_32U_Lt -> condIntReg LTT x y
1455 MO_32U_Le -> condIntReg LE x y
1457 MO_Nat_Eq -> condIntReg EQQ x y
1458 MO_Nat_Ne -> condIntReg NE x y
1460 MO_NatS_Gt -> condIntReg GTT x y
1461 MO_NatS_Ge -> condIntReg GE x y
1462 MO_NatS_Lt -> condIntReg LTT x y
1463 MO_NatS_Le -> condIntReg LE x y
1465 MO_NatU_Gt -> condIntReg GU x y
1466 MO_NatU_Ge -> condIntReg GEU x y
1467 MO_NatU_Lt -> condIntReg LU x y
1468 MO_NatU_Le -> condIntReg LEU x y
1470 MO_F32_Gt -> condFltReg GTT x y
1471 MO_F32_Ge -> condFltReg GE x y
1472 MO_F32_Eq -> condFltReg EQQ x y
1473 MO_F32_Ne -> condFltReg NE x y
1474 MO_F32_Lt -> condFltReg LTT x y
1475 MO_F32_Le -> condFltReg LE x y
1477 MO_F64_Gt -> condFltReg GTT x y
1478 MO_F64_Ge -> condFltReg GE x y
1479 MO_F64_Eq -> condFltReg EQQ x y
1480 MO_F64_Ne -> condFltReg NE x y
1481 MO_F64_Lt -> condFltReg LTT x y
1482 MO_F64_Le -> condFltReg LE x y
1484 MO_Nat_Add -> trivialCode (ADD False False) x y
1485 MO_Nat_Sub -> trivialCode (SUB False False) x y
1487 MO_NatS_Mul -> trivialCode (SMUL False) x y
1488 MO_NatU_Mul -> trivialCode (UMUL False) x y
1489 MO_NatS_MulMayOflo -> imulMayOflo x y
1491 -- ToDo: teach about V8+ SPARC div instructions
1492 MO_NatS_Quot -> idiv FSLIT(".div") x y
1493 MO_NatS_Rem -> idiv FSLIT(".rem") x y
1494 MO_NatU_Quot -> idiv FSLIT(".udiv") x y
1495 MO_NatU_Rem -> idiv FSLIT(".urem") x y
1497 MO_F32_Add -> trivialFCode F32 FADD x y
1498 MO_F32_Sub -> trivialFCode F32 FSUB x y
1499 MO_F32_Mul -> trivialFCode F32 FMUL x y
1500 MO_F32_Div -> trivialFCode F32 FDIV x y
1502 MO_F64_Add -> trivialFCode F64 FADD x y
1503 MO_F64_Sub -> trivialFCode F64 FSUB x y
1504 MO_F64_Mul -> trivialFCode F64 FMUL x y
1505 MO_F64_Div -> trivialFCode F64 FDIV x y
1507 MO_Nat_And -> trivialCode (AND False) x y
1508 MO_Nat_Or -> trivialCode (OR False) x y
1509 MO_Nat_Xor -> trivialCode (XOR False) x y
1511 MO_Nat_Shl -> trivialCode SLL x y
1512 MO_Nat_Shr -> trivialCode SRL x y
1513 MO_Nat_Sar -> trivialCode SRA x y
1515 MO_F32_Pwr -> getRegister (StCall (Left FSLIT("pow")) CCallConv F64
1516 [promote x, promote y])
1517 where promote x = CmmMachOp MO_F32_to_Dbl [x]
1518 MO_F64_Pwr -> getRegister (StCall (Left FSLIT("pow")) CCallConv F64
1521 other -> pprPanic "getRegister(sparc) - binary CmmMachOp (1)" (pprMachOp mop)
1523 idiv fn x y = getRegister (StCall (Left fn) CCallConv IntRep [x, y])
1525 --------------------
1526 imulMayOflo :: CmmExpr -> CmmExpr -> NatM Register
1528 = getNewRegNat IntRep `thenNat` \ t1 ->
1529 getNewRegNat IntRep `thenNat` \ t2 ->
1530 getNewRegNat IntRep `thenNat` \ res_lo ->
1531 getNewRegNat IntRep `thenNat` \ res_hi ->
1532 getRegister a1 `thenNat` \ reg1 ->
1533 getRegister a2 `thenNat` \ reg2 ->
1534 let code1 = registerCode reg1 t1
1535 code2 = registerCode reg2 t2
1536 src1 = registerName reg1 t1
1537 src2 = registerName reg2 t2
1538 code dst = code1 `appOL` code2 `appOL`
1540 SMUL False src1 (RIReg src2) res_lo,
1542 SRA res_lo (RIImm (ImmInt 31)) res_lo,
1543 SUB False False res_lo (RIReg res_hi) dst
1546 return (Any IntRep code)
1548 getRegister (CmmLoad pk mem) = do
1549 Amode src code <- getAmode mem
1551 size = primRepToSize pk
1552 code__2 dst = code `snocOL` LD size src dst
1554 return (Any pk code__2)
1556 getRegister (StInt i)
1559 src = ImmInt (fromInteger i)
1560 code dst = unitOL (OR False g0 (RIImm src) dst)
1562 return (Any IntRep code)
1568 SETHI (HI imm__2) dst,
1569 OR False dst (RIImm (LO imm__2)) dst]
1571 return (Any PtrRep code)
1573 = ncgPrimopMoan "getRegister(sparc)" (pprCmmExpr leaf)
1576 imm__2 = case imm of Just x -> x
1578 #endif /* sparc_TARGET_ARCH */
1580 #if powerpc_TARGET_ARCH
1581 getRegister (CmmLoad mem pk)
1584 Amode addr addr_code <- getAmode mem
1585 let code dst = ASSERT((regClass dst == RcDouble) == isFloatingRep pk)
1586 addr_code `snocOL` LD pk dst addr
1587 return (Any pk code)
1589 -- catch simple cases of zero- or sign-extended load
1590 getRegister (CmmMachOp (MO_U_Conv I8 I32) [CmmLoad mem _]) = do
1591 Amode addr addr_code <- getAmode mem
1592 return (Any I32 (\dst -> addr_code `snocOL` LD I8 dst addr))
1594 -- Note: there is no Load Byte Arithmetic instruction, so no signed case here
1596 getRegister (CmmMachOp (MO_U_Conv I16 I32) [CmmLoad mem _]) = do
1597 Amode addr addr_code <- getAmode mem
1598 return (Any I32 (\dst -> addr_code `snocOL` LD I16 dst addr))
1600 getRegister (CmmMachOp (MO_S_Conv I16 I32) [CmmLoad mem _]) = do
1601 Amode addr addr_code <- getAmode mem
1602 return (Any I32 (\dst -> addr_code `snocOL` LA I16 dst addr))
1604 getRegister (CmmMachOp mop [x]) -- unary MachOps
1606 MO_Not rep -> trivialUCode rep NOT x
1608 MO_S_Conv F64 F32 -> trivialUCode F32 FRSP x
1609 MO_S_Conv F32 F64 -> conversionNop F64 x
1612 | from == to -> conversionNop to x
1613 | isFloatingRep from -> coerceFP2Int from to x
1614 | isFloatingRep to -> coerceInt2FP from to x
1616 -- narrowing is a nop: we treat the high bits as undefined
1617 MO_S_Conv I32 to -> conversionNop to x
1618 MO_S_Conv I16 I8 -> conversionNop I8 x
1619 MO_S_Conv I8 to -> trivialUCode to (EXTS I8) x
1620 MO_S_Conv I16 to -> trivialUCode to (EXTS I16) x
1623 | from == to -> conversionNop to x
1624 -- narrowing is a nop: we treat the high bits as undefined
1625 MO_U_Conv I32 to -> conversionNop to x
1626 MO_U_Conv I16 I8 -> conversionNop I8 x
1627 MO_U_Conv I8 to -> trivialCode to False AND x (CmmLit (CmmInt 255 I32))
1628 MO_U_Conv I16 to -> trivialCode to False AND x (CmmLit (CmmInt 65535 I32))
1630 MO_S_Neg F32 -> trivialUCode F32 FNEG x
1631 MO_S_Neg F64 -> trivialUCode F64 FNEG x
1632 MO_S_Neg rep -> trivialUCode rep NEG x
1635 conversionNop new_rep expr
1636 = do e_code <- getRegister expr
1637 return (swizzleRegisterRep e_code new_rep)
1639 getRegister (CmmMachOp mop [x, y]) -- dyadic PrimOps
1641 MO_Eq F32 -> condFltReg EQQ x y
1642 MO_Ne F32 -> condFltReg NE x y
1644 MO_S_Gt F32 -> condFltReg GTT x y
1645 MO_S_Ge F32 -> condFltReg GE x y
1646 MO_S_Lt F32 -> condFltReg LTT x y
1647 MO_S_Le F32 -> condFltReg LE x y
1649 MO_Eq F64 -> condFltReg EQQ x y
1650 MO_Ne F64 -> condFltReg NE x y
1652 MO_S_Gt F64 -> condFltReg GTT x y
1653 MO_S_Ge F64 -> condFltReg GE x y
1654 MO_S_Lt F64 -> condFltReg LTT x y
1655 MO_S_Le F64 -> condFltReg LE x y
1657 MO_Eq rep -> condIntReg EQQ (extendUExpr rep x) (extendUExpr rep y)
1658 MO_Ne rep -> condIntReg NE (extendUExpr rep x) (extendUExpr rep y)
1660 MO_S_Gt rep -> condIntReg GTT (extendSExpr rep x) (extendSExpr rep y)
1661 MO_S_Ge rep -> condIntReg GE (extendSExpr rep x) (extendSExpr rep y)
1662 MO_S_Lt rep -> condIntReg LTT (extendSExpr rep x) (extendSExpr rep y)
1663 MO_S_Le rep -> condIntReg LE (extendSExpr rep x) (extendSExpr rep y)
1665 MO_U_Gt rep -> condIntReg GU (extendUExpr rep x) (extendUExpr rep y)
1666 MO_U_Ge rep -> condIntReg GEU (extendUExpr rep x) (extendUExpr rep y)
1667 MO_U_Lt rep -> condIntReg LU (extendUExpr rep x) (extendUExpr rep y)
1668 MO_U_Le rep -> condIntReg LEU (extendUExpr rep x) (extendUExpr rep y)
1670 MO_Add F32 -> trivialCodeNoImm F32 (FADD F32) x y
1671 MO_Sub F32 -> trivialCodeNoImm F32 (FSUB F32) x y
1672 MO_Mul F32 -> trivialCodeNoImm F32 (FMUL F32) x y
1673 MO_S_Quot F32 -> trivialCodeNoImm F32 (FDIV F32) x y
1675 MO_Add F64 -> trivialCodeNoImm F64 (FADD F64) x y
1676 MO_Sub F64 -> trivialCodeNoImm F64 (FSUB F64) x y
1677 MO_Mul F64 -> trivialCodeNoImm F64 (FMUL F64) x y
1678 MO_S_Quot F64 -> trivialCodeNoImm F64 (FDIV F64) x y
1680 -- optimize addition with 32-bit immediate
1684 CmmLit (CmmInt imm immrep) | Just _ <- makeImmediate I32 True (-imm)
1685 -> trivialCode I32 True ADD x (CmmLit $ CmmInt imm immrep)
1688 (src, srcCode) <- getSomeReg x
1689 let imm = litToImm lit
1690 code dst = srcCode `appOL` toOL [
1691 ADDIS dst src (HA imm),
1692 ADD dst dst (RIImm (LO imm))
1694 return (Any I32 code)
1695 _ -> trivialCode I32 True ADD x y
1697 MO_Add rep -> trivialCode rep True ADD x y
1699 case y of -- subfi ('substract from' with immediate) doesn't exist
1700 CmmLit (CmmInt imm immrep) | Just _ <- makeImmediate rep True (-imm)
1701 -> trivialCode rep True ADD x (CmmLit $ CmmInt (-imm) immrep)
1702 _ -> trivialCodeNoImm rep SUBF y x
1704 MO_Mul rep -> trivialCode rep True MULLW x y
1706 MO_S_MulMayOflo I32 -> trivialCodeNoImm I32 MULLW_MayOflo x y
1708 MO_S_MulMayOflo rep -> panic "S_MulMayOflo (rep /= I32): not implemented"
1709 MO_U_MulMayOflo rep -> panic "U_MulMayOflo: not implemented"
1711 MO_S_Quot rep -> trivialCodeNoImm rep DIVW (extendSExpr rep x) (extendSExpr rep y)
1712 MO_U_Quot rep -> trivialCodeNoImm rep DIVWU (extendUExpr rep x) (extendUExpr rep y)
1714 MO_S_Rem rep -> remainderCode rep DIVW (extendSExpr rep x) (extendSExpr rep y)
1715 MO_U_Rem rep -> remainderCode rep DIVWU (extendUExpr rep x) (extendUExpr rep y)
1717 MO_And rep -> trivialCode rep False AND x y
1718 MO_Or rep -> trivialCode rep False OR x y
1719 MO_Xor rep -> trivialCode rep False XOR x y
1721 MO_Shl rep -> trivialCode rep False SLW x y
1722 MO_S_Shr rep -> trivialCode rep False SRAW (extendSExpr rep x) y
1723 MO_U_Shr rep -> trivialCode rep False SRW (extendUExpr rep x) y
1725 getRegister (CmmLit (CmmInt i rep))
1726 | Just imm <- makeImmediate rep True i
1728 code dst = unitOL (LI dst imm)
1730 return (Any rep code)
1732 getRegister (CmmLit (CmmFloat f frep)) = do
1733 lbl <- getNewLabelNat
1734 dynRef <- cmmMakeDynamicReference addImportNat False lbl
1735 Amode addr addr_code <- getAmode dynRef
1737 LDATA ReadOnlyData [CmmDataLabel lbl,
1738 CmmStaticLit (CmmFloat f frep)]
1739 `consOL` (addr_code `snocOL` LD frep dst addr)
1740 return (Any frep code)
1742 getRegister (CmmLit lit)
1743 = let rep = cmmLitRep lit
1747 OR dst dst (RIImm (LO imm))
1749 in return (Any rep code)
1751 getRegister other = pprPanic "getRegister(ppc)" (pprExpr other)
-- extend{S,U}Expr: wrap an integer expression of width rep in a
-- signed (S) or unsigned (U) conversion up to I32; expressions that
-- are already I32 pass through untouched.
extendSExpr rep x
  | rep == I32 = x
  | otherwise  = CmmMachOp (MO_S_Conv rep I32) [x]

extendUExpr rep x
  | rep == I32 = x
  | otherwise  = CmmMachOp (MO_U_Conv rep I32) [x]
1760 #endif /* powerpc_TARGET_ARCH */
1763 -- -----------------------------------------------------------------------------
1764 -- The 'Amode' type: Memory addressing modes passed up the tree.
-- | An addressing mode together with the instructions that compute the
-- registers it refers to (see the usage rule in the comment below).
1766 data Amode = Amode AddrMode InstrBlock
1769 Now, given a tree (the argument to a CmmLoad) that references memory,
1770 produce a suitable addressing mode.
1772 A Rule of the Game (tm) for Amodes: use of the addr bit must
1773 immediately follow use of the code part, since the code part puts
1774 values in registers which the addr then refers to. So you can't put
1775 anything in between, lest it overwrite some of those registers. If
1776 you need to do some other computation between the code part and use of
1777 the addr bit, first store the effective address from the amode in a
1778 temporary, then do the other computation, and then use the temporary:
1782 ... other computation ...
-- | Produce an addressing mode for the argument of a CmmLoad.
1786 getAmode :: CmmExpr -> NatM Amode
-- CmmRegOff is first rewritten into an explicit reg+offset MachOp tree
1787 getAmode tree@(CmmRegOff _ _) = getAmode (mangleIndexTree tree)
1789 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1791 #if alpha_TARGET_ARCH
1793 getAmode (StPrim IntSubOp [x, StInt i])
1794 = getNewRegNat PtrRep `thenNat` \ tmp ->
1795 getRegister x `thenNat` \ register ->
1797 code = registerCode register tmp
1798 reg = registerName register tmp
1799 off = ImmInt (-(fromInteger i))
1801 return (Amode (AddrRegImm reg off) code)
1803 getAmode (StPrim IntAddOp [x, StInt i])
1804 = getNewRegNat PtrRep `thenNat` \ tmp ->
1805 getRegister x `thenNat` \ register ->
1807 code = registerCode register tmp
1808 reg = registerName register tmp
1809 off = ImmInt (fromInteger i)
1811 return (Amode (AddrRegImm reg off) code)
1815 = return (Amode (AddrImm imm__2) id)
1818 imm__2 = case imm of Just x -> x
1821 = getNewRegNat PtrRep `thenNat` \ tmp ->
1822 getRegister other `thenNat` \ register ->
1824 code = registerCode register tmp
1825 reg = registerName register tmp
1827 return (Amode (AddrReg reg) code)
1829 #endif /* alpha_TARGET_ARCH */
1831 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1833 #if i386_TARGET_ARCH || x86_64_TARGET_ARCH
1835 -- This is all just ridiculous, since it carefully undoes
1836 -- what mangleIndexTree has just done.
-- base minus small literal: fold into a negative displacement
1837 getAmode (CmmMachOp (MO_Sub rep) [x, CmmLit lit@(CmmInt i _)])
1838 | not (is64BitLit lit)
1839 -- ASSERT(rep == I32)???
1840 = do (x_reg, x_code) <- getSomeReg x
1841 let off = ImmInt (-(fromInteger i))
1842 return (Amode (AddrBaseIndex (Just x_reg) Nothing off) x_code)
-- base plus small literal: fold into a positive displacement
1844 getAmode (CmmMachOp (MO_Add rep) [x, CmmLit lit@(CmmInt i _)])
1845 | not (is64BitLit lit)
1846 -- ASSERT(rep == I32)???
1847 = do (x_reg, x_code) <- getSomeReg x
1848 let off = ImmInt (fromInteger i)
1849 return (Amode (AddrBaseIndex (Just x_reg) Nothing off) x_code)
1851 -- Turn (lit1 << n + lit2) into (lit2 + lit1 << n) so it will be
1852 -- recognised by the next rule.
1853 getAmode (CmmMachOp (MO_Add rep) [a@(CmmMachOp (MO_Shl _) _),
1855 = getAmode (CmmMachOp (MO_Add rep) [b,a])
-- base plus shifted index: use the scaled-index addressing form for
-- shifts of 0..3 (scale factors 1,2,4,8)
1857 getAmode (CmmMachOp (MO_Add rep) [x, CmmMachOp (MO_Shl _)
1858 [y, CmmLit (CmmInt shift _)]])
1859 | shift == 0 || shift == 1 || shift == 2 || shift == 3
1860 = do (x_reg, x_code) <- getNonClobberedReg x
1861 -- x must be in a temp, because it has to stay live over y_code
1862 -- we could compare x_reg and y_reg and do something better here...
1863 (y_reg, y_code) <- getSomeReg y
1865 code = x_code `appOL` y_code
1866 base = case shift of 0 -> 1; 1 -> 2; 2 -> 4; 3 -> 8
1867 return (Amode (AddrBaseIndex (Just x_reg) (Just (y_reg,base)) (ImmInt 0))
-- a small literal is an absolute address, no code needed
1870 getAmode (CmmLit lit) | not (is64BitLit lit)
1871 = return (Amode (ImmAddr (litToImm lit) 0) nilOL)
-- fallback: evaluate the expression into a register and address
-- through it with zero displacement
1874 (reg,code) <- getSomeReg expr
1875 return (Amode (AddrBaseIndex (Just reg) Nothing (ImmInt 0)) code)
1877 #endif /* i386_TARGET_ARCH || x86_64_TARGET_ARCH */
1879 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1881 #if sparc_TARGET_ARCH
1883 getAmode (CmmMachOp MO_Nat_Sub [x, StInt i])
1885 = getNewRegNat PtrRep `thenNat` \ tmp ->
1886 getRegister x `thenNat` \ register ->
1888 code = registerCode register tmp
1889 reg = registerName register tmp
1890 off = ImmInt (-(fromInteger i))
1892 return (Amode (AddrRegImm reg off) code)
1895 getAmode (CmmMachOp MO_Nat_Add [x, StInt i])
1897 = getNewRegNat PtrRep `thenNat` \ tmp ->
1898 getRegister x `thenNat` \ register ->
1900 code = registerCode register tmp
1901 reg = registerName register tmp
1902 off = ImmInt (fromInteger i)
1904 return (Amode (AddrRegImm reg off) code)
1906 getAmode (CmmMachOp MO_Nat_Add [x, y])
1907 = getNewRegNat PtrRep `thenNat` \ tmp1 ->
1908 getNewRegNat IntRep `thenNat` \ tmp2 ->
1909 getRegister x `thenNat` \ register1 ->
1910 getRegister y `thenNat` \ register2 ->
1912 code1 = registerCode register1 tmp1
1913 reg1 = registerName register1 tmp1
1914 code2 = registerCode register2 tmp2
1915 reg2 = registerName register2 tmp2
1916 code__2 = code1 `appOL` code2
1918 return (Amode (AddrRegReg reg1 reg2) code__2)
1922 = getNewRegNat PtrRep `thenNat` \ tmp ->
1924 code = unitOL (SETHI (HI imm__2) tmp)
1926 return (Amode (AddrRegImm tmp (LO imm__2)) code)
1929 imm__2 = case imm of Just x -> x
1932 = getNewRegNat PtrRep `thenNat` \ tmp ->
1933 getRegister other `thenNat` \ register ->
1935 code = registerCode register tmp
1936 reg = registerName register tmp
1939 return (Amode (AddrRegImm reg off) code)
1941 #endif /* sparc_TARGET_ARCH */
1943 #ifdef powerpc_TARGET_ARCH
1944 getAmode (CmmMachOp (MO_Sub I32) [x, CmmLit (CmmInt i _)])
1945 | Just off <- makeImmediate I32 True (-i)
1947 (reg, code) <- getSomeReg x
1948 return (Amode (AddrRegImm reg off) code)
1951 getAmode (CmmMachOp (MO_Add I32) [x, CmmLit (CmmInt i _)])
1952 | Just off <- makeImmediate I32 True i
1954 (reg, code) <- getSomeReg x
1955 return (Amode (AddrRegImm reg off) code)
1957 -- optimize addition with 32-bit immediate
1959 getAmode (CmmMachOp (MO_Add I32) [x, CmmLit lit])
1961 tmp <- getNewRegNat I32
1962 (src, srcCode) <- getSomeReg x
1963 let imm = litToImm lit
1964 code = srcCode `snocOL` ADDIS tmp src (HA imm)
1965 return (Amode (AddrRegImm tmp (LO imm)) code)
1967 getAmode (CmmLit lit)
1969 tmp <- getNewRegNat I32
1970 let imm = litToImm lit
1971 code = unitOL (LIS tmp (HA imm))
1972 return (Amode (AddrRegImm tmp (LO imm)) code)
1974 getAmode (CmmMachOp (MO_Add I32) [x, y])
1976 (regX, codeX) <- getSomeReg x
1977 (regY, codeY) <- getSomeReg y
1978 return (Amode (AddrRegReg regX regY) (codeX `appOL` codeY))
1982 (reg, code) <- getSomeReg other
1985 return (Amode (AddrRegImm reg off) code)
1986 #endif /* powerpc_TARGET_ARCH */
1988 -- -----------------------------------------------------------------------------
1989 -- getOperand: sometimes any operand will do.
1991 -- getNonClobberedOperand: the value of the operand will remain valid across
1992 -- the computation of an arbitrary expression, unless the expression
1993 -- is computed directly into a register which the operand refers to
1994 -- (see trivialCode where this function is used for an example).
1996 #if i386_TARGET_ARCH || x86_64_TARGET_ARCH
-- | Like 'getOperand', but the operand stays valid across the
-- computation of an arbitrary expression (unless that expression is
-- computed directly into the operand's own register -- see trivialCode).
1998 getNonClobberedOperand :: CmmExpr -> NatM (Operand, InstrBlock)
-- small non-float literals can be used as immediates directly
1999 getNonClobberedOperand (CmmLit lit)
2000 | not (is64BitLit lit) && not (isFloatingRep (cmmLitRep lit)) =
2001 return (OpImm (litToImm lit), nilOL)
2002 getNonClobberedOperand (CmmLoad mem pk)
2003 | IF_ARCH_i386(not (isFloatingRep pk) && pk /= I64, True) = do
2004 Amode src mem_code <- getAmode mem
-- if the amode mentions clobberable real registers, save the
-- effective address into a fresh temporary with LEA and address
-- through that instead
2006 if (amodeCouldBeClobbered src)
2008 tmp <- getNewRegNat wordRep
2009 return (AddrBaseIndex (Just tmp) Nothing (ImmInt 0),
2010 unitOL (LEA I32 (OpAddr src) (OpReg tmp)))
2013 return (OpAddr src', save_code `appOL` mem_code)
-- fallback: evaluate into a non-clobberable register
2014 getNonClobberedOperand e = do
2015 (reg, code) <- getNonClobberedReg e
2016 return (OpReg reg, code)
-- | True if the addressing mode mentions any register that unrelated
-- code might overwrite.
2018 amodeCouldBeClobbered :: AddrMode -> Bool
2019 amodeCouldBeClobbered amode = any regClobbered (addrModeRegs amode)
-- only free real registers can be clobbered; anything else is safe
2021 regClobbered (RealReg rr) = isFastTrue (freeReg rr)
2022 regClobbered _ = False
2024 -- getOperand: the operand is not required to remain valid across the
2025 -- computation of an arbitrary expression.
2026 getOperand :: CmmExpr -> NatM (Operand, InstrBlock)
-- small non-float literals become immediates
2027 getOperand (CmmLit lit)
2028 | not (is64BitLit lit) && not (isFloatingRep (cmmLitRep lit)) =
2029 return (OpImm (litToImm lit), nilOL)
-- non-float loads (and on i386, not I64) can be memory operands
2030 getOperand (CmmLoad mem pk)
2031 | IF_ARCH_i386(not (isFloatingRep pk) && pk /= I64, True) = do
2032 Amode src mem_code <- getAmode mem
2033 return (OpAddr src, mem_code)
-- otherwise evaluate into a register
2035 (reg, code) <- getNonClobberedReg e
2036 return (OpReg reg, code)
-- | Can this expression be used directly as an x86 operand, i.e. a
-- memory reference or a small non-float immediate?
2038 isOperand :: CmmExpr -> Bool
2039 isOperand (CmmLoad _ _) = True
2040 isOperand (CmmLit lit) = not (is64BitLit lit) &&
2041 not (isFloatingRep (cmmLitRep lit))
-- | An operand that is either a register or a memory reference:
-- qualifying loads are left in memory, everything else is evaluated
-- into a non-clobberable register.
2044 getRegOrMem :: CmmExpr -> NatM (Operand, InstrBlock)
2045 getRegOrMem (CmmLoad mem pk)
2046 | IF_ARCH_i386(not (isFloatingRep pk) && pk /= I64, True) = do
2047 Amode src mem_code <- getAmode mem
2048 return (OpAddr src, mem_code)
2050 (reg, code) <- getNonClobberedReg e
2051 return (OpReg reg, code)
2053 #if x86_64_TARGET_ARCH
-- True for integer literals that do not fit in a signed 32-bit
-- immediate (x86_64 instruction encodings carry at most imm32).
2054 is64BitLit (CmmInt i I64) = i > 0x7fffffff || i < -0x80000000
2055 -- assume that labels are in the range 0-2^31-1: this assumes the
2056 -- small memory model (see gcc docs, -mcmodel=small).
2058 is64BitLit x = False
2061 -- -----------------------------------------------------------------------------
2062 -- The 'CondCode' type: Condition codes passed up the tree.
-- NOTE(review): the Bool field's meaning is not evident from this
-- chunk (every x86 site below passes False) -- confirm in full source.
2064 data CondCode = CondCode Bool Cond InstrBlock
2066 -- Set up a condition code for a conditional branch.
2068 getCondCode :: CmmExpr -> NatM CondCode
2070 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2072 #if alpha_TARGET_ARCH
-- The Alpha backend is not supported here.
2073 getCondCode = panic "MachCode.getCondCode: not on Alphas"
2074 #endif /* alpha_TARGET_ARCH */
2076 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2078 #if i386_TARGET_ARCH || x86_64_TARGET_ARCH || sparc_TARGET_ARCH
2079 -- yes, they really do seem to want exactly the same!
2081 getCondCode (CmmMachOp mop [x, y])
2082 = ASSERT (cmmExprRep x /= I8) -- tmp, not set up to handle 8-bit comparisons
2084 MO_Eq F32 -> condFltCode EQQ x y
2085 MO_Ne F32 -> condFltCode NE x y
2087 MO_S_Gt F32 -> condFltCode GTT x y
2088 MO_S_Ge F32 -> condFltCode GE x y
2089 MO_S_Lt F32 -> condFltCode LTT x y
2090 MO_S_Le F32 -> condFltCode LE x y
2092 MO_Eq F64 -> condFltCode EQQ x y
2093 MO_Ne F64 -> condFltCode NE x y
2095 MO_S_Gt F64 -> condFltCode GTT x y
2096 MO_S_Ge F64 -> condFltCode GE x y
2097 MO_S_Lt F64 -> condFltCode LTT x y
2098 MO_S_Le F64 -> condFltCode LE x y
2100 MO_Eq rep -> condIntCode EQQ x y
2101 MO_Ne rep -> condIntCode NE x y
2103 MO_S_Gt rep -> condIntCode GTT x y
2104 MO_S_Ge rep -> condIntCode GE x y
2105 MO_S_Lt rep -> condIntCode LTT x y
2106 MO_S_Le rep -> condIntCode LE x y
2108 MO_U_Gt rep -> condIntCode GU x y
2109 MO_U_Ge rep -> condIntCode GEU x y
2110 MO_U_Lt rep -> condIntCode LU x y
2111 MO_U_Le rep -> condIntCode LEU x y
2113 other -> pprPanic "getCondCode(x86,sparc)" (pprMachOp mop)
2115 getCondCode other = pprPanic "getCondCode(2)(x86,sparc)" (ppr other)
2117 #elif powerpc_TARGET_ARCH
2119 -- almost the same as everywhere else - but we need to
2120 -- extend small integers to 32 bit first
-- Turn a comparison (a CmmMachOp applied to two operands) into a
-- CondCode for PowerPC.  Float comparisons go straight to condFltCode;
-- integer comparisons first widen sub-word operands, because the PPC
-- cmp/cmpl instructions compare whole registers: signed comparisons
-- sign-extend (extendSExpr), unsigned comparisons and equality
-- zero-extend (extendUExpr).
-- NOTE(review): this listing elides some connective lines (e.g. the
-- "= case mop of" between the head and the first alternative).
2122 getCondCode (CmmMachOp mop [x, y])
2124 MO_Eq F32 -> condFltCode EQQ x y
2125 MO_Ne F32 -> condFltCode NE x y
2127 MO_S_Gt F32 -> condFltCode GTT x y
2128 MO_S_Ge F32 -> condFltCode GE x y
2129 MO_S_Lt F32 -> condFltCode LTT x y
2130 MO_S_Le F32 -> condFltCode LE x y
2132 MO_Eq F64 -> condFltCode EQQ x y
2133 MO_Ne F64 -> condFltCode NE x y
2135 MO_S_Gt F64 -> condFltCode GTT x y
2136 MO_S_Ge F64 -> condFltCode GE x y
2137 MO_S_Lt F64 -> condFltCode LTT x y
2138 MO_S_Le F64 -> condFltCode LE x y
2140 MO_Eq rep -> condIntCode EQQ (extendUExpr rep x) (extendUExpr rep y)
2141 MO_Ne rep -> condIntCode NE (extendUExpr rep x) (extendUExpr rep y)
2143 MO_S_Gt rep -> condIntCode GTT (extendSExpr rep x) (extendSExpr rep y)
2144 MO_S_Ge rep -> condIntCode GE (extendSExpr rep x) (extendSExpr rep y)
2145 MO_S_Lt rep -> condIntCode LTT (extendSExpr rep x) (extendSExpr rep y)
2146 MO_S_Le rep -> condIntCode LE (extendSExpr rep x) (extendSExpr rep y)
2148 MO_U_Gt rep -> condIntCode GU (extendUExpr rep x) (extendUExpr rep y)
2149 MO_U_Ge rep -> condIntCode GEU (extendUExpr rep x) (extendUExpr rep y)
2150 MO_U_Lt rep -> condIntCode LU (extendUExpr rep x) (extendUExpr rep y)
2151 MO_U_Le rep -> condIntCode LEU (extendUExpr rep x) (extendUExpr rep y)
-- Any other MachOp is not a comparison: hard failure.
2153 other -> pprPanic "getCondCode(powerpc)" (pprMachOp mop)
-- Non-MachOp condition expression: also a panic.
2155 getCondCode other = panic "getCondCode(2)(powerpc)"
2161 -- @cond(Int|Flt)Code@: Turn a boolean expression into a condition, to be
2162 -- passed back up the tree.
-- Both return a CondCode: the condition to test, whether it is a float
-- condition, and the InstrBlock that sets the machine flags for it.
2164 condIntCode, condFltCode :: Cond -> CmmExpr -> CmmExpr -> NatM CondCode
2166 #if alpha_TARGET_ARCH
-- Not implemented for Alpha; the Alpha code paths never call these.
2167 condIntCode = panic "MachCode.condIntCode: not on Alphas"
2168 condFltCode = panic "MachCode.condFltCode: not on Alphas"
2169 #endif /* alpha_TARGET_ARCH */
2171 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2172 #if i386_TARGET_ARCH || x86_64_TARGET_ARCH
-- x86/x86_64 condIntCode: four cases ordered most- to least-specific.
-- Each emits a CMP or TEST and returns a CondCode with the float flag
-- False; the condition tested is the caller-supplied 'cond'.
2174 -- memory vs immediate
2175 condIntCode cond (CmmLoad x pk) (CmmLit lit) | not (is64BitLit lit) = do
2176 Amode x_addr x_code <- getAmode x
-- NOTE(review): this listing elides the 'let'/'imm' binding lines here.
2179 code = x_code `snocOL`
2180 CMP pk (OpImm imm) (OpAddr x_addr)
2182 return (CondCode False cond code)
-- anything vs zero: TEST reg,reg sets the same flags as CMP $0,reg.
2185 condIntCode cond x (CmmLit (CmmInt 0 pk)) = do
2186 (x_reg, x_code) <- getSomeReg x
2188 code = x_code `snocOL`
2189 TEST pk (OpReg x_reg) (OpReg x_reg)
2191 return (CondCode False cond code)
2193 -- anything vs operand
-- x must survive the computation of y, hence getNonClobberedReg for x.
2194 condIntCode cond x y | isOperand y = do
2195 (x_reg, x_code) <- getNonClobberedReg x
2196 (y_op, y_code) <- getOperand y
2198 code = x_code `appOL` y_code `snocOL`
2199 CMP (cmmExprRep x) y_op (OpReg x_reg)
2201 return (CondCode False cond code)
2203 -- anything vs anything
2204 condIntCode cond x y = do
2205 (y_reg, y_code) <- getNonClobberedReg y
2206 (x_op, x_code) <- getRegOrMem x
2208 code = y_code `appOL`
2210 CMP (cmmExprRep x) (OpReg y_reg) x_op
2212 return (CondCode False cond code)
2215 #if i386_TARGET_ARCH
-- i386 (x87) float comparison; only the six plain orderings are legal,
-- enforced by the ASSERT.
2216 condFltCode cond x y
2217 = ASSERT(cond `elem` ([EQQ, NE, LE, LTT, GE, GTT])) do
2218 (x_reg, x_code) <- getNonClobberedReg x
2219 (y_reg, y_code) <- getSomeReg y
2221 code = x_code `appOL` y_code `snocOL`
2222 GCMP cond x_reg y_reg
2223 -- The GCMP insn does the test and sets the zero flag if comparable
2224 -- and true. Hence we always supply EQQ as the condition to test.
2225 return (CondCode True EQQ code)
2226 #endif /* i386_TARGET_ARCH */
2228 #if x86_64_TARGET_ARCH
2229 -- in the SSE2 comparison ops (ucomiss, ucomisd) the left arg may be
2230 -- an operand, but the right must be a reg. We can probably do better
2231 -- than this general case...
-- x86_64 float comparison: a CMP at a float MachRep emits the SSE2
-- compare; the resulting flags must be read with the unsigned
-- condition codes, hence condToUnsigned.
2232 condFltCode cond x y = do
2233 (x_reg, x_code) <- getNonClobberedReg x
2234 (y_op, y_code) <- getOperand y
2236 code = x_code `appOL`
2238 CMP (cmmExprRep x) y_op (OpReg x_reg)
2240 return (CondCode False (condToUnsigned cond) code)
2241 -- we need to use the unsigned comparison operators on the
2242 -- result of this comparison.
-- NOTE(review): the closing #endif for x86_64_TARGET_ARCH is elided in
-- this listing.
2245 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2247 #if sparc_TARGET_ARCH
-- SPARC integer comparison: subtract into %g0 with condition codes set
-- (the SUB False True ... g0 form), discarding the numeric result.
2249 condIntCode cond x (StInt y)
2251 = getRegister x `thenNat` \ register ->
2252 getNewRegNat IntRep `thenNat` \ tmp ->
2254 code = registerCode register tmp
2255 src1 = registerName register tmp
2256 src2 = ImmInt (fromInteger y)
2257 code__2 = code `snocOL` SUB False True src1 (RIImm src2) g0
2259 return (CondCode False cond code__2)
-- General register-vs-register case of the same subtract-and-discard.
2261 condIntCode cond x y
2262 = getRegister x `thenNat` \ register1 ->
2263 getRegister y `thenNat` \ register2 ->
2264 getNewRegNat IntRep `thenNat` \ tmp1 ->
2265 getNewRegNat IntRep `thenNat` \ tmp2 ->
2267 code1 = registerCode register1 tmp1
2268 src1 = registerName register1 tmp1
2269 code2 = registerCode register2 tmp2
2270 src2 = registerName register2 tmp2
2271 code__2 = code1 `appOL` code2 `snocOL`
2272 SUB False True src1 (RIReg src2) g0
2274 return (CondCode False cond code__2)
-- Float comparison: if the two operands have different reps, promote
-- the F32 side to F64 with FxTOy before the FCMP so both sides match.
2277 condFltCode cond x y
2278 = getRegister x `thenNat` \ register1 ->
2279 getRegister y `thenNat` \ register2 ->
2280 getNewRegNat (registerRep register1)
2282 getNewRegNat (registerRep register2)
2284 getNewRegNat F64 `thenNat` \ tmp ->
2286 promote x = FxTOy F DF x tmp
2288 pk1 = registerRep register1
2289 code1 = registerCode register1 tmp1
2290 src1 = registerName register1 tmp1
2292 pk2 = registerRep register2
2293 code2 = registerCode register2 tmp2
2294 src2 = registerName register2 tmp2
-- NOTE(review): the 'if pk1 == pk2 then' guard line is elided here.
2298 code1 `appOL` code2 `snocOL`
2299 FCMP True (primRepToSize pk1) src1 src2
2300 else if pk1 == F32 then
2301 code1 `snocOL` promote src1 `appOL` code2 `snocOL`
2302 FCMP True DF tmp src2
2304 code1 `appOL` code2 `snocOL` promote src2 `snocOL`
2305 FCMP True DF src1 tmp
2307 return (CondCode True cond code__2)
2309 #endif /* sparc_TARGET_ARCH */
2311 #if powerpc_TARGET_ARCH
2312 -- ###FIXME: I16 and I8!
-- Compare against an immediate when it fits; makeImmediate checks the
-- range, and unsigned conditions require an unsigned immediate.
2313 condIntCode cond x (CmmLit (CmmInt y rep))
2314 | Just src2 <- makeImmediate rep (not $ condUnsigned cond) y
2316 (src1, code) <- getSomeReg x
2318 code' = code `snocOL`
-- CMPL is the unsigned compare, CMP the signed one; both at I32.
2319 (if condUnsigned cond then CMPL else CMP) I32 src1 (RIImm src2)
2320 return (CondCode False cond code')
-- General register-vs-register comparison.
2322 condIntCode cond x y = do
2323 (src1, code1) <- getSomeReg x
2324 (src2, code2) <- getSomeReg y
2326 code' = code1 `appOL` code2 `snocOL`
2327 (if condUnsigned cond then CMPL else CMP) I32 src1 (RIReg src2)
2328 return (CondCode False cond code')
-- Float comparison via FCMP; for GE/LE an extra CRNOR folds the
-- condition-register bits for the unordered (NaN) result.
-- NOTE(review): confirm the CR-bit semantics against the PPC manual;
-- the default case of the 'case cond of' is elided in this listing.
2330 condFltCode cond x y = do
2331 (src1, code1) <- getSomeReg x
2332 (src2, code2) <- getSomeReg y
2334 code' = code1 `appOL` code2 `snocOL` FCMP src1 src2
2335 code'' = case cond of -- twiddle CR to handle unordered case
2336 GE -> code' `snocOL` CRNOR ltbit eqbit gtbit
2337 LE -> code' `snocOL` CRNOR gtbit eqbit ltbit
2340 ltbit = 0 ; eqbit = 2 ; gtbit = 1
2341 return (CondCode True cond code'')
2343 #endif /* powerpc_TARGET_ARCH */
2345 -- -----------------------------------------------------------------------------
2346 -- Generating assignments
2348 -- Assignments are really at the heart of the whole code generation
2349 -- business. Almost all top-level nodes of any real importance are
2350 -- assignments, which correspond to loads, stores, or register
2351 -- transfers. If we're really lucky, some of the register transfers
2352 -- will go away, because we can use the destination register to
2353 -- complete the code generation for the right hand side. This only
2354 -- fails when the right hand side is forced into a fixed register
2355 -- (e.g. the result of a call).
-- Naming scheme: Mem vs Reg is the *destination* of the assignment;
-- Int vs Flt is the class of value moved.  Each takes the value's
-- MachRep, the destination, and the source expression.
2357 assignMem_IntCode :: MachRep -> CmmExpr -> CmmExpr -> NatM InstrBlock
2358 assignReg_IntCode :: MachRep -> CmmReg -> CmmExpr -> NatM InstrBlock
2360 assignMem_FltCode :: MachRep -> CmmExpr -> CmmExpr -> NatM InstrBlock
2361 assignReg_FltCode :: MachRep -> CmmReg -> CmmExpr -> NatM InstrBlock
2363 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2365 #if alpha_TARGET_ARCH
-- Alpha integer assignment, store-to-memory case: compute amode and
-- source, then a single ST at the requested size.
2367 assignIntCode pk (CmmLoad dst _) src
2368 = getNewRegNat IntRep `thenNat` \ tmp ->
2369 getAmode dst `thenNat` \ amode ->
2370 getRegister src `thenNat` \ register ->
2372 code1 = amodeCode amode []
2373 dst__2 = amodeAddr amode
2374 code2 = registerCode register tmp []
2375 src__2 = registerName register tmp
2376 sz = primRepToSize pk
2377 code__2 = asmSeqThen [code1, code2] . mkSeqInstr (ST sz src__2 dst__2)
-- Register destination: a Fixed source needs an explicit copy
-- (OR src,src -> dst); the Any case generates into dst directly.
-- NOTE(review): the 'else' branch of this conditional is elided here.
2381 assignIntCode pk dst src
2382 = getRegister dst `thenNat` \ register1 ->
2383 getRegister src `thenNat` \ register2 ->
2385 dst__2 = registerName register1 zeroh
2386 code = registerCode register2 dst__2
2387 src__2 = registerName register2 dst__2
2388 code__2 = if isFixed register2
2389 then code . mkSeqInstr (OR src__2 (RIReg src__2) dst__2)
2394 #endif /* alpha_TARGET_ARCH */
2396 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2398 #if i386_TARGET_ARCH || x86_64_TARGET_ARCH
2400 -- integer assignment to memory
-- MOV an immediate-or-register source operand straight into the
-- destination address.
2401 assignMem_IntCode pk addr src = do
2402 Amode addr code_addr <- getAmode addr
2403 (code_src, op_src) <- get_op_RI src
2405 code = code_src `appOL`
2407 MOV pk op_src (OpAddr addr)
2408 -- NOTE: op_src is stable, so it will still be valid
2409 -- after code_addr. This may involve the introduction
2410 -- of an extra MOV to a temporary register, but we hope
2411 -- the register allocator will get rid of it.
-- Turn an expression into a MOV-able operand: small literals become an
-- immediate; anything else is evaluated into a non-clobbered register.
-- NOTE(review): the head of the second get_op_RI equation is elided.
2415 get_op_RI :: CmmExpr -> NatM (InstrBlock,Operand) -- code, operator
2416 get_op_RI (CmmLit lit) | not (is64BitLit lit)
2417 = return (nilOL, OpImm (litToImm lit))
2419 = do (reg,code) <- getNonClobberedReg op
2420 return (code, OpReg reg)
2423 -- Assign; dst is a reg, rhs is mem
2424 assignReg_IntCode pk reg (CmmLoad src _) = do
2425 load_code <- intLoadCode (MOV pk) src
2426 return (load_code (getRegisterReg reg))
2428 -- dst is a reg, but src could be anything
2429 assignReg_IntCode pk reg src = do
2430 code <- getAnyReg src
2431 return (code (getRegisterReg reg))
2433 #endif /* i386_TARGET_ARCH || x86_64_TARGET_ARCH */
2435 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2437 #if sparc_TARGET_ARCH
-- SPARC integer store: evaluate address and source, then one ST at the
-- requested size.
2439 assignMem_IntCode pk addr src
2440 = getNewRegNat IntRep `thenNat` \ tmp ->
2441 getAmode addr `thenNat` \ amode ->
2442 getRegister src `thenNat` \ register ->
2444 code1 = amodeCode amode
2445 dst__2 = amodeAddr amode
2446 code2 = registerCode register tmp
2447 src__2 = registerName register tmp
2448 sz = primRepToSize pk
2449 code__2 = code1 `appOL` code2 `snocOL` ST sz src__2 dst__2
-- Register destination: a Fixed source is copied with OR via %g0 (the
-- SPARC register-to-register move); the Any case targets dst directly.
-- NOTE(review): the 'else' branch of this conditional is elided here.
2453 assignReg_IntCode pk reg src
2454 = getRegister src `thenNat` \ register2 ->
2455 getRegisterReg reg `thenNat` \ register1 ->
2456 getNewRegNat IntRep `thenNat` \ tmp ->
2458 dst__2 = registerName register1 tmp
2459 code = registerCode register2 dst__2
2460 src__2 = registerName register2 dst__2
2461 code__2 = if isFixed register2
2462 then code `snocOL` OR False g0 (RIReg src__2) dst__2
2467 #endif /* sparc_TARGET_ARCH */
2469 #if powerpc_TARGET_ARCH
-- PowerPC integer store: evaluate source and address, then one ST.
2471 assignMem_IntCode pk addr src = do
2472 (srcReg, code) <- getSomeReg src
2473 Amode dstAddr addr_code <- getAmode addr
2474 return $ code `appOL` addr_code `snocOL` ST pk srcReg dstAddr
2476 -- dst is a reg, but src could be anything
-- Any sources generate straight into dst; Fixed sources need an MR
-- (move-register) copy.
2477 assignReg_IntCode pk reg src
2479 r <- getRegister src
2481 Any _ code -> code dst
2482 Fixed _ freg fcode -> fcode `snocOL` MR dst freg
2484 dst = getRegisterReg reg
2486 #endif /* powerpc_TARGET_ARCH */
2489 -- -----------------------------------------------------------------------------
2490 -- Floating-point assignments
2492 #if alpha_TARGET_ARCH
-- Alpha float store: same shape as the integer case, ending in ST.
2494 assignFltCode pk (CmmLoad dst _) src
2495 = getNewRegNat pk `thenNat` \ tmp ->
2496 getAmode dst `thenNat` \ amode ->
2497 getRegister src `thenNat` \ register ->
2499 code1 = amodeCode amode []
2500 dst__2 = amodeAddr amode
2501 code2 = registerCode register tmp []
2502 src__2 = registerName register tmp
2503 sz = primRepToSize pk
2504 code__2 = asmSeqThen [code1, code2] . mkSeqInstr (ST sz src__2 dst__2)
-- Register destination: a Fixed source is copied with FMOV.
-- NOTE(review): the 'else' branch of this conditional is elided here.
2508 assignFltCode pk dst src
2509 = getRegister dst `thenNat` \ register1 ->
2510 getRegister src `thenNat` \ register2 ->
2512 dst__2 = registerName register1 zeroh
2513 code = registerCode register2 dst__2
2514 src__2 = registerName register2 dst__2
2515 code__2 = if isFixed register2
2516 then code . mkSeqInstr (FMOV src__2 dst__2)
2521 #endif /* alpha_TARGET_ARCH */
2523 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2525 #if i386_TARGET_ARCH || x86_64_TARGET_ARCH
2527 -- Floating point assignment to memory
-- On i386 a float store is an x87 GST; on x86_64 it is an ordinary MOV
-- (SSE registers) -- selected at compile time by IF_ARCH_i386.
2528 assignMem_FltCode pk addr src = do
2529 (src_reg, src_code) <- getNonClobberedReg src
2530 Amode addr addr_code <- getAmode addr
2532 code = src_code `appOL`
2534 IF_ARCH_i386(GST pk src_reg addr,
2535 MOV pk (OpReg src_reg) (OpAddr addr))
2538 -- Floating point assignment to a register/temporary
2539 assignReg_FltCode pk reg src = do
2540 src_code <- getAnyReg src
2541 return (src_code (getRegisterReg reg))
2543 #endif /* i386_TARGET_ARCH || x86_64_TARGET_ARCH */
2545 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2547 #if sparc_TARGET_ARCH
2549 -- Floating point assignment to memory
-- If the source's rep differs from the requested store size, convert
-- with FxTOy into a temporary before the ST.
2550 assignMem_FltCode pk addr src
2551 = getNewRegNat pk `thenNat` \ tmp1 ->
2552 getAmode addr `thenNat` \ amode ->
2553 getRegister src `thenNat` \ register ->
2555 sz = primRepToSize pk
2556 dst__2 = amodeAddr amode
2558 code1 = amodeCode amode
2559 code2 = registerCode register tmp1
2561 src__2 = registerName register tmp1
2562 pk__2 = registerRep register
2563 sz__2 = primRepToSize pk__2
2565 code__2 = code1 `appOL` code2 `appOL`
-- NOTE(review): the 'if pk == pk__2 then' guard line is elided here.
2567 then unitOL (ST sz src__2 dst__2)
2568 else toOL [FxTOy sz__2 sz src__2 tmp1, ST sz tmp1 dst__2]
2572 -- Floating point assignment to a register/temporary
2573 -- Why is this so bizarrely ugly?
-- Three cases: a size mismatch needs an FxTOy conversion; a Fixed
-- source needs an FMOV copy; otherwise generate into dst directly.
-- NOTE(review): the guards selecting between these are partly elided.
2574 assignReg_FltCode pk reg src
2575 = getRegisterReg reg `thenNat` \ register1 ->
2576 getRegister src `thenNat` \ register2 ->
2578 pk__2 = registerRep register2
2579 sz__2 = primRepToSize pk__2
2581 getNewRegNat pk__2 `thenNat` \ tmp ->
2583 sz = primRepToSize pk
2584 dst__2 = registerName register1 g0 -- must be Fixed
2585 reg__2 = if pk /= pk__2 then tmp else dst__2
2586 code = registerCode register2 reg__2
2587 src__2 = registerName register2 reg__2
2590 code `snocOL` FxTOy sz__2 sz src__2 dst__2
2591 else if isFixed register2 then
2592 code `snocOL` FMOV sz src__2 dst__2
2598 #endif /* sparc_TARGET_ARCH */
2600 #if powerpc_TARGET_ARCH
-- On PowerPC float assignments take exactly the same shape as integer
-- ones, so simply reuse the integer implementations.
2603 assignMem_FltCode = assignMem_IntCode
2604 assignReg_FltCode = assignReg_IntCode
2606 #endif /* powerpc_TARGET_ARCH */
2609 -- -----------------------------------------------------------------------------
2610 -- Generating an non-local jump
2612 -- (If applicable) Do not fill the delay slots here; you will confuse the
2613 -- register allocator.
2615 genJump :: CmmExpr{-the branch target-} -> NatM InstrBlock
2617 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2619 #if alpha_TARGET_ARCH
-- Alpha: direct BR for local (asm-temp) labels; otherwise load the
-- procedure value into pv and JMP through it.
2621 genJump (CmmLabel lbl)
2622 | isAsmTemp lbl = returnInstr (BR target)
2623 | otherwise = returnInstrs [LDA pv (AddrImm target), JMP zeroh (AddrReg pv) 0]
2625 target = ImmCLbl lbl
-- Computed target: evaluate into pv, copying first if the source is a
-- Fixed register.
-- NOTE(review): the 'genJump tree' equation head is elided in this listing.
2628 = getRegister tree `thenNat` \ register ->
2629 getNewRegNat PtrRep `thenNat` \ tmp ->
2631 dst = registerName register pv
2632 code = registerCode register pv
2633 target = registerName register pv
2635 if isFixed register then
2636 returnSeq code [OR dst (RIReg dst) pv, JMP zeroh (AddrReg pv) 0]
2638 return (code . mkSeqInstr (JMP zeroh (AddrReg pv) 0))
2640 #endif /* alpha_TARGET_ARCH */
2642 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2644 #if i386_TARGET_ARCH || x86_64_TARGET_ARCH
-- x86/x86_64: JMP through memory, to an immediate label, or through a
-- register, depending on the target expression.
2646 genJump (CmmLoad mem pk) = do
2647 Amode target code <- getAmode mem
2648 return (code `snocOL` JMP (OpAddr target))
2650 genJump (CmmLit lit) = do
2651 return (unitOL (JMP (OpImm (litToImm lit))))
-- NOTE(review): the 'genJump expr = do' equation head is elided here.
2654 (reg,code) <- getSomeReg expr
2655 return (code `snocOL` JMP (OpReg reg))
2657 #endif /* i386_TARGET_ARCH || x86_64_TARGET_ARCH */
2659 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2661 #if sparc_TARGET_ARCH
-- SPARC: jump to a known label via CALL, with a NOP filling the branch
-- delay slot.
2663 genJump (CmmLabel lbl)
2664 = return (toOL [CALL (Left target) 0 True, NOP])
2666 target = ImmCLbl lbl
-- Computed target: evaluate into a register, then JMP through it
-- (again followed by a delay-slot NOP).
-- NOTE(review): the 'genJump tree' equation head is elided here.
2669 = getRegister tree `thenNat` \ register ->
2670 getNewRegNat PtrRep `thenNat` \ tmp ->
2672 code = registerCode register tmp
2673 target = registerName register tmp
2675 return (code `snocOL` JMP dsts (AddrRegReg target g0) `snocOL` NOP)
2677 #endif /* sparc_TARGET_ARCH */
2679 #if powerpc_TARGET_ARCH
-- PowerPC: direct JMP for a known label; otherwise move the target into
-- the count register and branch through it (MTCTR then BCTR).
2680 genJump (CmmLit (CmmLabel lbl))
2681 = return (unitOL $ JMP lbl)
-- NOTE(review): the 'genJump tree = do' equation head is elided here.
2685 (target,code) <- getSomeReg tree
2686 return (code `snocOL` MTCTR target `snocOL` BCTR [])
2687 #endif /* powerpc_TARGET_ARCH */
2690 -- -----------------------------------------------------------------------------
2691 -- Unconditional branches
2693 genBranch :: BlockId -> NatM InstrBlock
-- One equation per target architecture; SPARC additionally emits a NOP
-- to fill the branch delay slot.
-- NOTE(review): the intervening #endif lines are elided in this listing.
2695 #if alpha_TARGET_ARCH
2696 genBranch id = return (unitOL (BR id))
2699 #if i386_TARGET_ARCH || x86_64_TARGET_ARCH
2700 genBranch id = return (unitOL (JXX ALWAYS id))
2703 #if sparc_TARGET_ARCH
2704 genBranch id = return (toOL [BI ALWAYS False id, NOP])
2707 #if powerpc_TARGET_ARCH
2708 genBranch id = return (unitOL (BCC ALWAYS id))
2712 -- -----------------------------------------------------------------------------
2713 -- Conditional jumps
-- NOTE(review): the following paragraphs appear to be the body of a
-- {- ... -} block comment whose delimiters are elided in this listing.
2716 Conditional jumps are always to local labels, so we can use branch
2717 instructions. We peek at the arguments to decide what kind of
2720 ALPHA: For comparisons with 0, we're laughing, because we can just do
2721 the desired conditional branch.
2723 I386: First, we have to ensure that the condition
2724 codes are set according to the supplied comparison operation.
2726 SPARC: First, we have to ensure that the condition codes are set
2727 according to the supplied comparison operation. We generate slightly
2728 different code for floating point comparisons, because a floating
2729 point operation cannot directly precede a @BF@. We assume the worst
2730 and fill that slot with a @NOP@.
2732 SPARC: Do not fill the delay slots here; you will confuse the register
-- genCondJump's type signature; the 'genCondJump' name line is elided.
2738 :: BlockId -- the branch target
2739 -> CmmExpr -- the condition on which to branch
2742 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2744 #if alpha_TARGET_ARCH
-- Alpha: a comparison against literal zero branches directly on the
-- value with BI, using a condition derived from the operator.
2746 genCondJump id (StPrim op [x, StInt 0])
2747 = getRegister x `thenNat` \ register ->
2748 getNewRegNat (registerRep register)
2751 code = registerCode register tmp
2752 value = registerName register tmp
2753 pk = registerRep register
2754 target = ImmCLbl lbl
2756 returnSeq code [BI (cmpOp op) value target]
-- Unsigned (Word/Addr) comparisons against zero degenerate:
-- x >= 0 is ALWAYS true, x < 0 is NEVER true, x <= 0 means x == 0.
-- NOTE(review): several cmpOp alternatives are elided in this listing.
2758 cmpOp CharGtOp = GTT
2760 cmpOp CharEqOp = EQQ
2762 cmpOp CharLtOp = LTT
2771 cmpOp WordGeOp = ALWAYS
2772 cmpOp WordEqOp = EQQ
2774 cmpOp WordLtOp = NEVER
2775 cmpOp WordLeOp = EQQ
2777 cmpOp AddrGeOp = ALWAYS
2778 cmpOp AddrEqOp = EQQ
2780 cmpOp AddrLtOp = NEVER
2781 cmpOp AddrLeOp = EQQ
-- Float comparison against literal 0.0: branch on the FP value with BF
-- and a condition derived from the operator.
2783 genCondJump lbl (StPrim op [x, StDouble 0.0])
2784 = getRegister x `thenNat` \ register ->
2785 getNewRegNat (registerRep register)
2788 code = registerCode register tmp
2789 value = registerName register tmp
2790 pk = registerRep register
2791 target = ImmCLbl lbl
2793 return (code . mkSeqInstr (BF (cmpOp op) value target))
2795 cmpOp FloatGtOp = GTT
2796 cmpOp FloatGeOp = GE
2797 cmpOp FloatEqOp = EQQ
2798 cmpOp FloatNeOp = NE
2799 cmpOp FloatLtOp = LTT
2800 cmpOp FloatLeOp = LE
2801 cmpOp DoubleGtOp = GTT
2802 cmpOp DoubleGeOp = GE
2803 cmpOp DoubleEqOp = EQQ
2804 cmpOp DoubleNeOp = NE
2805 cmpOp DoubleLtOp = LTT
2806 cmpOp DoubleLeOp = LE
-- General float comparison: run the FCMP (via trivialFCode) and branch
-- on its boolean result with BF.  The (instr, cond) table uses an
-- inverted test: e.g. Gt compares with LE and branches when that
-- result is zero (BF ... EQQ), i.e. when LE is false.
2808 genCondJump lbl (StPrim op [x, y])
2810 = trivialFCode pr instr x y `thenNat` \ register ->
2811 getNewRegNat F64 `thenNat` \ tmp ->
2813 code = registerCode register tmp
2814 result = registerName register tmp
2815 target = ImmCLbl lbl
2817 return (code . mkSeqInstr (BF cond result target))
2819 pr = panic "trivialU?FCode: does not use PrimRep on Alpha"
-- NOTE(review): the body of fltCmpOp is elided in this listing.
2821 fltCmpOp op = case op of
2835 (instr, cond) = case op of
2836 FloatGtOp -> (FCMP TF LE, EQQ)
2837 FloatGeOp -> (FCMP TF LTT, EQQ)
2838 FloatEqOp -> (FCMP TF EQQ, NE)
2839 FloatNeOp -> (FCMP TF EQQ, EQQ)
2840 FloatLtOp -> (FCMP TF LTT, NE)
2841 FloatLeOp -> (FCMP TF LE, NE)
2842 DoubleGtOp -> (FCMP TF LE, EQQ)
2843 DoubleGeOp -> (FCMP TF LTT, EQQ)
2844 DoubleEqOp -> (FCMP TF EQQ, NE)
2845 DoubleNeOp -> (FCMP TF EQQ, EQQ)
2846 DoubleLtOp -> (FCMP TF LTT, NE)
2847 DoubleLeOp -> (FCMP TF LE, NE)
-- General integer comparison: same inverted-test scheme as the float
-- case above, but using CMP (trivialCode) and branching with BI.
-- Word/Addr operators use the unsigned compares (ULT/ULE).
2849 genCondJump lbl (StPrim op [x, y])
2850 = trivialCode instr x y `thenNat` \ register ->
2851 getNewRegNat IntRep `thenNat` \ tmp ->
2853 code = registerCode register tmp
2854 result = registerName register tmp
2855 target = ImmCLbl lbl
2857 return (code . mkSeqInstr (BI cond result target))
2859 (instr, cond) = case op of
2860 CharGtOp -> (CMP LE, EQQ)
2861 CharGeOp -> (CMP LTT, EQQ)
2862 CharEqOp -> (CMP EQQ, NE)
2863 CharNeOp -> (CMP EQQ, EQQ)
2864 CharLtOp -> (CMP LTT, NE)
2865 CharLeOp -> (CMP LE, NE)
2866 IntGtOp -> (CMP LE, EQQ)
2867 IntGeOp -> (CMP LTT, EQQ)
2868 IntEqOp -> (CMP EQQ, NE)
2869 IntNeOp -> (CMP EQQ, EQQ)
2870 IntLtOp -> (CMP LTT, NE)
2871 IntLeOp -> (CMP LE, NE)
2872 WordGtOp -> (CMP ULE, EQQ)
2873 WordGeOp -> (CMP ULT, EQQ)
2874 WordEqOp -> (CMP EQQ, NE)
2875 WordNeOp -> (CMP EQQ, EQQ)
2876 WordLtOp -> (CMP ULT, NE)
2877 WordLeOp -> (CMP ULE, NE)
2878 AddrGtOp -> (CMP ULE, EQQ)
2879 AddrGeOp -> (CMP ULT, EQQ)
2880 AddrEqOp -> (CMP EQQ, NE)
2881 AddrNeOp -> (CMP EQQ, EQQ)
2882 AddrLtOp -> (CMP ULT, NE)
2883 AddrLeOp -> (CMP ULE, NE)
2885 #endif /* alpha_TARGET_ARCH */
2887 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2889 #if i386_TARGET_ARCH || x86_64_TARGET_ARCH
-- x86/x86_64: set the flags via getCondCode, then a single conditional
-- jump (JXX) on the resulting condition.
2891 genCondJump id bool = do
2892 CondCode _ cond code <- getCondCode bool
2893 return (code `snocOL` JXX cond id)
2895 #endif /* i386_TARGET_ARCH || x86_64_TARGET_ARCH */
2898 #if sparc_TARGET_ARCH
-- SPARC: float conditions branch with BF and need an extra NOP before
-- the branch (an FP op cannot directly precede a BF); integer
-- conditions branch with BI.  Both end with a delay-slot NOP.
-- NOTE(review): the lines wrapping these lists into the returned
-- InstrBlock are elided in this listing.
2900 genCondJump id bool = do
2901 CondCode is_float cond code <- getCondCode bool
2906 then [NOP, BF cond False id, NOP]
2907 else [BI cond False id, NOP]
2911 #endif /* sparc_TARGET_ARCH */
2914 #if powerpc_TARGET_ARCH
-- PowerPC: set the condition register via getCondCode, then one BCC.
-- (is_float is bound but unused in this visible body.)
2916 genCondJump id bool = do
2917 CondCode is_float cond code <- getCondCode bool
2918 return (code `snocOL` BCC cond id)
2920 #endif /* powerpc_TARGET_ARCH */
2923 -- -----------------------------------------------------------------------------
2924 -- Generating C calls
2926 -- Now the biggest nightmare---calls. Most of the nastiness is buried in
2927 -- @get_arg@, which moves the arguments to the correct registers/stack
2928 -- locations. Apart from that, the code is easy.
2930 -- (If applicable) Do not fill the delay slots here; you will confuse the
2931 -- register allocator.
-- genCCall's type signature; the 'genCCall' name line is elided here.
2934 :: CmmCallTarget -- function to call
2935 -> [(CmmReg,MachHint)] -- where to put the result
2936 -> [(CmmExpr,MachHint)] -- arguments (of mixed type)
2937 -> Maybe [GlobalReg] -- volatile regs to save
2940 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2942 #if alpha_TARGET_ARCH
-- Alpha C call: place the arguments with get_arg, then LDA the callee
-- address into pv, JSR through it, and restore gp afterwards.
2946 genCCall fn cconv result_regs args
2947 = mapAccumLNat get_arg (allArgRegs, eXTRA_STK_ARGS_HERE) args
2948 `thenNat` \ ((unused,_), argCode) ->
2950 nRegs = length allArgRegs - length unused
2951 code = asmSeqThen (map ($ []) argCode)
2954 LDA pv (AddrImm (ImmLab (ptext fn))),
2955 JSR ra (AddrReg pv) nRegs,
2956 LDGP gp (AddrReg ra)]
2958 ------------------------
2959 {- Try to get a value into a specific register (or registers) for
2960 a call. The first 6 arguments go into the appropriate
2961 argument register (separate registers for integer and floating
2962 point arguments, but used in lock-step), and the remaining
2963 arguments are dumped to the stack, beginning at 0(sp). Our
2964 first argument is a pair of the list of remaining argument
2965 registers to be assigned for this call and the next stack
2966 offset to use for overflowing arguments. This way,
2967 @get_Arg@ can be applied to all of a call's arguments using
2971 :: ([(Reg,Reg)], Int) -- Argument registers and stack offset (accumulator)
2972 -> StixTree -- Current argument
2973 -> NatM (([(Reg,Reg)],Int), InstrBlock) -- Updated accumulator and code
2975 -- We have to use up all of our argument registers first...
-- While register pairs remain: pick the int or float register of the
-- pair depending on the argument's rep; Fixed sources are copied in.
2977 get_arg ((iDst,fDst):dsts, offset) arg
2978 = getRegister arg `thenNat` \ register ->
2980 reg = if isFloatingRep pk then fDst else iDst
2981 code = registerCode register reg
2982 src = registerName register reg
2983 pk = registerRep register
2986 if isFloatingRep pk then
2987 ((dsts, offset), if isFixed register then
2988 code . mkSeqInstr (FMOV src fDst)
2991 ((dsts, offset), if isFixed register then
2992 code . mkSeqInstr (OR src (RIReg src) iDst)
2995 -- Once we have run out of argument registers, we move to the
-- Out of registers: ST each remaining argument to the stack, bumping
-- the offset by one slot per argument.
2998 get_arg ([], offset) arg
2999 = getRegister arg `thenNat` \ register ->
3000 getNewRegNat (registerRep register)
3003 code = registerCode register tmp
3004 src = registerName register tmp
3005 pk = registerRep register
3006 sz = primRepToSize pk
3008 return (([], offset + 1), code . mkSeqInstr (ST sz src (spRel offset)))
3010 #endif /* alpha_TARGET_ARCH */
3012 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3014 #if i386_TARGET_ARCH
3016 -- we only cope with a single result for foreign calls
-- i386: sqrt/sin/cos/tan float primops are inlined as single x87
-- instructions; every other float primop goes out of line as a C call.
3017 genCCall (CmmPrim op) [(r,_)] args vols = do
3019 MO_F32_Sqrt -> actuallyInlineFloatOp F32 (GSQRT F32) args
3020 MO_F64_Sqrt -> actuallyInlineFloatOp F64 (GSQRT F64) args
3022 MO_F32_Sin -> actuallyInlineFloatOp F32 (GSIN F32) args
3023 MO_F64_Sin -> actuallyInlineFloatOp F64 (GSIN F64) args
3025 MO_F32_Cos -> actuallyInlineFloatOp F32 (GCOS F32) args
3026 MO_F64_Cos -> actuallyInlineFloatOp F64 (GCOS F64) args
3028 MO_F32_Tan -> actuallyInlineFloatOp F32 (GTAN F32) args
3029 MO_F64_Tan -> actuallyInlineFloatOp F64 (GTAN F64) args
3031 other_op -> outOfLineFloatOp op r args vols
-- Emit the one instruction and write its result into r's register.
-- NOTE(review): a line binding 'any' (the Any-register continuation)
-- is elided between 3034 and 3036; this is not Prelude 'any'.
3033 actuallyInlineFloatOp rep instr [(x,_)]
3034 = do res <- trivialUFCode rep instr x
3036 return (any (getRegisterReg r))
-- i386 general foreign call: push args right-to-left on the C stack,
-- CALL (direct to a label, or indirect through a register), then pop
-- the args afterwards (caller pops for ccall; stdcall callee pops) and
-- move the result out of eax/edx/fake0 into the destination register.
-- Stack-pointer movement is tracked via getDeltaNat/setDeltaNat and
-- DELTA pseudo-instructions.
3038 genCCall target dest_regs args vols = do
3039 sizes_n_codes <- mapM push_arg (reverse args)
3040 delta <- getDeltaNat
3042 (sizes, push_codes) = unzip sizes_n_codes
3043 tot_arg_size = sum sizes
3045 -- deal with static vs dynamic call targets
3046 (callinsns,cconv) <-
3049 CmmForeignCall (CmmLit (CmmLabel lbl)) conv
3050 -> -- ToDo: stdcall arg sizes
3051 return (unitOL (CALL (Left fn_imm)), conv)
3052 where fn_imm = ImmCLbl lbl
3053 CmmForeignCall expr conv
3054 -> do (dyn_c, dyn_r, dyn_rep) <- get_op expr
3055 ASSERT(dyn_rep == I32)
3056 return (dyn_c `snocOL` CALL (Right dyn_r), conv)
3058 let push_code = concatOL push_codes
3059 call = callinsns `appOL`
3061 -- Deallocate parameters after call for ccall;
3062 -- but not for stdcall (callee does it)
3063 (if cconv == StdCallConv || tot_arg_size==0 then [] else
3064 [ADD I32 (OpImm (ImmInt tot_arg_size)) (OpReg esp)])
3066 [DELTA (delta + tot_arg_size)]
3069 setDeltaNat (delta + tot_arg_size)
3072 -- assign the results, if necessary
3073 assign_code [] = nilOL
-- I64 results come back split across eax (low) / edx (high); float
-- results come back in the x87 fake0 register.
3074 assign_code [(dest,_hint)] =
3076 I64 -> toOL [MOV I32 (OpReg eax) (OpReg r_dest),
3077 MOV I32 (OpReg edx) (OpReg r_dest_hi)]
3078 F32 -> unitOL (GMOV fake0 r_dest)
3079 F64 -> unitOL (GMOV fake0 r_dest)
3080 rep -> unitOL (MOV rep (OpReg eax) (OpReg r_dest))
3082 r_dest_hi = getHiVRegFromLo r_dest
3083 rep = cmmRegRep dest
3084 r_dest = getRegisterReg dest
3085 assign_code many = panic "genCCall.assign_code many"
3087 return (push_code `appOL`
3089 assign_code dest_regs)
-- Push one call argument; returns (size in bytes, code).  An I64 is
-- pushed as two 32-bit words (high word first); F32/F64 make room with
-- SUB esp and store with GST; everything else is a plain PUSH.  Each
-- push records its stack movement with a DELTA pseudo-instruction.
3096 push_arg :: (CmmExpr,MachHint){-current argument-}
3097 -> NatM (Int, InstrBlock) -- argsz, code
3099 push_arg (arg,_hint) -- we don't need the hints on x86
3100 | arg_rep == I64 = do
3101 ChildCode64 code r_lo <- iselExpr64 arg
3102 delta <- getDeltaNat
3103 setDeltaNat (delta - 8)
3105 r_hi = getHiVRegFromLo r_lo
3107 return (8, code `appOL`
3108 toOL [PUSH I32 (OpReg r_hi), DELTA (delta - 4),
3109 PUSH I32 (OpReg r_lo), DELTA (delta - 8),
-- NOTE(review): the '| otherwise = do' guard line is elided here.
3114 (code, reg, sz) <- get_op arg
3115 delta <- getDeltaNat
3116 let size = arg_size sz
3117 setDeltaNat (delta-size)
3118 if (case sz of F64 -> True; F32 -> True; _ -> False)
3121 toOL [SUB I32 (OpImm (ImmInt size)) (OpReg esp),
3123 GST sz reg (AddrBaseIndex (Just esp)
3129 PUSH I32 (OpReg reg) `snocOL`
3133 arg_rep = cmmExprRep arg
-- Evaluate an expression into some register, also reporting its rep.
-- NOTE(review): the 'get_op op = do' equation head is elided here.
3136 get_op :: CmmExpr -> NatM (InstrBlock, Reg, MachRep) -- code, reg, size
3138 (reg,code) <- getSomeReg op
3139 return (code, reg, cmmExprRep op)
3142 outOfLineFloatOp :: CallishMachOp -> CmmReg -> [(CmmExpr,MachHint)]
3143 -> Maybe [GlobalReg] -> NatM InstrBlock
-- Implement a float primop as a C call to the matching math-library
-- routine.  F64 results call straight through; F32 results are
-- computed at F64 -- the args are promoted, the call made into an F64
-- temp, and the result demoted -- because the C routines are double
-- precision.
3144 outOfLineFloatOp mop res args vols
3145 | cmmRegRep res == F64
3146 = stmtToInstrs (CmmCall target [(res,FloatHint)] args vols)
3149 = do uq <- getUniqueNat
3151 tmp = CmmLocal (LocalReg uq F64)
3153 code1 <- stmtToInstrs (CmmCall target [(tmp,FloatHint)] (map promote args) vols)
3154 code2 <- stmtToInstrs (CmmAssign res (demote (CmmReg tmp)))
3155 return (code1 `appOL` code2)
3157 promote (x,hint) = (CmmMachOp (MO_S_Conv F32 F64) [x], hint)
3158 demote x = CmmMachOp (MO_S_Conv F64 F32) [x]
3160 target = CmmForeignCall (CmmLit lbl) CCallConv
3161 lbl = CmmLabel (mkForeignLabel fn Nothing False)
-- Map each CallishMachOp to its C function name.
-- NOTE(review): the 'fn = case mop of' line and several alternatives
-- are elided in this listing.
3164 MO_F32_Exp -> FSLIT("exp")
3165 MO_F32_Log -> FSLIT("log")
3167 MO_F32_Asin -> FSLIT("asin")
3168 MO_F32_Acos -> FSLIT("acos")
3169 MO_F32_Atan -> FSLIT("atan")
3171 MO_F32_Sinh -> FSLIT("sinh")
3172 MO_F32_Cosh -> FSLIT("cosh")
3173 MO_F32_Tanh -> FSLIT("tanh")
3174 MO_F32_Pwr -> FSLIT("pow")
3176 MO_F64_Exp -> FSLIT("exp")
3177 MO_F64_Log -> FSLIT("log")
3179 MO_F64_Asin -> FSLIT("asin")
3180 MO_F64_Acos -> FSLIT("acos")
3181 MO_F64_Atan -> FSLIT("atan")
3183 MO_F64_Sinh -> FSLIT("sinh")
3184 MO_F64_Cosh -> FSLIT("cosh")
3185 MO_F64_Tanh -> FSLIT("tanh")
3186 MO_F64_Pwr -> FSLIT("pow")
3188 other -> pprPanic "outOfLineFloatOp" (pprCallishMachOp mop)
3190 #endif /* i386_TARGET_ARCH */
3192 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3194 #if x86_64_TARGET_ARCH
-- x86_64 has no inline float primops here; CmmPrim is unsupported.
3196 genCCall (CmmPrim op) [(r,_)] args vols =
3197 panic "genCCall(CmmPrim)(x86_64)"
-- x86_64 general foreign call (System V AMD64-style): load as many
-- args as fit into the integer/FP argument registers, push the rest
-- right-to-left, keep %rsp 16-byte aligned at the call, set %al to the
-- number of SSE registers used (varargs requirement), CALL, pop the
-- stack args, and move the result out of rax/xmm0.
3199 genCCall target dest_regs args vols = do
3201 -- load up the register arguments
3202 (stack_args, sse_regs, load_args_code)
3203 <- load_args args allArgRegs allFPArgRegs 0 nilOL
3206 tot_arg_size = arg_size * length stack_args
3208 -- On entry to the called function, %rsp should be aligned
3209 -- on a 16-byte boundary +8 (i.e. the first stack arg after
3210 -- the return address is 16-byte aligned). In STG land
3211 -- %rsp is kept 16-byte aligned (see StgCRun.c), so we just
3212 -- need to make sure we push a multiple of 16-bytes of args,
3213 -- plus the return address, to get the correct alignment.
3214 -- Urg, this is hard. We need to feed the delta back into
3215 -- the arg pushing code.
3216 (real_size, adjust_rsp) <-
3217 if tot_arg_size `rem` 16 == 0
3218 then return (tot_arg_size, nilOL)
3219 else do -- we need to adjust...
3220 delta <- getDeltaNat
3221 setDeltaNat (delta-8)
3222 return (tot_arg_size+8, toOL [
3223 SUB I64 (OpImm (ImmInt 8)) (OpReg rsp),
3227 -- push the stack args, right to left
3228 push_code <- push_args (reverse stack_args) nilOL
3229 delta <- getDeltaNat
3231 -- deal with static vs dynamic call targets
3232 (callinsns,cconv) <-
3235 CmmForeignCall (CmmLit (CmmLabel lbl)) conv
3236 -> -- ToDo: stdcall arg sizes
3237 return (unitOL (CALL (Left fn_imm)), conv)
3238 where fn_imm = ImmCLbl lbl
3239 CmmForeignCall expr conv
3240 -> do (dyn_r, dyn_c) <- getSomeReg expr
3241 return (dyn_c `snocOL` CALL (Right dyn_r), conv)
3244 -- The x86_64 ABI requires us to set %al to the number of SSE
3245 -- registers that contain arguments, if the called routine
3246 -- is a varargs function. We don't know whether it's a
3247 -- varargs function or not, so we have to assume it is.
3249 -- It's not safe to omit this assignment, even if the number
3250 -- of SSE regs in use is zero. If %al is larger than 8
3251 -- on entry to a varargs function, seg faults ensue.
3252 assign_eax n = unitOL (MOV I32 (OpImm (ImmInt n)) (OpReg eax))
3254 let call = callinsns `appOL`
3256 -- Deallocate parameters after call for ccall;
3257 -- but not for stdcall (callee does it)
-- NOTE(review): this ADD targets 'esp' while the SUB above targets
-- 'rsp' -- presumably the same machine register under this NCG's
-- naming; confirm against the Reg definitions.
3258 (if cconv == StdCallConv || real_size==0 then [] else
3259 [ADD wordRep (OpImm (ImmInt real_size)) (OpReg esp)])
3261 [DELTA (delta + real_size)]
3264 setDeltaNat (delta + real_size)
3267 -- assign the results, if necessary
3268 assign_code [] = nilOL
-- Float results arrive in xmm0, everything else in rax.
3269 assign_code [(dest,_hint)] =
3271 F32 -> unitOL (MOV rep (OpReg xmm0) (OpReg r_dest))
3272 F64 -> unitOL (MOV rep (OpReg xmm0) (OpReg r_dest))
3273 rep -> unitOL (MOV rep (OpReg rax) (OpReg r_dest))
3275 rep = cmmRegRep dest
3276 r_dest = getRegisterReg dest
3277 assign_code many = panic "genCCall.assign_code many"
3279 return (load_args_code `appOL`
3282 assign_eax sse_regs `appOL`
3284 assign_code dest_regs)
-- where-helpers for the x86_64 genCCall above.
-- (The 'where' keyword line is elided in this listing.)
3287 arg_size = 8 -- always, at the mo
-- Distribute arguments over the integer and FP argument registers;
-- whatever does not fit is returned as stack_args.  sse_regs counts
-- how many FP registers were used (fed to assign_eax above).
3289 load_args :: [(CmmExpr,MachHint)]
3290 -> [Reg] -- int regs avail for args
3291 -> [Reg] -- FP regs avail for args
3292 -> Int -> InstrBlock
3293 -> NatM ([(CmmExpr,MachHint)],Int,InstrBlock)
3294 load_args args [] [] sse_regs code = return (args, sse_regs, code)
3295 -- no more regs to use
3296 load_args [] aregs fregs sse_regs code = return ([],sse_regs,code)
3297 -- no more args to push
3298 load_args ((arg,hint) : rest) aregs fregs sse_regs code
3299 | isFloatingRep arg_rep =
-- NOTE(review): the 'case fregs of' / 'case aregs of' scrutinee lines
-- are elided around here; the (r:rs) alternatives follow.
3303 arg_code <- getAnyReg arg
3304 load_args rest aregs rs (sse_regs+1) (code `appOL` arg_code r)
3309 arg_code <- getAnyReg arg
3310 load_args rest rs fregs sse_regs (code `appOL` arg_code r)
3312 arg_rep = cmmExprRep arg
3315 (args',sse',code') <- load_args rest aregs fregs sse_regs code
3316 return ((arg,hint):args', sse', code')
-- Push the leftover (stack) arguments: floats are stored via MOV after
-- making room; integers are PUSHed directly.  Deltas are tracked.
3318 push_args [] code = return code
3319 push_args ((arg,hint):rest) code
3320 | isFloatingRep arg_rep = do
3321 (arg_reg, arg_code) <- getSomeReg arg
3322 delta <- getDeltaNat
3323 setDeltaNat (delta-arg_size)
3324 let code' = code `appOL` toOL [
3325 MOV arg_rep (OpReg arg_reg) (OpAddr (spRel 0)),
3326 SUB wordRep (OpImm (ImmInt arg_size)) (OpReg rsp) ,
3327 DELTA (delta-arg_size)]
3328 push_args rest code'
3331 -- we only ever generate word-sized function arguments. Promotion
3332 -- has already happened: our Int8# type is kept sign-extended
3333 -- in an Int#, for example.
3334 ASSERT(arg_rep == I64) return ()
3335 (arg_op, arg_code) <- getOperand arg
3336 delta <- getDeltaNat
3337 setDeltaNat (delta-arg_size)
3338 let code' = code `appOL` toOL [PUSH I64 arg_op,
3339 DELTA (delta-arg_size)]
3340 push_args rest code'
3342 arg_rep = cmmExprRep arg
3345 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3347 #if sparc_TARGET_ARCH
3349 The SPARC calling convention is an absolute
3350 nightmare. The first 6x32 bits of arguments are mapped into
3351 %o0 through %o5, and the remaining arguments are dumped to the
3352 stack, beginning at [%sp+92]. (Note that %o6 == %sp.)
3354 If we have to put args on the stack, move %o6==%sp down by
3355 the number of words to go on the stack, to ensure there's enough space.
3357 According to Fraser and Hanson's lcc book, page 478, fig 17.2,
3358 16 words above the stack pointer is a word for the address of
3359 a structure return value. I use this as a temporary location
3360 for moving values from float to int regs. Certainly it isn't
3361 safe to put anything in the 16 words starting at %sp, since
3362 this area can get trashed at any time due to window overflows
3363 caused by signal handlers.
3365 A final complication (if the above isn't enough) is that
3366 we can't blithely calculate the arguments one by one into
3367 %o0 .. %o5. Consider the following nested calls:
3371 Naive code moves a into %o0, and (fff b c) into %o1. Unfortunately
3372 the inner call will itself use %o0, which trashes the value put there
3373 in preparation for the outer call. Upshot: we need to calculate the
3374 args into temporary regs, and move those to arg regs or onto the
3375 stack only immediately prior to the call proper. Sigh.
3378 genCCall fn cconv kind args
3379 = mapNat arg_to_int_vregs args `thenNat` \ argcode_and_vregs ->
3381 (argcodes, vregss) = unzip argcode_and_vregs
3382 n_argRegs = length allArgRegs
3383 n_argRegs_used = min (length vregs) n_argRegs
3384 vregs = concat vregss
3386 -- deal with static vs dynamic call targets
3389 -> return (unitOL (CALL (Left fn__2) n_argRegs_used False))
3391 -> arg_to_int_vregs dyn `thenNat` \ (dyn_c, [dyn_r]) ->
3392 return (dyn_c `snocOL` CALL (Right dyn_r) n_argRegs_used False)
3394 `thenNat` \ callinsns ->
3396 argcode = concatOL argcodes
3397 (move_sp_down, move_sp_up)
3398 = let diff = length vregs - n_argRegs
3399 nn = if odd diff then diff + 1 else diff -- keep 8-byte alignment
3402 else (unitOL (moveSp (-1*nn)), unitOL (moveSp (1*nn)))
3404 = toOL (move_final vregs allArgRegs eXTRA_STK_ARGS_HERE)
3406 return (argcode `appOL`
3407 move_sp_down `appOL`
3408 transfer_code `appOL`
3413 -- function names that begin with '.' are assumed to be special
3414 -- internally generated names like '.mul,' which don't get an
3415 -- underscore prefix
3416 -- ToDo:needed (WDP 96/03) ???
3417 fn_static = unLeft fn
3418 fn__2 = case (headFS fn_static) of
3419 '.' -> ImmLit (ftext fn_static)
3420 _ -> ImmCLbl (mkForeignLabel fn_static False)
3422 -- move args from the integer vregs into which they have been
3423 -- marshalled, into %o0 .. %o5, and the rest onto the stack.
3424 move_final :: [Reg] -> [Reg] -> Int -> [Instr]
3426 move_final [] _ offset -- all args done
3429 move_final (v:vs) [] offset -- out of aregs; move to stack
3430 = ST W v (spRel offset)
3431 : move_final vs [] (offset+1)
3433 move_final (v:vs) (a:az) offset -- move into an arg (%o[0..5]) reg
3434 = OR False g0 (RIReg v) a
3435 : move_final vs az offset
3437 -- generate code to calculate an argument, and move it into one
3438 -- or two integer vregs.
3439 arg_to_int_vregs :: CmmExpr -> NatM (OrdList Instr, [Reg])
3440 arg_to_int_vregs arg
3441 | is64BitRep (repOfCmmExpr arg)
3442 = iselExpr64 arg `thenNat` \ (ChildCode64 code vr_lo) ->
3443 let r_lo = VirtualRegI vr_lo
3444 r_hi = getHiVRegFromLo r_lo
3445 in return (code, [r_hi, r_lo])
3447 = getRegister arg `thenNat` \ register ->
3448 getNewRegNat (registerRep register) `thenNat` \ tmp ->
3449 let code = registerCode register tmp
3450 src = registerName register tmp
3451 pk = registerRep register
3453 -- the value is in src. Get it into 1 or 2 int vregs.
3456 getNewRegNat WordRep `thenNat` \ v1 ->
3457 getNewRegNat WordRep `thenNat` \ v2 ->
3460 FMOV DF src f0 `snocOL`
3461 ST F f0 (spRel 16) `snocOL`
3462 LD W (spRel 16) v1 `snocOL`
3463 ST F (fPair f0) (spRel 16) `snocOL`
3469 getNewRegNat WordRep `thenNat` \ v1 ->
3472 ST F src (spRel 16) `snocOL`
3478 getNewRegNat WordRep `thenNat` \ v1 ->
3480 code `snocOL` OR False g0 (RIReg src) v1
3484 #endif /* sparc_TARGET_ARCH */
3486 #if powerpc_TARGET_ARCH
3488 #if darwin_TARGET_OS || linux_TARGET_OS
3490 The PowerPC calling convention for Darwin/Mac OS X
3491 is described in Apple's document
3492 "Inside Mac OS X - Mach-O Runtime Architecture".
3494 PowerPC Linux uses the System V Release 4 Calling Convention
3495 for PowerPC. It is described in the
3496 "System V Application Binary Interface PowerPC Processor Supplement".
3498 Both conventions are similar:
3499 Parameters may be passed in general-purpose registers starting at r3, in
3500 floating point registers starting at f1, or on the stack.
3502 But there are substantial differences:
3503 * The number of registers used for parameter passing and the exact set of
3504 nonvolatile registers differs (see MachRegs.lhs).
3505 * On Darwin, stack space is always reserved for parameters, even if they are
3506 passed in registers. The called routine may choose to save parameters from
3507 registers to the corresponding space on the stack.
3508 * On Darwin, a corresponding amount of GPRs is skipped when a floating point
3509 parameter is passed in an FPR.
3510 * SysV insists on either passing I64 arguments on the stack, or in two GPRs,
3511 starting with an odd-numbered GPR. It may skip a GPR to achieve this.
3512 Darwin just treats an I64 like two separate I32s (high word first).
3513 * I64 and F64 arguments are 8-byte aligned on the stack for SysV, but only
3514 4-byte aligned like everything else on Darwin.
3515 * The SysV spec claims that F32 is represented as F64 on the stack. GCC on
3516 PowerPC Linux does not agree, so neither do we.
3518 According to both conventions, the parameter area should be part of the
3519 caller's stack frame, allocated in the caller's prologue code (large enough
3520 to hold the parameter lists for all called routines). The NCG already
3521 uses the stack for register spilling, leaving 64 bytes free at the top.
3522 If we need a larger parameter area than that, we just allocate a new stack
3523 frame just before ccalling.
3526 genCCall target dest_regs argsAndHints vols
3527 = ASSERT (not $ any (`elem` [I8,I16]) argReps)
3528 -- we rely on argument promotion in the codeGen
3530 (finalStack,passArgumentsCode,usedRegs) <- passArguments
3532 allArgRegs allFPArgRegs
3536 (labelOrExpr, reduceToF32) <- case target of
3537 CmmForeignCall (CmmLit (CmmLabel lbl)) conv -> return (Left lbl, False)
3538 CmmForeignCall expr conv -> return (Right expr, False)
3539 CmmPrim mop -> outOfLineFloatOp mop
3541 let codeBefore = move_sp_down finalStack `appOL` passArgumentsCode
3542 codeAfter = move_sp_up finalStack `appOL` moveResult reduceToF32
3547 `snocOL` BL lbl usedRegs
3550 (dynReg, dynCode) <- getSomeReg dyn
3552 `snocOL` MTCTR dynReg
3554 `snocOL` BCTRL usedRegs
3557 #if darwin_TARGET_OS
3558 initialStackOffset = 24
3559 -- size of linkage area + size of arguments, in bytes
3560 stackDelta _finalStack = roundTo 16 $ (24 +) $ max 32 $ sum $
3561 map machRepByteWidth argReps
3562 #elif linux_TARGET_OS
3563 initialStackOffset = 8
3564 stackDelta finalStack = roundTo 16 finalStack
3566 args = map fst argsAndHints
3567 argReps = map cmmExprRep args
3569 roundTo a x | x `mod` a == 0 = x
3570 | otherwise = x + a - (x `mod` a)
3572 move_sp_down finalStack
3574 toOL [STU I32 sp (AddrRegImm sp (ImmInt (-delta))),
3577 where delta = stackDelta finalStack
3578 move_sp_up finalStack
3580 toOL [ADD sp sp (RIImm (ImmInt delta)),
3583 where delta = stackDelta finalStack
3586 passArguments [] _ _ stackOffset accumCode accumUsed = return (stackOffset, accumCode, accumUsed)
3587 passArguments ((arg,I64):args) gprs fprs stackOffset
3588 accumCode accumUsed =
3590 ChildCode64 code vr_lo <- iselExpr64 arg
3591 let vr_hi = getHiVRegFromLo vr_lo
3593 #if darwin_TARGET_OS
3598 (accumCode `appOL` code
3599 `snocOL` storeWord vr_hi gprs stackOffset
3600 `snocOL` storeWord vr_lo (drop 1 gprs) (stackOffset+4))
3601 ((take 2 gprs) ++ accumUsed)
3603 storeWord vr (gpr:_) offset = MR gpr vr
3604 storeWord vr [] offset = ST I32 vr (AddrRegImm sp (ImmInt offset))
3606 #elif linux_TARGET_OS
3607 let stackOffset' = roundTo 8 stackOffset
3608 stackCode = accumCode `appOL` code
3609 `snocOL` ST I32 vr_hi (AddrRegImm sp (ImmInt stackOffset'))
3610 `snocOL` ST I32 vr_lo (AddrRegImm sp (ImmInt (stackOffset'+4)))
3611 regCode hireg loreg =
3612 accumCode `appOL` code
3613 `snocOL` MR hireg vr_hi
3614 `snocOL` MR loreg vr_lo
3617 hireg : loreg : regs | even (length gprs) ->
3618 passArguments args regs fprs stackOffset
3619 (regCode hireg loreg) (hireg : loreg : accumUsed)
3620 _skipped : hireg : loreg : regs ->
3621 passArguments args regs fprs stackOffset
3622 (regCode hireg loreg) (hireg : loreg : accumUsed)
3623 _ -> -- only one or no regs left
3624 passArguments args [] fprs (stackOffset'+8)
3628 passArguments ((arg,rep):args) gprs fprs stackOffset accumCode accumUsed
3629 | reg : _ <- regs = do
3630 register <- getRegister arg
3631 let code = case register of
3632 Fixed _ freg fcode -> fcode `snocOL` MR reg freg
3633 Any _ acode -> acode reg
3637 #if darwin_TARGET_OS
3638 -- The Darwin ABI requires that we reserve stack slots for register parameters
3639 (stackOffset + stackBytes)
3640 #elif linux_TARGET_OS
3641 -- ... the SysV ABI doesn't.
3644 (accumCode `appOL` code)
3647 (vr, code) <- getSomeReg arg
3651 (stackOffset' + stackBytes)
3652 (accumCode `appOL` code `snocOL` ST rep vr stackSlot)
3655 #if darwin_TARGET_OS
3656 -- stackOffset is at least 4-byte aligned
3657 -- The Darwin ABI is happy with that.
3658 stackOffset' = stackOffset
3660 -- ... the SysV ABI requires 8-byte alignment for doubles.
3661 stackOffset' | rep == F64 = roundTo 8 stackOffset
3662 | otherwise = stackOffset
3664 stackSlot = AddrRegImm sp (ImmInt stackOffset')
3665 (nGprs, nFprs, stackBytes, regs) = case rep of
3666 I32 -> (1, 0, 4, gprs)
3667 #if darwin_TARGET_OS
3668 -- The Darwin ABI requires that we skip a corresponding number of GPRs when
3670 F32 -> (1, 1, 4, fprs)
3671 F64 -> (2, 1, 8, fprs)
3672 #elif linux_TARGET_OS
3673 -- ... the SysV ABI doesn't.
3674 F32 -> (0, 1, 4, fprs)
3675 F64 -> (0, 1, 8, fprs)
3678 moveResult reduceToF32 =
3682 | reduceToF32 && rep == F32 -> unitOL (FRSP r_dest f1)
3683 | rep == F32 || rep == F64 -> unitOL (MR r_dest f1)
3684 | rep == I64 -> toOL [MR (getHiVRegFromLo r_dest) r3,
3686 | otherwise -> unitOL (MR r_dest r3)
3687 where rep = cmmRegRep dest
3688 r_dest = getRegisterReg dest
3690 outOfLineFloatOp mop =
3692 mopExpr <- cmmMakeDynamicReference addImportNat True $
3693 mkForeignLabel functionName Nothing True
3694 let mopLabelOrExpr = case mopExpr of
3695 CmmLit (CmmLabel lbl) -> Left lbl
3697 return (mopLabelOrExpr, reduce)
3699 (functionName, reduce) = case mop of
3700 MO_F32_Exp -> (FSLIT("exp"), True)
3701 MO_F32_Log -> (FSLIT("log"), True)
3702 MO_F32_Sqrt -> (FSLIT("sqrt"), True)
3704 MO_F32_Sin -> (FSLIT("sin"), True)
3705 MO_F32_Cos -> (FSLIT("cos"), True)
3706 MO_F32_Tan -> (FSLIT("tan"), True)
3708 MO_F32_Asin -> (FSLIT("asin"), True)
3709 MO_F32_Acos -> (FSLIT("acos"), True)
3710 MO_F32_Atan -> (FSLIT("atan"), True)
3712 MO_F32_Sinh -> (FSLIT("sinh"), True)
3713 MO_F32_Cosh -> (FSLIT("cosh"), True)
3714 MO_F32_Tanh -> (FSLIT("tanh"), True)
3715 MO_F32_Pwr -> (FSLIT("pow"), True)
3717 MO_F64_Exp -> (FSLIT("exp"), False)
3718 MO_F64_Log -> (FSLIT("log"), False)
3719 MO_F64_Sqrt -> (FSLIT("sqrt"), False)
3721 MO_F64_Sin -> (FSLIT("sin"), False)
3722 MO_F64_Cos -> (FSLIT("cos"), False)
3723 MO_F64_Tan -> (FSLIT("tan"), False)
3725 MO_F64_Asin -> (FSLIT("asin"), False)
3726 MO_F64_Acos -> (FSLIT("acos"), False)
3727 MO_F64_Atan -> (FSLIT("atan"), False)
3729 MO_F64_Sinh -> (FSLIT("sinh"), False)
3730 MO_F64_Cosh -> (FSLIT("cosh"), False)
3731 MO_F64_Tanh -> (FSLIT("tanh"), False)
3732 MO_F64_Pwr -> (FSLIT("pow"), False)
3733 other -> pprPanic "genCCall(ppc): unknown callish op"
3734 (pprCallishMachOp other)
3736 #endif /* darwin_TARGET_OS || linux_TARGET_OS */
3738 #endif /* powerpc_TARGET_ARCH */
3741 -- -----------------------------------------------------------------------------
3742 -- Generating a table-branch
3744 genSwitch :: CmmExpr -> [Maybe BlockId] -> NatM InstrBlock
3746 #if i386_TARGET_ARCH || x86_64_TARGET_ARCH
3747 genSwitch expr ids = do
3748 (reg,e_code) <- getSomeReg expr
3749 lbl <- getNewLabelNat
3751 jumpTable = map jumpTableEntry ids
3752 op = OpAddr (AddrBaseIndex Nothing (Just (reg,wORD_SIZE)) (ImmCLbl lbl))
3753 code = e_code `appOL` toOL [
3754 LDATA ReadOnlyData (CmmDataLabel lbl : jumpTable),
3755 JMP_TBL op [ id | Just id <- ids ]
3759 #elif powerpc_TARGET_ARCH
3763 (reg,e_code) <- getSomeReg expr
3764 tmp <- getNewRegNat I32
3765 lbl <- getNewLabelNat
3766 dynRef <- cmmMakeDynamicReference addImportNat False lbl
3767 (tableReg,t_code) <- getSomeReg $ dynRef
3769 jumpTable = map jumpTableEntryRel ids
3771 jumpTableEntryRel Nothing
3772 = CmmStaticLit (CmmInt 0 wordRep)
3773 jumpTableEntryRel (Just (BlockId id))
3774 = CmmStaticLit (CmmLabelDiffOff blockLabel lbl 0)
3775 where blockLabel = mkAsmTempLabel id
3777 code = e_code `appOL` t_code `appOL` toOL [
3778 LDATA ReadOnlyData (CmmDataLabel lbl : jumpTable),
3779 SLW tmp reg (RIImm (ImmInt 2)),
3780 LD I32 tmp (AddrRegReg tableReg tmp),
3781 ADD tmp tmp (RIReg tableReg),
3783 BCTR [ id | Just id <- ids ]
3788 (reg,e_code) <- getSomeReg expr
3789 tmp <- getNewRegNat I32
3790 lbl <- getNewLabelNat
3792 jumpTable = map jumpTableEntry ids
3794 code = e_code `appOL` toOL [
3795 LDATA ReadOnlyData (CmmDataLabel lbl : jumpTable),
3796 SLW tmp reg (RIImm (ImmInt 2)),
3797 ADDIS tmp tmp (HA (ImmCLbl lbl)),
3798 LD I32 tmp (AddrRegImm tmp (LO (ImmCLbl lbl))),
3800 BCTR [ id | Just id <- ids ]
-- Fallback clause: architectures with no jump-table implementation yet.
3804 genSwitch expr ids = panic "ToDo: genSwitch"
-- | Build one static entry of an (absolute) jump table: the code label of
-- the target block for a live entry, or a zero word for a hole at that
-- index (Nothing).
3807 jumpTableEntry Nothing = CmmStaticLit (CmmInt 0 wordRep)
3808 jumpTableEntry (Just (BlockId id)) = CmmStaticLit (CmmLabel blockLabel)
3809 where blockLabel = mkAsmTempLabel id
3811 -- -----------------------------------------------------------------------------
3813 -- -----------------------------------------------------------------------------
3816 -- -----------------------------------------------------------------------------
3817 -- 'condIntReg' and 'condFltReg': condition codes into registers
3819 -- Turn those condition codes into integers now (when they appear on
3820 -- the right hand side of an assignment).
3822 -- (If applicable) Do not fill the delay slots here; you will confuse the
3823 -- register allocator.
3825 condIntReg, condFltReg :: Cond -> CmmExpr -> CmmExpr -> NatM Register
3827 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3829 #if alpha_TARGET_ARCH
-- Alpha: condition-code-into-register is not implemented; these clauses
-- panic at compile time rather than emit wrong code.
3830 condIntReg = panic "MachCode.condIntReg (not on Alpha)"
3831 condFltReg = panic "MachCode.condFltReg (not on Alpha)"
3832 #endif /* alpha_TARGET_ARCH */
3834 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3836 #if i386_TARGET_ARCH || x86_64_TARGET_ARCH
3838 condIntReg cond x y = do
3839 CondCode _ cond cond_code <- condIntCode cond x y
3840 tmp <- getNewRegNat I8
3842 code dst = cond_code `appOL` toOL [
3843 SETCC cond (OpReg tmp),
3844 MOV I32 (OpReg tmp) (OpReg dst),
3845 AND I32 (OpImm (ImmInt 1)) (OpReg dst)
3847 -- NB. (1) The AND is needed here because the x86 only
3848 -- sets the low byte in the SETCC instruction.
3849 -- NB. (2) The extra temporary register is a hack to
3850 -- work around the fact that the setcc instructions only
3851 -- accept byte registers. dst might not be a byte-able reg,
3852 -- but currently all free registers are byte-able, so we're
3853 -- guaranteed that a new temporary is byte-able.
3855 return (Any I32 code)
3858 condFltReg cond x y = do
3859 lbl1 <- getBlockIdNat
3860 lbl2 <- getBlockIdNat
3861 CondCode _ cond cond_code <- condFltCode cond x y
3863 code dst = cond_code `appOL` toOL [
3865 MOV I32 (OpImm (ImmInt 0)) (OpReg dst),
3868 MOV I32 (OpImm (ImmInt 1)) (OpReg dst),
3871 -- SIGH, have to split up this block somehow...
3873 return (Any I32 code)
3875 #endif /* i386_TARGET_ARCH */
3877 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3879 #if sparc_TARGET_ARCH
3881 condIntReg EQQ x (StInt 0)
3882 = getRegister x `thenNat` \ register ->
3883 getNewRegNat IntRep `thenNat` \ tmp ->
3885 code = registerCode register tmp
3886 src = registerName register tmp
3887 code__2 dst = code `appOL` toOL [
3888 SUB False True g0 (RIReg src) g0,
3889 SUB True False g0 (RIImm (ImmInt (-1))) dst]
3891 return (Any IntRep code__2)
3894 = getRegister x `thenNat` \ register1 ->
3895 getRegister y `thenNat` \ register2 ->
3896 getNewRegNat IntRep `thenNat` \ tmp1 ->
3897 getNewRegNat IntRep `thenNat` \ tmp2 ->
3899 code1 = registerCode register1 tmp1
3900 src1 = registerName register1 tmp1
3901 code2 = registerCode register2 tmp2
3902 src2 = registerName register2 tmp2
3903 code__2 dst = code1 `appOL` code2 `appOL` toOL [
3904 XOR False src1 (RIReg src2) dst,
3905 SUB False True g0 (RIReg dst) g0,
3906 SUB True False g0 (RIImm (ImmInt (-1))) dst]
3908 return (Any IntRep code__2)
3910 condIntReg NE x (StInt 0)
3911 = getRegister x `thenNat` \ register ->
3912 getNewRegNat IntRep `thenNat` \ tmp ->
3914 code = registerCode register tmp
3915 src = registerName register tmp
3916 code__2 dst = code `appOL` toOL [
3917 SUB False True g0 (RIReg src) g0,
3918 ADD True False g0 (RIImm (ImmInt 0)) dst]
3920 return (Any IntRep code__2)
3923 = getRegister x `thenNat` \ register1 ->
3924 getRegister y `thenNat` \ register2 ->
3925 getNewRegNat IntRep `thenNat` \ tmp1 ->
3926 getNewRegNat IntRep `thenNat` \ tmp2 ->
3928 code1 = registerCode register1 tmp1
3929 src1 = registerName register1 tmp1
3930 code2 = registerCode register2 tmp2
3931 src2 = registerName register2 tmp2
3932 code__2 dst = code1 `appOL` code2 `appOL` toOL [
3933 XOR False src1 (RIReg src2) dst,
3934 SUB False True g0 (RIReg dst) g0,
3935 ADD True False g0 (RIImm (ImmInt 0)) dst]
3937 return (Any IntRep code__2)
3940 = getBlockIdNat `thenNat` \ lbl1 ->
3941 getBlockIdNat `thenNat` \ lbl2 ->
3942 condIntCode cond x y `thenNat` \ condition ->
3944 code = condCode condition
3945 cond = condName condition
3946 code__2 dst = code `appOL` toOL [
3947 BI cond False (ImmCLbl lbl1), NOP,
3948 OR False g0 (RIImm (ImmInt 0)) dst,
3949 BI ALWAYS False (ImmCLbl lbl2), NOP,
3951 OR False g0 (RIImm (ImmInt 1)) dst,
3954 return (Any IntRep code__2)
3957 = getBlockIdNat `thenNat` \ lbl1 ->
3958 getBlockIdNat `thenNat` \ lbl2 ->
3959 condFltCode cond x y `thenNat` \ condition ->
3961 code = condCode condition
3962 cond = condName condition
3963 code__2 dst = code `appOL` toOL [
3965 BF cond False (ImmCLbl lbl1), NOP,
3966 OR False g0 (RIImm (ImmInt 0)) dst,
3967 BI ALWAYS False (ImmCLbl lbl2), NOP,
3969 OR False g0 (RIImm (ImmInt 1)) dst,
3972 return (Any IntRep code__2)
3974 #endif /* sparc_TARGET_ARCH */
3976 #if powerpc_TARGET_ARCH
3977 condReg getCond = do
3978 lbl1 <- getBlockIdNat
3979 lbl2 <- getBlockIdNat
3980 CondCode _ cond cond_code <- getCond
3982 {- code dst = cond_code `appOL` toOL [
3991 code dst = cond_code
3995 RLWINM dst dst (bit + 1) 31 31
3998 negate_code | do_negate = unitOL (CRNOR bit bit bit)
4001 (bit, do_negate) = case cond of
4015 return (Any I32 code)
-- Materialise an integer / floating-point comparison result into a
-- register by delegating to the shared 'condReg' helper above.
4017 condIntReg cond x y = condReg (condIntCode cond x y)
4018 condFltReg cond x y = condReg (condFltCode cond x y)
4019 #endif /* powerpc_TARGET_ARCH */
4022 -- -----------------------------------------------------------------------------
4023 -- 'trivial*Code': deal with trivial instructions
4025 -- Trivial (dyadic: 'trivialCode', floating-point: 'trivialFCode',
4026 -- unary: 'trivialUCode', unary fl-pt:'trivialUFCode') instructions.
4027 -- Only look for constants on the right hand side, because that's
4028 -- where the generic optimizer will have put them.
4030 -- Similarly, for unary instructions, we don't have to worry about
4031 -- matching an StInt as the argument, because genericOpt will already
4032 -- have handled the constant-folding.
4036 -> IF_ARCH_alpha((Reg -> RI -> Reg -> Instr)
4037 ,IF_ARCH_i386 ((Operand -> Operand -> Instr)
4038 -> Maybe (Operand -> Operand -> Instr)
4039 ,IF_ARCH_x86_64 ((Operand -> Operand -> Instr)
4040 -> Maybe (Operand -> Operand -> Instr)
4041 ,IF_ARCH_sparc((Reg -> RI -> Reg -> Instr)
4042 ,IF_ARCH_powerpc(Bool -> (Reg -> Reg -> RI -> Instr)
4044 -> CmmExpr -> CmmExpr -- the two arguments
4047 #ifndef powerpc_TARGET_ARCH
4050 -> IF_ARCH_alpha((Reg -> Reg -> Reg -> Instr)
4051 ,IF_ARCH_sparc((Size -> Reg -> Reg -> Reg -> Instr)
4052 ,IF_ARCH_i386 ((MachRep -> Reg -> Reg -> Reg -> Instr)
4053 ,IF_ARCH_x86_64 ((MachRep -> Operand -> Operand -> Instr)
4055 -> CmmExpr -> CmmExpr -- the two arguments
4061 -> IF_ARCH_alpha((RI -> Reg -> Instr)
4062 ,IF_ARCH_i386 ((Operand -> Instr)
4063 ,IF_ARCH_x86_64 ((Operand -> Instr)
4064 ,IF_ARCH_sparc((RI -> Reg -> Instr)
4065 ,IF_ARCH_powerpc((Reg -> Reg -> Instr)
4067 -> CmmExpr -- the one argument
4070 #ifndef powerpc_TARGET_ARCH
4073 -> IF_ARCH_alpha((Reg -> Reg -> Instr)
4074 ,IF_ARCH_i386 ((Reg -> Reg -> Instr)
4075 ,IF_ARCH_x86_64 ((Reg -> Reg -> Instr)
4076 ,IF_ARCH_sparc((Reg -> Reg -> Instr)
4078 -> CmmExpr -- the one argument
4082 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
4084 #if alpha_TARGET_ARCH
4086 trivialCode instr x (StInt y)
4088 = getRegister x `thenNat` \ register ->
4089 getNewRegNat IntRep `thenNat` \ tmp ->
4091 code = registerCode register tmp
4092 src1 = registerName register tmp
4093 src2 = ImmInt (fromInteger y)
4094 code__2 dst = code . mkSeqInstr (instr src1 (RIImm src2) dst)
4096 return (Any IntRep code__2)
4098 trivialCode instr x y
4099 = getRegister x `thenNat` \ register1 ->
4100 getRegister y `thenNat` \ register2 ->
4101 getNewRegNat IntRep `thenNat` \ tmp1 ->
4102 getNewRegNat IntRep `thenNat` \ tmp2 ->
4104 code1 = registerCode register1 tmp1 []
4105 src1 = registerName register1 tmp1
4106 code2 = registerCode register2 tmp2 []
4107 src2 = registerName register2 tmp2
4108 code__2 dst = asmSeqThen [code1, code2] .
4109 mkSeqInstr (instr src1 (RIReg src2) dst)
4111 return (Any IntRep code__2)
4114 trivialUCode instr x
4115 = getRegister x `thenNat` \ register ->
4116 getNewRegNat IntRep `thenNat` \ tmp ->
4118 code = registerCode register tmp
4119 src = registerName register tmp
4120 code__2 dst = code . mkSeqInstr (instr (RIReg src) dst)
4122 return (Any IntRep code__2)
4125 trivialFCode _ instr x y
4126 = getRegister x `thenNat` \ register1 ->
4127 getRegister y `thenNat` \ register2 ->
4128 getNewRegNat F64 `thenNat` \ tmp1 ->
4129 getNewRegNat F64 `thenNat` \ tmp2 ->
4131 code1 = registerCode register1 tmp1
4132 src1 = registerName register1 tmp1
4134 code2 = registerCode register2 tmp2
4135 src2 = registerName register2 tmp2
4137 code__2 dst = asmSeqThen [code1 [], code2 []] .
4138 mkSeqInstr (instr src1 src2 dst)
4140 return (Any F64 code__2)
4142 trivialUFCode _ instr x
4143 = getRegister x `thenNat` \ register ->
4144 getNewRegNat F64 `thenNat` \ tmp ->
4146 code = registerCode register tmp
4147 src = registerName register tmp
4148 code__2 dst = code . mkSeqInstr (instr src dst)
4150 return (Any F64 code__2)
4152 #endif /* alpha_TARGET_ARCH */
4154 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
4156 #if i386_TARGET_ARCH || x86_64_TARGET_ARCH
4159 The Rules of the Game are:
4161 * You cannot assume anything about the destination register dst;
4162 it may be anything, including a fixed reg.
4164 * You may compute an operand into a fixed reg, but you may not
4165 subsequently change the contents of that fixed reg. If you
4166 want to do so, first copy the value either to a temporary
4167 or into dst. You are free to modify dst even if it happens
4168 to be a fixed reg -- that's not your problem.
4170 * You cannot assume that a fixed reg will stay live over an
4171 arbitrary computation. The same applies to the dst reg.
4173 * Temporary regs obtained from getNewRegNat are distinct from
4174 each other and from all other regs, and stay live over
4175 arbitrary computations.
4177 --------------------
4179 SDM's version of The Rules:
4181 * If getRegister returns Any, that means it can generate correct
4182 code which places the result in any register, period. Even if that
4183 register happens to be read during the computation.
4185 Corollary #1: this means that if you are generating code for an
4186 operation with two arbitrary operands, you cannot assign the result
4187 of the first operand into the destination register before computing
4188 the second operand. The second operand might require the old value
4189 of the destination register.
4191 Corollary #2: A function might be able to generate more efficient
4192 code if it knows the destination register is a new temporary (and
4193 therefore not read by any of the sub-computations).
4195 * If getRegister returns Any, then the code it generates may modify only:
4196 (a) fresh temporaries
4197 (b) the destination register
4198 (c) known registers (eg. %ecx is used by shifts)
4199 In particular, it may *not* modify global registers, unless the global
4200 register happens to be the destination register.
4203 trivialCode rep instr (Just revinstr) (CmmLit lit_a) b
4204 | not (is64BitLit lit_a) = do
4205 b_code <- getAnyReg b
4208 = b_code dst `snocOL`
4209 revinstr (OpImm (litToImm lit_a)) (OpReg dst)
4211 return (Any rep code)
4213 trivialCode rep instr maybe_revinstr a b = do
4214 (b_op, b_code) <- getNonClobberedOperand b
4215 a_code <- getAnyReg a
4216 tmp <- getNewRegNat rep
4218 -- We want the value of b to stay alive across the computation of a.
4219 -- But, we want to calculate a straight into the destination register,
4220 -- because the instruction only has two operands (dst := dst `op` src).
4221 -- The troublesome case is when the result of b is in the same register
4222 -- as the destination reg. In this case, we have to save b in a
4223 -- new temporary across the computation of a.
4225 | dst `clashesWith` b_op =
4227 unitOL (MOV rep b_op (OpReg tmp)) `appOL`
4229 instr (OpReg tmp) (OpReg dst)
4233 instr b_op (OpReg dst)
4235 return (Any rep code)
4237 reg `clashesWith` OpReg reg2 = reg == reg2
4238 reg `clashesWith` OpAddr amode = any (==reg) (addrModeRegs amode)
4239 reg `clashesWith` _ = False
4243 trivialUCode rep instr x = do
4244 x_code <- getAnyReg x
4250 return (Any rep code)
4254 #if i386_TARGET_ARCH
4256 trivialFCode pk instr x y = do
4257 (x_reg, x_code) <- getNonClobberedReg x -- these work for float regs too
4258 (y_reg, y_code) <- getSomeReg y
4263 instr pk x_reg y_reg dst
4265 return (Any pk code)
4269 #if x86_64_TARGET_ARCH
4271 -- We use the 2-operand SSE2 floating pt instructions. ToDo: improve on
4272 -- this by using some of the special cases in trivialCode above.
4273 trivialFCode pk instr x y = do
4274 (y_reg, y_code) <- getNonClobberedReg y -- these work for float regs too
4275 x_code <- getAnyReg x
4280 instr pk (IF_ARCH_x86_64(OpReg,) y_reg)
4281 (IF_ARCH_x86_64(OpReg,) dst)
4283 return (Any pk code)
4289 trivialUFCode rep instr x = do
4290 (x_reg, x_code) <- getSomeReg x
4296 return (Any rep code)
4298 #endif /* i386_TARGET_ARCH */
4300 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
4302 #if sparc_TARGET_ARCH
4304 trivialCode instr x (StInt y)
4306 = getRegister x `thenNat` \ register ->
4307 getNewRegNat IntRep `thenNat` \ tmp ->
4309 code = registerCode register tmp
4310 src1 = registerName register tmp
4311 src2 = ImmInt (fromInteger y)
4312 code__2 dst = code `snocOL` instr src1 (RIImm src2) dst
4314 return (Any IntRep code__2)
4316 trivialCode instr x y
4317 = getRegister x `thenNat` \ register1 ->
4318 getRegister y `thenNat` \ register2 ->
4319 getNewRegNat IntRep `thenNat` \ tmp1 ->
4320 getNewRegNat IntRep `thenNat` \ tmp2 ->
4322 code1 = registerCode register1 tmp1
4323 src1 = registerName register1 tmp1
4324 code2 = registerCode register2 tmp2
4325 src2 = registerName register2 tmp2
4326 code__2 dst = code1 `appOL` code2 `snocOL`
4327 instr src1 (RIReg src2) dst
4329 return (Any IntRep code__2)
4332 trivialFCode pk instr x y
4333 = getRegister x `thenNat` \ register1 ->
4334 getRegister y `thenNat` \ register2 ->
4335 getNewRegNat (registerRep register1)
4337 getNewRegNat (registerRep register2)
4339 getNewRegNat F64 `thenNat` \ tmp ->
4341 promote x = FxTOy F DF x tmp
4343 pk1 = registerRep register1
4344 code1 = registerCode register1 tmp1
4345 src1 = registerName register1 tmp1
4347 pk2 = registerRep register2
4348 code2 = registerCode register2 tmp2
4349 src2 = registerName register2 tmp2
4353 code1 `appOL` code2 `snocOL`
4354 instr (primRepToSize pk) src1 src2 dst
4355 else if pk1 == F32 then
4356 code1 `snocOL` promote src1 `appOL` code2 `snocOL`
4357 instr DF tmp src2 dst
4359 code1 `appOL` code2 `snocOL` promote src2 `snocOL`
4360 instr DF src1 tmp dst
4362 return (Any (if pk1 == pk2 then pk1 else F64) code__2)
4365 trivialUCode instr x
4366 = getRegister x `thenNat` \ register ->
4367 getNewRegNat IntRep `thenNat` \ tmp ->
4369 code = registerCode register tmp
4370 src = registerName register tmp
4371 code__2 dst = code `snocOL` instr (RIReg src) dst
4373 return (Any IntRep code__2)
4376 trivialUFCode pk instr x
4377 = getRegister x `thenNat` \ register ->
4378 getNewRegNat pk `thenNat` \ tmp ->
4380 code = registerCode register tmp
4381 src = registerName register tmp
4382 code__2 dst = code `snocOL` instr src dst
4384 return (Any pk code__2)
4386 #endif /* sparc_TARGET_ARCH */
4388 #if powerpc_TARGET_ARCH
4391 Wolfgang's PowerPC version of The Rules:
4393 A slightly modified version of The Rules to take advantage of the fact
4394 that PowerPC instructions work on all registers and don't implicitly
4395 clobber any fixed registers.
4397 * The only expression for which getRegister returns Fixed is (CmmReg reg).
4399 * If getRegister returns Any, then the code it generates may modify only:
4400 (a) fresh temporaries
4401 (b) the destination register
4402 It may *not* modify global registers, unless the global
4403 register happens to be the destination register.
4404 It may not clobber any other registers. In fact, only ccalls clobber any
4406 Also, it may not modify the counter register (used by genCCall).
4408 Corollary: If a getRegister for a subexpression returns Fixed, you need
4409 not move it to a fresh temporary before evaluating the next subexpression.
4410 The Fixed register won't be modified.
4411 Therefore, we don't need a counterpart for the x86's getStableReg on PPC.
4413 * SDM's First Rule is valid for PowerPC, too: subexpressions can depend on
4414 the value of the destination register.
-- | Binary operation (PowerPC).  When the second operand is an integer
-- literal that fits the instruction's immediate field (as judged by
-- 'makeImmediate' for this rep and signedness), emit the reg-imm form;
-- otherwise both operands are evaluated into registers.
trivialCode rep signed instr x (CmmLit (CmmInt y _))
    | Just imm <- makeImmediate rep signed y
    = do
        (reg_x, code_x) <- getSomeReg x
        let gen dst = code_x `snocOL` instr dst reg_x (RIImm imm)
        return (Any rep gen)

-- General case: reg-reg form.
trivialCode rep signed instr x y = do
    (reg_x, code_x) <- getSomeReg x
    (reg_y, code_y) <- getSomeReg y
    let gen dst = code_x `appOL` code_y `snocOL` instr dst reg_x (RIReg reg_y)
    return (Any rep gen)
-- | Binary operation that has no immediate form (PowerPC): both
-- operands are always evaluated into registers before @instr@ is
-- applied, writing the result to the destination register.
trivialCodeNoImm :: MachRep -> (Reg -> Reg -> Reg -> Instr)
                 -> CmmExpr -> CmmExpr -> NatM Register
trivialCodeNoImm rep instr x y = do
    (reg_x, code_x) <- getSomeReg x
    (reg_y, code_y) <- getSomeReg y
    let gen dst = code_x `appOL` code_y `snocOL` instr dst reg_x reg_y
    return (Any rep gen)
-- | Unary operation (PowerPC): evaluate the operand into a register,
-- then apply @instr@ with the destination as first argument.
trivialUCode rep instr x = do
    (src_reg, src_code) <- getSomeReg x
    return $ Any rep (\dst -> src_code `snocOL` instr dst src_reg)
4443 -- There is no "remainder" instruction on the PPC, so we have to do
-- it by hand: divide, multiply the quotient back, and subtract
-- (rem = x - (x `quot` y) * y).
4445 -- The "div" parameter is the division instruction to use (DIVW or DIVWU)
4447 remainderCode :: MachRep -> (Reg -> Reg -> Reg -> Instr)
4448 -> CmmExpr -> CmmExpr -> NatM Register
4449 remainderCode rep div x y = do
4450 (src1, code1) <- getSomeReg x
4451 (src2, code2) <- getSomeReg y
-- NOTE(review): some lines of the instruction list were lost in
-- extraction (presumably the divide into dst and the final subtract);
-- only the multiply-back step is visible below.
4452 let code dst = code1 `appOL` code2 `appOL` toOL [
4454 MULLW dst dst (RIReg src2),
4457 return (Any rep code)
4459 #endif /* powerpc_TARGET_ARCH */
4462 -- -----------------------------------------------------------------------------
4463 -- Coercing to/from integer/floating-point...
4465 -- @coerce(Int2FP|FP2Int)@ are more complicated integer/float
4466 -- conversions. We have to store temporaries in memory to move
4467 -- between the integer and the floating point register sets.
4469 -- @coerceDbl2Flt@ and @coerceFlt2Dbl@ are done this way because we
4470 -- pretend, on sparc at least, that double and float regs are separate
4471 -- kinds, so the value has to be computed into one kind before being
4472 -- explicitly "converted" to live in the other kind.
4474 coerceInt2FP :: MachRep -> MachRep -> CmmExpr -> NatM Register
4475 coerceFP2Int :: MachRep -> MachRep -> CmmExpr -> NatM Register
4477 #if sparc_TARGET_ARCH
4478 coerceDbl2Flt :: CmmExpr -> NatM Register
4479 coerceFlt2Dbl :: CmmExpr -> NatM Register
4482 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
4484 #if alpha_TARGET_ARCH
-- NOTE(review): the defining LHS (presumably @coerceInt2FP ... x@) was
-- lost in extraction; body preserved as found.  Moves an integer value
-- into a floating-point register (Alpha) by going through memory: the
-- visible LD reloads a word from the stack (spRel 0) into the FP
-- destination; the matching store and the convert instruction appear to
-- be on dropped lines -- TODO confirm against original source.
4487 = getRegister x `thenNat` \ register ->
4488 getNewRegNat IntRep `thenNat` \ reg ->
4490 code = registerCode register reg
4491 src = registerName register reg
4493 code__2 dst = code . mkSeqInstrs [
4495 LD TF dst (spRel 0),
4498 return (Any F64 code__2)
-- NOTE(review): the defining LHS (presumably @coerceFP2Int ... x@) was
-- lost in extraction; body preserved as found.  Moves a floating-point
-- value into an integer register (Alpha) via memory: the visible ST
-- spills the FP temporary to the stack (spRel 0); the conversion and
-- the reload into the integer destination appear to be on dropped
-- lines -- TODO confirm against original source.
4502 = getRegister x `thenNat` \ register ->
4503 getNewRegNat F64 `thenNat` \ tmp ->
4505 code = registerCode register tmp
4506 src = registerName register tmp
4508 code__2 dst = code . mkSeqInstrs [
4510 ST TF tmp (spRel 0),
4513 return (Any IntRep code__2)
4515 #endif /* alpha_TARGET_ARCH */
4517 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
4519 #if i386_TARGET_ARCH
-- | Integer -> floating-point conversion (i386, x87): GITOF produces a
-- single, GITOD a double, selected by the target rep 'to'.
-- NOTE(review): the 'let' introducing the bindings below was lost in
-- extraction; tokens preserved as found.
4521 coerceInt2FP from to x = do
4522 (x_reg, x_code) <- getSomeReg x
4524 opc = case to of F32 -> GITOF; F64 -> GITOD
4525 code dst = x_code `snocOL` opc x_reg dst
4526 -- ToDo: works for non-I32 reps?
4528 return (Any to code)
-- | Floating-point -> integer conversion (i386, x87): GFTOI converts
-- from single, GDTOI from double, selected by the *source* rep 'from'.
-- NOTE(review): the 'let' introducing the bindings below was lost in
-- extraction; tokens preserved as found.
4532 coerceFP2Int from to x = do
4533 (x_reg, x_code) <- getSomeReg x
4535 opc = case from of F32 -> GFTOI; F64 -> GDTOI
4536 code dst = x_code `snocOL` opc x_reg dst
4537 -- ToDo: works for non-I32 reps?
4539 return (Any to code)
4541 #endif /* i386_TARGET_ARCH */
4543 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
4545 #if x86_64_TARGET_ARCH
-- | Floating-point -> integer conversion (x86_64, SSE2).
--
-- BUG FIX: the conversion opcode must be selected by the *source*
-- floating-point rep ('from'), not by the integer destination rep
-- ('to').  'to' is an integer rep here (the function converts FP to
-- int), so @case to of F32 ...; F64 ...@ could never match; the i386
-- version of this function correctly dispatches GFTOI/GDTOI on 'from'.
coerceFP2Int from to x = do
  (x_op, x_code) <- getOperand x -- ToDo: could be a safe operand
  let
        opc  = case from of F32 -> CVTSS2SI; F64 -> CVTSD2SI
        code dst = x_code `snocOL` opc x_op dst
  return (Any to code) -- works even if the destination rep is <I32
-- | Integer -> floating-point conversion (x86_64, SSE2).  The SSE
-- opcode is selected by the destination rep: CVTSI2SS for F32,
-- CVTSI2SD for F64.
coerceInt2FP from to x = do
  (int_op, int_code) <- getOperand x -- ToDo: could be a safe operand
  let convert = case to of F32 -> CVTSI2SS; F64 -> CVTSI2SD
  return $ Any to (\dst -> int_code `snocOL` convert int_op dst)
  -- works even if the destination rep is <I32
-- | Float <-> double conversion (x86_64, SSE2): CVTSD2SS narrows a
-- double to F32, CVTSS2SD widens a single to F64, selected by the
-- destination rep.
coerceFP2FP :: MachRep -> CmmExpr -> NatM Register
coerceFP2FP to x = do
  (src_reg, src_code) <- getSomeReg x
  let convert = case to of F32 -> CVTSD2SS; F64 -> CVTSS2SD
  return $ Any to (\dst -> src_code `snocOL` convert src_reg dst)
4574 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
4576 #if sparc_TARGET_ARCH
-- NOTE(review): the defining LHS (presumably @coerceInt2FP ... pk x@ --
-- 'pk' is referenced below) was lost in extraction; body preserved as
-- found.  Integer -> FP on SPARC: the int and FP register sets can't be
-- moved between directly, so the word is stored below the stack pointer,
-- reloaded into the FP destination, and converted in place with FxTOy.
4579 = getRegister x `thenNat` \ register ->
4580 getNewRegNat IntRep `thenNat` \ reg ->
4582 code = registerCode register reg
4583 src = registerName register reg
4585 code__2 dst = code `appOL` toOL [
4586 ST W src (spRel (-2)),
4587 LD W (spRel (-2)) dst,
4588 FxTOy W (primRepToSize pk) dst dst]
4590 return (Any pk code__2)
-- | FP -> integer conversion (SPARC): convert in the FP register file
-- with FxTOy, then move the result through memory (store below the
-- stack pointer, reload into the integer destination).
-- NOTE(review): a 'let' line appears to have been lost in extraction;
-- tokens preserved as found.
4593 coerceFP2Int fprep x
4594 = ASSERT(fprep == F64 || fprep == F32)
4595 getRegister x `thenNat` \ register ->
4596 getNewRegNat fprep `thenNat` \ reg ->
4597 getNewRegNat F32 `thenNat` \ tmp ->
4599 code = registerCode register reg
4600 src = registerName register reg
4601 code__2 dst = code `appOL` toOL [
4602 FxTOy (primRepToSize fprep) W src tmp,
4603 ST W tmp (spRel (-2)),
4604 LD W (spRel (-2)) dst]
4606 return (Any IntRep code__2)
-- NOTE(review): the defining LHS (presumably @coerceDbl2Flt x@, given
-- the DF -> F conversion below) was lost in extraction; body preserved
-- as found.  Narrows a double to a single with FxTOy DF F.
4610 = getRegister x `thenNat` \ register ->
4611 getNewRegNat F64 `thenNat` \ tmp ->
4612 let code = registerCode register tmp
4613 src = registerName register tmp
-- The 'in return (Any ...' wrapper appears to be on dropped lines.
4616 (\dst -> code `snocOL` FxTOy DF F src dst))
-- NOTE(review): the defining LHS (presumably @coerceFlt2Dbl x@, given
-- the F -> DF conversion below) was lost in extraction; body preserved
-- as found.  Widens a single to a double with FxTOy F DF.
4620 = getRegister x `thenNat` \ register ->
4621 getNewRegNat F32 `thenNat` \ tmp ->
4622 let code = registerCode register tmp
4623 src = registerName register tmp
-- The 'in return (Any ...' wrapper appears to be on dropped lines.
4626 (\dst -> code `snocOL` FxTOy F DF src dst))
4628 #endif /* sparc_TARGET_ARCH */
4630 #if powerpc_TARGET_ARCH
-- | Integer -> floating-point conversion (PowerPC), which has no direct
-- int->FP instruction (pre-POWER5).  Uses the classic magic-constant
-- trick: build the double whose halves are 0x43300000 : (x XOR
-- 0x80000000) -- i.e. 2^52 + 2^31 + x interpreted unsigned -- on the
-- stack, load it as F64, and FSUB the constant 0x43300000_80000000
-- (= 2^52 + 2^31, emitted as static data at a fresh label) to recover
-- x as a double.  XORIS ... 0x8000 flips the sign bit of the high
-- halfword; LIS 0x4330 forms the exponent word.
-- NOTE(review): several lines were lost in extraction (the 'let', the
-- static-data/LDATA wrapper around the two CmmStaticLits, the load of
-- the constant via 'addr', and the fall-through cases of maybe_exts /
-- maybe_frsp, presumably 'nilOL'); tokens preserved as found.
4631 coerceInt2FP fromRep toRep x = do
4632 (src, code) <- getSomeReg x
4633 lbl <- getNewLabelNat
4634 itmp <- getNewRegNat I32
4635 ftmp <- getNewRegNat F64
4636 dynRef <- cmmMakeDynamicReference addImportNat False lbl
4637 Amode addr addr_code <- getAmode dynRef
4639 code' dst = code `appOL` maybe_exts `appOL` toOL [
4642 CmmStaticLit (CmmInt 0x43300000 I32),
4643 CmmStaticLit (CmmInt 0x80000000 I32)],
4644 XORIS itmp src (ImmInt 0x8000),
4645 ST I32 itmp (spRel 3),
4646 LIS itmp (ImmInt 0x4330),
4647 ST I32 itmp (spRel 2),
4648 LD F64 ftmp (spRel 2)
4649 ] `appOL` addr_code `appOL` toOL [
4651 FSUB F64 dst ftmp dst
4652 ] `appOL` maybe_frsp dst
-- Sign-extend sub-word sources first so the XOR trick sees a full I32.
4654 maybe_exts = case fromRep of
4655 I8 -> unitOL $ EXTS I8 src src
4656 I16 -> unitOL $ EXTS I16 src src
-- Round the F64 result down to single precision when the target is F32.
4658 maybe_frsp dst = case toRep of
4659 F32 -> unitOL $ FRSP dst dst
4661 return (Any toRep code')
-- | Floating-point -> integer conversion (PowerPC): convert inside the
-- FP register file, spill the 64-bit FP temporary to the stack, and
-- reload only the low word as the integer result.
-- NOTE(review): the 'let' and the actual convert-to-int instruction
-- (presumably FCTIWZ into 'tmp') were lost in extraction; tokens
-- preserved as found.
4663 coerceFP2Int fromRep toRep x = do
4664 -- the reps don't really matter: F*->F64 and I32->I* are no-ops
4665 (src, code) <- getSomeReg x
4666 tmp <- getNewRegNat F64
4668 code' dst = code `appOL` toOL [
4669 -- convert to int in FP reg
4671 -- store value (64bit) from FP to stack
4672 ST F64 tmp (spRel 2),
4673 -- read low word of value (high word is undefined)
4674 LD I32 dst (spRel 3)]
4675 return (Any toRep code')
4676 #endif /* powerpc_TARGET_ARCH */
4679 -- -----------------------------------------------------------------------------
4680 -- eXTRA_STK_ARGS_HERE
4682 -- We (allegedly) put the first six C-call arguments in registers;
4683 -- where do we start putting the rest of them?
4685 -- Moved from MachInstrs (SDM):
4687 #if alpha_TARGET_ARCH || sparc_TARGET_ARCH
4688 eXTRA_STK_ARGS_HERE :: Int
4690 = IF_ARCH_alpha(0, IF_ARCH_sparc(23, ???))