-- The above warning suppression flag is a temporary kludge.
3 -- While working on this module you are encouraged to remove it and fix
4 -- any warnings in the module. See
5 -- http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#Warnings
8 -----------------------------------------------------------------------------
10 -- Generating machine code (instruction selection)
12 -- (c) The University of Glasgow 1996-2004
14 -----------------------------------------------------------------------------
16 -- This is a big module, but, if you pay attention to
17 -- (a) the sectioning, (b) the type signatures, and
-- (c) the #if blah_TARGET_ARCH things, the
19 -- structure should not be too overwhelming.
28 #include "HsVersions.h"
29 #include "nativeGen/NCG.h"
30 #include "../includes/MachDeps.h"
46 -- Our intermediate code:
49 import PprCmm ( pprExpr )
53 import ClosureInfo ( C_SRT(..) )
56 import StaticFlags ( opt_PIC )
57 import ForeignCall ( CCallConv(..) )
60 import qualified Outputable as O
64 import FastBool ( isFastTrue )
65 import Constants ( wORD_SIZE )
68 import Debug.Trace ( trace )
70 import Control.Monad ( mapAndUnzipM )
71 import Data.Maybe ( fromJust )
-- | Whether the generated code may use SSE2 floating-point instructions.
sse2Enabled :: NatM Bool
#if x86_64_TARGET_ARCH
-- SSE2 is fixed on for x86_64. It would be possible to make it optional,
-- but we'd need to fix at least the foreign call code where the calling
-- convention specifies the use of xmm regs, and possibly other places.
sse2Enabled = return True
-- NOTE(review): the #else branch header and the i386 equation LHS are not
-- visible in this view; on i386 the answer presumably comes from the
-- Opt_SSE2 dynamic flag read below — confirm against full source.
dflags <- getDynFlagsNat
return (dopt Opt_SSE2 dflags)
-- | Run the first action when SSE2 is enabled, the second (x87 path)
-- otherwise.
if_sse2 :: NatM a -> NatM a -> NatM a
-- NOTE(review): the equation LHS (binding b, presumably from
-- sse2Enabled) is not visible in this view.
if b then sse2 else x87
-- | Generate native code for one top-level Cmm item: a procedure is
-- translated block-by-block (and may need PIC-base initialisation);
-- static data passes through untranslated.
-- NOTE(review): the first line(s) of the type signature and the
-- 'case picBaseMb of' scrutinee line are not visible in this view.
     -> NatM [NatCmmTop Instr]

cmmTopCodeGen dynflags (CmmProc info lab (ListGraph blocks)) = do
  (nat_blocks,statics) <- mapAndUnzipM basicBlockCodeGen blocks
  picBaseMb <- getPicBaseMaybeNat
  let proc = CmmProc info lab (ListGraph $ concat nat_blocks)
      tops = proc : concat statics
      os   = platformOS $ targetPlatform dynflags
      -- With a PIC base register the proc gets patched to initialise it;
      -- otherwise the tops are returned unchanged.
      Just picBase -> initializePicBase_x86 ArchX86 os picBase tops
      Nothing -> return tops

cmmTopCodeGen _ (CmmData sec dat) = do
  return [CmmData sec dat]  -- no translation, we just use CmmStatic
-- | Generate code for one basic block, re-splitting the instruction
-- stream on NEWBLOCK boundaries and hoisting LDATA out as statics.
-- NOTE(review): the first line of the type signature and the
-- 'let'/'where' introducing the bindings below are not visible here.
     -> NatM ( [NatBasicBlock Instr]

basicBlockCodeGen (BasicBlock id stmts) = do
  instrs <- stmtsToInstrs stmts
  -- code generation may introduce new basic block boundaries, which
  -- are indicated by the NEWBLOCK instruction. We must split up the
  -- instruction stream into basic blocks again. Also, we extract
  (top,other_blocks,statics) = foldrOL mkBlocks ([],[],[]) instrs

  -- A NEWBLOCK pseudo-instruction starts a fresh basic block.
  mkBlocks (NEWBLOCK id) (instrs,blocks,statics)
    = ([], BasicBlock id instrs : blocks, statics)
  -- LDATA is floated out into the statics accumulator.
  mkBlocks (LDATA sec dat) (instrs,blocks,statics)
    = (instrs, blocks, CmmData sec dat:statics)
  -- Ordinary instructions are prepended to the current block.
  mkBlocks instr (instrs,blocks,statics)
    = (instr:instrs, blocks, statics)

  return (BasicBlock id top : other_blocks, statics)
-- | Translate a list of Cmm statements, concatenating the resulting
-- instruction blocks.
-- NOTE(review): the equation LHS ('stmtsToInstrs stmts') is not
-- visible in this view.
stmtsToInstrs :: [CmmStmt] -> NatM InstrBlock
 = do instrss <- mapM stmtToInstrs stmts
      return (concatOL instrss)
-- | Translate a single Cmm statement into instructions.
-- NOTE(review): the CmmAssign/CmmStore pattern heads, the #else/#endif
-- lines of the CPP conditionals, and the CmmReturn pattern line are not
-- visible in this view.
stmtToInstrs :: CmmStmt -> NatM InstrBlock
stmtToInstrs stmt = case stmt of
    CmmNop       -> return nilOL
    CmmComment s -> return (unitOL (COMMENT s))

    -- Register assignment: dispatch on float / (32-bit-only) word64 / int.
      | isFloatType ty -> assignReg_FltCode size reg src
#if WORD_SIZE_IN_BITS==32
      | isWord64 ty    -> assignReg_I64Code      reg src
      | otherwise      -> assignReg_IntCode size reg src
        where ty = cmmRegType reg
              size = cmmTypeSize ty

    -- Memory store: the same three-way dispatch on the value's type.
      | isFloatType ty -> assignMem_FltCode size addr src
#if WORD_SIZE_IN_BITS==32
      | isWord64 ty    -> assignMem_I64Code      addr src
      | otherwise      -> assignMem_IntCode size addr src
        where ty = cmmExprType src
              size = cmmTypeSize ty

    CmmCall target result_regs args _ _
       -> genCCall target result_regs args

    CmmBranch id         -> genBranch id
    CmmCondBranch arg id -> genCondJump id arg
    CmmSwitch arg ids    -> genSwitch arg ids
    CmmJump arg params   -> genJump arg
    panic "stmtToInstrs: return statement should have been cps'd away"
--------------------------------------------------------------------------------
-- | 'InstrBlock's are the insn sequences generated by the insn selectors.
-- They are really trees of insns to facilitate fast appending, where a
-- left-to-right traversal yields the insns in the correct order.

-- | Condition codes passed up the tree.
-- NOTE(review): the 'data CondCode' header line is not visible here.
  = CondCode Bool Cond InstrBlock

-- | a.k.a "Register64"
-- Reg is the lower 32-bit temporary which contains the result.
-- Use getHiVRegFromLo to find the other VRegUnique.
--
-- Rules of this simplified insn selection game are therefore that
-- the returned Reg may be modified

-- | Register's passed up the tree. If the stix code forces the register
-- to live in a pre-decided machine register, it comes out as @Fixed@;
-- otherwise, it comes out as @Any@, and the parent can decide which
-- register to put it in.
-- NOTE(review): the 'data Register' header line is not visible here.
  = Fixed Size Reg InstrBlock
  | Any   Size (Reg -> InstrBlock)
-- | Replace the 'Size' recorded in a 'Register', keeping the underlying
-- register (or register-consuming code generator) untouched.
swizzleRegisterRep :: Register -> Size -> Register
swizzleRegisterRep register newSize
  = case register of
      Fixed _ reg code -> Fixed newSize reg code
      Any   _ gen      -> Any   newSize gen
-- | Grab the Reg for a CmmReg.  The Bool says whether SSE2 is in use:
-- without SSE2, floating-point locals live in 80-bit (x87) registers.
getRegisterReg :: Bool -> CmmReg -> Reg

getRegisterReg use_sse2 (CmmLocal (LocalReg u pk))
  | isFloatSize sz && not use_sse2 = RegVirtual (mkVirtualReg u FF80)
  | otherwise                      = RegVirtual (mkVirtualReg u sz)
  where
    sz = cmmTypeSize pk

getRegisterReg _ (CmmGlobal mid) =
  case globalRegMaybe mid of
    Just reg -> RegReal reg
    -- By this stage, the only MagicIds remaining should be the
    -- ones which map to a real machine register on this
    -- platform.  Hence the panic otherwise.
    Nothing  -> pprPanic "getRegisterReg-memory" (ppr $ CmmGlobal mid)
-- | Memory addressing modes passed up the tree.
-- NOTE(review): the 'data Amode' header line is not visible here.
  = Amode AddrMode InstrBlock

{-
Now, given a tree (the argument to an CmmLoad) that references memory,
produce a suitable addressing mode.

A Rule of the Game (tm) for Amodes: use of the addr bit must
immediately follow use of the code part, since the code part puts
values in registers which the addr then refers to.  So you can't put
anything in between, lest it overwrite some of those registers.  If
you need to do some other computation between the code part and use of
the addr bit, first store the effective address from the amode in a
temporary, then do the other computation, and then use the temporary:

    ... other computation ...
-}
-- | Check whether an integer will fit in 32 bits.
-- A CmmInt is intended to be truncated to the appropriate
-- number of bits, so here we truncate it to Int64 first.  This is
-- important because e.g. -1 as a CmmInt might be either
-- -1 or 18446744073709551615; both must be accepted.
is32BitInteger :: Integer -> Bool
is32BitInteger i = lo <= i64 && i64 <= hi
  where
    i64 = fromIntegral i :: Int64
    lo  = -0x80000000
    hi  = 0x7fffffff
-- | Convert a BlockId to some CmmStatic data: the block's assembly
-- label when present, a zero word when absent.
jumpTableEntry :: Maybe BlockId -> CmmStatic
jumpTableEntry mBlockId =
  CmmStaticLit $ case mBlockId of
    Nothing      -> CmmInt 0 wordWidth
    Just blockid -> CmmLabel (mkAsmTempLabel (getUnique blockid))
279 -- -----------------------------------------------------------------------------
280 -- General things for putting together code sequences
-- Expand CmmRegOff.  ToDo: should we do it this way around, or convert
-- CmmExprs into CmmRegOff?
mangleIndexTree :: CmmExpr -> CmmExpr
mangleIndexTree (CmmRegOff reg off) =
  let width = typeWidth (cmmRegType reg)
  in CmmMachOp (MO_Add width)
               [CmmReg reg, CmmLit (CmmInt (fromIntegral off) width)]
-- | The dual to getAnyReg: compute an expression into a register, but
-- we don't mind which one it is.
getSomeReg :: CmmExpr -> NatM (Reg, InstrBlock)
-- NOTE(review): the equation LHS and the 'case'/'Fixed' alternative are
-- not visible in this view; the visible alternative handles the 'Any'
-- case by allocating a fresh register of the requested rep.
  r <- getRegister expr
      tmp <- getNewRegNat rep
      return (tmp, code tmp)
-- | Store a 64-bit value (32-bit target) as two 32-bit moves, low word
-- at offset 0 and high word at offset 4 (little-endian layout).
assignMem_I64Code :: CmmExpr -> CmmExpr -> NatM InstrBlock
assignMem_I64Code addrTree valueTree = do
  Amode addr addr_code <- getAmode addrTree
  ChildCode64 vcode rlo <- iselExpr64 valueTree
  -- NOTE(review): the 'let' keyword line is not visible in this view.
      rhi = getHiVRegFromLo rlo

      -- Little-endian store
      mov_lo = MOV II32 (OpReg rlo) (OpAddr addr)
      mov_hi = MOV II32 (OpReg rhi) (OpAddr (fromJust (addrOffset addr 4)))
  return (vcode `appOL` addr_code `snocOL` mov_lo `snocOL` mov_hi)
-- | Assign a 64-bit value (32-bit target) to a local register pair:
-- one 32-bit reg-to-reg move per half.
assignReg_I64Code :: CmmReg -> CmmExpr -> NatM InstrBlock
assignReg_I64Code (CmmLocal (LocalReg u_dst pk)) valueTree = do
  ChildCode64 vcode r_src_lo <- iselExpr64 valueTree
  -- NOTE(review): the 'let' line introducing these bindings is not
  -- visible in this view.
      r_dst_lo = RegVirtual $ mkVirtualReg u_dst II32
      r_dst_hi = getHiVRegFromLo r_dst_lo
      r_src_hi = getHiVRegFromLo r_src_lo
      mov_lo = MOV II32 (OpReg r_src_lo) (OpReg r_dst_lo)
      mov_hi = MOV II32 (OpReg r_src_hi) (OpReg r_dst_hi)
  -- NOTE(review): the 'return (' wrapper line is not visible here.
        vcode `snocOL` mov_lo `snocOL` mov_hi

-- Only local registers can be assigned 64-bit values here.
assignReg_I64Code lvalue valueTree
   = panic "assignReg_I64Code(i386): invalid lvalue"
-- | Instruction selection for 64-bit expressions on a 32-bit target.
-- The result names the low 32-bit register; the high half is found via
-- getHiVRegFromLo.  NOTE(review): several 'let'/'code =' plumbing lines
-- are elided from this view, as marked below.
iselExpr64 :: CmmExpr -> NatM ChildCode64

-- Literal: materialise the two 32-bit halves with immediate moves.
iselExpr64 (CmmLit (CmmInt i _)) = do
  (rlo,rhi) <- getNewRegPairNat II32
  -- NOTE(review): 'let' and 'code = toOL [' lines not visible here.
      r = fromIntegral (fromIntegral i :: Word32)
      q = fromIntegral ((fromIntegral i `shiftR` 32) :: Word32)
          MOV II32 (OpImm (ImmInteger r)) (OpReg rlo),
          MOV II32 (OpImm (ImmInteger q)) (OpReg rhi)
  return (ChildCode64 code rlo)

-- 64-bit load: two 32-bit loads at offsets 0 and 4.
iselExpr64 (CmmLoad addrTree ty) | isWord64 ty = do
   Amode addr addr_code <- getAmode addrTree
   (rlo,rhi) <- getNewRegPairNat II32
   -- NOTE(review): 'let' and the 'return' wrapper lines not visible here.
       mov_lo = MOV II32 (OpAddr addr) (OpReg rlo)
       mov_hi = MOV II32 (OpAddr (fromJust (addrOffset addr 4))) (OpReg rhi)
        ChildCode64 (addr_code `snocOL` mov_lo `snocOL` mov_hi)

-- A 64-bit local register: no code needed.
iselExpr64 (CmmReg (CmmLocal (LocalReg vu ty))) | isWord64 ty
   = return (ChildCode64 nilOL (RegVirtual $ mkVirtualReg vu II32))

-- we handle addition, but rather badly
-- Add of a literal: ADD/ADC with the two immediate halves.
iselExpr64 (CmmMachOp (MO_Add _) [e1, CmmLit (CmmInt i _)]) = do
   ChildCode64 code1 r1lo <- iselExpr64 e1
   (rlo,rhi) <- getNewRegPairNat II32
   -- NOTE(review): 'let' and 'code = code1 `appOL`' lines not visible.
       r = fromIntegral (fromIntegral i :: Word32)
       q = fromIntegral ((fromIntegral i `shiftR` 32) :: Word32)
       r1hi = getHiVRegFromLo r1lo
          toOL [ MOV II32 (OpReg r1lo) (OpReg rlo),
                 ADD II32 (OpImm (ImmInteger r)) (OpReg rlo),
                 MOV II32 (OpReg r1hi) (OpReg rhi),
                 ADC II32 (OpImm (ImmInteger q)) (OpReg rhi) ]
   return (ChildCode64 code rlo)

-- General add: evaluate both operands, then ADD/ADC the halves.
iselExpr64 (CmmMachOp (MO_Add _) [e1,e2]) = do
   ChildCode64 code1 r1lo <- iselExpr64 e1
   ChildCode64 code2 r2lo <- iselExpr64 e2
   (rlo,rhi) <- getNewRegPairNat II32
   -- NOTE(review): 'let' and 'code =' lines not visible here.
       r1hi = getHiVRegFromLo r1lo
       r2hi = getHiVRegFromLo r2lo
          toOL [ MOV II32 (OpReg r1lo) (OpReg rlo),
                 ADD II32 (OpReg r2lo) (OpReg rlo),
                 MOV II32 (OpReg r1hi) (OpReg rhi),
                 ADC II32 (OpReg r2hi) (OpReg rhi) ]
   return (ChildCode64 code rlo)

-- Zero-extension to 64 bits: clear the high register.
iselExpr64 (CmmMachOp (MO_UU_Conv _ W64) [expr]) = do
     r_dst_lo <- getNewRegNat II32
     let r_dst_hi = getHiVRegFromLo r_dst_lo
     -- NOTE(review): the surrounding 'return'/binding lines are elided.
              ChildCode64 (code `snocOL`
                           MOV II32 (OpImm (ImmInt 0)) (OpReg r_dst_hi))

-- Anything else is unsupported on i386.
   = pprPanic "iselExpr64(i386)" (ppr expr)
--------------------------------------------------------------------------------
-- | Select instructions for an expression, producing a 'Register'
-- (either Fixed in a particular reg, or Any with a reg continuation).
getRegister :: CmmExpr -> NatM Register

#if !x86_64_TARGET_ARCH
    -- on x86_64, we have %rip for PicBaseReg, but it's not a full-featured
    -- register, it can only be used for rip-relative addressing.
getRegister (CmmReg (CmmGlobal PicBaseReg))
  -- NOTE(review): the '= do' line is not visible in this view.
      reg <- getPicBaseNat archWordSize
      return (Fixed archWordSize reg nilOL)
-- Reading a register: no code needed, Fixed in the mapped register.
getRegister (CmmReg reg)
  = do use_sse2 <- sse2Enabled
       -- NOTE(review): the 'let' keyword line and the companion
       -- '| otherwise = sz' equation of 'size' are not visible here.
           sz = cmmTypeSize (cmmRegType reg)
           size | not use_sse2 && isFloatSize sz = FF80
       return (Fixed sz (getRegisterReg use_sse2 reg) nilOL)

-- Register-plus-offset: rewrite to an explicit MO_Add first.
getRegister tree@(CmmRegOff _ _)
  = getRegister (mangleIndexTree tree)
#if WORD_SIZE_IN_BITS==32
    -- for 32-bit architectures, support some 64 -> 32 bit conversions:
    -- TO_W_(x), TO_W_(x >> 32)

-- High word: TO_W_(x >> 32) reads the high virtual register directly.
getRegister (CmmMachOp (MO_UU_Conv W64 W32)
             [CmmMachOp (MO_U_Shr W64) [x,CmmLit (CmmInt 32 _)]]) = do
  ChildCode64 code rlo <- iselExpr64 x
  return $ Fixed II32 (getHiVRegFromLo rlo) code

getRegister (CmmMachOp (MO_SS_Conv W64 W32)
             [CmmMachOp (MO_U_Shr W64) [x,CmmLit (CmmInt 32 _)]]) = do
  ChildCode64 code rlo <- iselExpr64 x
  return $ Fixed II32 (getHiVRegFromLo rlo) code

-- Low word: plain truncation reads the low register.
getRegister (CmmMachOp (MO_UU_Conv W64 W32) [x]) = do
  ChildCode64 code rlo <- iselExpr64 x
  return $ Fixed II32 rlo code

getRegister (CmmMachOp (MO_SS_Conv W64 W32) [x]) = do
  ChildCode64 code rlo <- iselExpr64 x
  return $ Fixed II32 rlo code
-- Float literal: SSE2 and x87 take different code paths.
getRegister (CmmLit lit@(CmmFloat f w)) =
  if_sse2 float_const_sse2 float_const_x87
 -- NOTE(review): the 'where', the float_const_sse2 equation head, and
 -- the guard lines for the special-cased constants (presumably 0.0,
 -- and 0.0/1.0 on x87 — confirm against full source) are not visible.
      -- Zero: xor the destination with itself.
      code dst = unitOL (XOR size (OpReg dst) (OpReg dst))
        -- I don't know why there are xorpd, xorps, and pxor instructions.
        -- They all appear to do the same thing --SDM
      return (Any size code)

      -- Other SSE2 literals: load from a memory constant.
      Amode addr code <- memConstant (widthInBytes w) lit
      loadFloatAmode True w addr code

  float_const_x87 = case w of
        let code dst = unitOL (GLDZ dst)
        in  return (Any FF80 code)

        let code dst = unitOL (GLD1 dst)
        in  return (Any FF80 code)

        -- Otherwise load the literal from a memory constant.
        Amode addr code <- memConstant (widthInBytes w) lit
        loadFloatAmode False w addr code
-- catch simple cases of zero- or sign-extended load
getRegister (CmmMachOp (MO_UU_Conv W8 W32) [CmmLoad addr _]) = do
  code <- intLoadCode (MOVZxL II8) addr
  return (Any II32 code)

getRegister (CmmMachOp (MO_SS_Conv W8 W32) [CmmLoad addr _]) = do
  code <- intLoadCode (MOVSxL II8) addr
  return (Any II32 code)

getRegister (CmmMachOp (MO_UU_Conv W16 W32) [CmmLoad addr _]) = do
  code <- intLoadCode (MOVZxL II16) addr
  return (Any II32 code)

getRegister (CmmMachOp (MO_SS_Conv W16 W32) [CmmLoad addr _]) = do
  code <- intLoadCode (MOVSxL II16) addr
  return (Any II32 code)
#if x86_64_TARGET_ARCH

-- catch simple cases of zero- or sign-extended load
getRegister (CmmMachOp (MO_UU_Conv W8 W64) [CmmLoad addr _]) = do
  code <- intLoadCode (MOVZxL II8) addr
  return (Any II64 code)

getRegister (CmmMachOp (MO_SS_Conv W8 W64) [CmmLoad addr _]) = do
  code <- intLoadCode (MOVSxL II8) addr
  return (Any II64 code)

getRegister (CmmMachOp (MO_UU_Conv W16 W64) [CmmLoad addr _]) = do
  code <- intLoadCode (MOVZxL II16) addr
  return (Any II64 code)

getRegister (CmmMachOp (MO_SS_Conv W16 W64) [CmmLoad addr _]) = do
  code <- intLoadCode (MOVSxL II16) addr
  return (Any II64 code)

getRegister (CmmMachOp (MO_UU_Conv W32 W64) [CmmLoad addr _]) = do
  code <- intLoadCode (MOV II32) addr -- 32-bit loads zero-extend
  return (Any II64 code)

getRegister (CmmMachOp (MO_SS_Conv W32 W64) [CmmLoad addr _]) = do
  code <- intLoadCode (MOVSxL II32) addr
  return (Any II64 code)

-- PIC base plus literal displacement: a rip-relative LEA.
getRegister (CmmMachOp (MO_Add W64) [CmmReg (CmmGlobal PicBaseReg),
                                     CmmLit displacement])
  = return $ Any II64 (\dst -> unitOL $
        LEA II64 (OpAddr (ripRel (litToImm displacement))) (OpReg dst))

#endif /* x86_64_TARGET_ARCH */
-- Unary MachOps.
-- NOTE(review): the 'sse2 <- sse2Enabled', 'case mop of', several
-- pattern heads (MO_F_Neg, MO_FF_Conv) and #endif lines are not
-- visible in this view.
getRegister (CmmMachOp mop [x]) = do -- unary MachOps
      -- Float negation: SSE2 vs x87.
      | sse2      -> sse2NegCode w x
      | otherwise -> trivialUFCode FF80 (GNEG FF80) x

      MO_S_Neg w -> triv_ucode NEGI (intSize w)
      MO_Not w   -> triv_ucode NOT  (intSize w)

      -- Narrowing conversions: move into a byte/word-addressable reg.
      MO_UU_Conv W32 W8  -> toI8Reg  W32 x
      MO_SS_Conv W32 W8  -> toI8Reg  W32 x
      MO_UU_Conv W16 W8  -> toI8Reg  W16 x
      MO_SS_Conv W16 W8  -> toI8Reg  W16 x
      MO_UU_Conv W32 W16 -> toI16Reg W32 x
      MO_SS_Conv W32 W16 -> toI16Reg W32 x

#if x86_64_TARGET_ARCH
      MO_UU_Conv W64 W32 -> conversionNop II64 x
      MO_SS_Conv W64 W32 -> conversionNop II64 x
      MO_UU_Conv W64 W16 -> toI16Reg W64 x
      MO_SS_Conv W64 W16 -> toI16Reg W64 x
      MO_UU_Conv W64 W8  -> toI8Reg  W64 x
      MO_SS_Conv W64 W8  -> toI8Reg  W64 x

      -- Same-width conversions are no-ops.
      MO_UU_Conv rep1 rep2 | rep1 == rep2 -> conversionNop (intSize rep1) x
      MO_SS_Conv rep1 rep2 | rep1 == rep2 -> conversionNop (intSize rep1) x

      -- Widening conversions: movzx / movsx.
      MO_UU_Conv W8 W32  -> integerExtend W8 W32 MOVZxL x
      MO_UU_Conv W16 W32 -> integerExtend W16 W32 MOVZxL x
      MO_UU_Conv W8 W16  -> integerExtend W8 W16 MOVZxL x

      MO_SS_Conv W8 W32  -> integerExtend W8 W32 MOVSxL x
      MO_SS_Conv W16 W32 -> integerExtend W16 W32 MOVSxL x
      MO_SS_Conv W8 W16  -> integerExtend W8 W16 MOVSxL x

#if x86_64_TARGET_ARCH
      MO_UU_Conv W8 W64  -> integerExtend W8 W64 MOVZxL x
      MO_UU_Conv W16 W64 -> integerExtend W16 W64 MOVZxL x
      MO_UU_Conv W32 W64 -> integerExtend W32 W64 MOVZxL x
      MO_SS_Conv W8 W64  -> integerExtend W8 W64 MOVSxL x
      MO_SS_Conv W16 W64 -> integerExtend W16 W64 MOVSxL x
      MO_SS_Conv W32 W64 -> integerExtend W32 W64 MOVSxL x
        -- for 32-to-64 bit zero extension, amd64 uses an ordinary movl.
        -- However, we don't want the register allocator to throw it
        -- away as an unnecessary reg-to-reg move, so we keep it in
        -- the form of a movzl and print it as a movl later.

      -- Float width conversions (pattern heads not visible here).
      | sse2      -> coerceFP2FP W64 x
      | otherwise -> conversionNop FF80 x

      | sse2      -> coerceFP2FP W32 x
      | otherwise -> conversionNop FF80 x

      MO_FS_Conv from to -> coerceFP2Int from to x
      MO_SF_Conv from to -> coerceInt2FP from to x

      other -> pprPanic "getRegister" (pprMachOp mop)

  -- Helpers.  NOTE(review): the 'where' keyword line is not visible.
    triv_ucode :: (Size -> Operand -> Instr) -> Size -> NatM Register
    triv_ucode instr size = trivialUCode size (instr size) x

    -- signed or unsigned extension.
    integerExtend :: Width -> Width
                  -> (Size -> Operand -> Operand -> Instr)
                  -> CmmExpr -> NatM Register
    integerExtend from to instr expr = do
        (reg,e_code) <- if from == W8 then getByteReg expr
        -- NOTE(review): the 'else' branch and 'code dst =' lines are
        -- not visible in this view.
              instr (intSize from) (OpReg reg) (OpReg dst)
        return (Any (intSize to) code)

    toI8Reg :: Width -> CmmExpr -> NatM Register
    -- NOTE(review): the equation LHS is not visible here.
      = do codefn <- getAnyReg expr
           return (Any (intSize new_rep) codefn)
        -- HACK: use getAnyReg to get a byte-addressable register.
        -- If the source was a Fixed register, this will add the
        -- mov instruction to put it into the desired destination.
        -- We're assuming that the destination won't be a fixed
        -- non-byte-addressable register; it won't be, because all
        -- fixed registers are word-sized.

    toI16Reg = toI8Reg -- for now

    conversionNop :: Size -> CmmExpr -> NatM Register
    conversionNop new_size expr
        = do e_code <- getRegister expr
             return (swizzleRegisterRep e_code new_size)
-- Dyadic MachOps.
-- NOTE(review): the 'sse2 <- sse2Enabled' and 'case mop of' lines are
-- not visible in this view; several 'let'/'where' plumbing lines inside
-- the helpers below are also elided, as marked.
getRegister e@(CmmMachOp mop [x, y]) = do -- dyadic MachOps
      -- Comparisons produce a condition-code register.
      MO_F_Eq w -> condFltReg EQQ x y
      MO_F_Ne w -> condFltReg NE  x y
      MO_F_Gt w -> condFltReg GTT x y
      MO_F_Ge w -> condFltReg GE  x y
      MO_F_Lt w -> condFltReg LTT x y
      MO_F_Le w -> condFltReg LE  x y

      MO_Eq rep -> condIntReg EQQ x y
      MO_Ne rep -> condIntReg NE  x y

      MO_S_Gt rep -> condIntReg GTT x y
      MO_S_Ge rep -> condIntReg GE  x y
      MO_S_Lt rep -> condIntReg LTT x y
      MO_S_Le rep -> condIntReg LE  x y

      MO_U_Gt rep -> condIntReg GU  x y
      MO_U_Ge rep -> condIntReg GEU x y
      MO_U_Lt rep -> condIntReg LU  x y
      MO_U_Le rep -> condIntReg LEU x y

      -- Float arithmetic: SSE2 vs x87 instruction selection.
      MO_F_Add w  | sse2      -> trivialFCode_sse2 w ADD x y
                  | otherwise -> trivialFCode_x87 w GADD x y
      MO_F_Sub w  | sse2      -> trivialFCode_sse2 w SUB x y
                  | otherwise -> trivialFCode_x87 w GSUB x y
      MO_F_Quot w | sse2      -> trivialFCode_sse2 w FDIV x y
                  | otherwise -> trivialFCode_x87 w GDIV x y
      MO_F_Mul w  | sse2      -> trivialFCode_sse2 w MUL x y
                  | otherwise -> trivialFCode_x87 w GMUL x y

      MO_Add rep -> add_code rep x y
      MO_Sub rep -> sub_code rep x y

      MO_S_Quot rep -> div_code rep True  True  x y
      MO_S_Rem  rep -> div_code rep True  False x y
      MO_U_Quot rep -> div_code rep False True  x y
      MO_U_Rem  rep -> div_code rep False False x y

      MO_S_MulMayOflo rep -> imulMayOflo rep x y

      MO_Mul rep -> triv_op rep IMUL
      MO_And rep -> triv_op rep AND
      MO_Or  rep -> triv_op rep OR
      MO_Xor rep -> triv_op rep XOR

        {- Shift ops on x86s have constraints on their source, it
           either has to be Imm, CL or 1
            => trivialCode is not restrictive enough (sigh.)
        -}
      MO_Shl rep   -> shift_code rep SHL x y {-False-}
      MO_U_Shr rep -> shift_code rep SHR x y {-False-}
      MO_S_Shr rep -> shift_code rep SAR x y {-False-}

      other -> pprPanic "getRegister(x86) - binary CmmMachOp (1)" (pprMachOp mop)

  -- NOTE(review): the 'where' keyword line is not visible here.
    triv_op width instr = trivialCode width op (Just op) x y
                        where op = instr (intSize width)

    -- Signed multiply leaving eax==0 iff the product did not overflow.
    imulMayOflo :: Width -> CmmExpr -> CmmExpr -> NatM Register
    imulMayOflo rep a b = do
         (a_reg, a_code) <- getNonClobberedReg a
         b_code <- getAnyReg b
         -- NOTE(review): the 'let' line, the width alternatives of
         -- shift_amt, and the 'toOL [' wrapper are not visible here.
             shift_amt = case rep of
                           _ -> panic "shift_amt"

             code = a_code `appOL` b_code eax `appOL`
                        IMUL2 size (OpReg a_reg),   -- result in %edx:%eax
                        SAR size (OpImm (ImmInt shift_amt)) (OpReg eax),
                                -- sign extend lower part
                        SUB size (OpReg edx) (OpReg eax)
                                -- compare against upper
                                -- eax==0 if high part == sign extended low part
         return (Fixed size eax code)

    -- NOTE(review): shift_code's type signature is partly elided here.
                  -> (Size -> Operand -> Operand -> Instr)

    {- Case1: shift length as immediate -}
    shift_code width instr x y@(CmmLit lit) = do
          x_code <- getAnyReg x
          -- NOTE(review): the 'let size ='/'code dst' lines are elided.
                   = x_code dst `snocOL`
                     instr size (OpImm (litToImm lit)) (OpReg dst)
          return (Any size code)

    {- Case2: shift length is complex (non-immediate)
      * we cannot do y first *and* put its result in %ecx, because
        %ecx might be clobbered by x.
      * if we do y second, then x cannot be
        in a clobbered reg.  Also, we cannot clobber x's reg
        with the instruction itself.
        - do y first, put its result in a fresh tmp, then copy it to %ecx later
        - do y second and put its result into %ecx.  x gets placed in a fresh
          tmp.  This is likely to be better, because the reg alloc can
          eliminate this reg->reg move here (it won't eliminate the other one,
          because the move is into the fixed %ecx).
    -}
    shift_code width instr x y{-amount-} = do
        x_code <- getAnyReg x
        let size = intSize width
        tmp <- getNewRegNat size
        y_code <- getAnyReg y
        -- NOTE(review): the 'let code =' line and the MOV of the shift
        -- amount into %ecx are not visible in this view.
            code = x_code tmp `appOL`
                   instr size (OpReg ecx) (OpReg tmp)
        return (Fixed size tmp code)

    --------------------
    add_code :: Width -> CmmExpr -> CmmExpr -> NatM Register
    -- Add of a 32-bit-fitting literal uses the three-operand LEA form.
    add_code rep x (CmmLit (CmmInt y _))
        | is32BitInteger y = add_int rep x y
    add_code rep x y = trivialCode rep (ADD size) (Just (ADD size)) x y
      where size = intSize rep

    --------------------
    sub_code :: Width -> CmmExpr -> CmmExpr -> NatM Register
    -- Subtracting a literal is adding its negation, when that fits.
    sub_code rep x (CmmLit (CmmInt y _))
        | is32BitInteger (-y) = add_int rep x (-y)
    sub_code rep x y = trivialCode rep (SUB (intSize rep)) Nothing x y

    -- our three-operand add instruction:
    add_int width x y = do
        (x_reg, x_code) <- getSomeReg x
        -- NOTE(review): the 'let size ='/'code dst = ... LEA' lines are
        -- only partly visible in this view.
            imm = ImmInt (fromInteger y)
               (OpAddr (AddrBaseIndex (EABaseReg x_reg) EAIndexNone imm))
        return (Any size code)

    ----------------------
    -- Division/remainder via IDIV/DIV; dividend widened into %edx:%eax.
    div_code width signed quotient x y = do
          (y_op, y_code) <- getRegOrMem y -- cannot be clobbered
          x_code <- getAnyReg x
          -- NOTE(review): the 'let size =' line, the DIV alternative of
          -- 'instr', and the edx alternative of 'result' are elided.
             widen | signed    = CLTD size
                   | otherwise = XOR size (OpReg edx) (OpReg edx)

             instr | signed    = IDIV

             code = y_code `appOL`
                    toOL [widen, instr size y_op]

             result | quotient = eax

          return (Fixed size result code)
-- Float load (the float-type guard line is not visible in this view).
getRegister (CmmLoad mem pk)
      Amode addr mem_code <- getAmode mem
      use_sse2 <- sse2Enabled
      loadFloatAmode use_sse2 (typeWidth pk) addr mem_code

-- Integer load on i386.
-- NOTE(review): the guard/'do' lines, 'where', the size/width bindings
-- and the width alternatives of 'instr' are not visible in this view.
getRegister (CmmLoad mem pk)
      code <- intLoadCode instr mem
      return (Any size code)
        instr = case width of
            -- We always zero-extend 8-bit loads, if we
            -- can't think of anything better.  This is because
            -- we can't guarantee access to an 8-bit variant of every register
            -- (esi and edi don't have 8-bit variants), so to make things
            -- simpler we do our 8-bit arithmetic with full 32-bit registers.

#if x86_64_TARGET_ARCH
-- Simpler memory load code on x86_64
-- NOTE(review): the guard line is not visible here.
getRegister (CmmLoad mem pk)
      code <- intLoadCode (MOV size) mem
      return (Any size code)
  where size = intSize $ typeWidth pk

-- Literal zero: XOR the destination with itself.
-- NOTE(review): the 'let size =' and 'code dst' binding lines are
-- not visible in this view.
getRegister (CmmLit (CmmInt 0 width))
        -- x86_64: 32-bit xor is one byte shorter, and zero-extends to 64 bits
        adj_size = case size of II64 -> II32; _ -> size
        size1 = IF_ARCH_i386( size, adj_size )
      = unitOL (XOR size1 (OpReg dst) (OpReg dst))
      return (Any size code)

#if x86_64_TARGET_ARCH
  -- optimisation for loading small literals on x86_64: take advantage
  -- of the automatic zero-extension from 32 to 64 bits, because the 32-bit
  -- instruction forms are shorter.
-- NOTE(review): the '= do'/'let imm =' and 'where' lines are elided.
getRegister (CmmLit lit)
  | isWord64 (cmmLitType lit), not (isBigLit lit)
      code dst = unitOL (MOV II32 (OpImm imm) (OpReg dst))
      return (Any II64 code)
    isBigLit (CmmInt i _) = i < 0 || i > 0xffffffff
        -- note1: not the same as (not.is32BitLit), because that checks for
        -- signed literals that fit in 32 bits, but we want unsigned
        -- note2: all labels are small, because we're assuming the
        -- small memory model (see gcc docs, -mcmodel=small).

-- General literal: one immediate MOV of the literal's own size.
-- NOTE(review): the '= do'/'let'/'imm =' lines are not visible here.
getRegister (CmmLit lit)
      size = cmmTypeSize (cmmLitType lit)
      code dst = unitOL (MOV size (OpImm imm) (OpReg dst))
      return (Any size code)

getRegister other = pprPanic "getRegister(x86)" (ppr other)
-- | Compute the address of @mem@, and produce a continuation that,
-- given a destination register, emits the address computation followed
-- by the supplied load instruction from that address into the register.
intLoadCode :: (Operand -> Operand -> Instr) -> CmmExpr
            -> NatM (Reg -> InstrBlock)
intLoadCode instr mem = do
  Amode src mem_code <- getAmode mem
  let load dst = mem_code `snocOL` instr (OpAddr src) (OpReg dst)
  return load
-- Compute an expression into *any* register, adding the appropriate
-- move instruction if necessary.
getAnyReg :: CmmExpr -> NatM (Reg -> InstrBlock)
-- NOTE(review): the equation LHS and the remainder of the body
-- (presumably 'anyReg r') are not visible in this view.
  r <- getRegister expr
-- | Turn a 'Register' into a destination-register continuation: an
-- 'Any' already is one; a 'Fixed' needs an extra reg-to-reg move.
anyReg :: Register -> NatM (Reg -> InstrBlock)
anyReg register = return $ case register of
  Any _ code          -> code
  Fixed rep reg fcode -> \dst -> fcode `snocOL` reg2reg rep reg dst
-- A bit like getSomeReg, but we want a reg that can be byte-addressed.
-- Fixed registers might not be byte-addressable, so we make sure we've
-- got a temporary, inserting an extra reg copy if necessary.
getByteReg :: CmmExpr -> NatM (Reg, InstrBlock)
#if x86_64_TARGET_ARCH
getByteReg = getSomeReg -- all regs are byte-addressable on x86_64
-- NOTE(review): the #else line and the i386 equation LHS / 'case r of'
-- header are not visible in this view.
  r <- getRegister expr
      -- 'Any' case: a fresh virtual reg is byte-addressable.
      tmp <- getNewRegNat rep
      return (tmp, code tmp)
    -- 'Fixed' case: a virtual reg is fine as-is ...
      | isVirtualReg reg -> return (reg,code)
      -- ... but a real reg is copied into a fresh temporary.
        tmp <- getNewRegNat rep
        return (tmp, code `snocOL` reg2reg rep reg tmp)
        -- ToDo: could optimise slightly by checking for byte-addressable
        -- real registers, but that will happen very rarely if at all.
-- Another variant: this time we want the result in a register that cannot
-- be modified by code to evaluate an arbitrary expression.
getNonClobberedReg :: CmmExpr -> NatM (Reg, InstrBlock)
getNonClobberedReg expr = do
  r <- getRegister expr
  -- NOTE(review): the 'case r of' / 'Any rep code ->' header lines are
  -- not visible in this view.
      tmp <- getNewRegNat rep
      return (tmp, code tmp)
      -- Fixed case: copy out of a clobberable (free) real register.
        -- only free regs can be clobbered
      | RegReal (RealRegSingle rr) <- reg
      , isFastTrue (freeReg rr)
          tmp <- getNewRegNat rep
          return (tmp, code `snocOL` reg2reg rep reg tmp)
-- | A size-appropriate register-to-register move: GMOV for 80-bit x87
-- values, an ordinary MOV otherwise.
reg2reg :: Size -> Reg -> Reg -> Instr
-- NOTE(review): the equation LHS ('reg2reg size src dst') is not
-- visible in this view.
  | size == FF80 = GMOV src dst
  | otherwise    = MOV size (OpReg src) (OpReg dst)
--------------------------------------------------------------------------------
-- | Produce an addressing mode for the given address expression.
getAmode :: CmmExpr -> NatM Amode
getAmode tree@(CmmRegOff _ _) = getAmode (mangleIndexTree tree)

#if x86_64_TARGET_ARCH

-- PIC base plus displacement: rip-relative address, no code needed.
getAmode (CmmMachOp (MO_Add W64) [CmmReg (CmmGlobal PicBaseReg),
                                  CmmLit displacement])
    = return $ Amode (ripRel (litToImm displacement)) nilOL

-- This is all just ridiculous, since it carefully undoes
-- what mangleIndexTree has just done.
-- NOTE(review): the guard line (presumably an is32BitLit check) is not
-- visible in this view.
getAmode (CmmMachOp (MO_Sub rep) [x, CmmLit lit@(CmmInt i _)])
  -- ASSERT(rep == II32)???
  = do (x_reg, x_code) <- getSomeReg x
       let off = ImmInt (-(fromInteger i))
       return (Amode (AddrBaseIndex (EABaseReg x_reg) EAIndexNone off) x_code)

-- NOTE(review): the guard line is not visible in this view.
getAmode (CmmMachOp (MO_Add rep) [x, CmmLit lit])
  -- ASSERT(rep == II32)???
  = do (x_reg, x_code) <- getSomeReg x
       let off = litToImm lit
       return (Amode (AddrBaseIndex (EABaseReg x_reg) EAIndexNone off) x_code)

-- Turn (lit1 << n  + lit2) into  (lit2 + lit1 << n) so it will be
-- recognised by the next rule.
-- NOTE(review): the second argument pattern (binding b) is elided.
getAmode (CmmMachOp (MO_Add rep) [a@(CmmMachOp (MO_Shl _) _),
  = getAmode (CmmMachOp (MO_Add rep) [b,a])

-- Base plus shifted index: scale factor 1, 2, 4 or 8 bytes.
getAmode (CmmMachOp (MO_Add rep) [x, CmmMachOp (MO_Shl _)
                                        [y, CmmLit (CmmInt shift _)]])
  | shift == 0 || shift == 1 || shift == 2 || shift == 3
  = x86_complex_amode x y shift 0
-- Base plus shifted index plus constant offset, e.g. x + (y<<2) + 12.
-- The scale must be 0..3 (1/2/4/8 bytes) and the offset must fit in 32
-- bits for a valid x86 addressing mode.
-- Fix: parenthesise the shift alternatives.  '&&' binds tighter than
-- '||' in Haskell, so previously the is32BitInteger check only guarded
-- the shift==3 alternative, letting an over-wide offset through for
-- shifts 0..2 (it would then be truncated by fromIntegral in
-- x86_complex_amode, producing a wrong address).
getAmode (CmmMachOp (MO_Add rep)
                [x, CmmMachOp (MO_Add _)
                        [CmmMachOp (MO_Shl _) [y, CmmLit (CmmInt shift _)],
                         CmmLit (CmmInt offset _)]])
  | (shift == 0 || shift == 1 || shift == 2 || shift == 3)
    && is32BitInteger offset
  = x86_complex_amode x y shift offset
-- Plain base-plus-index addition.
getAmode (CmmMachOp (MO_Add rep) [x,y])
  = x86_complex_amode x y 0 0

-- A small literal is an absolute address.
getAmode (CmmLit lit) | is32BitLit lit
  = return (Amode (ImmAddr (litToImm lit) 0) nilOL)

-- Fallback: compute the address into a register and use it as base.
-- NOTE(review): the clause head ('getAmode expr = do') is not visible
-- in this view.
  (reg,code) <- getSomeReg expr
  return (Amode (AddrBaseIndex (EABaseReg reg) EAIndexNone (ImmInt 0)) code)
-- | Build a base + scaled-index + offset addressing mode.
x86_complex_amode :: CmmExpr -> CmmExpr -> Integer -> Integer -> NatM Amode
x86_complex_amode base index shift offset
  = do (x_reg, x_code) <- getNonClobberedReg base
        -- x must be in a temp, because it has to stay live over y_code
        -- we could compare x_reg and y_reg and do something better here...
       (y_reg, y_code) <- getSomeReg index
       -- NOTE(review): the 'let' line is not visible in this view; note
       -- that this 'base' (the scale factor derived from shift) shadows
       -- the parameter of the same name.
           code = x_code `appOL` y_code
           base = case shift of 0 -> 1; 1 -> 2; 2 -> 4; 3 -> 8
       return (Amode (AddrBaseIndex (EABaseReg x_reg) (EAIndex y_reg base) (ImmInt (fromIntegral offset)))
       -- NOTE(review): the final argument (the code) continues on a line
       -- not visible in this view.
-- -----------------------------------------------------------------------------
-- getOperand: sometimes any operand will do.

-- getNonClobberedOperand: the value of the operand will remain valid across
-- the computation of an arbitrary expression, unless the expression
-- is computed directly into a register which the operand refers to
-- (see trivialCode where this function is used for an example).

getNonClobberedOperand :: CmmExpr -> NatM (Operand, InstrBlock)

-- A suitable (non-zero) float literal becomes a memory operand under SSE2.
-- NOTE(review): the 'then do' / 'else do' branch headers in both clauses
-- below are not visible in this view.
getNonClobberedOperand (CmmLit lit) = do
  use_sse2 <- sse2Enabled
  if use_sse2 && isSuitableFloatingPointLit lit
    let CmmFloat _ w = lit
    Amode addr code <- memConstant (widthInBytes w) lit
    return (OpAddr addr, code)

    -- A small non-float literal can be an immediate operand.
    if is32BitLit lit && not (isFloatType (cmmLitType lit))
      then return (OpImm (litToImm lit), nilOL)
      else getNonClobberedOperand_generic (CmmLit lit)

-- A load can be a memory operand, provided none of its amode registers
-- can be clobbered; otherwise the effective address is saved into a
-- fresh temporary first (via LEA).
getNonClobberedOperand (CmmLoad mem pk) = do
  use_sse2 <- sse2Enabled
  if (not (isFloatType pk) || use_sse2)
      && IF_ARCH_i386(not (isWord64 pk), True)
    Amode src mem_code <- getAmode mem
    -- NOTE(review): the binding of (src', save_code) is elided here.
    if (amodeCouldBeClobbered src)
        tmp <- getNewRegNat archWordSize
        return (AddrBaseIndex (EABaseReg tmp) EAIndexNone (ImmInt 0),
                unitOL (LEA II32 (OpAddr src) (OpReg tmp)))
    return (OpAddr src', save_code `appOL` mem_code)
    -- Otherwise fall back to a register operand.
    getNonClobberedOperand_generic (CmmLoad mem pk)

getNonClobberedOperand e = getNonClobberedOperand_generic e
-- | Fallback: evaluate the expression into a non-clobberable register
-- and use that register as the operand.
getNonClobberedOperand_generic :: CmmExpr -> NatM (Operand, InstrBlock)
getNonClobberedOperand_generic e =
  fmap (\(reg, code) -> (OpReg reg, code)) (getNonClobberedReg e)
-- | True if any register mentioned in the amode is a free real register,
-- which code for an arbitrary expression could clobber.
amodeCouldBeClobbered :: AddrMode -> Bool
amodeCouldBeClobbered amode = any regClobbered (addrModeRegs amode)
-- NOTE(review): the 'where' keyword line is not visible in this view.
regClobbered (RegReal (RealRegSingle rr)) = isFastTrue (freeReg rr)
regClobbered _ = False
-- getOperand: the operand is not required to remain valid across the
-- computation of an arbitrary expression.
getOperand :: CmmExpr -> NatM (Operand, InstrBlock)

-- NOTE(review): the "then do"/"else do" lines of the conditionals in the
-- next two equations were lost in extraction; only surviving lines below.

-- Literals: a non-zero float literal with SSE2 becomes a memory operand on
-- a constant; a 32-bit-representable integer literal becomes an immediate.
getOperand (CmmLit lit) = do
  use_sse2 <- sse2Enabled
  if (use_sse2 && isSuitableFloatingPointLit lit)
    let CmmFloat _ w = lit
    Amode addr code <- memConstant (widthInBytes w) lit
    return (OpAddr addr, code)
  if is32BitLit lit && not (isFloatType (cmmLitType lit))
    then return (OpImm (litToImm lit), nilOL)
    else getOperand_generic (CmmLit lit)

-- Loads: use the memory operand directly when the type allows it
-- (no x87 floats; no 64-bit loads on i386).
getOperand (CmmLoad mem pk) = do
  use_sse2 <- sse2Enabled
  if (not (isFloatType pk) || use_sse2) && IF_ARCH_i386(not (isWord64 pk), True)
    Amode src mem_code <- getAmode mem
    return (OpAddr src, mem_code)
  getOperand_generic (CmmLoad mem pk)

-- anything else: compute into a register
getOperand e = getOperand_generic e
-- fallback for getOperand: compute the expression into some register and
-- use that register as the operand
getOperand_generic expr =
  getSomeReg expr >>= \(r, instrs) -> return (OpReg r, instrs)
-- | Can this expression be used directly as an x86 operand, i.e. without
-- first being computed into a register?  Loads always can; literals can
-- when they fit in an immediate or are a usable float constant.
isOperand :: CmmExpr -> Bool
isOperand (CmmLoad _ _) = True
isOperand (CmmLit lit)  = is32BitLit lit
                          || isSuitableFloatingPointLit lit
-- NOTE(review): the final catch-all equation (presumably
-- "isOperand _ = False") is not visible here -- lost in extraction.
-- | Place a literal in a static data block and return an amode addressing
-- it: RIP-relative on x86_64, via a dynamic reference elsewhere.
-- NOTE(review): the CPP "#else"/"#endif" lines and the "let code = ..."
-- that emits the LDATA were lost in extraction; both branches' surviving
-- lines appear below.
memConstant :: Int -> CmmLit -> NatM Amode
memConstant align lit = do
#ifdef x86_64_TARGET_ARCH
  lbl <- getNewLabelNat
  let addr = ripRel (ImmCLbl lbl)
  lbl <- getNewLabelNat
  dflags <- getDynFlagsNat
  dynRef <- cmmMakeDynamicReference dflags addImportNat DataReference lbl
  Amode addr addr_code <- getAmode dynRef
  return (Amode addr code)
-- | Load a float from the given amode: MOV into an xmm register under SSE2,
-- otherwise an x87 GLD (result register is then the 80-bit FF80).
-- NOTE(review): the "if use_sse2" line of the inner conditional was lost in
-- extraction.
loadFloatAmode :: Bool -> Width -> AddrMode -> InstrBlock -> NatM Register
loadFloatAmode use_sse2 w addr addr_code = do
  let size = floatSize w
      code dst = addr_code `snocOL`
        then MOV size (OpAddr addr) (OpReg dst)
        else GLD size addr dst
  return (Any (if use_sse2 then size else FF80) code)
-- if we want a floating-point literal as an operand, we can
-- use it directly from memory.  However, if the literal is
-- zero, we're better off generating it into a register using
-- a cheaper instruction sequence, so zero is deemed unsuitable here.
isSuitableFloatingPointLit (CmmFloat f _) = f /= 0.0
isSuitableFloatingPointLit _ = False
-- | Get either a memory operand (when the load can be used in place) or a
-- non-clobberable register for the expression.
-- NOTE(review): the "then"/"else" lines of the conditional were lost in
-- extraction; the duplicated register case below is the else-branch of the
-- load equation followed by the catch-all equation.
getRegOrMem :: CmmExpr -> NatM (Operand, InstrBlock)
getRegOrMem e@(CmmLoad mem pk) = do
  use_sse2 <- sse2Enabled
  if (not (isFloatType pk) || use_sse2) && IF_ARCH_i386(not (isWord64 pk), True)
    Amode src mem_code <- getAmode mem
    return (OpAddr src, mem_code)
    (reg, code) <- getNonClobberedReg e
    return (OpReg reg, code)
  (reg, code) <- getNonClobberedReg e
  return (OpReg reg, code)
-- On x86_64, a 64-bit integer literal is only usable as an immediate when
-- it fits in 32 bits (sign-extended by the instruction encoding).
-- NOTE(review): the remaining equations of is32BitLit (and the matching
-- #endif) are not visible here -- lost in extraction.
#if x86_64_TARGET_ARCH
is32BitLit (CmmInt i W64) = is32BitInteger i
-- assume that labels are in the range 0-2^31-1: this assumes the
-- small memory model (see gcc docs, -mcmodel=small).
-- Set up a condition code for a conditional branch.

getCondCode :: CmmExpr -> NatM CondCode

-- yes, they really do seem to want exactly the same!

-- Dispatch on the comparison MachOp: float comparisons go through
-- condFltCode, integer ones through condIntCode (signed and unsigned
-- pick different condition codes).
-- NOTE(review): the "= case mop of" line introducing the dispatch was lost
-- in extraction.
getCondCode (CmmMachOp mop [x, y])
      MO_F_Eq W32 -> condFltCode EQQ x y
      MO_F_Ne W32 -> condFltCode NE x y
      MO_F_Gt W32 -> condFltCode GTT x y
      MO_F_Ge W32 -> condFltCode GE x y
      MO_F_Lt W32 -> condFltCode LTT x y
      MO_F_Le W32 -> condFltCode LE x y

      MO_F_Eq W64 -> condFltCode EQQ x y
      MO_F_Ne W64 -> condFltCode NE x y
      MO_F_Gt W64 -> condFltCode GTT x y
      MO_F_Ge W64 -> condFltCode GE x y
      MO_F_Lt W64 -> condFltCode LTT x y
      MO_F_Le W64 -> condFltCode LE x y

      MO_Eq rep -> condIntCode EQQ x y
      MO_Ne rep -> condIntCode NE x y

      MO_S_Gt rep -> condIntCode GTT x y
      MO_S_Ge rep -> condIntCode GE x y
      MO_S_Lt rep -> condIntCode LTT x y
      MO_S_Le rep -> condIntCode LE x y

      MO_U_Gt rep -> condIntCode GU x y
      MO_U_Ge rep -> condIntCode GEU x y
      MO_U_Lt rep -> condIntCode LU x y
      MO_U_Le rep -> condIntCode LEU x y

      other -> pprPanic "getCondCode(x86,x86_64,sparc)" (ppr (CmmMachOp mop [x,y]))

getCondCode other = pprPanic "getCondCode(2)(x86,sparc)" (ppr other)
-- @cond(Int|Flt)Code@: Turn a boolean expression into a condition, to be
-- passed back up the tree.

condIntCode :: Cond -> CmmExpr -> CmmExpr -> NatM CondCode

-- NOTE(review): a few "let"/"= do" lines in the equations below were lost
-- in extraction; only the surviving lines are reproduced.

-- memory vs immediate: compare the loaded word against the literal in place
condIntCode cond (CmmLoad x pk) (CmmLit lit) | is32BitLit lit = do
    Amode x_addr x_code <- getAmode x
    code = x_code `snocOL`
              CMP (cmmTypeSize pk) (OpImm imm) (OpAddr x_addr)
    return (CondCode False cond code)

-- anything vs zero, using a mask
-- TODO: Add some sanity checking!!!!
condIntCode cond (CmmMachOp (MO_And rep) [x,o2]) (CmmLit (CmmInt 0 pk))
    | (CmmLit lit@(CmmInt mask pk2)) <- o2, is32BitLit lit
    (x_reg, x_code) <- getSomeReg x
    code = x_code `snocOL`
              TEST (intSize pk) (OpImm (ImmInteger mask)) (OpReg x_reg)
    return (CondCode False cond code)

-- anything vs zero: TEST the register against itself instead of comparing
condIntCode cond x (CmmLit (CmmInt 0 pk)) = do
    (x_reg, x_code) <- getSomeReg x
    code = x_code `snocOL`
              TEST (intSize pk) (OpReg x_reg) (OpReg x_reg)
    return (CondCode False cond code)

-- anything vs operand
condIntCode cond x y | isOperand y = do
    (x_reg, x_code) <- getNonClobberedReg x
    (y_op, y_code) <- getOperand y
    code = x_code `appOL` y_code `snocOL`
              CMP (cmmTypeSize (cmmExprType x)) y_op (OpReg x_reg)
    return (CondCode False cond code)

-- anything vs anything
condIntCode cond x y = do
    (y_reg, y_code) <- getNonClobberedReg y
    (x_op, x_code) <- getRegOrMem x
    code = y_code `appOL`
              CMP (cmmTypeSize (cmmExprType x)) (OpReg y_reg) x_op
    return (CondCode False cond code)
--------------------------------------------------------------------------------
condFltCode :: Cond -> CmmExpr -> CmmExpr -> NatM CondCode

-- Float comparisons: x87 uses GCMP (which folds the condition into the
-- test itself), SSE2 uses an ordinary CMP on xmm registers.
-- NOTE(review): the "where" line and the "condFltCode_x87 =" header, plus
-- some "let" lines, were lost in extraction.
condFltCode cond x y
  = if_sse2 condFltCode_sse2 condFltCode_x87
  = ASSERT(cond `elem` ([EQQ, NE, LE, LTT, GE, GTT])) do
    (x_reg, x_code) <- getNonClobberedReg x
    (y_reg, y_code) <- getSomeReg y
    use_sse2 <- sse2Enabled
    code = x_code `appOL` y_code `snocOL`
              GCMP cond x_reg y_reg
    -- The GCMP insn does the test and sets the zero flag if comparable
    -- and true.  Hence we always supply EQQ as the condition to test.
    return (CondCode True EQQ code)

  -- in the SSE2 comparison ops (ucomiss, ucomisd) the left arg may be
  -- an operand, but the right must be a reg.  We can probably do better
  -- than this general case...
  condFltCode_sse2 = do
    (x_reg, x_code) <- getNonClobberedReg x
    (y_op, y_code) <- getOperand y
    code = x_code `appOL`
              CMP (floatSize $ cmmExprWidth x) y_op (OpReg x_reg)
    -- NB(1): we need to use the unsigned comparison operators on the
    -- result of this comparison.
    return (CondCode True (condToUnsigned cond) code)
1337 -- -----------------------------------------------------------------------------
1338 -- Generating assignments
1340 -- Assignments are really at the heart of the whole code generation
1341 -- business. Almost all top-level nodes of any real importance are
1342 -- assignments, which correspond to loads, stores, or register
1343 -- transfers. If we're really lucky, some of the register transfers
1344 -- will go away, because we can use the destination register to
1345 -- complete the code generation for the right hand side. This only
1346 -- fails when the right hand side is forced into a fixed register
1347 -- (e.g. the result of a call).
-- integer assignments: to a memory location, and to a register
assignMem_IntCode :: Size -> CmmExpr -> CmmExpr -> NatM InstrBlock
assignReg_IntCode :: Size -> CmmReg -> CmmExpr -> NatM InstrBlock

-- floating-point assignments: to a memory location, and to a register
assignMem_FltCode :: Size -> CmmExpr -> CmmExpr -> NatM InstrBlock
assignReg_FltCode :: Size -> CmmReg -> CmmExpr -> NatM InstrBlock
-- integer assignment to memory

-- specific case of adding/subtracting an integer to a particular address.
-- ToDo: catch other cases where we can use an operation directly on a memory
-- address.
-- NOTE(review): a few lines here (the "return code" of the first equation,
-- the "let" introducing code, and the catch-all of check) were lost in
-- extraction.
assignMem_IntCode pk addr (CmmMachOp op [CmmLoad addr2 _,
                                         CmmLit (CmmInt i _)])
   | addr == addr2, pk /= II64 || is32BitInteger i,
     Just instr <- check op
   = do Amode amode code_addr <- getAmode addr
        let code = code_addr `snocOL`
                   instr pk (OpImm (ImmInt (fromIntegral i))) (OpAddr amode)
  where
        check (MO_Add _) = Just ADD
        check (MO_Sub _) = Just SUB

-- general case: compute the RHS (as an immediate or a stable register),
-- then the address, then store
assignMem_IntCode pk addr src = do
    Amode addr code_addr <- getAmode addr
    (code_src, op_src)   <- get_op_RI src
    code = code_src `appOL`
              MOV pk op_src (OpAddr addr)
        -- NOTE: op_src is stable, so it will still be valid
        -- after code_addr.  This may involve the introduction
        -- of an extra MOV to a temporary register, but we hope
        -- the register allocator will get rid of it.

-- | Return the source either as an immediate (small literals) or computed
-- into a register that will not be clobbered.
get_op_RI :: CmmExpr -> NatM (InstrBlock,Operand)   -- code, operator
get_op_RI (CmmLit lit) | is32BitLit lit
  = return (nilOL, OpImm (litToImm lit))
  = do (reg,code) <- getNonClobberedReg op
       return (code, OpReg reg)
-- Integer assignment into a register.

-- RHS is a load: let intLoadCode choose the best load for the source
-- addressing mode, then aim it at the destination register.
assignReg_IntCode pk reg (CmmLoad src _) = do
  loader <- intLoadCode (MOV pk) src
  return (loader (getRegisterReg False{-no sse2-} reg))

-- general RHS: compute the expression straight into the destination.
assignReg_IntCode pk reg src = do
  mkCode <- getAnyReg src
  return (mkCode (getRegisterReg False{-no sse2-} reg))
-- Floating point assignment to memory
-- NOTE(review): the "let"/"if use_sse2" scaffolding and the final
-- "return code" were lost in extraction; surviving lines below.
assignMem_FltCode pk addr src = do
  (src_reg, src_code) <- getNonClobberedReg src
  Amode addr addr_code <- getAmode addr
  use_sse2 <- sse2Enabled
  code = src_code `appOL`
            -- SSE2 stores with a plain MOV; x87 uses GST
            if use_sse2 then MOV pk (OpReg src_reg) (OpAddr addr)
                        else GST pk src_reg addr
-- Floating point assignment to a register/temporary.  Compute the RHS
-- directly into the destination; the SSE2 flag only affects how the
-- destination CmmReg is mapped onto a machine register.
assignReg_FltCode pk reg src = do
  sse2 <- sse2Enabled
  mkCode <- getAnyReg src
  return (mkCode (getRegisterReg sse2 reg))
-- | Unconditional jump to a computed target: through memory, to an
-- immediate label, or via a register.
-- NOTE(review): the header of the final catch-all equation
-- ("genJump expr = do") was lost in extraction.
genJump :: CmmExpr{-the branch target-} -> NatM InstrBlock

genJump (CmmLoad mem pk) = do
  Amode target code <- getAmode mem
  return (code `snocOL` JMP (OpAddr target))

genJump (CmmLit lit) = do
  return (unitOL (JMP (OpImm (litToImm lit))))

  (reg,code) <- getSomeReg expr
  return (code `snocOL` JMP (OpReg reg))
1442 -- -----------------------------------------------------------------------------
1443 -- Unconditional branches
-- | Emit an unconditional branch to the given basic block.
genBranch :: BlockId -> NatM InstrBlock
genBranch bid = return (toOL (mkJumpInstr bid))
1450 -- -----------------------------------------------------------------------------
1451 -- Conditional jumps
1454 Conditional jumps are always to local labels, so we can use branch
1455 instructions. We peek at the arguments to decide what kind of
1458 I386: First, we have to ensure that the condition
1459 codes are set according to the supplied comparison operation.
   :: BlockId      -- the branch target
   -> CmmExpr      -- the condition on which to branch
-- NOTE(review): the name line of this signature, its result type, and the
-- bodies of plain_test / or_unordered / and_ordered were lost in
-- extraction; surviving lines below.

-- Simple case (integer, or float without SSE2): one JXX on the condition.
-- With SSE2 floats we must also consult the parity flag for unordered
-- (NaN) operands -- see the comment with condFltReg.
genCondJump id bool = do
  CondCode is_float cond cond_code <- getCondCode bool
  use_sse2 <- sse2Enabled
  if not is_float || not use_sse2
    return (cond_code `snocOL` JXX cond id)
    lbl <- getBlockIdNat
    -- see comment with condFltReg
    let code = case cond of
        plain_test = unitOL (
        or_unordered = toOL [
        and_ordered = toOL [
  return (cond_code `appOL` code)
1499 -- -----------------------------------------------------------------------------
1500 -- Generating C calls
1502 -- Now the biggest nightmare---calls. Most of the nastiness is buried in
1503 -- @get_arg@, which moves the arguments to the correct registers/stack
1504 -- locations. Apart from that, the code is easy.
1506 -- (If applicable) Do not fill the delay slots here; you will confuse the
1507 -- register allocator.
1510 :: CmmCallTarget -- function to call
1511 -> HintedCmmFormals -- where to put the result
1512 -> HintedCmmActuals -- arguments (of mixed type)
1515 -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#if i386_TARGET_ARCH

-- A memory write barrier needs no instructions on this target; the
-- equation is kept this late (rather than dropped earlier) so earlier
-- optimisation passes cannot reorder operations across it.
genCCall (CmmPrim MO_WriteBarrier) _ _ = return nilOL
        -- write barrier compiles to no code on x86/x86-64;
        -- we keep it this long in order to prevent earlier optimisations.
-- we only cope with a single result for foreign calls

-- Float primitives: inline x87 instructions (sqrt/sin/cos/tan) when we
-- are on the x87 path, otherwise call out of line.
-- NOTE(review): the conditional selecting between the out-of-line and
-- inline paths, and the "any <- anyReg res" line of the helper, were lost
-- in extraction.
genCCall (CmmPrim op) [CmmHinted r _] args = do
  l1 <- getNewLabelNat
  l2 <- getNewLabelNat
    outOfLineFloatOp op r args
    MO_F32_Sqrt -> actuallyInlineFloatOp GSQRT FF32 args
    MO_F64_Sqrt -> actuallyInlineFloatOp GSQRT FF64 args

    MO_F32_Sin -> actuallyInlineFloatOp (\s -> GSIN s l1 l2) FF32 args
    MO_F64_Sin -> actuallyInlineFloatOp (\s -> GSIN s l1 l2) FF64 args

    MO_F32_Cos -> actuallyInlineFloatOp (\s -> GCOS s l1 l2) FF32 args
    MO_F64_Cos -> actuallyInlineFloatOp (\s -> GCOS s l1 l2) FF64 args

    MO_F32_Tan -> actuallyInlineFloatOp (\s -> GTAN s l1 l2) FF32 args
    MO_F64_Tan -> actuallyInlineFloatOp (\s -> GTAN s l1 l2) FF64 args

    other_op -> outOfLineFloatOp op r args
 where
  -- emit the x87 instruction and move its result into r
  actuallyInlineFloatOp instr size [CmmHinted x _]
        = do res <- trivialUFCode size (instr size) x
             return (any (getRegisterReg False (CmmLocal r)))
-- The general i386 case: push the arguments on the C stack right-to-left,
-- emit the call, pop the argument area (for ccall; stdcall callees pop it
-- themselves), and move the result out of eax/edx, st(0) or xmm0.
-- NOTE(review): numerous lines of this function (CPP #else/#endif pairs,
-- "case target of", various let/where scaffolding) were lost in
-- extraction; only the surviving lines are reproduced below.
genCCall target dest_regs args = do
    let
        sizes = map (arg_size . cmmExprType . hintlessCmm) (reverse args)
#if !darwin_TARGET_OS
        tot_arg_size = sum sizes
        -- Darwin requires the stack to stay 16-byte aligned at the call,
        -- so the argument area is padded up to a multiple of 16 there
        raw_arg_size = sum sizes
        tot_arg_size = roundTo 16 raw_arg_size
        arg_pad_size = tot_arg_size - raw_arg_size
    delta0 <- getDeltaNat
    setDeltaNat (delta0 - arg_pad_size)

    use_sse2 <- sse2Enabled
    push_codes <- mapM (push_arg use_sse2) (reverse args)
    delta <- getDeltaNat

    -- deal with static vs dynamic call targets
    (callinsns,cconv) <-
        CmmCallee (CmmLit (CmmLabel lbl)) conv
           -> -- ToDo: stdcall arg sizes
              return (unitOL (CALL (Left fn_imm) []), conv)
           where fn_imm = ImmCLbl lbl
           -> do { (dyn_r, dyn_c) <- getSomeReg expr
                 ; ASSERT( isWord32 (cmmExprType expr) )
                   return (dyn_c `snocOL` CALL (Right dyn_r) [], conv) }

#if darwin_TARGET_OS
            -- Darwin: allocate the padding before the pushes
            = toOL [SUB II32 (OpImm (ImmInt arg_pad_size)) (OpReg esp),
                    DELTA (delta0 - arg_pad_size)]
              `appOL` concatOL push_codes
            = concatOL push_codes
        call = callinsns `appOL`
                  -- Deallocate parameters after call for ccall;
                  -- but not for stdcall (callee does it)
                  (if cconv == StdCallConv || tot_arg_size==0 then [] else
                     [ADD II32 (OpImm (ImmInt tot_arg_size)) (OpReg esp)])
                  [DELTA (delta + tot_arg_size)]

    setDeltaNat (delta + tot_arg_size)

    let
        -- assign the results, if necessary
        assign_code [] = nilOL
        assign_code [CmmHinted dest _hint]
             -- SSE2 float result: it comes back in st(0), so bounce it
             -- through a stack slot to get it into an xmm register
             then let tmp_amode = AddrBaseIndex (EABaseReg esp)
                  in toOL [ SUB II32 (OpImm (ImmInt b)) (OpReg esp),
                            GST sz fake0 tmp_amode,
                            MOV sz (OpAddr tmp_amode) (OpReg r_dest),
                            ADD II32 (OpImm (ImmInt b)) (OpReg esp)]
             else unitOL (GMOV fake0 r_dest)
          | isWord64 ty = toOL [MOV II32 (OpReg eax) (OpReg r_dest),
                                MOV II32 (OpReg edx) (OpReg r_dest_hi)]
          | otherwise = unitOL (MOV (intSize w) (OpReg eax) (OpReg r_dest))
                ty = localRegType dest
                r_dest_hi = getHiVRegFromLo r_dest
                r_dest = getRegisterReg use_sse2 (CmmLocal dest)
        assign_code many = pprPanic "genCCall.assign_code - too many return values:" (ppr many)

    return (push_code `appOL`
            assign_code dest_regs)

  where
    arg_size :: CmmType -> Int  -- Width in bytes
    arg_size ty = widthInBytes (typeWidth ty)

    -- round x up to the next multiple of a
    roundTo a x | x `mod` a == 0 = x
                | otherwise = x + a - (x `mod` a)

    push_arg :: Bool -> HintedCmmActual {-current argument-}
                    -> NatM InstrBlock  -- code

    -- 64-bit ints are pushed as two 32-bit words, high word first
    push_arg use_sse2 (CmmHinted arg _hint) -- we don't need the hints on x86
      | isWord64 arg_ty = do
        ChildCode64 code r_lo <- iselExpr64 arg
        delta <- getDeltaNat
        setDeltaNat (delta - 8)
        r_hi = getHiVRegFromLo r_lo
        return ( code `appOL`
                 toOL [PUSH II32 (OpReg r_hi), DELTA (delta - 4),
                       PUSH II32 (OpReg r_lo), DELTA (delta - 8),

      -- floats: make room on the stack and store (MOV with SSE2, GST x87)
      | isFloatType arg_ty = do
        (reg, code) <- getSomeReg arg
        delta <- getDeltaNat
        setDeltaNat (delta-size)
        return (code `appOL`
                toOL [SUB II32 (OpImm (ImmInt size)) (OpReg esp),
                      let addr = AddrBaseIndex (EABaseReg esp)
                      size = floatSize (typeWidth arg_ty)
                      then MOV size (OpReg reg) (OpAddr addr)
                      else GST size reg addr

        -- everything else: a single 32-bit PUSH of the operand
        (operand, code) <- getOperand arg
        delta <- getDeltaNat
        setDeltaNat (delta-size)
        return (code `snocOL`
                PUSH II32 operand `snocOL`

      where
         arg_ty = cmmExprType arg
         size = arg_size arg_ty -- Byte size
#elif x86_64_TARGET_ARCH

-- As on i386: a write barrier emits no instructions here.
genCCall (CmmPrim MO_WriteBarrier) _ _ = return nilOL
        -- write barrier compiles to no code on x86/x86-64;
        -- we keep it this long in order to prevent earlier optimisations.

-- On x86_64 there is no inline x87 path: all float primitives are
-- out-of-line calls into libm.
genCCall (CmmPrim op) [CmmHinted r _] args =
  outOfLineFloatOp op r args
-- The general x86_64 case: pass as many arguments as possible in integer
-- and SSE registers, push the overflow on the stack (keeping %rsp 16-byte
-- aligned at the call), set %al to the number of SSE registers used (the
-- varargs convention), call, pop, and move the result out of rax/xmm0.
-- NOTE(review): several lines of this function ("case target of",
-- "CmmCallee expr conv", toOL brackets, DELTA entries) were lost in
-- extraction; only the surviving lines are reproduced below.
genCCall target dest_regs args = do
        -- load up the register arguments
    (stack_args, aregs, fregs, load_args_code)
         <- load_args args allArgRegs allFPArgRegs nilOL

    let
        fp_regs_used = reverse (drop (length fregs) (reverse allFPArgRegs))
        int_regs_used = reverse (drop (length aregs) (reverse allArgRegs))
        arg_regs = [eax] ++ int_regs_used ++ fp_regs_used
                -- for annotating the call instruction with
        sse_regs = length fp_regs_used
        tot_arg_size = arg_size * length stack_args

        -- On entry to the called function, %rsp should be aligned
        -- on a 16-byte boundary +8 (i.e. the first stack arg after
        -- the return address is 16-byte aligned).  In STG land
        -- %rsp is kept 16-byte aligned (see StgCRun.c), so we just
        -- need to make sure we push a multiple of 16-bytes of args,
        -- plus the return address, to get the correct alignment.
        -- Urg, this is hard.  We need to feed the delta back into
        -- the arg pushing code.
    (real_size, adjust_rsp) <-
        if tot_arg_size `rem` 16 == 0
            then return (tot_arg_size, nilOL)
            else do -- we need to adjust...
                delta <- getDeltaNat
                setDeltaNat (delta-8)
                return (tot_arg_size+8, toOL [
                                SUB II64 (OpImm (ImmInt 8)) (OpReg rsp),

        -- push the stack args, right to left
    push_code <- push_args (reverse stack_args) nilOL
    delta <- getDeltaNat

    -- deal with static vs dynamic call targets
    (callinsns,cconv) <-
        CmmCallee (CmmLit (CmmLabel lbl)) conv
           -> -- ToDo: stdcall arg sizes
              return (unitOL (CALL (Left fn_imm) arg_regs), conv)
           where fn_imm = ImmCLbl lbl
           -> do (dyn_r, dyn_c) <- getSomeReg expr
                 return (dyn_c `snocOL` CALL (Right dyn_r) arg_regs, conv)

        -- The x86_64 ABI requires us to set %al to the number of SSE2
        -- registers that contain arguments, if the called routine
        -- is a varargs function.  We don't know whether it's a
        -- varargs function or not, so we have to assume it is.

        -- It's not safe to omit this assignment, even if the number
        -- of SSE2 regs in use is zero.  If %al is larger than 8
        -- on entry to a varargs function, seg faults ensue.
        assign_eax n = unitOL (MOV II32 (OpImm (ImmInt n)) (OpReg eax))

    let call = callinsns `appOL`
                  -- Deallocate parameters after call for ccall;
                  -- but not for stdcall (callee does it)
                  (if cconv == StdCallConv || real_size==0 then [] else
                     [ADD (intSize wordWidth) (OpImm (ImmInt real_size)) (OpReg esp)])
                  [DELTA (delta + real_size)]

    setDeltaNat (delta + real_size)

    let
        -- assign the results, if necessary
        assign_code [] = nilOL
        assign_code [CmmHinted dest _hint] =
          case typeWidth rep of
                W32 | isFloatType rep -> unitOL (MOV (floatSize W32) (OpReg xmm0) (OpReg r_dest))
                W64 | isFloatType rep -> unitOL (MOV (floatSize W64) (OpReg xmm0) (OpReg r_dest))
                _ -> unitOL (MOV (cmmTypeSize rep) (OpReg rax) (OpReg r_dest))
                rep = localRegType dest
                r_dest = getRegisterReg True (CmmLocal dest)
        assign_code many = panic "genCCall.assign_code many"

    return (load_args_code `appOL`
            assign_eax sse_regs `appOL`
            assign_code dest_regs)
    arg_size = 8 -- always, at the mo

    -- Walk the arguments, claiming an int or FP register for each while
    -- any remain; arguments that do not fit in registers are returned to
    -- be pushed on the stack.
    -- NOTE(review): the register-availability case analysis and some
    -- where-bindings were lost in extraction; surviving lines below.
    load_args :: [CmmHinted CmmExpr]
              -> [Reg]                  -- int regs avail for args
              -> [Reg]                  -- FP regs avail for args
              -> NatM ([CmmHinted CmmExpr],[Reg],[Reg],InstrBlock)
    load_args args [] [] code = return (args, [], [], code)
        -- no more regs to use
    load_args [] aregs fregs code = return ([], aregs, fregs, code)
        -- no more args to push
    load_args ((CmmHinted arg hint) : rest) aregs fregs code
        | isFloatType arg_rep =
          arg_code <- getAnyReg arg
          load_args rest aregs rs (code `appOL` arg_code r)
          arg_code <- getAnyReg arg
          load_args rest rs fregs (code `appOL` arg_code r)
      where
        arg_rep = cmmExprType arg
          (args',ars,frs,code') <- load_args rest aregs fregs code
          return ((CmmHinted arg hint):args', ars, frs, code')

    -- Push the remaining arguments on the stack, 8 bytes each; floats go
    -- via SUB+MOV since there is no float PUSH.
    push_args [] code = return code
    push_args ((CmmHinted arg hint):rest) code
       | isFloatType arg_rep = do
         (arg_reg, arg_code) <- getSomeReg arg
         delta <- getDeltaNat
         setDeltaNat (delta-arg_size)
         let code' = code `appOL` arg_code `appOL` toOL [
                        SUB (intSize wordWidth) (OpImm (ImmInt arg_size)) (OpReg rsp) ,
                        DELTA (delta-arg_size),
                        MOV (floatSize width) (OpReg arg_reg) (OpAddr (spRel 0))]
         push_args rest code'

       -- we only ever generate word-sized function arguments.  Promotion
       -- has already happened: our Int8# type is kept sign-extended
       -- in an Int#, for example.
         ASSERT(width == W64) return ()
         (arg_op, arg_code) <- getOperand arg
         delta <- getDeltaNat
         setDeltaNat (delta-arg_size)
         let code' = code `appOL` arg_code `appOL` toOL [
                        DELTA (delta-arg_size)]
         push_args rest code'
      where
        arg_rep = cmmExprType arg
        width = typeWidth arg_rep
-- fallback for unsupported target architectures
genCCall = panic "X86.genCCAll: not defined"

#endif /* x86_64_TARGET_ARCH */
-- | Compile a floating-point CallishMachOp as an unsafe C call to the
-- corresponding libm routine (sinf/sin, powf/pow, ...).
-- NOTE(review): the "= do" of the equation and the "fn = case mop of"
-- header of the name table were lost in extraction.
outOfLineFloatOp :: CallishMachOp -> CmmFormal -> HintedCmmActuals -> NatM InstrBlock
outOfLineFloatOp mop res args
    dflags <- getDynFlagsNat
    targetExpr <- cmmMakeDynamicReference dflags addImportNat CallReference lbl
    let target = CmmCallee targetExpr CCallConv

    stmtToInstrs (CmmCall target [CmmHinted res NoHint] args CmmUnsafe CmmMayReturn)

  -- Assume we can call these functions directly, and that they're not in a dynamic library.
  -- TODO: Why is this ok?  Under linux this code will be in libm.so
  --       Is it because they're really implemented as a primitive instruction by the assembler??  -- BL 2009/12/31
  lbl = mkForeignLabel fn Nothing ForeignLabelInThisPackage IsFunction

        -- map each MachOp to the C library function implementing it;
        -- the "f" suffix names are the single-precision variants
        MO_F32_Sqrt  -> fsLit "sqrtf"
        MO_F32_Sin   -> fsLit "sinf"
        MO_F32_Cos   -> fsLit "cosf"
        MO_F32_Tan   -> fsLit "tanf"
        MO_F32_Exp   -> fsLit "expf"
        MO_F32_Log   -> fsLit "logf"

        MO_F32_Asin  -> fsLit "asinf"
        MO_F32_Acos  -> fsLit "acosf"
        MO_F32_Atan  -> fsLit "atanf"

        MO_F32_Sinh  -> fsLit "sinhf"
        MO_F32_Cosh  -> fsLit "coshf"
        MO_F32_Tanh  -> fsLit "tanhf"
        MO_F32_Pwr   -> fsLit "powf"

        MO_F64_Sqrt  -> fsLit "sqrt"
        MO_F64_Sin   -> fsLit "sin"
        MO_F64_Cos   -> fsLit "cos"
        MO_F64_Tan   -> fsLit "tan"
        MO_F64_Exp   -> fsLit "exp"
        MO_F64_Log   -> fsLit "log"

        MO_F64_Asin  -> fsLit "asin"
        MO_F64_Acos  -> fsLit "acos"
        MO_F64_Atan  -> fsLit "atan"

        MO_F64_Sinh  -> fsLit "sinh"
        MO_F64_Cosh  -> fsLit "cosh"
        MO_F64_Tanh  -> fsLit "tanh"
        MO_F64_Pwr   -> fsLit "pow"
-- -----------------------------------------------------------------------------
-- Generating a table-branch

genSwitch :: CmmExpr -> [Maybe BlockId] -> NatM InstrBlock

-- NOTE(review): the guard headers of the two equations (the PIC case
-- "| opt_PIC = do" and the non-PIC "| otherwise = do"), the CPP #else
-- lines and "return code" were lost in extraction; surviving lines below.

-- PIC case: the jump table holds label-difference entries relative to the
-- table's own label, so an absolute target is reconstructed at runtime by
-- adding the table base.
    (reg,e_code) <- getSomeReg expr
    lbl <- getNewLabelNat
    dflags <- getDynFlagsNat
    dynRef <- cmmMakeDynamicReference dflags addImportNat DataReference lbl
    (tableReg,t_code) <- getSomeReg $ dynRef
        jumpTable = map jumpTableEntryRel ids

        jumpTableEntryRel Nothing
            = CmmStaticLit (CmmInt 0 wordWidth)
        jumpTableEntryRel (Just blockid)
            = CmmStaticLit (CmmLabelDiffOff blockLabel lbl 0)
            where blockLabel = mkAsmTempLabel (getUnique blockid)

        op = OpAddr (AddrBaseIndex (EABaseReg tableReg)
                                   (EAIndex reg wORD_SIZE) (ImmInt 0))

#if x86_64_TARGET_ARCH
#if darwin_TARGET_OS
    -- on Mac OS X/x86_64, put the jump table in the text section
    -- to work around a limitation of the linker.
    -- ld64 is unable to handle the relocations for
    --     .quad L1 - L0
    -- if L0 is not preceded by a non-anonymous label in its section.
        code = e_code `appOL` t_code `appOL` toOL [
                ADD (intSize wordWidth) op (OpReg tableReg),
                JMP_TBL (OpReg tableReg) [ id | Just id <- ids ],
                LDATA Text (CmmDataLabel lbl : jumpTable)
    -- HACK: On x86_64 binutils<2.17 is only able to generate PC32
    -- relocations, hence we only get 32-bit offsets in the jump
    -- table.  As these offsets are always negative we need to properly
    -- sign extend them to 64-bit.  This hack should be removed in
    -- conjunction with the hack in PprMach.hs/pprDataItem once
    -- binutils 2.17 is standard.
        code = e_code `appOL` t_code `appOL` toOL [
                LDATA ReadOnlyData (CmmDataLabel lbl : jumpTable),
                  (OpAddr (AddrBaseIndex (EABaseReg tableReg)
                                         (EAIndex reg wORD_SIZE) (ImmInt 0)))
                ADD (intSize wordWidth) (OpReg reg) (OpReg tableReg),
                JMP_TBL (OpReg tableReg) [ id | Just id <- ids ]
        code = e_code `appOL` t_code `appOL` toOL [
                LDATA ReadOnlyData (CmmDataLabel lbl : jumpTable),
                ADD (intSize wordWidth) op (OpReg tableReg),
                JMP_TBL (OpReg tableReg) [ id | Just id <- ids ]

-- non-PIC case: the table holds absolute block addresses; index into it
-- directly and jump
    (reg,e_code) <- getSomeReg expr
    lbl <- getNewLabelNat
        jumpTable = map jumpTableEntry ids
        op = OpAddr (AddrBaseIndex EABaseNone (EAIndex reg wORD_SIZE) (ImmCLbl lbl))
        code = e_code `appOL` toOL [
                LDATA ReadOnlyData (CmmDataLabel lbl : jumpTable),
                JMP_TBL op [ id | Just id <- ids ]
-- -----------------------------------------------------------------------------
-- 'condIntReg' and 'condFltReg': condition codes into registers

-- Turn those condition codes into integers now (when they appear on
-- the right hand side of an assignment).

-- (If applicable) Do not fill the delay slots here; you will confuse the
-- register allocator.

condIntReg :: Cond -> CmmExpr -> CmmExpr -> NatM Register

-- SETcc produces only a byte, so materialise it in an 8-bit temp and
-- zero-extend into the 32-bit destination.
-- NOTE(review): the "let" line and the closing bracket of the toOL list
-- were lost in extraction.
condIntReg cond x y = do
  CondCode _ cond cond_code <- condIntCode cond x y
  tmp <- getNewRegNat II8
    code dst = cond_code `appOL` toOL [
                SETCC cond (OpReg tmp),
                MOVZxL II8 (OpReg tmp) (OpReg dst)
  return (Any II32 code)
condFltReg :: Cond -> CmmExpr -> CmmExpr -> NatM Register

-- NOTE(review): the "where", the "condFltReg_x87 = do" header and several
-- "let"/bracket lines were lost in extraction; surviving lines below.

-- x87 path: GCMP already folded the condition into the zero flag, so a
-- single SETcc + zero-extend suffices.
condFltReg cond x y = if_sse2 condFltReg_sse2 condFltReg_x87

    CondCode _ cond cond_code <- condFltCode cond x y
    tmp <- getNewRegNat II8
      code dst = cond_code `appOL` toOL [
                  SETCC cond (OpReg tmp),
                  MOVZxL II8 (OpReg tmp) (OpReg dst)
    return (Any II32 code)

  condFltReg_sse2 = do
    CondCode _ cond cond_code <- condFltCode cond x y
    tmp1 <- getNewRegNat archWordSize
    tmp2 <- getNewRegNat archWordSize

        -- We have to worry about unordered operands (eg. comparisons
        -- against NaN).  If the operands are unordered, the comparison
        -- sets the parity flag, carry flag and zero flag.
        -- All comparisons are supposed to return false for unordered
        -- operands except for !=, which returns true.

        -- Optimisation: we don't have to test the parity flag if we
        -- know the test has already excluded the unordered case: eg >
        -- and >= test for a zero carry flag, which can only occur for
        -- ordered operands.

        -- ToDo: by reversing comparisons we could avoid testing the
        -- parity flag in more cases.

                NE  -> or_unordered dst
                GU  -> plain_test   dst
                GEU -> plain_test   dst
                _   -> and_ordered  dst)

        plain_test dst = toOL [
                    SETCC cond (OpReg tmp1),
                    MOVZxL II8 (OpReg tmp1) (OpReg dst)
        or_unordered dst = toOL [
                    SETCC cond (OpReg tmp1),
                    SETCC PARITY (OpReg tmp2),
                    OR II8 (OpReg tmp1) (OpReg tmp2),
                    MOVZxL II8 (OpReg tmp2) (OpReg dst)
        and_ordered dst = toOL [
                    SETCC cond (OpReg tmp1),
                    SETCC NOTPARITY (OpReg tmp2),
                    AND II8 (OpReg tmp1) (OpReg tmp2),
                    MOVZxL II8 (OpReg tmp2) (OpReg dst)
    return (Any II32 code)
2075 -- -----------------------------------------------------------------------------
2076 -- 'trivial*Code': deal with trivial instructions
2078 -- Trivial (dyadic: 'trivialCode', floating-point: 'trivialFCode',
2079 -- unary: 'trivialUCode', unary fl-pt:'trivialUFCode') instructions.
2080 -- Only look for constants on the right hand side, because that's
2081 -- where the generic optimizer will have put them.
2083 -- Similarly, for unary instructions, we don't have to worry about
2084 -- matching an StInt as the argument, because genericOpt will already
2085 -- have handled the constant-folding.
2089 The Rules of the Game are:
2091 * You cannot assume anything about the destination register dst;
2092 it may be anything, including a fixed reg.
2094 * You may compute an operand into a fixed reg, but you may not
2095 subsequently change the contents of that fixed reg. If you
2096 want to do so, first copy the value either to a temporary
2097 or into dst. You are free to modify dst even if it happens
2098 to be a fixed reg -- that's not your problem.
2100 * You cannot assume that a fixed reg will stay live over an
2101 arbitrary computation. The same applies to the dst reg.
2103 * Temporary regs obtained from getNewRegNat are distinct from
2104 each other and from all other regs, and stay live over
2105 arbitrary computations.
2107 --------------------
2109 SDM's version of The Rules:
2111 * If getRegister returns Any, that means it can generate correct
2112 code which places the result in any register, period. Even if that
2113 register happens to be read during the computation.
2115 Corollary #1: this means that if you are generating code for an
2116 operation with two arbitrary operands, you cannot assign the result
2117 of the first operand into the destination register before computing
2118 the second operand. The second operand might require the old value
2119 of the destination register.
2121 Corollary #2: A function might be able to generate more efficient
2122 code if it knows the destination register is a new temporary (and
2123 therefore not read by any of the sub-computations).
2125 * If getRegister returns Any, then the code it generates may modify only:
2126 (a) fresh temporaries
2127 (b) the destination register
2128 (c) known registers (eg. %ecx is used by shifts)
2129 In particular, it may *not* modify global registers, unless the global
2130 register happens to be the destination register.
-- A commutable (reversible) instruction with a small literal on the LEFT:
-- flip it so the immediate becomes the second operand of revinstr.
-- NOTE(review): the "let code dst" line introducing the code function was
-- lost in extraction.
trivialCode width instr (Just revinstr) (CmmLit lit_a) b
  | is32BitLit lit_a = do
  b_code <- getAnyReg b
     = b_code dst `snocOL`
       revinstr (OpImm (litToImm lit_a)) (OpReg dst)
  return (Any (intSize width) code)

-- general case: defer to the two-operand code generator
trivialCode width instr maybe_revinstr a b
  = genTrivialCode (intSize width) instr a b
-- This is re-used for floating pt instructions too.
-- NOTE(review): the "let code dst" header, the "b_code `appOL`" lines and
-- the "| otherwise" guard were lost in extraction; surviving lines below.
genTrivialCode rep instr a b = do
  (b_op, b_code) <- getNonClobberedOperand b
  a_code <- getAnyReg a
  tmp <- getNewRegNat rep

  -- We want the value of b to stay alive across the computation of a.
  -- But, we want to calculate a straight into the destination register,
  -- because the instruction only has two operands (dst := dst `op` src).
  -- The troublesome case is when the result of b is in the same register
  -- as the destination reg.  In this case, we have to save b in a
  -- new temporary across the computation of a.
     | dst `regClashesWithOp` b_op =
         -- save b's value in tmp before a (possibly) overwrites dst
         unitOL (MOV rep b_op (OpReg tmp)) `appOL`
         instr (OpReg tmp) (OpReg dst)
         instr b_op (OpReg dst)
  return (Any rep code)

-- does this register appear in the operand (directly, or as part of an
-- addressing mode)?
reg `regClashesWithOp` OpReg reg2 = reg == reg2
reg `regClashesWithOp` OpAddr amode = any (==reg) (addrModeRegs amode)
reg `regClashesWithOp` _ = False
-- Unary integer op: compute x into the destination and apply instr there.
-- NOTE(review): the "let code dst = ..." line was lost in extraction.
trivialUCode rep instr x = do
  x_code <- getAnyReg x
  return (Any rep code)
-- Binary x87 float op; all x87 arithmetic is done at the 80-bit FF80 size.
-- NOTE(review): the "code dst = x_code `appOL` y_code `snocOL`" line was
-- lost in extraction.
trivialFCode_x87 width instr x y = do
  (x_reg, x_code) <- getNonClobberedReg x -- these work for float regs too
  (y_reg, y_code) <- getSomeReg y
  let
     size = FF80 -- always, on x87
        instr size x_reg y_reg dst
  return (Any size code)
-- SSE2 binary float op: defer to the generic two-operand code generator,
-- instantiating the instruction at the float size for this width.
trivialFCode_sse2 pk instr x y =
    let sz = floatSize pk
    in  genTrivialCode sz (instr sz) x y
-- Unary float op: compute x into some register, apply instr into dst.
-- NOTE(review): the "let code dst = ..." lines were lost in extraction.
trivialUFCode size instr x = do
  (x_reg, x_code) <- getSomeReg x
  return (Any size code)
--------------------------------------------------------------------------------
-- | Int -> float conversion: x87 GITOF/GITOD (result in FF80), or SSE2
-- CVTSI2SS/CVTSI2SD.
-- NOTE(review): the "where" line and the two branch headers
-- ("coerce_x87 = do" / "coerce_sse2 = do"), plus "let" lines, were lost
-- in extraction; surviving lines below.
coerceInt2FP :: Width -> Width -> CmmExpr -> NatM Register
coerceInt2FP from to x = if_sse2 coerce_sse2 coerce_x87

    (x_reg, x_code) <- getSomeReg x
        opc = case to of W32 -> GITOF; W64 -> GITOD
        code dst = x_code `snocOL` opc x_reg dst
        -- ToDo: works for non-II32 reps?
    return (Any FF80 code)

    (x_op, x_code) <- getOperand x -- ToDo: could be a safe operand
        opc = case to of W32 -> CVTSI2SS; W64 -> CVTSI2SD
        code dst = x_code `snocOL` opc (intSize from) x_op dst
    return (Any (floatSize to) code)
        -- works even if the destination rep is <II32
2234 --------------------------------------------------------------------------------
-- | Convert a float value of width @from@ to an integer of width @to@
-- (truncating).  Dispatches on SSE2 availability: x87 uses GFTOI/GDTOI,
-- SSE2 uses the truncating CVTTSS2SIQ/CVTTSD2SIQ.
-- NOTE(review): the @where@ and @let@ scaffolding lines are not visible
-- in this chunk.
coerceFP2Int :: Width -> Width -> CmmExpr -> NatM Register
coerceFP2Int from to x = if_sse2 coerceFP2Int_sse2 coerceFP2Int_x87
  -- x87 path: dispatch on the SOURCE float width
  coerceFP2Int_x87 = do
  (x_reg, x_code) <- getSomeReg x
  opc = case from of W32 -> GFTOI; W64 -> GDTOI
  code dst = x_code `snocOL` opc x_reg dst
  -- ToDo: works for non-II32 reps?
  return (Any (intSize to) code)

  -- SSE2 path: truncating conversion, sized by the destination int width
  coerceFP2Int_sse2 = do
  (x_op, x_code) <- getOperand x -- ToDo: could be a safe operand
  opc = case from of W32 -> CVTTSS2SIQ; W64 -> CVTTSD2SIQ
  code dst = x_code `snocOL` opc (intSize to) x_op dst
  return (Any (intSize to) code)
  -- works even if the destination rep is <II32
2257 --------------------------------------------------------------------------------
-- | Convert between float widths (SSE2 only): to W32 narrows a double
-- with CVTSD2SS, to W64 widens a single with CVTSS2SD.
-- NOTE(review): the @let@ line binding @opc@/@code@ is not visible in
-- this chunk.
coerceFP2FP :: Width -> CmmExpr -> NatM Register
coerceFP2FP to x = do
  (x_reg, x_code) <- getSomeReg x
  opc = case to of W32 -> CVTSD2SS; W64 -> CVTSS2SD
  code dst = x_code `snocOL` opc x_reg dst
  return (Any (floatSize to) code)
2267 --------------------------------------------------------------------------------
-- | Negate a floating-point value using SSE2: XOR the value with a
-- constant that has only the sign bit set (0x80000000 for single,
-- 0x8000000000000000 for double), flipping the sign.  The mask is
-- placed in memory via 'memConstant' and loaded through a temporary.
-- NOTE(review): the @let@ scaffolding and the close of the @toOL@ list
-- are not visible in this chunk.
sse2NegCode :: Width -> CmmExpr -> NatM Register
sse2NegCode w x = do
  let sz = floatSize w
  x_code <- getAnyReg x
  -- This is how gcc does it, so it can't be that bad:
    const | FF32 <- sz = CmmInt 0x80000000 W32
          | otherwise = CmmInt 0x8000000000000000 W64
  Amode amode amode_code <- memConstant (widthInBytes w) const
  tmp <- getNewRegNat sz
    code dst = x_code dst `appOL` amode_code `appOL` toOL [
      MOV sz (OpAddr amode) (OpReg tmp),
      XOR sz (OpReg tmp) (OpReg dst)
  return (Any sz code)