import RegAlloc.Linear.State
import RegAlloc.Linear.Base
import RegAlloc.Linear.FreeRegs
+import RegAlloc.Liveness
+import Instruction
+import Reg
import BlockId
-import Instrs
-import Regs
-import RegAllocInfo
-import RegLiveness
-import Cmm hiding (RegSet)
-
+import OldCmm hiding (RegSet)
import Digraph
import Outputable
import Unique
-- vregs are in the correct regs for its destination.
--
joinToTargets
- :: BlockMap RegSet -- ^ maps the unique of the blockid to the set of vregs
+ :: Instruction instr
+ => BlockMap RegSet -- ^ maps the unique of the blockid to the set of vregs
-- that are known to be live on the entry to each block.
-> BlockId -- ^ id of the current block
- -> Instr -- ^ branch instr on the end of the source block.
+ -> instr -- ^ branch instr on the end of the source block.
- -> RegM ([NatBasicBlock] -- fresh blocks of fixup code.
- , Instr) -- the original branch instruction, but maybe patched to jump
+ -> RegM ([NatBasicBlock instr] -- fresh blocks of fixup code.
+ , instr) -- the original branch instruction, but maybe patched to jump
-- to a fixup block first.
joinToTargets block_live id instr
-- we only need to worry about jump instructions.
- | not $ isJumpish instr
+ | not $ isJumpishInstr instr
= return ([], instr)
| otherwise
- = joinToTargets' block_live [] id instr (jumpDests instr [])
+ = joinToTargets' block_live [] id instr (jumpDestsOfInstr instr)
-----
joinToTargets'
- :: BlockMap RegSet -- ^ maps the unique of the blockid to the set of vregs
+ :: Instruction instr
+ => BlockMap RegSet -- ^ maps the unique of the blockid to the set of vregs
-- that are known to be live on the entry to each block.
- -> [NatBasicBlock] -- ^ acc blocks of fixup code.
+ -> [NatBasicBlock instr] -- ^ acc blocks of fixup code.
-> BlockId -- ^ id of the current block
- -> Instr -- ^ branch instr on the end of the source block.
+ -> instr -- ^ branch instr on the end of the source block.
-> [BlockId] -- ^ branch destinations still to consider.
- -> RegM ( [NatBasicBlock]
- , Instr)
+ -> RegM ( [NatBasicBlock instr]
+ , instr)
-- no more targets to consider. all done.
joinToTargets' _ new_blocks _ instr []
-- adjust the current assignment to remove any vregs that are not live
-- on entry to the destination block.
- let Just live_set = lookupBlockEnv block_live dest
+ let Just live_set = mapLookup dest block_live
let still_live uniq _ = uniq `elemUniqSet_Directly` live_set
let adjusted_assig = filterUFM_Directly still_live assig
, not (elemUniqSet_Directly reg live_set)
, r <- regsOfLoc loc ]
- case lookupBlockEnv block_assig dest of
+ case mapLookup dest block_assig of
Nothing
-> joinToTargets_first
block_live new_blocks block_id instr dest dests
-- this is the first time we jumped to this block.
joinToTargets_first block_live new_blocks block_id instr dest dests
- block_assig src_assig to_free
+ block_assig src_assig
+ (to_free :: [RealReg])
= do -- free up the regs that are not live on entry to this block.
freeregs <- getFreeRegsR
let freeregs' = foldr releaseReg freeregs to_free
-- remember the current assignment on entry to this block.
- setBlockAssigR (extendBlockEnv block_assig dest
- (freeregs', src_assig))
+ setBlockAssigR (mapInsert dest (freeregs', src_assig) block_assig)
joinToTargets' block_live new_blocks block_id instr dests
--
let sccs = stronglyConnCompFromEdgedVerticesR graph
- -- debugging
-{- pprTrace
+{- -- debugging
+ pprTrace
("joinToTargets: making fixup code")
(vcat [ text " in block: " <> ppr block_id
, text " jmp instruction: " <> ppr instr
-- A the end of the current block we will jump to the fixup one,
-- then that will jump to our original destination.
fixup_block_id <- getUniqueR
- let block = BasicBlock (BlockId fixup_block_id)
- $ fixUpInstrs ++ mkBranchInstr dest
+ let block = BasicBlock (mkBlockId fixup_block_id)
+ $ fixUpInstrs ++ mkJumpInstr dest
{- pprTrace
("joinToTargets: fixup code is:")
-- patch the original branch instruction so it goes to our
-- fixup block instead.
- _ -> let instr' = patchJump instr dest (BlockId fixup_block_id)
+ _ -> let instr' = patchJumpInstr instr
+ (\bid -> if bid == dest
+ then mkBlockId fixup_block_id
+ else bid) -- no change! other destinations of a
+ -- multi-way branch must be left intact.
+
 in joinToTargets' block_live (block : new_blocks) block_id instr' dests
-- destinations. We have eliminated any possibility of single-node
-- cycles in expandNode above.
--
-handleComponent :: Int -> Instr -> SCC (Unique, Loc, [Loc]) -> RegM [Instr]
+handleComponent
+ :: Instruction instr
+ => Int -> instr -> SCC (Unique, Loc, [Loc]) -> RegM [instr]
-- If the graph is acyclic then we won't get the swapping problem below.
-- In this case we can just do the moves directly, and avoid having to
-- require a fixup.
--
handleComponent delta instr
- (CyclicSCC ( (vreg, InReg sreg, [InReg dreg]) : rest))
+ (CyclicSCC ( (vreg, InReg sreg, (InReg dreg: _)) : rest))
+ -- dest list may have more than one element, if the reg is also InMem.
= do
-- spill the source into its slot
(instrSpill, slot)
- <- spillR (RealReg sreg) vreg
+ <- spillR (RegReal sreg) vreg
-- reload into destination reg
- instrLoad <- loadR (RealReg dreg) slot
+ instrLoad <- loadR (RegReal dreg) slot
remainingFixUps <- mapM (handleComponent delta instr)
(stronglyConnCompFromEdgedVerticesR rest)
-- | Move a vreg between these two locations.
--
makeMove
- :: Int -- ^ current C stack delta.
+ :: Instruction instr
+ => Int -- ^ current C stack delta.
-> Unique -- ^ unique of the vreg that we're moving.
-> Loc -- ^ source location.
-> Loc -- ^ destination location.
- -> RegM Instr -- ^ move instruction.
+ -> RegM instr -- ^ move instruction.
makeMove _ vreg (InReg src) (InReg dst)
= do recordSpill (SpillJoinRR vreg)
- return $ mkRegRegMoveInstr (RealReg src) (RealReg dst)
+ return $ mkRegRegMoveInstr (RegReal src) (RegReal dst)
makeMove delta vreg (InMem src) (InReg dst)
= do recordSpill (SpillJoinRM vreg)
- return $ mkLoadInstr (RealReg dst) delta src
+ return $ mkLoadInstr (RegReal dst) delta src
makeMove delta vreg (InReg src) (InMem dst)
= do recordSpill (SpillJoinRM vreg)
- return $ mkSpillInstr (RealReg src) delta dst
+ return $ mkSpillInstr (RegReal src) delta dst
-- we don't handle memory to memory moves.
-- they shouldn't happen because we don't share stack slots between vregs.