[AMDGPU] RA inserted scalar instructions can be at the BB top (llvm#72140)

We adjust the insertion point at the BB top for spills/copies during RA
to ensure they are placed after the exec restore instructions required
for divergent control flow execution. This is, however, only required
for vector operations. The insertions for scalar registers can still go
to the BB top.
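
A minimal sketch of how the Reg-aware query could be used by a register-allocation helper. The helper name getTopInsertPoint is illustrative only; SkipPHIsLabelsAndDebug (and the isBasicBlockPrologue hook it consults) is the API this patch extends.

// Pick where a spill, restore, or split copy for Reg may go at the top of MBB.
// For a scalar (SGPR) register the placement is independent of the exec mask,
// so the query can return the very top of the block; for a vector register it
// still skips past the exec-restore prologue instructions.
static MachineBasicBlock::iterator
getTopInsertPoint(MachineBasicBlock &MBB, Register Reg) {
  return MBB.SkipPHIsLabelsAndDebug(MBB.begin(), Reg);
}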
cdevadas authored Nov 16, 2023
1 parent e8fc282 commit ce7fd49
Showing 10 changed files with 382 additions and 79 deletions.
6 changes: 4 additions & 2 deletions llvm/include/llvm/CodeGen/MachineBasicBlock.h
@@ -846,8 +846,10 @@ class MachineBasicBlock

/// Return the first instruction in MBB after I that is not a PHI, label or
/// debug. This is the correct point to insert copies at the beginning of a
/// basic block.
iterator SkipPHIsLabelsAndDebug(iterator I, bool SkipPseudoOp = true);
/// basic block. \p Reg is the register being used by a spill or defined for a
/// restore/split during register allocation.
iterator SkipPHIsLabelsAndDebug(iterator I, Register Reg = Register(),
bool SkipPseudoOp = true);

/// Returns an iterator to the first terminator instruction of this basic
/// block. If a terminator does not exist, it returns end().
6 changes: 4 additions & 2 deletions llvm/include/llvm/CodeGen/TargetInstrInfo.h
@@ -1988,8 +1988,10 @@ class TargetInstrInfo : public MCInstrInfo {

/// True if the instruction is bound to the top of its basic block and no
/// other instructions shall be inserted before it. This can be implemented
/// to prevent register allocator to insert spills before such instructions.
virtual bool isBasicBlockPrologue(const MachineInstr &MI) const {
/// to prevent register allocator to insert spills for \p Reg before such
/// instructions.
virtual bool isBasicBlockPrologue(const MachineInstr &MI,
Register Reg = Register()) const {
return false;
}

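As a rough illustration of the extended hook, a hypothetical target override might look like the sketch below; MyTargetInstrInfo, isScalarRegister, and modifiesLaneMask are illustrative stand-ins rather than APIs from this commit (the real AMDGPU override is in SIInstrInfo.cpp further down).

// Hypothetical override: treat lane-mask setup as block prologue only for
// registers whose insertion point actually depends on that mask.
bool MyTargetInstrInfo::isBasicBlockPrologue(const MachineInstr &MI,
                                             Register Reg) const {
  // Scalar registers are independent of the lane mask, so spills and copies
  // for them may be inserted above MI, at the very top of the block.
  if (Reg && isScalarRegister(Reg))
    return false;
  // Otherwise keep mask-restoring instructions pinned to the block top.
  return !MI.isTerminator() && modifiesLaneMask(MI);
}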
3 changes: 2 additions & 1 deletion llvm/lib/CodeGen/FixupStatepointCallerSaved.cpp
@@ -461,7 +461,8 @@ class StatepointState {

if (EHPad && !RC.hasReload(Reg, RegToSlotIdx[Reg], EHPad)) {
RC.recordReload(Reg, RegToSlotIdx[Reg], EHPad);
auto EHPadInsertPoint = EHPad->SkipPHIsLabelsAndDebug(EHPad->begin());
auto EHPadInsertPoint =
EHPad->SkipPHIsLabelsAndDebug(EHPad->begin(), Reg);
insertReloadBefore(Reg, EHPadInsertPoint, EHPad);
LLVM_DEBUG(dbgs() << "...also reload at EHPad "
<< printMBBReference(*EHPad) << "\n");
2 changes: 1 addition & 1 deletion llvm/lib/CodeGen/InlineSpiller.cpp
@@ -469,7 +469,7 @@ bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def);
MachineBasicBlock::iterator MII;
if (SrcVNI->isPHIDef())
MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin());
MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin(), SrcReg);
else {
MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def);
assert(DefMI && "Defining instruction disappeared");
4 changes: 2 additions & 2 deletions llvm/lib/CodeGen/MachineBasicBlock.cpp
@@ -223,13 +223,13 @@ MachineBasicBlock::SkipPHIsAndLabels(MachineBasicBlock::iterator I) {

MachineBasicBlock::iterator
MachineBasicBlock::SkipPHIsLabelsAndDebug(MachineBasicBlock::iterator I,
bool SkipPseudoOp) {
Register Reg, bool SkipPseudoOp) {
const TargetInstrInfo *TII = getParent()->getSubtarget().getInstrInfo();

iterator E = end();
while (I != E && (I->isPHI() || I->isPosition() || I->isDebugInstr() ||
(SkipPseudoOp && I->isPseudoProbe()) ||
TII->isBasicBlockPrologue(*I)))
TII->isBasicBlockPrologue(*I, Reg)))
++I;
// FIXME: This needs to change if we wish to bundle labels / dbg_values
// inside the bundle.
6 changes: 4 additions & 2 deletions llvm/lib/CodeGen/SplitKit.cpp
@@ -795,8 +795,10 @@ SlotIndex SplitEditor::leaveIntvAtTop(MachineBasicBlock &MBB) {
return Start;
}

VNInfo *VNI = defFromParent(0, ParentVNI, Start, MBB,
MBB.SkipPHIsLabelsAndDebug(MBB.begin()));
unsigned RegIdx = 0;
Register Reg = LIS.getInterval(Edit->get(RegIdx)).reg();
VNInfo *VNI = defFromParent(RegIdx, ParentVNI, Start, MBB,
MBB.SkipPHIsLabelsAndDebug(MBB.begin(), Reg));
RegAssign.insert(Start, VNI->def, OpenIdx);
LLVM_DEBUG(dump());
return VNI->def;
17 changes: 13 additions & 4 deletions llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -8476,16 +8476,25 @@ unsigned SIInstrInfo::getLiveRangeSplitOpcode(Register SrcReg,
return AMDGPU::COPY;
}

bool SIInstrInfo::isBasicBlockPrologue(const MachineInstr &MI) const {
bool SIInstrInfo::isBasicBlockPrologue(const MachineInstr &MI,
Register Reg) const {
// We need to handle instructions which may be inserted during register
// allocation to handle the prolog. The initial prolog instruction may have
// been separated from the start of the block by spills and copies inserted
// needed by the prolog.
uint16_t Opc = MI.getOpcode();
// needed by the prolog. However, the insertions for scalar registers can
// always be placed at the BB top as they are independent of the exec mask
// value.
bool IsNullOrVectorRegister = true;
if (Reg) {
const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
IsNullOrVectorRegister = !RI.isSGPRClass(RI.getRegClassForReg(MRI, Reg));
}

uint16_t Opc = MI.getOpcode();
// FIXME: Copies inserted in the block prolog for live-range split should also
// be included.
return (isSpillOpcode(Opc) || (!MI.isTerminator() && Opc != AMDGPU::COPY &&
return IsNullOrVectorRegister &&
(isSpillOpcode(Opc) || (!MI.isTerminator() && Opc != AMDGPU::COPY &&
MI.modifiesRegister(AMDGPU::EXEC, &RI)));
}

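To make the behavioral change concrete, a small sketch of how the updated AMDGPU hook answers, assuming MI is a prologue instruction that writes EXEC and SGPRReg/VGPRReg are placeholder registers of the corresponding classes (the names are illustrative, not from this commit).

// With no register given, the old behavior is kept: MI still counts as part
// of the block prologue. A scalar register does not depend on the exec mask,
// so its spill or copy may now be inserted above MI; a vector register must
// still wait for the exec restore.
static void checkPrologueQueries(const SIInstrInfo &TII, const MachineInstr &MI,
                                 Register SGPRReg, Register VGPRReg) {
  assert(TII.isBasicBlockPrologue(MI));           // unchanged default query
  assert(!TII.isBasicBlockPrologue(MI, SGPRReg)); // scalar: BB top is fine
  assert(TII.isBasicBlockPrologue(MI, VGPRReg));  // vector: stay below prologue
}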
3 changes: 2 additions & 1 deletion llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -1179,7 +1179,8 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
unsigned getLiveRangeSplitOpcode(Register Reg,
const MachineFunction &MF) const override;

bool isBasicBlockPrologue(const MachineInstr &MI) const override;
bool isBasicBlockPrologue(const MachineInstr &MI,
Register Reg = Register()) const override;

MachineInstr *createPHIDestinationCopy(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsPt,
119 changes: 60 additions & 59 deletions llvm/test/CodeGen/AMDGPU/identical-subrange-spill-infloop.ll
@@ -168,7 +168,6 @@ define void @main(i1 %arg) #0 {
; CHECK-NEXT: s_mov_b64 vcc, vcc
; CHECK-NEXT: s_cbranch_vccnz .LBB0_2
; CHECK-NEXT: .LBB0_3: ; %Flow14
; CHECK-NEXT: s_or_saveexec_b64 s[20:21], s[26:27]
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: v_readlane_b32 s12, v5, 32
; CHECK-NEXT: v_readlane_b32 s13, v5, 33
@@ -178,39 +177,39 @@
; CHECK-NEXT: v_readlane_b32 s17, v5, 37
; CHECK-NEXT: v_readlane_b32 s18, v5, 38
; CHECK-NEXT: v_readlane_b32 s19, v5, 39
; CHECK-NEXT: v_writelane_b32 v5, s4, 56
; CHECK-NEXT: v_writelane_b32 v5, s5, 57
; CHECK-NEXT: v_writelane_b32 v5, s6, 58
; CHECK-NEXT: v_writelane_b32 v5, s7, 59
; CHECK-NEXT: v_writelane_b32 v5, s8, 60
; CHECK-NEXT: v_writelane_b32 v5, s9, 61
; CHECK-NEXT: v_writelane_b32 v5, s10, 62
; CHECK-NEXT: v_writelane_b32 v5, s11, 63
; CHECK-NEXT: v_writelane_b32 v5, s52, 40
; CHECK-NEXT: v_writelane_b32 v5, s53, 41
; CHECK-NEXT: v_writelane_b32 v5, s54, 42
; CHECK-NEXT: v_writelane_b32 v5, s55, 43
; CHECK-NEXT: v_writelane_b32 v5, s56, 44
; CHECK-NEXT: v_writelane_b32 v5, s57, 45
; CHECK-NEXT: v_writelane_b32 v5, s58, 46
; CHECK-NEXT: v_writelane_b32 v5, s59, 47
; CHECK-NEXT: v_writelane_b32 v4, s12, 0
; CHECK-NEXT: v_writelane_b32 v5, s60, 48
; CHECK-NEXT: v_writelane_b32 v4, s13, 1
; CHECK-NEXT: v_writelane_b32 v5, s61, 49
; CHECK-NEXT: v_writelane_b32 v4, s14, 2
; CHECK-NEXT: v_writelane_b32 v5, s62, 50
; CHECK-NEXT: v_writelane_b32 v4, s15, 3
; CHECK-NEXT: v_writelane_b32 v5, s63, 51
; CHECK-NEXT: v_writelane_b32 v4, s16, 4
; CHECK-NEXT: v_writelane_b32 v5, s64, 52
; CHECK-NEXT: v_writelane_b32 v4, s17, 5
; CHECK-NEXT: v_writelane_b32 v5, s65, 53
; CHECK-NEXT: v_writelane_b32 v4, s18, 6
; CHECK-NEXT: v_writelane_b32 v5, s66, 54
; CHECK-NEXT: v_writelane_b32 v4, s19, 7
; CHECK-NEXT: v_writelane_b32 v5, s67, 55
; CHECK-NEXT: s_xor_b64 exec, exec, s[20:21]
; CHECK-NEXT: v_writelane_b32 v5, s4, 40
; CHECK-NEXT: v_writelane_b32 v5, s5, 41
; CHECK-NEXT: v_writelane_b32 v5, s6, 42
; CHECK-NEXT: v_writelane_b32 v5, s7, 43
; CHECK-NEXT: v_writelane_b32 v5, s8, 44
; CHECK-NEXT: v_writelane_b32 v5, s9, 45
; CHECK-NEXT: v_writelane_b32 v5, s10, 46
; CHECK-NEXT: v_writelane_b32 v5, s11, 47
; CHECK-NEXT: v_writelane_b32 v5, s12, 48
; CHECK-NEXT: v_writelane_b32 v5, s13, 49
; CHECK-NEXT: v_writelane_b32 v5, s14, 50
; CHECK-NEXT: v_writelane_b32 v5, s15, 51
; CHECK-NEXT: v_writelane_b32 v5, s16, 52
; CHECK-NEXT: v_writelane_b32 v5, s17, 53
; CHECK-NEXT: v_writelane_b32 v5, s18, 54
; CHECK-NEXT: v_writelane_b32 v5, s19, 55
; CHECK-NEXT: v_writelane_b32 v5, s52, 56
; CHECK-NEXT: v_writelane_b32 v4, s60, 0
; CHECK-NEXT: v_writelane_b32 v5, s53, 57
; CHECK-NEXT: v_writelane_b32 v4, s61, 1
; CHECK-NEXT: v_writelane_b32 v5, s54, 58
; CHECK-NEXT: v_writelane_b32 v4, s62, 2
; CHECK-NEXT: v_writelane_b32 v5, s55, 59
; CHECK-NEXT: v_writelane_b32 v4, s63, 3
; CHECK-NEXT: v_writelane_b32 v5, s56, 60
; CHECK-NEXT: v_writelane_b32 v4, s64, 4
; CHECK-NEXT: v_writelane_b32 v5, s57, 61
; CHECK-NEXT: v_writelane_b32 v4, s65, 5
; CHECK-NEXT: v_writelane_b32 v5, s58, 62
; CHECK-NEXT: v_writelane_b32 v4, s66, 6
; CHECK-NEXT: v_writelane_b32 v5, s59, 63
; CHECK-NEXT: v_writelane_b32 v4, s67, 7
; CHECK-NEXT: s_andn2_saveexec_b64 s[20:21], s[26:27]
; CHECK-NEXT: s_cbranch_execz .LBB0_10
; CHECK-NEXT: ; %bb.4: ; %bb32
; CHECK-NEXT: s_and_saveexec_b64 s[8:9], s[24:25]
@@ -265,59 +264,61 @@ define void @main(i1 %arg) #0 {
; CHECK-NEXT: s_waitcnt vmcnt(1)
; CHECK-NEXT: buffer_store_dwordx4 v[2:5], off, s[8:11], 0
; CHECK-NEXT: .LBB0_6: ; %Flow12
; CHECK-NEXT: s_andn2_saveexec_b64 s[4:5], s[22:23]
; CHECK-NEXT: s_or_saveexec_b64 s[4:5], s[22:23]
; CHECK-NEXT: v_readlane_b32 s52, v5, 40
; CHECK-NEXT: v_readlane_b32 s53, v5, 41
; CHECK-NEXT: v_readlane_b32 s54, v5, 42
; CHECK-NEXT: v_readlane_b32 s55, v5, 43
; CHECK-NEXT: v_readlane_b32 s56, v5, 44
; CHECK-NEXT: v_readlane_b32 s57, v5, 45
; CHECK-NEXT: v_readlane_b32 s58, v5, 46
; CHECK-NEXT: v_readlane_b32 s59, v5, 47
; CHECK-NEXT: v_readlane_b32 s60, v5, 48
; CHECK-NEXT: v_readlane_b32 s61, v5, 49
; CHECK-NEXT: v_readlane_b32 s62, v5, 50
; CHECK-NEXT: v_readlane_b32 s63, v5, 51
; CHECK-NEXT: v_readlane_b32 s64, v5, 52
; CHECK-NEXT: v_readlane_b32 s65, v5, 53
; CHECK-NEXT: v_readlane_b32 s66, v5, 54
; CHECK-NEXT: v_readlane_b32 s67, v5, 55
; CHECK-NEXT: s_xor_b64 exec, exec, s[4:5]
; CHECK-NEXT: s_cbranch_execz .LBB0_9
; CHECK-NEXT: ; %bb.7: ; %bb33.preheader
; CHECK-NEXT: s_mov_b32 s8, 0
; CHECK-NEXT: s_mov_b32 s6, s8
; CHECK-NEXT: v_readlane_b32 s36, v5, 40
; CHECK-NEXT: s_mov_b32 s7, s8
; CHECK-NEXT: v_mov_b32_e32 v2, s6
; CHECK-NEXT: v_readlane_b32 s37, v5, 41
; CHECK-NEXT: v_readlane_b32 s36, v5, 56
; CHECK-NEXT: s_mov_b32 s9, s8
; CHECK-NEXT: s_mov_b32 s10, s8
; CHECK-NEXT: s_mov_b32 s11, s8
; CHECK-NEXT: v_mov_b32_e32 v3, s7
; CHECK-NEXT: v_readlane_b32 s38, v5, 42
; CHECK-NEXT: v_readlane_b32 s39, v5, 43
; CHECK-NEXT: v_readlane_b32 s40, v5, 44
; CHECK-NEXT: v_readlane_b32 s41, v5, 45
; CHECK-NEXT: v_readlane_b32 s42, v5, 46
; CHECK-NEXT: v_readlane_b32 s43, v5, 47
; CHECK-NEXT: v_readlane_b32 s44, v5, 48
; CHECK-NEXT: v_readlane_b32 s45, v5, 49
; CHECK-NEXT: v_readlane_b32 s46, v5, 50
; CHECK-NEXT: v_readlane_b32 s47, v5, 51
; CHECK-NEXT: v_readlane_b32 s48, v5, 52
; CHECK-NEXT: v_readlane_b32 s49, v5, 53
; CHECK-NEXT: v_readlane_b32 s50, v5, 54
; CHECK-NEXT: v_readlane_b32 s51, v5, 55
; CHECK-NEXT: s_mov_b64 s[12:13], s[36:37]
; CHECK-NEXT: s_mov_b64 s[14:15], s[38:39]
; CHECK-NEXT: s_mov_b64 s[16:17], s[40:41]
; CHECK-NEXT: s_mov_b64 s[18:19], s[42:43]
; CHECK-NEXT: image_sample_lz v6, v[2:3], s[36:43], s[8:11] dmask:0x1
; CHECK-NEXT: v_readlane_b32 s36, v5, 56
; CHECK-NEXT: v_readlane_b32 s37, v5, 57
; CHECK-NEXT: v_readlane_b32 s38, v5, 58
; CHECK-NEXT: v_readlane_b32 s39, v5, 59
; CHECK-NEXT: v_readlane_b32 s40, v5, 60
; CHECK-NEXT: v_readlane_b32 s41, v5, 61
; CHECK-NEXT: v_readlane_b32 s42, v5, 62
; CHECK-NEXT: v_readlane_b32 s43, v5, 63
; CHECK-NEXT: s_nop 4
; CHECK-NEXT: image_sample_lz v6, v[2:3], s[36:43], s[8:11] dmask:0x1
; CHECK-NEXT: image_sample_lz v7, v[2:3], s[52:59], s[8:11] dmask:0x1
; CHECK-NEXT: ; kill: killed $vgpr2_vgpr3
; CHECK-NEXT: s_mov_b64 s[12:13], s[36:37]
; CHECK-NEXT: s_and_b64 vcc, exec, 0
; CHECK-NEXT: v_readlane_b32 s44, v4, 0
; CHECK-NEXT: v_readlane_b32 s45, v4, 1
; CHECK-NEXT: v_readlane_b32 s46, v4, 2
; CHECK-NEXT: v_readlane_b32 s47, v4, 3
; CHECK-NEXT: image_sample_lz v7, v[2:3], s[36:43], s[8:11] dmask:0x1
; CHECK-NEXT: v_readlane_b32 s48, v4, 4
; CHECK-NEXT: v_readlane_b32 s49, v4, 5
; CHECK-NEXT: v_readlane_b32 s50, v4, 6
; CHECK-NEXT: v_readlane_b32 s51, v4, 7
; CHECK-NEXT: s_mov_b64 s[14:15], s[38:39]
; CHECK-NEXT: s_mov_b64 s[16:17], s[40:41]
; CHECK-NEXT: s_mov_b64 s[18:19], s[42:43]
; CHECK-NEXT: ; kill: killed $sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19
; CHECK-NEXT: ; kill: killed $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43
; CHECK-NEXT: ; kill: killed $sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59
; CHECK-NEXT: ; kill: killed $sgpr8_sgpr9_sgpr10 killed $sgpr11
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_sub_f32_e32 v2, v7, v6