/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/AMDGPU/ |
D | AMDGPURegisterBankInfo.cpp |
      45  const RegisterBank &RBSGPR = getRegBank(AMDGPU::SGPRRegBankID);  in AMDGPURegisterBankInfo()
      47  assert(&RBSGPR == &AMDGPU::SGPRRegBank);  in AMDGPURegisterBankInfo()
      49  const RegisterBank &RBVGPR = getRegBank(AMDGPU::VGPRRegBankID);  in AMDGPURegisterBankInfo()
      51  assert(&RBVGPR == &AMDGPU::VGPRRegBank);  in AMDGPURegisterBankInfo()
      62  if (Def->getOpcode() == AMDGPU::G_CONSTANT) {  in isConstant()
      67  if (Def->getOpcode() == AMDGPU::COPY)  in isConstant()
      76  if (Dst.getID() == AMDGPU::SGPRRegBankID &&  in copyCost()
      77      Src.getID() == AMDGPU::VGPRRegBankID)  in copyCost()
      82  if (Size == 1 && Dst.getID() == AMDGPU::SCCRegBankID &&  in copyCost()
      83      Src.getID() == AMDGPU::SGPRRegBankID)  in copyCost()
      [all …]
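Note: the copyCost() hits encode the asymmetry that a VGPR-to-SGPR move is the expensive direction on GCN (it needs a readlane-style sequence), while the reverse is cheap. A minimal sketch of that shape; the cost constants and formula here are assumptions, not the file's real numbers:

    // Sketch only: mirrors the bank-ID tests above; cost values are assumed.
    static unsigned copyCostSketch(unsigned DstBankID, unsigned SrcBankID,
                                   unsigned Size) {
      // VGPR -> SGPR must read a lane out of a vector register, so it is
      // charged per 32-bit chunk moved (assumed formula).
      if (DstBankID == AMDGPU::SGPRRegBankID &&
          SrcBankID == AMDGPU::VGPRRegBankID)
        return Size <= 32 ? 2 : 2 * (Size / 32);
      return 1; // same-bank or SGPR -> VGPR: a plain copy
    }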
|
D | SIRegisterInfo.cpp |
      76  classifyPressureSet(i, AMDGPU::SGPR0, SGPRPressureSets);  in SIRegisterInfo()
      77  classifyPressureSet(i, AMDGPU::VGPR0, VGPRPressureSets);  in SIRegisterInfo()
     111  unsigned BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx));  in reservedPrivateSegmentBufferReg()
     112  return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SReg_128RegClass);  in reservedPrivateSegmentBufferReg()
     136  return AMDGPU::SGPR_32RegClass.getRegister(Reg);  in reservedPrivateSegmentWaveByteOffsetReg()
     141  return AMDGPU::SGPR32;  in reservedStackPtrOffsetReg()
     149  reserveRegisterTuples(Reserved, AMDGPU::EXEC);  in getReservedRegs()
     150  reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR);  in getReservedRegs()
     153  reserveRegisterTuples(Reserved, AMDGPU::M0);  in getReservedRegs()
     156  reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_BASE);  in getReservedRegs()
      [all …]
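Note: reservedPrivateSegmentBufferReg() shows a useful MC-layer idiom: pick a 32-bit SGPR by index, then widen it to the 128-bit tuple that begins at that register via getMatchingSuperReg(). A hedged sketch; it assumes this runs inside a TargetRegisterInfo-derived class, as in the file:

    // Sketch: SGPRn widened to the 4-register tuple used for a buffer resource.
    unsigned Base = AMDGPU::SGPR_32RegClass.getRegister(BaseIdx);
    unsigned Wide = getMatchingSuperReg(Base, AMDGPU::sub0,
                                        &AMDGPU::SReg_128RegClass);
    // getMatchingSuperReg() returns 0 (NoRegister) when no such tuple starts
    // at Base, e.g. if BaseIdx is not tuple-aligned (an assumption to check).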
|
D | SIInstrInfo.cpp |
      71  namespace AMDGPU {  (namespace)
      88  : AMDGPUGenInstrInfo(AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN),  in SIInstrInfo()
     114  int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);  in nodesHaveSameOperandValue()
     115  int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);  in nodesHaveSameOperandValue()
     141  case AMDGPU::V_MOV_B32_e32:  in isReallyTriviallyReMaterializable()
     142  case AMDGPU::V_MOV_B32_e64:  in isReallyTriviallyReMaterializable()
     143  case AMDGPU::V_MOV_B64_PSEUDO:  in isReallyTriviallyReMaterializable()
     180  if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::data1) != -1 ||  in areLoadsFromSameBasePtr()
     181      AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::data1) != -1)  in areLoadsFromSameBasePtr()
     191  if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::sbase) == -1 ||  in areLoadsFromSameBasePtr()
      [all …]
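Note: the nodesHaveSameOperandValue() hits demonstrate the core pattern behind most of these files: AMDGPU::getNamedOperandIdx() maps a stable operand name to a per-opcode operand index, returning -1 when the opcode has no such operand. A MachineInstr-level sketch of the same comparison; this helper is illustrative, not the file's SDNode-based original:

    #include "Utils/AMDGPUBaseInfo.h"          // declares AMDGPU::getNamedOperandIdx
    #include "llvm/CodeGen/MachineInstr.h"
    using namespace llvm;

    // Sketch: compare one named immediate operand across two instructions.
    static bool sameNamedImm(const MachineInstr &A, const MachineInstr &B,
                             unsigned OpName) {
      int IdxA = AMDGPU::getNamedOperandIdx(A.getOpcode(), OpName);
      int IdxB = AMDGPU::getNamedOperandIdx(B.getOpcode(), OpName);
      if (IdxA == -1 || IdxB == -1)
        return false; // at least one opcode lacks the operand
      return A.getOperand(IdxA).getImm() == B.getOperand(IdxB).getImm();
    }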
|
D | SIFoldOperands.cpp |
     130  case AMDGPU::V_MAC_F32_e64:  in isInlineConstantIfFolded()
     131  case AMDGPU::V_MAC_F16_e64:  in isInlineConstantIfFolded()
     132  case AMDGPU::V_FMAC_F32_e64: {  in isInlineConstantIfFolded()
     135  int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);  in isInlineConstantIfFolded()
     137  bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64;  in isInlineConstantIfFolded()
     138  bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;  in isInlineConstantIfFolded()
     141      AMDGPU::V_FMA_F32 : (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);  in isInlineConstantIfFolded()
     169  if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0))  in updateOperand()
     170  ModIdx = AMDGPU::OpName::src0_modifiers;  in updateOperand()
     171  else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1))  in updateOperand()
      [all …]
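Note: the isInlineConstantIfFolded() hits spell out a complete decision: folding an immediate into a V_MAC/V_FMAC src2 would turn the instruction into the corresponding three-address form, so inline-constant legality is checked against that mapped opcode. The mapping itself, reconstructed from lines 137-141 above:

    // Reconstructed from the hits above (the F16 MAC maps to V_MAD_F16).
    static unsigned mappedMadOpcode(unsigned Opc) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
      return IsFMA ? AMDGPU::V_FMA_F32
                   : (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
    }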
|
D | SIOptimizeExecMasking.cpp |
      62  case AMDGPU::COPY:  in isCopyFromExec()
      63  case AMDGPU::S_MOV_B64:  in isCopyFromExec()
      64  case AMDGPU::S_MOV_B64_term: {  in isCopyFromExec()
      66  if (Src.isReg() && Src.getReg() == AMDGPU::EXEC)  in isCopyFromExec()
      71  return AMDGPU::NoRegister;  in isCopyFromExec()
      77  case AMDGPU::COPY:  in isCopyToExec()
      78  case AMDGPU::S_MOV_B64: {  in isCopyToExec()
      80  if (Dst.isReg() && Dst.getReg() == AMDGPU::EXEC && MI.getOperand(1).isReg())  in isCopyToExec()
      84  case AMDGPU::S_MOV_B64_term:  in isCopyToExec()
      88  return AMDGPU::NoRegister;  in isCopyToExec()
      [all …]
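Note: the two helpers above are mirror images, and enough of isCopyFromExec() survives in the hits to reconstruct its shape. A sketch, assuming the usual LLVM MachineInstr context:

    // Reconstructed from the hits: return the destination register if MI
    // copies EXEC into something, otherwise AMDGPU::NoRegister.
    static unsigned isCopyFromExecSketch(const MachineInstr &MI) {
      switch (MI.getOpcode()) {
      case AMDGPU::COPY:
      case AMDGPU::S_MOV_B64:
      case AMDGPU::S_MOV_B64_term: {
        const MachineOperand &Src = MI.getOperand(1);
        if (Src.isReg() && Src.getReg() == AMDGPU::EXEC)
          return MI.getOperand(0).getReg(); // assumed: dest is operand 0
        break;
      }
      }
      return AMDGPU::NoRegister;
    }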
|
D | SIPeepholeSDWA.cpp |
     136  using namespace AMDGPU::SDWA;
     337  if (TII->getNamedOperand(*MI, AMDGPU::OpName::src0) == SrcOp) {  in getSrcMods()
     338  if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src0_modifiers)) {  in getSrcMods()
     341  } else if (TII->getNamedOperand(*MI, AMDGPU::OpName::src1) == SrcOp) {  in getSrcMods()
     342  if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src1_modifiers)) {  in getSrcMods()
     372  MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::src0);  in convertToSDWA()
     373  MachineOperand *SrcSel = TII->getNamedOperand(MI, AMDGPU::OpName::src0_sel);  in convertToSDWA()
     375      TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers);  in convertToSDWA()
     379  Src = TII->getNamedOperand(MI, AMDGPU::OpName::src1);  in convertToSDWA()
     380  SrcSel = TII->getNamedOperand(MI, AMDGPU::OpName::src1_sel);  in convertToSDWA()
      [all …]
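Note: getSrcMods() pairs each source operand with its _modifiers companion by name. A hedged fragment showing just the lookup; TII is a const SIInstrInfo*, MI a MachineInstr&, and the zero fallback is an assumption:

    // Sketch: fetch NEG/ABS-style modifier bits for src0, if the opcode has them.
    unsigned Mods = 0;
    if (MachineOperand *Mod =
            TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers))
      Mods = Mod->getImm();  // modifier bitmask as encoded in the operand
    // Mods stays 0 when the operand is absent on this opcode (assumed default).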
|
D | SILoadStoreOptimizer.cpp |
     320  AddrOpName[NumAddresses++] = AMDGPU::OpName::addr;  in findMatchingInst()
     323  AddrOpName[NumAddresses++] = AMDGPU::OpName::sbase;  in findMatchingInst()
     327  AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;  in findMatchingInst()
     328  AddrOpName[NumAddresses++] = AMDGPU::OpName::vaddr;  in findMatchingInst()
     329  AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;  in findMatchingInst()
     333  AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;  in findMatchingInst()
     334  AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;  in findMatchingInst()
     339  AddrIdx[i] = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AddrOpName[i]);  in findMatchingInst()
     426  int OffsetIdx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(),  in findMatchingInst()
     427      AMDGPU::OpName::offset);  in findMatchingInst()
      [all …]
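Note: findMatchingInst() builds a per-address-mode worklist of operand names, then resolves each to an index for the current opcode. A condensed sketch of the buffer-instruction branch visible at lines 327-339; the array size and the Opcode variable are assumptions:

    // Sketch of the MUBUF-style case: three named address operands to compare.
    unsigned AddrOpName[3] = {AMDGPU::OpName::srsrc, AMDGPU::OpName::vaddr,
                              AMDGPU::OpName::soffset};
    int AddrIdx[3];
    for (unsigned i = 0; i != 3; ++i)
      AddrIdx[i] = AMDGPU::getNamedOperandIdx(Opcode, AddrOpName[i]);
    // A -1 entry means the current opcode lacks that operand entirely.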
|
D | SIShrinkInstructions.cpp |
      71  const MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);  in canShrink()
      82  case AMDGPU::V_ADDC_U32_e64:  in canShrink()
      83  case AMDGPU::V_SUBB_U32_e64:  in canShrink()
      84  case AMDGPU::V_SUBBREV_U32_e64: {  in canShrink()
      86      = TII->getNamedOperand(MI, AMDGPU::OpName::src1);  in canShrink()
      92  case AMDGPU::V_MAC_F32_e64:  in canShrink()
      93  case AMDGPU::V_MAC_F16_e64:  in canShrink()
      94  case AMDGPU::V_FMAC_F32_e64:  in canShrink()
      96      TII->hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers))  in canShrink()
     100  case AMDGPU::V_CNDMASK_B32_e64:  in canShrink()
      [all …]
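Note: canShrink() is the gatekeeper for rewriting 64-bit VOP3 encodings into their 32-bit forms; the recurring test is that no VOP3-only modifier is in use. A fragment distilled from the hits here and in the external/llvm copy of this file below (MI and TII as in any SI pass):

    // Sketch: e64 -> e32 shrinking is blocked by any VOP3-only modifier.
    if (TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return false;                                  // keep the e64 form
    return !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp);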
|
D | AMDGPURegisterInfo.cpp |
      32  AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, AMDGPU::sub4,  in getSubRegFromChannel()
      33  AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, AMDGPU::sub8, AMDGPU::sub9,  in getSubRegFromChannel()
      34  AMDGPU::sub10, AMDGPU::sub11, AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14,  in getSubRegFromChannel()
      35  AMDGPU::sub15  in getSubRegFromChannel()
      63  static const MCPhysReg NoCalleeSavedReg = AMDGPU::NoRegister;  in getCalleeSavedRegs()
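Note: getSubRegFromChannel() is a direct table lookup: channel N maps to the generated sub-register index subN. Reconstructed as a standalone sketch; the bounds assert is an assumption, and the AMDGPU enums are assumed in scope:

    #include <cassert>

    // Reconstructed from lines 32-35 above.
    static unsigned subRegFromChannelSketch(unsigned Channel) {
      static const unsigned SubRegs[] = {
          AMDGPU::sub0,  AMDGPU::sub1,  AMDGPU::sub2,  AMDGPU::sub3,
          AMDGPU::sub4,  AMDGPU::sub5,  AMDGPU::sub6,  AMDGPU::sub7,
          AMDGPU::sub8,  AMDGPU::sub9,  AMDGPU::sub10, AMDGPU::sub11,
          AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15};
      assert(Channel < 16 && "channel out of range");
      return SubRegs[Channel];
    }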
|
D | SIInsertSkips.cpp |
     132  if (I->getOpcode() == AMDGPU::S_CBRANCH_VCCNZ ||  in shouldSkip()
     133      I->getOpcode() == AMDGPU::S_CBRANCH_VCCZ)  in shouldSkip()
     161  BuildMI(&MBB, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))  in skipIfDead()
     167  BuildMI(*SkipBB, Insert, DL, TII->get(AMDGPU::EXP_DONE))  in skipIfDead()
     169  .addReg(AMDGPU::VGPR0, RegState::Undef)  in skipIfDead()
     170  .addReg(AMDGPU::VGPR0, RegState::Undef)  in skipIfDead()
     171  .addReg(AMDGPU::VGPR0, RegState::Undef)  in skipIfDead()
     172  .addReg(AMDGPU::VGPR0, RegState::Undef)  in skipIfDead()
     178  BuildMI(*SkipBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));  in skipIfDead()
     188  case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR: {  in kill()
      [all …]
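Note: skipIfDead() builds the early-exit block those BuildMI calls describe: branch over it while any lane is still alive (S_CBRANCH_EXECNZ), otherwise issue a null "done" export with undefined VGPR0 operands and end the program. A hedged reconstruction of the export; the export-target value and the trailing vm/compr/en immediates are assumptions based on the EXP operand order, not shown in the hits:

    // Sketch: null export marked 'done', then end the program (per the hits).
    BuildMI(*SkipBB, Insert, DL, TII->get(AMDGPU::EXP_DONE))
        .addImm(0x09)                           // export target: null (assumed)
        .addReg(AMDGPU::VGPR0, RegState::Undef) // four unused source slots
        .addReg(AMDGPU::VGPR0, RegState::Undef)
        .addReg(AMDGPU::VGPR0, RegState::Undef)
        .addReg(AMDGPU::VGPR0, RegState::Undef)
        .addImm(1)                              // vm (assumed)
        .addImm(0)                              // compr (assumed)
        .addImm(0);                             // en (assumed)
    BuildMI(*SkipBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));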
|
D | SILowerControlFlow.cpp |
     131  assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef());  in setImpSCCDefDead()
     145  U->getOpcode() != AMDGPU::SI_END_CF)  in isSimpleIf()
     178  assert(SaveExec.getSubReg() == AMDGPU::NoSubRegister &&  in emitIf()
     179      Cond.getSubReg() == AMDGPU::NoSubRegister);  in emitIf()
     184  assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef());  in emitIf()
     194      : MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);  in emitIf()
     196  BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), CopyReg)  in emitIf()
     197  .addReg(AMDGPU::EXEC)  in emitIf()
     198  .addReg(AMDGPU::EXEC, RegState::ImplicitDefine);  in emitIf()
     200  unsigned Tmp = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);  in emitIf()
      [all …]
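Note: the emitIf() hits show the canonical SI "open an if" step: the live EXEC mask is copied into a fresh 64-bit scalar virtual register so it can be restored at the matching SI_END_CF. A hedged sketch of just that save; MBB, I, DL, MRI, and TII are assumed in scope as in any SI pass:

    // Sketch: save the exec mask before masking it for the 'then' block.
    unsigned SaveReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), SaveReg)
        .addReg(AMDGPU::EXEC); // restored later by SI_END_CF, per the hits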
|
D | SIFrameLowering.cpp |
      28  return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(),  in getAllSGPR128()
      34  return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(),  in getAllSGPRs()
      67  unsigned FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0);  in emitFlatScratchInit()
      68  unsigned FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1);  in emitFlatScratchInit()
      74  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), AMDGPU::FLAT_SCR_LO)  in emitFlatScratchInit()
      77  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), AMDGPU::FLAT_SCR_HI)  in emitFlatScratchInit()
      85  BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), AMDGPU::FLAT_SCR_LO)  in emitFlatScratchInit()
      90  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)  in emitFlatScratchInit()
      95  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_LSHR_B32), AMDGPU::FLAT_SCR_HI)  in emitFlatScratchInit()
     110  if (ScratchRsrcReg == AMDGPU::NoRegister ||  in getReservedPrivateSegmentBufferReg()
      [all …]
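Note: emitFlatScratchInit() contains a textbook 64-bit scalar add: S_ADD_U32 writes the low half and defines SCC as the carry, and S_ADDC_U32 consumes that carry for the high half. A hedged fragment; the register names come from the hits, but the second source operands are assumptions:

    // Sketch: flat_scratch = flat_scratch_init + scratch wave offset (64-bit).
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), AMDGPU::FLAT_SCR_LO)
        .addReg(FlatScrInitLo)
        .addReg(ScratchWaveOffsetReg);  // low add: defines SCC (carry out)
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), AMDGPU::FLAT_SCR_HI)
        .addReg(FlatScrInitHi)
        .addImm(0);                     // high add: consumes SCC (carry in)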
|
/external/llvm/lib/Target/AMDGPU/ |
D | SIInstrInfo.cpp |
      57  int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);  in nodesHaveSameOperandValue()
      58  int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);  in nodesHaveSameOperandValue()
      84  case AMDGPU::V_MOV_B32_e32:  in isReallyTriviallyReMaterializable()
      85  case AMDGPU::V_MOV_B32_e64:  in isReallyTriviallyReMaterializable()
      86  case AMDGPU::V_MOV_B64_PSEUDO:  in isReallyTriviallyReMaterializable()
     123  if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::data1) != -1 ||  in areLoadsFromSameBasePtr()
     124      AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::data1) != -1)  in areLoadsFromSameBasePtr()
     160  if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||  in areLoadsFromSameBasePtr()
     162      !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||  in areLoadsFromSameBasePtr()
     163      !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))  in areLoadsFromSameBasePtr()
      [all …]
|
D | SIRegisterInfo.cpp |
     106  classifyPressureSet(i, AMDGPU::SGPR0, SGPRPressureSets);  in SIRegisterInfo()
     107  classifyPressureSet(i, AMDGPU::VGPR0, VGPRPressureSets);  in SIRegisterInfo()
     123  unsigned BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx));  in reservedPrivateSegmentBufferReg()
     124  return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SReg_128RegClass);  in reservedPrivateSegmentBufferReg()
     142  return AMDGPU::SGPR_32RegClass.getRegister(Reg);  in reservedPrivateSegmentWaveByteOffsetReg()
     147  Reserved.set(AMDGPU::INDIRECT_BASE_ADDR);  in getReservedRegs()
     151  reserveRegisterTuples(Reserved, AMDGPU::EXEC);  in getReservedRegs()
     152  reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR);  in getReservedRegs()
     155  reserveRegisterTuples(Reserved, AMDGPU::TBA);  in getReservedRegs()
     156  reserveRegisterTuples(Reserved, AMDGPU::TMA);  in getReservedRegs()
      [all …]
|
D | R600InstrInfo.cpp |
      47  if ((AMDGPU::R600_Reg128RegClass.contains(DestReg) ||  in copyPhysReg()
      48      AMDGPU::R600_Reg128VerticalRegClass.contains(DestReg)) &&  in copyPhysReg()
      49      (AMDGPU::R600_Reg128RegClass.contains(SrcReg) ||  in copyPhysReg()
      50      AMDGPU::R600_Reg128VerticalRegClass.contains(SrcReg))) {  in copyPhysReg()
      52  } else if((AMDGPU::R600_Reg64RegClass.contains(DestReg) ||  in copyPhysReg()
      53      AMDGPU::R600_Reg64VerticalRegClass.contains(DestReg)) &&  in copyPhysReg()
      54      (AMDGPU::R600_Reg64RegClass.contains(SrcReg) ||  in copyPhysReg()
      55      AMDGPU::R600_Reg64VerticalRegClass.contains(SrcReg))) {  in copyPhysReg()
      62  buildDefaultInstruction(MBB, MI, AMDGPU::MOV,  in copyPhysReg()
      69  MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,  in copyPhysReg()
      [all …]
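Note: copyPhysReg() classifies a copy by register-class membership before choosing an expansion; the 128-bit test above pairs each physical register against both the normal and the "vertical" R600 class. Extracted as a predicate, directly from lines 47-50:

    // Reconstructed from the hits; contains() tests class membership.
    static bool isVec128Copy(unsigned DestReg, unsigned SrcReg) {
      return (AMDGPU::R600_Reg128RegClass.contains(DestReg) ||
              AMDGPU::R600_Reg128VerticalRegClass.contains(DestReg)) &&
             (AMDGPU::R600_Reg128RegClass.contains(SrcReg) ||
              AMDGPU::R600_Reg128VerticalRegClass.contains(SrcReg));
    }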
|
D | SIMachineFunctionInfo.cpp |
      34  TIDReg(AMDGPU::NoRegister),  in SIMachineFunctionInfo()
      35  ScratchRSrcReg(AMDGPU::NoRegister),  in SIMachineFunctionInfo()
      36  ScratchWaveOffsetReg(AMDGPU::NoRegister),  in SIMachineFunctionInfo()
      37  PrivateSegmentBufferUserSGPR(AMDGPU::NoRegister),  in SIMachineFunctionInfo()
      38  DispatchPtrUserSGPR(AMDGPU::NoRegister),  in SIMachineFunctionInfo()
      39  QueuePtrUserSGPR(AMDGPU::NoRegister),  in SIMachineFunctionInfo()
      40  KernargSegmentPtrUserSGPR(AMDGPU::NoRegister),  in SIMachineFunctionInfo()
      41  DispatchIDUserSGPR(AMDGPU::NoRegister),  in SIMachineFunctionInfo()
      42  FlatScratchInitUserSGPR(AMDGPU::NoRegister),  in SIMachineFunctionInfo()
      43  PrivateSegmentSizeUserSGPR(AMDGPU::NoRegister),  in SIMachineFunctionInfo()
      [all …]
|
D | AMDGPURegisterInfo.cpp |
      28  static const MCPhysReg CalleeSavedReg = AMDGPU::NoRegister;
      36  return AMDGPU::NoRegister;  in getFrameRegister()
      41  AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, AMDGPU::sub4,  in getSubRegFromChannel()
      42  AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, AMDGPU::sub8, AMDGPU::sub9,  in getSubRegFromChannel()
      43  AMDGPU::sub10, AMDGPU::sub11, AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14,  in getSubRegFromChannel()
      44  AMDGPU::sub15  in getSubRegFromChannel()
|
D | SILowerControlFlow.cpp |
     177  if (I->getOpcode() == AMDGPU::S_CBRANCH_VCCNZ ||  in shouldSkip()
     178      I->getOpcode() == AMDGPU::S_CBRANCH_VCCZ)  in shouldSkip()
     207  BuildMI(*From.getParent(), &From, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))  in Skip()
     225  BuildMI(&MBB, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))  in skipIfDead()
     231  BuildMI(*SkipBB, Insert, DL, TII->get(AMDGPU::EXP))  in skipIfDead()
     237  .addReg(AMDGPU::VGPR0, RegState::Undef)  in skipIfDead()
     238  .addReg(AMDGPU::VGPR0, RegState::Undef)  in skipIfDead()
     239  .addReg(AMDGPU::VGPR0, RegState::Undef)  in skipIfDead()
     240  .addReg(AMDGPU::VGPR0, RegState::Undef);  in skipIfDead()
     243  BuildMI(*SkipBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));  in skipIfDead()
      [all …]
|
D | R600RegisterInfo.cpp |
      34  Reserved.set(AMDGPU::ZERO);  in getReservedRegs()
      35  Reserved.set(AMDGPU::HALF);  in getReservedRegs()
      36  Reserved.set(AMDGPU::ONE);  in getReservedRegs()
      37  Reserved.set(AMDGPU::ONE_INT);  in getReservedRegs()
      38  Reserved.set(AMDGPU::NEG_HALF);  in getReservedRegs()
      39  Reserved.set(AMDGPU::NEG_ONE);  in getReservedRegs()
      40  Reserved.set(AMDGPU::PV_X);  in getReservedRegs()
      41  Reserved.set(AMDGPU::ALU_LITERAL_X);  in getReservedRegs()
      42  Reserved.set(AMDGPU::ALU_CONST);  in getReservedRegs()
      43  Reserved.set(AMDGPU::PREDICATE_BIT);  in getReservedRegs()
      [all …]
|
D | R600ExpandSpecialInstrs.cpp |
      83  int DstIdx = TII->getOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst);  in runOnMachineFunction()
      87      DstOp.getReg(), AMDGPU::OQAP);  in runOnMachineFunction()
      88  DstOp.setReg(AMDGPU::OQAP);  in runOnMachineFunction()
      90      AMDGPU::OpName::pred_sel);  in runOnMachineFunction()
      92      AMDGPU::OpName::pred_sel);  in runOnMachineFunction()
     101  case AMDGPU::PRED_X: {  in runOnMachineFunction()
     109      AMDGPU::ZERO); // src1  in runOnMachineFunction()
     112  TII->setImmOperand(*PredSet, AMDGPU::OpName::update_exec_mask, 1);  in runOnMachineFunction()
     114  TII->setImmOperand(*PredSet, AMDGPU::OpName::update_pred, 1);  in runOnMachineFunction()
     120  case AMDGPU::INTERP_PAIR_XY: {  in runOnMachineFunction()
      [all …]
|
D | SIShrinkInstructions.cpp |
      84  const MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);  in canShrink()
      95  case AMDGPU::V_MAC_F32_e64:  in canShrink()
      97      TII->hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers))  in canShrink()
     101  case AMDGPU::V_CNDMASK_B32_e64:  in canShrink()
     106  const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);  in canShrink()
     108      TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);  in canShrink()
     115  if (TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers))  in canShrink()
     119  if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))  in canShrink()
     122  return !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp);  in canShrink()
     138  int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);  in foldImmediates()
      [all …]
|
D | R600ControlFlowFinalizer.cpp |
      79  if (Opcode == AMDGPU::CF_ALU_PUSH_BEFORE && ST->hasCaymanISA() &&  in requiresWorkAroundForInst()
      88  case AMDGPU::CF_ALU_PUSH_BEFORE:  in requiresWorkAroundForInst()
      89  case AMDGPU::CF_ALU_ELSE_AFTER:  in requiresWorkAroundForInst()
      90  case AMDGPU::CF_ALU_BREAK:  in requiresWorkAroundForInst()
      91  case AMDGPU::CF_ALU_CONTINUE:  in requiresWorkAroundForInst()
     153  case AMDGPU::CF_PUSH_EG:  in pushBranch()
     154  case AMDGPU::CF_ALU_PUSH_BEFORE:  in pushBranch()
     227  case AMDGPU::KILL:  in IsTrivialInst()
     228  case AMDGPU::RETURN:  in IsTrivialInst()
     240  Opcode = isEg ? AMDGPU::CF_TC_EG : AMDGPU::CF_TC_R600;  in getHWInstrDesc()
      [all …]
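Note: two small patterns are visible here: IsTrivialInst() whitelists opcodes that never affect clause formation, and getHWInstrDesc() picks between the EG and R600 encodings of the same control-flow clause (line 240). A sketch of the first, reconstructed from lines 227-228; the file may list more trivial opcodes than the hits show:

    static bool isTrivialInstSketch(const MachineInstr &MI) {
      switch (MI.getOpcode()) {
      case AMDGPU::KILL:
      case AMDGPU::RETURN:
        return true; // ignored when sizing and merging CF clauses
      default:
        return false;
      }
    }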
|
D | SIFrameLowering.cpp |
      31  return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(),  in getAllSGPR128()
      32      AMDGPU::SGPR_128RegClass.getNumRegs());  in getAllSGPR128()
      36  return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(),  in getAllSGPRs()
      37      AMDGPU::SGPR_32RegClass.getNumRegs());  in getAllSGPRs()
      70  assert(ScratchRsrcReg != AMDGPU::NoRegister);  in emitPrologue()
      73  assert(ScratchWaveOffsetReg != AMDGPU::NoRegister);  in emitPrologue()
      78  unsigned PreloadedPrivateBufferReg = AMDGPU::NoRegister;  in emitPrologue()
     106  unsigned FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1);  in emitPrologue()
     107  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::FLAT_SCR_LO)  in emitPrologue()
     110  unsigned FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0);  in emitPrologue()
      [all …]
|
D | SILoadStoreOptimizer.cpp |
     171  int AddrIdx = AMDGPU::getNamedOperandIdx(I->getOpcode(), AMDGPU::OpName::addr);  in findMatchingDSInst()
     179  int OffsetIdx = AMDGPU::getNamedOperandIdx(I->getOpcode(),  in findMatchingDSInst()
     180      AMDGPU::OpName::offset);  in findMatchingDSInst()
     200  const MachineOperand *AddrReg = TII->getNamedOperand(*I, AMDGPU::OpName::addr);  in mergeRead2Pair()
     202  const MachineOperand *Dest0 = TII->getNamedOperand(*I, AMDGPU::OpName::vdst);  in mergeRead2Pair()
     203  const MachineOperand *Dest1 = TII->getNamedOperand(*Paired, AMDGPU::OpName::vdst);  in mergeRead2Pair()
     206      = TII->getNamedOperand(*I, AMDGPU::OpName::offset)->getImm() & 0xffff;  in mergeRead2Pair()
     208      = TII->getNamedOperand(*Paired, AMDGPU::OpName::offset)->getImm() & 0xffff;  in mergeRead2Pair()
     212  unsigned Opc = (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;  in mergeRead2Pair()
     220  Opc = (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;  in mergeRead2Pair()
      [all …]
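Note: mergeRead2Pair() chooses the paired opcode from the element size, switching to the ST64 variants when the two offsets sit a multiple of 64 elements apart. Reconstructed from lines 212 and 220; the UseST64 flag name is inferred from context and should be treated as an assumption:

    // Sketch: opcode selection for the merged DS read.
    unsigned Opc = (EltSize == 4) ? AMDGPU::DS_READ2_B32
                                  : AMDGPU::DS_READ2_B64;
    if (UseST64) // offsets differ by a multiple of 64 elements (assumed flag)
      Opc = (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32
                           : AMDGPU::DS_READ2ST64_B64;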
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/AMDGPU/Disassembler/ |
D | AMDGPUDisassembler.cpp |
      67  int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx);  in insertNamedMCOperand()
     180  if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding])  in getInstruction()
     204  if (STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem]) {  in getInstruction()
     213  if (STI.getFeatureBits()[AMDGPU::FeatureFmaMixInsts]) {  in getInstruction()
     246  if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||  in getInstruction()
     247      MI.getOpcode() == AMDGPU::V_MAC_F32_e64_si ||  in getInstruction()
     248      MI.getOpcode() == AMDGPU::V_MAC_F16_e64_vi ||  in getInstruction()
     249      MI.getOpcode() == AMDGPU::V_FMAC_F32_e64_vi)) {  in getInstruction()
     252      AMDGPU::OpName::src2_modifiers);  in getInstruction()
     270  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9]) {  in convertSDWAInst()
      [all …]
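Note: the disassembler gates decode attempts and post-decode fixups on subtarget feature bits rather than on a target triple, and lines 246-252 show one such fixup: the V_MAC/V_FMAC e64 encodings omit a src2_modifiers operand, so a dummy one is inserted by name after a successful decode. A hedged reconstruction; the exact helper arguments beyond what the hits show are assumptions:

    // Sketch of the V_MAC/V_FMAC fixup in getInstruction().
    if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
                MI.getOpcode() == AMDGPU::V_MAC_F32_e64_si ||
                MI.getOpcode() == AMDGPU::V_MAC_F16_e64_vi ||
                MI.getOpcode() == AMDGPU::V_FMAC_F32_e64_vi)) {
      // Insert a dummy, unused src2_modifiers operand (argument shape assumed).
      insertNamedMCOperand(MI, MCOperand::createImm(0),
                           AMDGPU::OpName::src2_modifiers);
    }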
|