/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/ |
D | AMDGPURegisterInfo.cpp |
     31  { AMDGPU::sub0,  AMDGPU::sub1,  AMDGPU::sub2,  AMDGPU::sub3,
     32    AMDGPU::sub4,  AMDGPU::sub5,  AMDGPU::sub6,  AMDGPU::sub7,
     33    AMDGPU::sub8,  AMDGPU::sub9,  AMDGPU::sub10, AMDGPU::sub11,
     34    AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
     35    AMDGPU::sub16, AMDGPU::sub17, AMDGPU::sub18, AMDGPU::sub19,
     36    AMDGPU::sub20, AMDGPU::sub21, AMDGPU::sub22, AMDGPU::sub23,
     37    AMDGPU::sub24, AMDGPU::sub25, AMDGPU::sub26, AMDGPU::sub27,
     38    AMDGPU::sub28, AMDGPU::sub29, AMDGPU::sub30, AMDGPU::sub31
     41    AMDGPU::sub0_sub1, AMDGPU::sub1_sub2, AMDGPU::sub2_sub3, AMDGPU::sub3_sub4,
     42    AMDGPU::sub4_sub5, AMDGPU::sub5_sub6, AMDGPU::sub6_sub7, AMDGPU::sub7_sub8,
     [all …]
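The match above is a static channel-to-sub-register table: entry N of the first block is the TableGen-generated index for 32-bit channel N of a wide register tuple, and the sub0_sub1-style entries cover adjacent pairs. A minimal sketch of how such a table is typically consulted; the signature and bounds check are assumptions, and the code only compiles inside the LLVM tree against the generated AMDGPU enums:

    // Hypothetical mirror of the table: entry N is the sub-register index
    // that selects 32-bit channel N of a wide register tuple.
    static const unsigned SubRegFromChannelTable[] = {
        AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
        // ... continues through AMDGPU::sub31 in the real table ...
    };

    static unsigned getSubRegFromChannel(unsigned Channel) {
      assert(Channel < llvm::array_lengthof(SubRegFromChannelTable) &&
             "channel index out of range");
      return SubRegFromChannelTable[Channel];
    }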
|
D | SIRegisterInfo.cpp |
     72  classifyPressureSet(i, AMDGPU::SGPR0, SGPRPressureSets);  in SIRegisterInfo()
     73  classifyPressureSet(i, AMDGPU::VGPR0, VGPRPressureSets);  in SIRegisterInfo()
     74  classifyPressureSet(i, AMDGPU::AGPR0, AGPRPressureSets);  in SIRegisterInfo()
    112  unsigned BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx));  in reservedPrivateSegmentBufferReg()
    113  return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SGPR_128RegClass);  in reservedPrivateSegmentBufferReg()
    136  return AMDGPU::SGPR_32RegClass.getRegister(Reg);  in reservedPrivateSegmentWaveByteOffsetReg()
    144  reserveRegisterTuples(Reserved, AMDGPU::EXEC);  in getReservedRegs()
    145  reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR);  in getReservedRegs()
    148  reserveRegisterTuples(Reserved, AMDGPU::M0);  in getReservedRegs()
    151  reserveRegisterTuples(Reserved, AMDGPU::SRC_VCCZ);  in getReservedRegs()
    [all …]
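The getReservedRegs() lines reserve special registers (the exec mask, the flat-scratch base, M0, status bits) so the allocator never assigns them. reserveRegisterTuples() itself is not shown; a minimal sketch of the usual reservation pattern, assuming LLVM's MCRegAliasIterator to cover every overlapping sub- and super-register:

    // Mark EXEC and everything that aliases it as unallocatable.
    BitVector Reserved(getNumRegs());
    for (MCRegAliasIterator R(AMDGPU::EXEC, this, /*IncludeSelf=*/true);
         R.isValid(); ++R)
      Reserved.set(*R);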
|
D | SIOptimizeExecMasking.cpp |
     62  case AMDGPU::COPY:  in isCopyFromExec()
     63  case AMDGPU::S_MOV_B64:  in isCopyFromExec()
     64  case AMDGPU::S_MOV_B64_term:  in isCopyFromExec()
     65  case AMDGPU::S_MOV_B32:  in isCopyFromExec()
     66  case AMDGPU::S_MOV_B32_term: {  in isCopyFromExec()
     69  Src.getReg() == (ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC))  in isCopyFromExec()
     74  return AMDGPU::NoRegister;  in isCopyFromExec()
     80  case AMDGPU::COPY:  in isCopyToExec()
     81  case AMDGPU::S_MOV_B64:  in isCopyToExec()
     82  case AMDGPU::S_MOV_B32: {  in isCopyToExec()
     [all …]
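These matches outline the exec-copy recognizers used when rewriting saves and restores of the exec mask. A sketch of isCopyFromExec() reconstructed from the visible lines; the exact signature and the operand access between lines 66 and 69 are assumptions:

    // Return the register that receives a copy of exec, or NoRegister.
    static unsigned isCopyFromExec(const MachineInstr &MI, const GCNSubtarget &ST) {
      switch (MI.getOpcode()) {
      case AMDGPU::COPY:
      case AMDGPU::S_MOV_B64:
      case AMDGPU::S_MOV_B64_term:
      case AMDGPU::S_MOV_B32:
      case AMDGPU::S_MOV_B32_term: {
        const MachineOperand &Src = MI.getOperand(1);
        if (Src.isReg() &&
            Src.getReg() == (ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC))
          return MI.getOperand(0).getReg();
        break;
      }
      }
      return AMDGPU::NoRegister;
    }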
|
D | AMDGPURegisterBankInfo.cpp |
     62  if (Opc == AMDGPU::G_ANYEXT || Opc == AMDGPU::G_ZEXT ||  in applyBank()
     63      Opc == AMDGPU::G_SEXT) {  in applyBank()
     70  if (SrcBank == &AMDGPU::VCCRegBank) {  in applyBank()
     74  assert(NewBank == &AMDGPU::VGPRRegBank);  in applyBank()
     79  auto True = B.buildConstant(S32, Opc == AMDGPU::G_SEXT ? -1 : 1);  in applyBank()
     93  if (Opc == AMDGPU::G_TRUNC) {  in applyBank()
     96  assert(DstBank != &AMDGPU::VCCRegBank);  in applyBank()
    110  assert(NewBank == &AMDGPU::VGPRRegBank &&  in applyBank()
    112  assert((MI.getOpcode() != AMDGPU::G_TRUNC &&  in applyBank()
    113       MI.getOpcode() != AMDGPU::G_ANYEXT) &&  in applyBank()
    [all …]
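Line 79 is the core of the VCC-to-VGPR extension in applyBank(): a 1-bit condition becomes a 32-bit value by selecting between an extended true constant (-1 for G_SEXT, 1 for G_ZEXT/G_ANYEXT) and zero. A sketch using GlobalISel's MachineIRBuilder; the register names and the IsSext flag are illustrative:

    LLT S32 = LLT::scalar(32);
    auto True  = B.buildConstant(S32, IsSext ? -1 : 1); // all-ones when sign-extending
    auto False = B.buildConstant(S32, 0);
    B.buildSelect(DstReg, CondReg, True, False);        // VCC condition -> VGPR value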
|
D | SIInstrInfo.cpp |
     70  namespace AMDGPU {  namespace
     87  : AMDGPUGenInstrInfo(AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN),  in SIInstrInfo()
    109  int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);  in nodesHaveSameOperandValue()
    110  int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);  in nodesHaveSameOperandValue()
    136  case AMDGPU::V_MOV_B32_e32:  in isReallyTriviallyReMaterializable()
    137  case AMDGPU::V_MOV_B32_e64:  in isReallyTriviallyReMaterializable()
    138  case AMDGPU::V_MOV_B64_PSEUDO:  in isReallyTriviallyReMaterializable()
    172  int Offset0Idx = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);  in areLoadsFromSameBasePtr()
    173  int Offset1Idx = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);  in areLoadsFromSameBasePtr()
    190  if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::sbase) == -1 ||  in areLoadsFromSameBasePtr()
    [all …]
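Both nodesHaveSameOperandValue() and areLoadsFromSameBasePtr() lean on the generated helper getNamedOperandIdx(), which maps an opcode plus a symbolic operand name to an operand index and returns -1 when the opcode lacks that operand. A minimal usage sketch; the MI variable and the early-out are illustrative:

    // Look up the 'offset' operand by name rather than by fixed position.
    int OffIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::offset);
    if (OffIdx == -1)
      return false;                 // this opcode has no 'offset' operand
    int64_t Offset = MI.getOperand(OffIdx).getImm();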
|
D | SIPeepholeSDWA.cpp |
    138  using namespace AMDGPU::SDWA;
    334  if (TII->getNamedOperand(*MI, AMDGPU::OpName::src0) == SrcOp) {  in getSrcMods()
    335    if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src0_modifiers)) {  in getSrcMods()
    338  } else if (TII->getNamedOperand(*MI, AMDGPU::OpName::src1) == SrcOp) {  in getSrcMods()
    339    if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src1_modifiers)) {  in getSrcMods()
    369  MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::src0);  in convertToSDWA()
    370  MachineOperand *SrcSel = TII->getNamedOperand(MI, AMDGPU::OpName::src0_sel);  in convertToSDWA()
    372  TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers);  in convertToSDWA()
    376  Src = TII->getNamedOperand(MI, AMDGPU::OpName::src1);  in convertToSDWA()
    377  SrcSel = TII->getNamedOperand(MI, AMDGPU::OpName::src1_sel);  in convertToSDWA()
    [all …]
|
D | GCNDPPCombine.cpp |
    125  auto DPP32 = AMDGPU::getDPPOp32(Op);  in getDPPOp()
    127  auto E32 = AMDGPU::getVOPe32(Op);  in getDPPOp()
    128  DPP32 = (E32 == -1)? -1 : AMDGPU::getDPPOp32(E32);  in getDPPOp()
    144  case AMDGPU::IMPLICIT_DEF:  in getOldOpndValue()
    146  case AMDGPU::COPY:  in getOldOpndValue()
    147  case AMDGPU::V_MOV_B32_e32: {  in getOldOpndValue()
    161  assert(MovMI.getOpcode() == AMDGPU::V_MOV_B32_dpp);  in createDPPInst()
    174  auto *Dst = TII->getNamedOperand(OrigMI, AMDGPU::OpName::vdst);  in createDPPInst()
    179  const int OldIdx = AMDGPU::getNamedOperandIdx(DPPOp, AMDGPU::OpName::old);  in createDPPInst()
    182  assert(isOfRegClass(CombOldVGPR, AMDGPU::VGPR_32RegClass, *MRI));  in createDPPInst()
    [all …]
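Lines 125-128 are a two-step opcode mapping: try the direct VOP-to-DPP table, and if there is no entry, shrink to the 32-bit (e32) encoding and retry. Reassembled into a sketch; the guard between lines 125 and 127 is elided by the search tool and assumed here:

    static int getDPPOp(unsigned Op) {
      auto DPP32 = AMDGPU::getDPPOp32(Op);    // direct VOP -> DPP mapping
      if (DPP32 == -1) {                      // assumed guard (line elided above)
        auto E32 = AMDGPU::getVOPe32(Op);     // fall back through the e32 form
        DPP32 = (E32 == -1)? -1 : AMDGPU::getDPPOp32(E32);
      }
      return DPP32;                           // -1 when no DPP variant exists
    }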
|
D | SIFoldOperands.cpp |
    143  case AMDGPU::V_MAC_F32_e64:  in isInlineConstantIfFolded()
    144  case AMDGPU::V_MAC_F16_e64:  in isInlineConstantIfFolded()
    145  case AMDGPU::V_FMAC_F32_e64:  in isInlineConstantIfFolded()
    146  case AMDGPU::V_FMAC_F16_e64: {  in isInlineConstantIfFolded()
    149  int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);  in isInlineConstantIfFolded()
    151  bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64 ||  in isInlineConstantIfFolded()
    152              Opc == AMDGPU::V_FMAC_F16_e64;  in isInlineConstantIfFolded()
    153  bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64 ||  in isInlineConstantIfFolded()
    154              Opc == AMDGPU::V_FMAC_F32_e64;  in isInlineConstantIfFolded()
    157  (IsF32 ? AMDGPU::V_FMA_F32 : AMDGPU::V_FMA_F16_gfx9) :  in isInlineConstantIfFolded()
    [all …]
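The switch classifies MAC/FMAC opcodes so the inline-constant legality check can be run against the opcode the instruction would become once src2 is folded. A sketch of the mapping; the arm after the trailing ':' on line 157 is truncated in the snippet, so the MAD spelling below is an assumption:

    bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64 || Opc == AMDGPU::V_FMAC_F16_e64;
    bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64  || Opc == AMDGPU::V_FMAC_F32_e64;
    unsigned NewOpc =
        IsFMA ? (IsF32 ? AMDGPU::V_FMA_F32 : AMDGPU::V_FMA_F16_gfx9)
              : (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16); // MAD arm assumed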
|
/external/llvm-project/llvm/lib/Target/AMDGPU/ |
D | SIRegisterInfo.cpp |
     53  : AMDGPUGenRegisterInfo(AMDGPU::PC_REG, ST.getAMDGPUDwarfFlavour()), ST(ST),  in SIRegisterInfo()
     56  assert(getSubRegIndexLaneMask(AMDGPU::sub0).getAsInteger() == 3 &&  in SIRegisterInfo()
     57         getSubRegIndexLaneMask(AMDGPU::sub31).getAsInteger() == (3ULL << 62) &&  in SIRegisterInfo()
     58         (getSubRegIndexLaneMask(AMDGPU::lo16) |  in SIRegisterInfo()
     59          getSubRegIndexLaneMask(AMDGPU::hi16)).getAsInteger() ==  in SIRegisterInfo()
     60         getSubRegIndexLaneMask(AMDGPU::sub0).getAsInteger() &&  in SIRegisterInfo()
     65  *MCRegUnitIterator(MCRegister::from(AMDGPU::M0), this));  in SIRegisterInfo()
     66  for (auto Reg : AMDGPU::VGPR_HI16RegClass)  in SIRegisterInfo()
     94  Row.fill(AMDGPU::NoSubRegister);  in SIRegisterInfo()
    134  static const MCPhysReg NoCalleeSavedReg = AMDGPU::NoRegister;  in getCalleeSavedRegs()
    [all …]
|
D | SIOptimizeExecMasking.cpp |
     62  case AMDGPU::COPY:  in isCopyFromExec()
     63  case AMDGPU::S_MOV_B64:  in isCopyFromExec()
     64  case AMDGPU::S_MOV_B64_term:  in isCopyFromExec()
     65  case AMDGPU::S_MOV_B32:  in isCopyFromExec()
     66  case AMDGPU::S_MOV_B32_term: {  in isCopyFromExec()
     69  Src.getReg() == (ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC))  in isCopyFromExec()
     74  return AMDGPU::NoRegister;  in isCopyFromExec()
     80  case AMDGPU::COPY:  in isCopyToExec()
     81  case AMDGPU::S_MOV_B64:  in isCopyToExec()
     82  case AMDGPU::S_MOV_B32: {  in isCopyToExec()
     [all …]
|
D | AMDGPURegisterBankInfo.cpp |
    121  if (Opc == AMDGPU::G_ANYEXT || Opc == AMDGPU::G_ZEXT ||  in applyBank()
    122      Opc == AMDGPU::G_SEXT) {  in applyBank()
    129  if (SrcBank == &AMDGPU::VCCRegBank) {  in applyBank()
    133  assert(NewBank == &AMDGPU::VGPRRegBank);  in applyBank()
    138  auto True = B.buildConstant(S32, Opc == AMDGPU::G_SEXT ? -1 : 1);  in applyBank()
    152  if (Opc == AMDGPU::G_TRUNC) {  in applyBank()
    155  assert(DstBank != &AMDGPU::VCCRegBank);  in applyBank()
    170  assert(NewBank == &AMDGPU::VGPRRegBank &&  in applyBank()
    172  assert((MI.getOpcode() != AMDGPU::G_TRUNC &&  in applyBank()
    173       MI.getOpcode() != AMDGPU::G_ANYEXT) &&  in applyBank()
    [all …]
|
D | SIInstrInfo.cpp |
     73  namespace AMDGPU {  namespace
     96  : AMDGPUGenInstrInfo(AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN),  in SIInstrInfo()
    118  int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);  in nodesHaveSameOperandValue()
    119  int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);  in nodesHaveSameOperandValue()
    145  case AMDGPU::V_MOV_B32_e32:  in isReallyTriviallyReMaterializable()
    146  case AMDGPU::V_MOV_B32_e64:  in isReallyTriviallyReMaterializable()
    147  case AMDGPU::V_MOV_B64_PSEUDO:  in isReallyTriviallyReMaterializable()
    148  case AMDGPU::V_ACCVGPR_READ_B32:  in isReallyTriviallyReMaterializable()
    149  case AMDGPU::V_ACCVGPR_WRITE_B32:  in isReallyTriviallyReMaterializable()
    183  int Offset0Idx = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);  in areLoadsFromSameBasePtr()
    [all …]
|
D | SIPeepholeSDWA.cpp |
    138  using namespace AMDGPU::SDWA;
    334  if (TII->getNamedOperand(*MI, AMDGPU::OpName::src0) == SrcOp) {  in getSrcMods()
    335    if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src0_modifiers)) {  in getSrcMods()
    338  } else if (TII->getNamedOperand(*MI, AMDGPU::OpName::src1) == SrcOp) {  in getSrcMods()
    339    if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src1_modifiers)) {  in getSrcMods()
    369  MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::src0);  in convertToSDWA()
    370  MachineOperand *SrcSel = TII->getNamedOperand(MI, AMDGPU::OpName::src0_sel);  in convertToSDWA()
    372  TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers);  in convertToSDWA()
    376  Src = TII->getNamedOperand(MI, AMDGPU::OpName::src1);  in convertToSDWA()
    377  SrcSel = TII->getNamedOperand(MI, AMDGPU::OpName::src1_sel);  in convertToSDWA()
    [all …]
|
D | SIFoldOperands.cpp |
    143  case AMDGPU::V_MAC_F32_e64:  in isInlineConstantIfFolded()
    144  case AMDGPU::V_MAC_F16_e64:  in isInlineConstantIfFolded()
    145  case AMDGPU::V_FMAC_F32_e64:  in isInlineConstantIfFolded()
    146  case AMDGPU::V_FMAC_F16_e64: {  in isInlineConstantIfFolded()
    149  int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);  in isInlineConstantIfFolded()
    151  bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64 ||  in isInlineConstantIfFolded()
    152              Opc == AMDGPU::V_FMAC_F16_e64;  in isInlineConstantIfFolded()
    153  bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64 ||  in isInlineConstantIfFolded()
    154              Opc == AMDGPU::V_FMAC_F32_e64;  in isInlineConstantIfFolded()
    157  (IsF32 ? AMDGPU::V_FMA_F32 : AMDGPU::V_FMA_F16_gfx9) :  in isInlineConstantIfFolded()
    [all …]
|
D | GCNDPPCombine.cpp |
    130  auto DPP32 = AMDGPU::getDPPOp32(Op);  in getDPPOp()
    132  auto E32 = AMDGPU::getVOPe32(Op);  in getDPPOp()
    133  DPP32 = (E32 == -1)? -1 : AMDGPU::getDPPOp32(E32);  in getDPPOp()
    149  case AMDGPU::IMPLICIT_DEF:  in getOldOpndValue()
    151  case AMDGPU::COPY:  in getOldOpndValue()
    152  case AMDGPU::V_MOV_B32_e32: {  in getOldOpndValue()
    166  assert(MovMI.getOpcode() == AMDGPU::V_MOV_B32_dpp);  in createDPPInst()
    181  auto *Dst = TII->getNamedOperand(OrigMI, AMDGPU::OpName::vdst);  in createDPPInst()
    186  const int OldIdx = AMDGPU::getNamedOperandIdx(DPPOp, AMDGPU::OpName::old);  in createDPPInst()
    189  assert(isOfRegClass(CombOldVGPR, AMDGPU::VGPR_32RegClass, *MRI));  in createDPPInst()
    [all …]
|
D | SILoadStoreOptimizer.cpp |
    311  return AMDGPU::getMUBUFElements(Opc);  in getOpcodeWidth()
    315  TII.getNamedOperand(MI, AMDGPU::OpName::dmask)->getImm();  in getOpcodeWidth()
    319  return AMDGPU::getMTBUFElements(Opc);  in getOpcodeWidth()
    323  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:  in getOpcodeWidth()
    325  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:  in getOpcodeWidth()
    327  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:  in getOpcodeWidth()
    339  switch (AMDGPU::getMUBUFBaseOpcode(Opc)) {  in getInstClass()
    342  case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:  in getInstClass()
    343  case AMDGPU::BUFFER_LOAD_DWORD_OFFEN_exact:  in getInstClass()
    344  case AMDGPU::BUFFER_LOAD_DWORD_OFFSET:  in getInstClass()
    [all …]
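getOpcodeWidth() reports how many dwords an access covers: MUBUF/MTBUF widths come from generated tables, image instructions from the dmask operand, and scalar buffer loads from the opcode name itself. A sketch of the scalar-load switch; the return values sit on truncated lines (324/326/328) and are assumptions here:

    switch (Opc) {
    case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
      return 1;                        // one dword
    case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
      return 2;                        // two dwords
    case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
      return 4;                        // four dwords
    default:
      return 0;                        // not a scalar buffer load we can merge
    }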
|
/external/llvm/lib/Target/AMDGPU/ |
D | SIInstrInfo.cpp |
     57  int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);  in nodesHaveSameOperandValue()
     58  int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);  in nodesHaveSameOperandValue()
     84  case AMDGPU::V_MOV_B32_e32:  in isReallyTriviallyReMaterializable()
     85  case AMDGPU::V_MOV_B32_e64:  in isReallyTriviallyReMaterializable()
     86  case AMDGPU::V_MOV_B64_PSEUDO:  in isReallyTriviallyReMaterializable()
    123  if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::data1) != -1 ||  in areLoadsFromSameBasePtr()
    124      AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::data1) != -1)  in areLoadsFromSameBasePtr()
    160  if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||  in areLoadsFromSameBasePtr()
    162      !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||  in areLoadsFromSameBasePtr()
    163      !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))  in areLoadsFromSameBasePtr()
    [all …]
|
D | SIRegisterInfo.cpp |
    106  classifyPressureSet(i, AMDGPU::SGPR0, SGPRPressureSets);  in SIRegisterInfo()
    107  classifyPressureSet(i, AMDGPU::VGPR0, VGPRPressureSets);  in SIRegisterInfo()
    123  unsigned BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx));  in reservedPrivateSegmentBufferReg()
    124  return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SReg_128RegClass);  in reservedPrivateSegmentBufferReg()
    142  return AMDGPU::SGPR_32RegClass.getRegister(Reg);  in reservedPrivateSegmentWaveByteOffsetReg()
    147  Reserved.set(AMDGPU::INDIRECT_BASE_ADDR);  in getReservedRegs()
    151  reserveRegisterTuples(Reserved, AMDGPU::EXEC);  in getReservedRegs()
    152  reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR);  in getReservedRegs()
    155  reserveRegisterTuples(Reserved, AMDGPU::TBA);  in getReservedRegs()
    156  reserveRegisterTuples(Reserved, AMDGPU::TMA);  in getReservedRegs()
    [all …]
|
D | R600InstrInfo.cpp |
     47  if ((AMDGPU::R600_Reg128RegClass.contains(DestReg) ||  in copyPhysReg()
     48       AMDGPU::R600_Reg128VerticalRegClass.contains(DestReg)) &&  in copyPhysReg()
     49      (AMDGPU::R600_Reg128RegClass.contains(SrcReg) ||  in copyPhysReg()
     50       AMDGPU::R600_Reg128VerticalRegClass.contains(SrcReg))) {  in copyPhysReg()
     52  } else if((AMDGPU::R600_Reg64RegClass.contains(DestReg) ||  in copyPhysReg()
     53            AMDGPU::R600_Reg64VerticalRegClass.contains(DestReg)) &&  in copyPhysReg()
     54           (AMDGPU::R600_Reg64RegClass.contains(SrcReg) ||  in copyPhysReg()
     55            AMDGPU::R600_Reg64VerticalRegClass.contains(SrcReg))) {  in copyPhysReg()
     62  buildDefaultInstruction(MBB, MI, AMDGPU::MOV,  in copyPhysReg()
     69  MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,  in copyPhysReg()
    [all …]
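copyPhysReg() special-cases the 128-bit and 64-bit R600 register classes: a wide copy is decomposed into one MOV per 32-bit channel. A rough sketch of the per-channel loop under those branches; the loop bounds and the sub-register helper spelling are assumptions, not the exact upstream code:

    // Split a wide copy into channel-by-channel MOVs.
    unsigned NumChannels = 4;   // 128-bit case; 2 in the 64-bit branch
    for (unsigned I = 0; I < NumChannels; ++I) {
      unsigned SubIdx = RI.getSubRegFromChannel(I);      // assumed helper
      buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                              RI.getSubReg(DestReg, SubIdx),
                              RI.getSubReg(SrcReg, SubIdx));
    }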
|
D | AMDGPURegisterInfo.cpp |
     28  static const MCPhysReg CalleeSavedReg = AMDGPU::NoRegister;
     36  return AMDGPU::NoRegister;  in getFrameRegister()
     41  AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, AMDGPU::sub4,  in getSubRegFromChannel()
     42  AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, AMDGPU::sub8, AMDGPU::sub9,  in getSubRegFromChannel()
     43  AMDGPU::sub10, AMDGPU::sub11, AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14,  in getSubRegFromChannel()
     44  AMDGPU::sub15  in getSubRegFromChannel()
|
D | SIMachineFunctionInfo.cpp |
     34  TIDReg(AMDGPU::NoRegister),  in SIMachineFunctionInfo()
     35  ScratchRSrcReg(AMDGPU::NoRegister),  in SIMachineFunctionInfo()
     36  ScratchWaveOffsetReg(AMDGPU::NoRegister),  in SIMachineFunctionInfo()
     37  PrivateSegmentBufferUserSGPR(AMDGPU::NoRegister),  in SIMachineFunctionInfo()
     38  DispatchPtrUserSGPR(AMDGPU::NoRegister),  in SIMachineFunctionInfo()
     39  QueuePtrUserSGPR(AMDGPU::NoRegister),  in SIMachineFunctionInfo()
     40  KernargSegmentPtrUserSGPR(AMDGPU::NoRegister),  in SIMachineFunctionInfo()
     41  DispatchIDUserSGPR(AMDGPU::NoRegister),  in SIMachineFunctionInfo()
     42  FlatScratchInitUserSGPR(AMDGPU::NoRegister),  in SIMachineFunctionInfo()
     43  PrivateSegmentSizeUserSGPR(AMDGPU::NoRegister),  in SIMachineFunctionInfo()
     [all …]
|
D | SILowerControlFlow.cpp |
    177  if (I->getOpcode() == AMDGPU::S_CBRANCH_VCCNZ ||  in shouldSkip()
    178      I->getOpcode() == AMDGPU::S_CBRANCH_VCCZ)  in shouldSkip()
    207  BuildMI(*From.getParent(), &From, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))  in Skip()
    225  BuildMI(&MBB, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))  in skipIfDead()
    231  BuildMI(*SkipBB, Insert, DL, TII->get(AMDGPU::EXP))  in skipIfDead()
    237  .addReg(AMDGPU::VGPR0, RegState::Undef)  in skipIfDead()
    238  .addReg(AMDGPU::VGPR0, RegState::Undef)  in skipIfDead()
    239  .addReg(AMDGPU::VGPR0, RegState::Undef)  in skipIfDead()
    240  .addReg(AMDGPU::VGPR0, RegState::Undef)  in skipIfDead()
    243  BuildMI(*SkipBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));  in skipIfDead()
    [all …]
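These matches show the standard BuildMI idiom: fetch the instruction description with TII->get() and chain operand adders onto the builder. A sketch of the exec-zero skip branch from line 207; the destination block operand is truncated in the snippet, so the name below is hypothetical:

    // Branch over the region when no lanes are active (exec == 0).
    BuildMI(*From.getParent(), &From, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
        .addMBB(SkipDestBB);   // hypothetical branch target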
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/Disassembler/ |
D | AMDGPUDisassembler.cpp |
     55  #define SGPR_MAX (isGFX10() ? AMDGPU::EncValues::SGPR_MAX_GFX10 \
     56                              : AMDGPU::EncValues::SGPR_MAX_SI)
     67  if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding] && !isGFX10())  in AMDGPUDisassembler()
     81  int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx);  in insertNamedMCOperand()
    259  using namespace llvm::AMDGPU::DPP;  in isValidDPP8()
    260  int FiIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::fi);  in isValidDPP8()
    306  if (STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem]) {  in getInstruction()
    315  if (STI.getFeatureBits()[AMDGPU::FeatureFmaMixInsts]) {  in getInstruction()
    361  if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||  in getInstruction()
    362              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 ||  in getInstruction()
    [all …]
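The getInstruction() matches gate decoding on subtarget features, read straight from the generated feature bitset. A minimal sketch using only names visible above; the comment states the intent inferred from the feature name:

    const FeatureBitset &Bits = STI.getFeatureBits();
    if (Bits[AMDGPU::FeatureUnpackedD16VMem]) {
      // Prefer the unpacked D16 memory-instruction decoder table.
    }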
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/AsmParser/ |
D | AMDGPUAsmParser.cpp |
     65  using namespace llvm::AMDGPU;
    251  return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::i16);  in isRegOrImmWithInt16InputMods()
    255  return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::i32);  in isRegOrImmWithInt32InputMods()
    259  return isRegOrImmWithInputMods(AMDGPU::VS_64RegClassID, MVT::i64);  in isRegOrImmWithInt64InputMods()
    263  return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::f16);  in isRegOrImmWithFP16InputMods()
    267  return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::f32);  in isRegOrImmWithFP32InputMods()
    271  return isRegOrImmWithInputMods(AMDGPU::VS_64RegClassID, MVT::f64);  in isRegOrImmWithFP64InputMods()
    275  return isRegClass(AMDGPU::VGPR_32RegClassID) ||  in isVReg()
    276         isRegClass(AMDGPU::VReg_64RegClassID) ||  in isVReg()
    277         isRegClass(AMDGPU::VReg_96RegClassID) ||  in isVReg()
    [all …]
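Each is*InputMods() predicate above is a one-line wrapper that pins a register class ID and value type, deferring to a shared isRegOrImmWithInputMods(RCID, VT) check; the TableGen-generated asm matcher invokes these predicates by name per operand, which is why the thin wrappers exist. A sketch of the pattern, with the member context assumed:

    bool isRegOrImmWithInt32InputMods() const {
      return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::i32);
    }
    bool isRegOrImmWithFP64InputMods() const {
      return isRegOrImmWithInputMods(AMDGPU::VS_64RegClassID, MVT::f64);
    }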
|
/external/llvm-project/llvm/lib/Target/AMDGPU/AsmParser/ |
D | AMDGPUAsmParser.cpp |
     65  using namespace llvm::AMDGPU;
    282  return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::i16);  in isRegOrImmWithInt16InputMods()
    286  return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::i32);  in isRegOrImmWithInt32InputMods()
    290  return isRegOrImmWithInputMods(AMDGPU::VS_64RegClassID, MVT::i64);  in isRegOrImmWithInt64InputMods()
    294  return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::f16);  in isRegOrImmWithFP16InputMods()
    298  return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::f32);  in isRegOrImmWithFP32InputMods()
    302  return isRegOrImmWithInputMods(AMDGPU::VS_64RegClassID, MVT::f64);  in isRegOrImmWithFP64InputMods()
    306  return isRegClass(AMDGPU::VGPR_32RegClassID) ||  in isVReg()
    307         isRegClass(AMDGPU::VReg_64RegClassID) ||  in isVReg()
    308         isRegClass(AMDGPU::VReg_96RegClassID) ||  in isVReg()
    [all …]
|