Searched refs:AMDGPU (Results 1 – 25 of 133) sorted by relevance

/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/
AMDGPURegisterInfo.cpp
31 { AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
32 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
33 AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
34 AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
35 AMDGPU::sub16, AMDGPU::sub17, AMDGPU::sub18, AMDGPU::sub19,
36 AMDGPU::sub20, AMDGPU::sub21, AMDGPU::sub22, AMDGPU::sub23,
37 AMDGPU::sub24, AMDGPU::sub25, AMDGPU::sub26, AMDGPU::sub27,
38 AMDGPU::sub28, AMDGPU::sub29, AMDGPU::sub30, AMDGPU::sub31
41 AMDGPU::sub0_sub1, AMDGPU::sub1_sub2, AMDGPU::sub2_sub3, AMDGPU::sub3_sub4,
42 AMDGPU::sub4_sub5, AMDGPU::sub5_sub6, AMDGPU::sub6_sub7, AMDGPU::sub7_sub8,
[all …]
SIRegisterInfo.cpp
72 classifyPressureSet(i, AMDGPU::SGPR0, SGPRPressureSets); in SIRegisterInfo()
73 classifyPressureSet(i, AMDGPU::VGPR0, VGPRPressureSets); in SIRegisterInfo()
74 classifyPressureSet(i, AMDGPU::AGPR0, AGPRPressureSets); in SIRegisterInfo()
112 unsigned BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx)); in reservedPrivateSegmentBufferReg()
113 return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SGPR_128RegClass); in reservedPrivateSegmentBufferReg()
136 return AMDGPU::SGPR_32RegClass.getRegister(Reg); in reservedPrivateSegmentWaveByteOffsetReg()
144 reserveRegisterTuples(Reserved, AMDGPU::EXEC); in getReservedRegs()
145 reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR); in getReservedRegs()
148 reserveRegisterTuples(Reserved, AMDGPU::M0); in getReservedRegs()
151 reserveRegisterTuples(Reserved, AMDGPU::SRC_VCCZ); in getReservedRegs()
[all …]
SIOptimizeExecMasking.cpp
62 case AMDGPU::COPY: in isCopyFromExec()
63 case AMDGPU::S_MOV_B64: in isCopyFromExec()
64 case AMDGPU::S_MOV_B64_term: in isCopyFromExec()
65 case AMDGPU::S_MOV_B32: in isCopyFromExec()
66 case AMDGPU::S_MOV_B32_term: { in isCopyFromExec()
69 Src.getReg() == (ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC)) in isCopyFromExec()
74 return AMDGPU::NoRegister; in isCopyFromExec()
80 case AMDGPU::COPY: in isCopyToExec()
81 case AMDGPU::S_MOV_B64: in isCopyToExec()
82 case AMDGPU::S_MOV_B32: { in isCopyToExec()
[all …]
AMDGPURegisterBankInfo.cpp
62 if (Opc == AMDGPU::G_ANYEXT || Opc == AMDGPU::G_ZEXT || in applyBank()
63 Opc == AMDGPU::G_SEXT) { in applyBank()
70 if (SrcBank == &AMDGPU::VCCRegBank) { in applyBank()
74 assert(NewBank == &AMDGPU::VGPRRegBank); in applyBank()
79 auto True = B.buildConstant(S32, Opc == AMDGPU::G_SEXT ? -1 : 1); in applyBank()
93 if (Opc == AMDGPU::G_TRUNC) { in applyBank()
96 assert(DstBank != &AMDGPU::VCCRegBank); in applyBank()
110 assert(NewBank == &AMDGPU::VGPRRegBank && in applyBank()
112 assert((MI.getOpcode() != AMDGPU::G_TRUNC && in applyBank()
113 MI.getOpcode() != AMDGPU::G_ANYEXT) && in applyBank()
[all …]
SIInstrInfo.cpp
70 namespace AMDGPU { namespace
87 : AMDGPUGenInstrInfo(AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN), in SIInstrInfo()
109 int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName); in nodesHaveSameOperandValue()
110 int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName); in nodesHaveSameOperandValue()
136 case AMDGPU::V_MOV_B32_e32: in isReallyTriviallyReMaterializable()
137 case AMDGPU::V_MOV_B32_e64: in isReallyTriviallyReMaterializable()
138 case AMDGPU::V_MOV_B64_PSEUDO: in isReallyTriviallyReMaterializable()
172 int Offset0Idx = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset); in areLoadsFromSameBasePtr()
173 int Offset1Idx = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset); in areLoadsFromSameBasePtr()
190 if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::sbase) == -1 || in areLoadsFromSameBasePtr()
[all …]
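
Note: many hits above share the named-operand lookup declared in SIInstrInfo.h. A minimal sketch of that pattern, assuming an AMDGPU backend build context (the helper name readSrc0 is hypothetical, not part of this tree):

// readSrc0 returns the src0 operand of MI, or nullptr when the opcode
// has no src0; AMDGPU::getNamedOperandIdx reports missing operands as -1.
#include "SIInstrInfo.h"

static const llvm::MachineOperand *readSrc0(const llvm::MachineInstr &MI) {
  int Idx = llvm::AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                             llvm::AMDGPU::OpName::src0);
  return Idx == -1 ? nullptr : &MI.getOperand(Idx);
}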
SIPeepholeSDWA.cpp
138 using namespace AMDGPU::SDWA;
334 if (TII->getNamedOperand(*MI, AMDGPU::OpName::src0) == SrcOp) { in getSrcMods()
335 if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src0_modifiers)) { in getSrcMods()
338 } else if (TII->getNamedOperand(*MI, AMDGPU::OpName::src1) == SrcOp) { in getSrcMods()
339 if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src1_modifiers)) { in getSrcMods()
369 MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::src0); in convertToSDWA()
370 MachineOperand *SrcSel = TII->getNamedOperand(MI, AMDGPU::OpName::src0_sel); in convertToSDWA()
372 TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers); in convertToSDWA()
376 Src = TII->getNamedOperand(MI, AMDGPU::OpName::src1); in convertToSDWA()
377 SrcSel = TII->getNamedOperand(MI, AMDGPU::OpName::src1_sel); in convertToSDWA()
[all …]
GCNDPPCombine.cpp
125 auto DPP32 = AMDGPU::getDPPOp32(Op); in getDPPOp()
127 auto E32 = AMDGPU::getVOPe32(Op); in getDPPOp()
128 DPP32 = (E32 == -1)? -1 : AMDGPU::getDPPOp32(E32); in getDPPOp()
144 case AMDGPU::IMPLICIT_DEF: in getOldOpndValue()
146 case AMDGPU::COPY: in getOldOpndValue()
147 case AMDGPU::V_MOV_B32_e32: { in getOldOpndValue()
161 assert(MovMI.getOpcode() == AMDGPU::V_MOV_B32_dpp); in createDPPInst()
174 auto *Dst = TII->getNamedOperand(OrigMI, AMDGPU::OpName::vdst); in createDPPInst()
179 const int OldIdx = AMDGPU::getNamedOperandIdx(DPPOp, AMDGPU::OpName::old); in createDPPInst()
182 assert(isOfRegClass(CombOldVGPR, AMDGPU::VGPR_32RegClass, *MRI)); in createDPPInst()
[all …]
SIFoldOperands.cpp
143 case AMDGPU::V_MAC_F32_e64: in isInlineConstantIfFolded()
144 case AMDGPU::V_MAC_F16_e64: in isInlineConstantIfFolded()
145 case AMDGPU::V_FMAC_F32_e64: in isInlineConstantIfFolded()
146 case AMDGPU::V_FMAC_F16_e64: { in isInlineConstantIfFolded()
149 int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2); in isInlineConstantIfFolded()
151 bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64 || in isInlineConstantIfFolded()
152 Opc == AMDGPU::V_FMAC_F16_e64; in isInlineConstantIfFolded()
153 bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64 || in isInlineConstantIfFolded()
154 Opc == AMDGPU::V_FMAC_F32_e64; in isInlineConstantIfFolded()
157 (IsF32 ? AMDGPU::V_FMA_F32 : AMDGPU::V_FMA_F16_gfx9) : in isInlineConstantIfFolded()
[all …]
SILoadStoreOptimizer.cpp
285 return AMDGPU::getMUBUFElements(Opc); in getOpcodeWidth()
289 TII.getNamedOperand(MI, AMDGPU::OpName::dmask)->getImm(); in getOpcodeWidth()
293 return AMDGPU::getMTBUFElements(Opc); in getOpcodeWidth()
297 case AMDGPU::S_BUFFER_LOAD_DWORD_IMM: in getOpcodeWidth()
299 case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM: in getOpcodeWidth()
301 case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM: in getOpcodeWidth()
313 switch (AMDGPU::getMUBUFBaseOpcode(Opc)) { in getInstClass()
316 case AMDGPU::BUFFER_LOAD_DWORD_OFFEN: in getInstClass()
317 case AMDGPU::BUFFER_LOAD_DWORD_OFFEN_exact: in getInstClass()
318 case AMDGPU::BUFFER_LOAD_DWORD_OFFSET: in getInstClass()
[all …]
SIInsertSkips.cpp
101 case AMDGPU::SI_MASK_BRANCH: in opcodeEmitsNoInsts()
128 if (I->getOpcode() == AMDGPU::S_CBRANCH_VCCNZ || in shouldSkip()
129 I->getOpcode() == AMDGPU::S_CBRANCH_VCCZ) in shouldSkip()
137 I->getOpcode() == AMDGPU::S_WAITCNT) in shouldSkip()
162 BuildMI(&MBB, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ)) in skipIfDead()
168 BuildMI(*SkipBB, Insert, DL, TII->get(AMDGPU::EXP_DONE)) in skipIfDead()
170 .addReg(AMDGPU::VGPR0, RegState::Undef) in skipIfDead()
171 .addReg(AMDGPU::VGPR0, RegState::Undef) in skipIfDead()
172 .addReg(AMDGPU::VGPR0, RegState::Undef) in skipIfDead()
173 .addReg(AMDGPU::VGPR0, RegState::Undef) in skipIfDead()
[all …]
SIOptimizeExecMaskingPreRA.cpp
89 return MI.getOpcode() == AMDGPU::S_OR_B32 && in isEndCF()
90 MI.modifiesRegister(AMDGPU::EXEC_LO, TRI); in isEndCF()
93 return MI.getOpcode() == AMDGPU::S_OR_B64 && in isEndCF()
94 MI.modifiesRegister(AMDGPU::EXEC, TRI); in isEndCF()
98 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; in isFullExecCopy()
111 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; in getOrNonExecReg()
112 auto Op = TII.getNamedOperand(MI, AMDGPU::OpName::src1); in getOrNonExecReg()
115 Op = TII.getNamedOperand(MI, AMDGPU::OpName::src0); in getOrNonExecReg()
118 return AMDGPU::NoRegister; in getOrNonExecReg()
126 if (SavedExec == AMDGPU::NoRegister) in getOrExecSource()
[all …]
AMDGPUInstructionSelector.cpp
88 return RB->getID() == AMDGPU::VCCRegBankID; in isVCC()
102 if (SrcReg == AMDGPU::SCC) { in selectCOPY()
125 AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32; in selectCOPY()
129 BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg) in selectCOPY()
215 BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg) in getSubOperand64()
231 case AMDGPU::sub0: in getSubOperand64()
233 case AMDGPU::sub1: in getSubOperand64()
240 case AMDGPU::G_AND: in getLogicalBitOpcode()
241 return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32; in getLogicalBitOpcode()
242 case AMDGPU::G_OR: in getLogicalBitOpcode()
[all …]
SIShrinkInstructions.cpp
75 int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0); in foldImmediates()
192 int SOPKOpc = AMDGPU::getSOPKOp(MI.getOpcode()); in shrinkScalarCompare()
198 if (SOPKOpc == AMDGPU::S_CMPK_EQ_U32 || SOPKOpc == AMDGPU::S_CMPK_LG_U32) { in shrinkScalarCompare()
202 SOPKOpc = (SOPKOpc == AMDGPU::S_CMPK_EQ_U32) ? in shrinkScalarCompare()
203 AMDGPU::S_CMPK_EQ_I32 : AMDGPU::S_CMPK_LG_I32; in shrinkScalarCompare()
222 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode()); in shrinkMIMG()
223 if (Info->MIMGEncoding != AMDGPU::MIMGEncGfx10NSA) in shrinkMIMG()
231 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0); in shrinkMIMG()
236 RC = &AMDGPU::VReg_64RegClass; in shrinkMIMG()
238 RC = &AMDGPU::VReg_96RegClass; in shrinkMIMG()
[all …]
GCNHazardRecognizer.cpp
49 MaxLookAhead = MF.getRegInfo().isPhysRegUsed(AMDGPU::AGPR0) ? 18 : 5; in GCNHazardRecognizer()
62 return Opcode == AMDGPU::V_DIV_FMAS_F32 || Opcode == AMDGPU::V_DIV_FMAS_F64; in isDivFMas()
66 return Opcode == AMDGPU::S_GETREG_B32; in isSGetReg()
70 return Opcode == AMDGPU::S_SETREG_B32 || Opcode == AMDGPU::S_SETREG_IMM32_B32; in isSSetReg()
74 return Opcode == AMDGPU::V_READLANE_B32 || Opcode == AMDGPU::V_WRITELANE_B32; in isRWLane()
78 return Opcode == AMDGPU::S_RFE_B64; in isRFE()
83 case AMDGPU::S_MOVRELS_B32: in isSMovRel()
84 case AMDGPU::S_MOVRELS_B64: in isSMovRel()
85 case AMDGPU::S_MOVRELD_B32: in isSMovRel()
86 case AMDGPU::S_MOVRELD_B64: in isSMovRel()
[all …]
SIFrameLowering.cpp
29 return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(), in getAllSGPR128()
35 return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(), in getAllSGPRs()
79 return AMDGPU::NoRegister; in findScratchNonCalleeSaveRegister()
86 MRI, LiveRegs, AMDGPU::SReg_32_XM0_XEXECRegClass, true); in findUnusedSGPRNonCalleeSaved()
106 BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::BUFFER_STORE_DWORD_OFFSET)) in buildPrologSpill()
121 MF->getRegInfo(), LiveRegs, AMDGPU::VGPR_32RegClass); in buildPrologSpill()
123 BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::V_MOV_B32_e32), OffsetReg) in buildPrologSpill()
126 BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::BUFFER_STORE_DWORD_OFFEN)) in buildPrologSpill()
154 TII->get(AMDGPU::BUFFER_LOAD_DWORD_OFFSET), SpillReg) in buildEpilogReload()
168 MF->getRegInfo(), LiveRegs, AMDGPU::VGPR_32RegClass); in buildEpilogReload()
[all …]
SIInsertWaitcnts.cpp
169 void addWait(AMDGPU::Waitcnt &Wait, InstCounterType T, unsigned Count) { in addWait()
274 bool simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const;
277 AMDGPU::Waitcnt &Wait) const;
278 void applyWaitcnt(const AMDGPU::Waitcnt &Wait);
374 AMDGPU::IsaVersion IV;
544 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::addr); in updateByEvent()
552 if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), in updateByEvent()
553 AMDGPU::OpName::data0) != -1) { in updateByEvent()
556 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data0), in updateByEvent()
559 if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), in updateByEvent()
[all …]
AMDGPUAsmPrinter.cpp
49 using namespace llvm::AMDGPU;
50 using namespace llvm::AMDGPU::HSAMD;
72 static uint32_t getFPMode(AMDGPU::SIModeRegisterDefaults Mode) { in getFPMode()
188 return (MBB->back().getOpcode() != AMDGPU::S_SETPC_B64); in isBlockOnlyReachableByFallthrough()
331 if (AMDGPU::isGFX10(STI) && in doFinalization()
623 Info.UsesFlatScratch = MRI.isPhysRegUsed(AMDGPU::FLAT_SCR_LO) || in analyzeResourceUsage()
624 MRI.isPhysRegUsed(AMDGPU::FLAT_SCR_HI); in analyzeResourceUsage()
633 (!hasAnyNonFlatUseOfReg(MRI, *TII, AMDGPU::FLAT_SCR) && in analyzeResourceUsage()
634 !hasAnyNonFlatUseOfReg(MRI, *TII, AMDGPU::FLAT_SCR_LO) && in analyzeResourceUsage()
635 !hasAnyNonFlatUseOfReg(MRI, *TII, AMDGPU::FLAT_SCR_HI))) { in analyzeResourceUsage()
[all …]
SILowerControlFlow.cpp
140 assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef()); in setImpSCCDefDead()
154 U->getOpcode() != AMDGPU::SI_END_CF) in isSimpleIf()
183 assert(SaveExec.getSubReg() == AMDGPU::NoSubRegister); in getSaveExec()
187 TII->isWave32() ? AMDGPU::S_MOV_B32_term : AMDGPU::S_MOV_B64_term; in getSaveExec()
204 assert(Cond.getSubReg() == AMDGPU::NoSubRegister); in emitIf()
207 assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef()); in emitIf()
219 BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), CopyReg) in emitIf()
249 MachineInstr *NewBr = BuildMI(MBB, I, DL, TII->get(AMDGPU::SI_MASK_BRANCH)) in emitIf()
268 LIS->removeAllRegUnitsForPhysReg(AMDGPU::EXEC); in emitIf()
295 BuildMI(MBB, Start, DL, TII->get(AMDGPU::COPY), CopyReg) in emitElse()
[all …]
AMDGPUArgumentUsageInfo.cpp
87 &AMDGPU::SGPR_128RegClass); in getPreloadedValue()
91 &AMDGPU::SGPR_64RegClass); in getPreloadedValue()
94 &AMDGPU::SGPR_32RegClass); in getPreloadedValue()
98 &AMDGPU::SGPR_32RegClass); in getPreloadedValue()
101 &AMDGPU::SGPR_32RegClass); in getPreloadedValue()
105 &AMDGPU::SGPR_32RegClass); in getPreloadedValue()
108 &AMDGPU::SGPR_64RegClass); in getPreloadedValue()
111 &AMDGPU::SGPR_64RegClass); in getPreloadedValue()
114 &AMDGPU::SGPR_64RegClass); in getPreloadedValue()
117 &AMDGPU::SGPR_64RegClass); in getPreloadedValue()
[all …]
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/Disassembler/
AMDGPUDisassembler.cpp
55 #define SGPR_MAX (isGFX10() ? AMDGPU::EncValues::SGPR_MAX_GFX10 \
56 : AMDGPU::EncValues::SGPR_MAX_SI)
67 if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding] && !isGFX10()) in AMDGPUDisassembler()
81 int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx); in insertNamedMCOperand()
259 using namespace llvm::AMDGPU::DPP; in isValidDPP8()
260 int FiIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::fi); in isValidDPP8()
306 if (STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem]) { in getInstruction()
315 if (STI.getFeatureBits()[AMDGPU::FeatureFmaMixInsts]) { in getInstruction()
361 if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi || in getInstruction()
362 MI.getOpcode() == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 || in getInstruction()
[all …]
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/AsmParser/
AMDGPUAsmParser.cpp
65 using namespace llvm::AMDGPU;
251 return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::i16); in isRegOrImmWithInt16InputMods()
255 return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::i32); in isRegOrImmWithInt32InputMods()
259 return isRegOrImmWithInputMods(AMDGPU::VS_64RegClassID, MVT::i64); in isRegOrImmWithInt64InputMods()
263 return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::f16); in isRegOrImmWithFP16InputMods()
267 return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::f32); in isRegOrImmWithFP32InputMods()
271 return isRegOrImmWithInputMods(AMDGPU::VS_64RegClassID, MVT::f64); in isRegOrImmWithFP64InputMods()
275 return isRegClass(AMDGPU::VGPR_32RegClassID) || in isVReg()
276 isRegClass(AMDGPU::VReg_64RegClassID) || in isVReg()
277 isRegClass(AMDGPU::VReg_96RegClassID) || in isVReg()
[all …]
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/MCTargetDesc/
SIMCCodeEmitter.cpp
138 STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm]) in getLit16Encoding()
174 STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm]) in getLit32Encoding()
210 STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm]) in getLit64Encoding()
237 case AMDGPU::OPERAND_REG_IMM_INT32: in getLitEncoding()
238 case AMDGPU::OPERAND_REG_IMM_FP32: in getLitEncoding()
239 case AMDGPU::OPERAND_REG_INLINE_C_INT32: in getLitEncoding()
240 case AMDGPU::OPERAND_REG_INLINE_C_FP32: in getLitEncoding()
241 case AMDGPU::OPERAND_REG_INLINE_AC_INT32: in getLitEncoding()
242 case AMDGPU::OPERAND_REG_INLINE_AC_FP32: in getLitEncoding()
245 case AMDGPU::OPERAND_REG_IMM_INT64: in getLitEncoding()
[all …]
AMDGPUInstPrinter.cpp
27 using namespace llvm::AMDGPU;
134 if (AMDGPU::isGFX10(STI)) { in printFlatOffset()
186 if (AMDGPU::isGFX10(STI)) in printDLC()
222 const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfoByEncoding(Dim); in printDim()
241 if (STI.hasFeature(AMDGPU::FeatureR128A16)) in printR128A16()
275 if (AMDGPU::isGFX10(STI)) in printFORMAT()
288 case AMDGPU::FP_REG: in printRegOperand()
289 case AMDGPU::SP_REG: in printRegOperand()
290 case AMDGPU::SCRATCH_WAVE_OFFSET_REG: in printRegOperand()
291 case AMDGPU::PRIVATE_RSRC_REG: in printRegOperand()
[all …]
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Support/
TargetParser.cpp
21 using namespace AMDGPU;
28 AMDGPU::GPUKind Kind;
107 const GPUInfo *getArchEntry(AMDGPU::GPUKind AK, ArrayRef<GPUInfo> Table) { in getArchEntry()
108 GPUInfo Search = { {""}, {""}, AK, AMDGPU::FEATURE_NONE }; in getArchEntry()
122 StringRef llvm::AMDGPU::getArchNameAMDGCN(GPUKind AK) { in getArchNameAMDGCN()
128 StringRef llvm::AMDGPU::getArchNameR600(GPUKind AK) { in getArchNameR600()
134 AMDGPU::GPUKind llvm::AMDGPU::parseArchAMDGCN(StringRef CPU) { in parseArchAMDGCN()
140 return AMDGPU::GPUKind::GK_NONE; in parseArchAMDGCN()
143 AMDGPU::GPUKind llvm::AMDGPU::parseArchR600(StringRef CPU) { in parseArchR600()
149 return AMDGPU::GPUKind::GK_NONE; in parseArchR600()
[all …]
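
Note: TargetParser.cpp implements the public llvm::AMDGPU target-parsing API. A self-contained usage sketch, assuming LLVM 10's llvm/Support/TargetParser.h:

#include "llvm/Support/TargetParser.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  // Round-trip a processor name through the parse and print entry points.
  llvm::AMDGPU::GPUKind Kind = llvm::AMDGPU::parseArchAMDGCN("gfx906");
  if (Kind == llvm::AMDGPU::GPUKind::GK_NONE)
    return 1; // unrecognized amdgcn processor name
  llvm::outs() << llvm::AMDGPU::getArchNameAMDGCN(Kind) << "\n"; // prints gfx906
  return 0;
}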
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/Utils/
AMDGPUBaseInfo.cpp
104 namespace AMDGPU { namespace
420 STI->getFeatureBits().test(AMDGPU::FeatureXNACK)); in getNumExtraSGPRs()
922 return STI.getFeatureBits()[AMDGPU::FeatureXNACK]; in hasXNACK()
926 return STI.getFeatureBits()[AMDGPU::FeatureSRAMECC]; in hasSRAMECC()
930 return STI.getFeatureBits()[AMDGPU::FeatureMIMG_R128]; in hasMIMG_R128()
934 return !STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem]; in hasPackedD16()
938 return STI.getFeatureBits()[AMDGPU::FeatureSouthernIslands]; in isSI()
942 return STI.getFeatureBits()[AMDGPU::FeatureSeaIslands]; in isCI()
946 return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]; in isVI()
950 return STI.getFeatureBits()[AMDGPU::FeatureGFX9]; in isGFX9()
[all …]
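
Note: the AMDGPUBaseInfo.cpp hits are thin wrappers over subtarget feature bits. A minimal sketch of how a caller queries them, assuming the backend's Utils/AMDGPUBaseInfo.h (the function name describeSubtarget is hypothetical):

#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/raw_ostream.h"

static void describeSubtarget(const llvm::MCSubtargetInfo &STI) {
  if (llvm::AMDGPU::isGFX9(STI))
    llvm::outs() << "gfx9 core\n";     // FeatureGFX9 is set
  if (llvm::AMDGPU::hasXNACK(STI))
    llvm::outs() << "xnack enabled\n"; // FeatureXNACK is set
}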
