Lines Matching refs:AMDGPU
57 int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName); in nodesHaveSameOperandValue()
58 int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName); in nodesHaveSameOperandValue()
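
The pair of getNamedOperandIdx calls above is the idiom this file uses throughout: resolve a named operand to a per-opcode index, treat -1 as "operand absent", and only then compare. A minimal stand-alone sketch of that control flow, with a toy string-table lookup standing in for the real TableGen-generated function (only the two index lookups appear in the listing; the -1 handling below is a plausible reconstruction):

    #include <cstring>

    // Toy stand-in for AMDGPU::getNamedOperandIdx: returns the operand's
    // index in this opcode's operand-name table, or -1 if absent.
    int getNamedOperandIdx(const char *const *OpNames, int NumOps,
                           const char *OpName) {
      for (int i = 0; i < NumOps; ++i)
        if (std::strcmp(OpNames[i], OpName) == 0)
          return i;
      return -1;
    }

    // Compare one named operand across two opcodes: both absent compares
    // equal, exactly one absent compares unequal, otherwise compare the
    // resolved values.
    bool sameNamedOperand(const char *const *Ops0, const int *Vals0, int N0,
                          const char *const *Ops1, const int *Vals1, int N1,
                          const char *OpName) {
      int Op0Idx = getNamedOperandIdx(Ops0, N0, OpName);
      int Op1Idx = getNamedOperandIdx(Ops1, N1, OpName);
      if (Op0Idx == -1 && Op1Idx == -1)
        return true;
      if ((Op0Idx == -1) != (Op1Idx == -1))
        return false;
      return Vals0[Op0Idx] == Vals1[Op1Idx];
    }
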
84 case AMDGPU::V_MOV_B32_e32: in isReallyTriviallyReMaterializable()
85 case AMDGPU::V_MOV_B32_e64: in isReallyTriviallyReMaterializable()
86 case AMDGPU::V_MOV_B64_PSEUDO: in isReallyTriviallyReMaterializable()
123 if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::data1) != -1 || in areLoadsFromSameBasePtr()
124 AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::data1) != -1) in areLoadsFromSameBasePtr()
160 if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) || in areLoadsFromSameBasePtr()
162 !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) || in areLoadsFromSameBasePtr()
163 !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc)) in areLoadsFromSameBasePtr()
166 int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset); in areLoadsFromSameBasePtr()
167 int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset); in areLoadsFromSameBasePtr()
195 case AMDGPU::DS_READ2ST64_B32: in isStride64()
196 case AMDGPU::DS_READ2ST64_B64: in isStride64()
197 case AMDGPU::DS_WRITE2ST64_B32: in isStride64()
198 case AMDGPU::DS_WRITE2ST64_B64: in isStride64()
212 getNamedOperand(LdSt, AMDGPU::OpName::offset); in getMemOpBaseRegImmOfs()
216 getNamedOperand(LdSt, AMDGPU::OpName::addr); in getMemOpBaseRegImmOfs()
227 getNamedOperand(LdSt, AMDGPU::OpName::offset0); in getMemOpBaseRegImmOfs()
229 getNamedOperand(LdSt, AMDGPU::OpName::offset1); in getMemOpBaseRegImmOfs()
243 int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0); in getMemOpBaseRegImmOfs()
251 getNamedOperand(LdSt, AMDGPU::OpName::addr); in getMemOpBaseRegImmOfs()
261 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::soffset) != -1) in getMemOpBaseRegImmOfs()
265 getNamedOperand(LdSt, AMDGPU::OpName::vaddr); in getMemOpBaseRegImmOfs()
270 getNamedOperand(LdSt, AMDGPU::OpName::offset); in getMemOpBaseRegImmOfs()
278 getNamedOperand(LdSt, AMDGPU::OpName::offset); in getMemOpBaseRegImmOfs()
283 getNamedOperand(LdSt, AMDGPU::OpName::sbase); in getMemOpBaseRegImmOfs()
290 const MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::addr); in getMemOpBaseRegImmOfs()
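
The offset0/offset1 operands read at lines 227-229 belong to the DS read2/write2 forms, whose two slots are addressed in element-size units. A hedged sketch of how such a pair can collapse to a single base+offset access, assuming the two slots must be adjacent and that the ST64 variants matched in isStride64 above stride by 64 elements:

    #include <cstdint>

    // Fold a DS read2/write2 offset pair into one byte offset. Returns
    // false when the slots are not adjacent and the pair cannot be
    // summarized as a single base+offset access.
    bool foldDSOffsetPair(uint8_t Offset0, uint8_t Offset1, unsigned EltSize,
                          bool Stride64, int64_t &ByteOffset) {
      if (Offset1 != Offset0 + 1)
        return false;               // slots not adjacent
      if (Stride64)
        EltSize *= 64;              // DS_*2ST64_* step in 64-element units
      ByteOffset = int64_t(Offset0) * EltSize;
      return true;
    }
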
306 FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdst); in shouldClusterMemOps()
307 SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdst); in shouldClusterMemOps()
311 FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::sdst); in shouldClusterMemOps()
312 SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::sdst); in shouldClusterMemOps()
317 FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdata); in shouldClusterMemOps()
318 SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdata); in shouldClusterMemOps()
349 assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC); in copyPhysReg()
352 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, in copyPhysReg()
353 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, in copyPhysReg()
354 AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11, in copyPhysReg()
355 AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, in copyPhysReg()
359 AMDGPU::sub0_sub1, AMDGPU::sub2_sub3, in copyPhysReg()
360 AMDGPU::sub4_sub5, AMDGPU::sub6_sub7, in copyPhysReg()
361 AMDGPU::sub8_sub9, AMDGPU::sub10_sub11, in copyPhysReg()
362 AMDGPU::sub12_sub13, AMDGPU::sub14_sub15, in copyPhysReg()
366 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, in copyPhysReg()
367 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, in copyPhysReg()
371 AMDGPU::sub0_sub1, AMDGPU::sub2_sub3, in copyPhysReg()
372 AMDGPU::sub4_sub5, AMDGPU::sub6_sub7, in copyPhysReg()
376 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, in copyPhysReg()
380 AMDGPU::sub0_sub1, AMDGPU::sub2_sub3, in copyPhysReg()
384 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, in copyPhysReg()
388 AMDGPU::sub0, AMDGPU::sub1, in copyPhysReg()
395 if (AMDGPU::SReg_32RegClass.contains(DestReg)) { in copyPhysReg()
396 assert(AMDGPU::SReg_32RegClass.contains(SrcReg)); in copyPhysReg()
397 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg) in copyPhysReg()
401 } else if (AMDGPU::SReg_64RegClass.contains(DestReg)) { in copyPhysReg()
402 if (DestReg == AMDGPU::VCC) { in copyPhysReg()
403 if (AMDGPU::SReg_64RegClass.contains(SrcReg)) { in copyPhysReg()
404 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC) in copyPhysReg()
408 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg)); in copyPhysReg()
409 BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_I32_e32)) in copyPhysReg()
417 assert(AMDGPU::SReg_64RegClass.contains(SrcReg)); in copyPhysReg()
418 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg) in copyPhysReg()
422 } else if (AMDGPU::SReg_128RegClass.contains(DestReg)) { in copyPhysReg()
423 assert(AMDGPU::SReg_128RegClass.contains(SrcReg)); in copyPhysReg()
424 Opcode = AMDGPU::S_MOV_B64; in copyPhysReg()
427 } else if (AMDGPU::SReg_256RegClass.contains(DestReg)) { in copyPhysReg()
428 assert(AMDGPU::SReg_256RegClass.contains(SrcReg)); in copyPhysReg()
429 Opcode = AMDGPU::S_MOV_B64; in copyPhysReg()
432 } else if (AMDGPU::SReg_512RegClass.contains(DestReg)) { in copyPhysReg()
433 assert(AMDGPU::SReg_512RegClass.contains(SrcReg)); in copyPhysReg()
434 Opcode = AMDGPU::S_MOV_B64; in copyPhysReg()
437 } else if (AMDGPU::VGPR_32RegClass.contains(DestReg)) { in copyPhysReg()
438 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) || in copyPhysReg()
439 AMDGPU::SReg_32RegClass.contains(SrcReg)); in copyPhysReg()
440 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg) in copyPhysReg()
444 } else if (AMDGPU::VReg_64RegClass.contains(DestReg)) { in copyPhysReg()
445 assert(AMDGPU::VReg_64RegClass.contains(SrcReg) || in copyPhysReg()
446 AMDGPU::SReg_64RegClass.contains(SrcReg)); in copyPhysReg()
447 Opcode = AMDGPU::V_MOV_B32_e32; in copyPhysReg()
450 } else if (AMDGPU::VReg_96RegClass.contains(DestReg)) { in copyPhysReg()
451 assert(AMDGPU::VReg_96RegClass.contains(SrcReg)); in copyPhysReg()
452 Opcode = AMDGPU::V_MOV_B32_e32; in copyPhysReg()
455 } else if (AMDGPU::VReg_128RegClass.contains(DestReg)) { in copyPhysReg()
456 assert(AMDGPU::VReg_128RegClass.contains(SrcReg) || in copyPhysReg()
457 AMDGPU::SReg_128RegClass.contains(SrcReg)); in copyPhysReg()
458 Opcode = AMDGPU::V_MOV_B32_e32; in copyPhysReg()
461 } else if (AMDGPU::VReg_256RegClass.contains(DestReg)) { in copyPhysReg()
462 assert(AMDGPU::VReg_256RegClass.contains(SrcReg) || in copyPhysReg()
463 AMDGPU::SReg_256RegClass.contains(SrcReg)); in copyPhysReg()
464 Opcode = AMDGPU::V_MOV_B32_e32; in copyPhysReg()
467 } else if (AMDGPU::VReg_512RegClass.contains(DestReg)) { in copyPhysReg()
468 assert(AMDGPU::VReg_512RegClass.contains(SrcReg) || in copyPhysReg()
469 AMDGPU::SReg_512RegClass.contains(SrcReg)); in copyPhysReg()
470 Opcode = AMDGPU::V_MOV_B32_e32; in copyPhysReg()
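
Everything from line 349 down to here is one dispatch: pick a scalar move opcode per register class, then (in code outside these matches) loop over a sub-register index table like the sub0..sub15 arrays to emit one move per lane. A toy model of that expansion, with printf standing in for BuildMI and the backward-copy direction included as an assumption about how overlapping ranges are handled:

    #include <cstdio>

    // Expand a wide physical-register copy into per-chunk scalar moves:
    // 64-bit chunks (S_MOV_B64) for SGPR tuples, 32-bit chunks
    // (V_MOV_B32_e32) for VGPR tuples.
    void expandWideCopy(bool IsSGPR, unsigned Bits, bool CopyBackward) {
      const unsigned ChunkBits = IsSGPR ? 64 : 32;
      const unsigned N = Bits / ChunkBits;
      for (unsigned i = 0; i < N; ++i) {
        unsigned Idx = CopyBackward ? N - 1 - i : i; // avoid clobbering overlap
        std::printf("%s dst.sub%u <- src.sub%u\n",
                    IsSGPR ? "S_MOV_B64" : "V_MOV_B32_e32", Idx, Idx);
      }
    }
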
508 NewOpc = AMDGPU::getCommuteRev(Opcode); in commuteOpcode()
514 NewOpc = AMDGPU::getCommuteOrig(Opcode); in commuteOpcode()
525 return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32; in getMovOpcode()
527 return AMDGPU::S_MOV_B64; in getMovOpcode()
529 return AMDGPU::V_MOV_B64_PSEUDO; in getMovOpcode()
531 return AMDGPU::COPY; in getMovOpcode()
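
Lines 525-531 read as a small decision ladder; restated as a self-contained function (the enum values are stand-ins for the real opcode numbers):

    // Move-opcode selection mirroring getMovOpcode: 32-bit values take
    // the scalar or vector mov by destination class, 64-bit SGPRs take
    // S_MOV_B64, 64-bit VGPRs take the V_MOV_B64 pseudo, and anything
    // wider falls back to COPY for later expansion.
    enum class MovOp { S_MOV_B32, V_MOV_B32_e32, S_MOV_B64,
                       V_MOV_B64_PSEUDO, COPY };

    MovOp getMovOpcode(bool IsSGPRClass, unsigned Bits) {
      if (Bits == 32)
        return IsSGPRClass ? MovOp::S_MOV_B32 : MovOp::V_MOV_B32_e32;
      if (Bits == 64)
        return IsSGPRClass ? MovOp::S_MOV_B64 : MovOp::V_MOV_B64_PSEUDO;
      return MovOp::COPY;
    }
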
537 return AMDGPU::SI_SPILL_S32_SAVE; in getSGPRSpillSaveOpcode()
539 return AMDGPU::SI_SPILL_S64_SAVE; in getSGPRSpillSaveOpcode()
541 return AMDGPU::SI_SPILL_S128_SAVE; in getSGPRSpillSaveOpcode()
543 return AMDGPU::SI_SPILL_S256_SAVE; in getSGPRSpillSaveOpcode()
545 return AMDGPU::SI_SPILL_S512_SAVE; in getSGPRSpillSaveOpcode()
554 return AMDGPU::SI_SPILL_V32_SAVE; in getVGPRSpillSaveOpcode()
556 return AMDGPU::SI_SPILL_V64_SAVE; in getVGPRSpillSaveOpcode()
558 return AMDGPU::SI_SPILL_V96_SAVE; in getVGPRSpillSaveOpcode()
560 return AMDGPU::SI_SPILL_V128_SAVE; in getVGPRSpillSaveOpcode()
562 return AMDGPU::SI_SPILL_V256_SAVE; in getVGPRSpillSaveOpcode()
564 return AMDGPU::SI_SPILL_V512_SAVE; in getVGPRSpillSaveOpcode()
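
The save-opcode helpers above are size ladders: the suffix is the spill width in bits, so the switch keys are the register-class size in bytes. A sketch for the VGPR case, with the byte keys inferred from the 32/64/.../512-bit names (the SGPR saves above and both restore helpers below follow the same shape):

    #include <cstdlib>

    // Size in bytes -> spill pseudo, mirroring getVGPRSpillSaveOpcode.
    const char *getVGPRSpillSaveOpcodeForSize(unsigned Bytes) {
      switch (Bytes) {
      case 4:  return "SI_SPILL_V32_SAVE";
      case 8:  return "SI_SPILL_V64_SAVE";
      case 12: return "SI_SPILL_V96_SAVE";
      case 16: return "SI_SPILL_V128_SAVE";
      case 32: return "SI_SPILL_V256_SAVE";
      case 64: return "SI_SPILL_V512_SAVE";
      default: std::abort();   // unimplemented spill size
      }
    }
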
595 MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0RegClass); in storeRegToStackSlot()
614 BuildMI(MBB, MI, DL, get(AMDGPU::KILL)) in storeRegToStackSlot()
636 return AMDGPU::SI_SPILL_S32_RESTORE; in getSGPRSpillRestoreOpcode()
638 return AMDGPU::SI_SPILL_S64_RESTORE; in getSGPRSpillRestoreOpcode()
640 return AMDGPU::SI_SPILL_S128_RESTORE; in getSGPRSpillRestoreOpcode()
642 return AMDGPU::SI_SPILL_S256_RESTORE; in getSGPRSpillRestoreOpcode()
644 return AMDGPU::SI_SPILL_S512_RESTORE; in getSGPRSpillRestoreOpcode()
653 return AMDGPU::SI_SPILL_V32_RESTORE; in getVGPRSpillRestoreOpcode()
655 return AMDGPU::SI_SPILL_V64_RESTORE; in getVGPRSpillRestoreOpcode()
657 return AMDGPU::SI_SPILL_V96_RESTORE; in getVGPRSpillRestoreOpcode()
659 return AMDGPU::SI_SPILL_V128_RESTORE; in getVGPRSpillRestoreOpcode()
661 return AMDGPU::SI_SPILL_V256_RESTORE; in getVGPRSpillRestoreOpcode()
663 return AMDGPU::SI_SPILL_V512_RESTORE; in getVGPRSpillRestoreOpcode()
695 MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0RegClass); in loadRegFromStackSlot()
709 BuildMI(MBB, MI, DL, get(AMDGPU::IMPLICIT_DEF), DestReg); in loadRegFromStackSlot()
743 TIDReg = RI.findUnusedRegister(MF->getRegInfo(), &AMDGPU::VGPR_32RegClass); in calculateLDSSpillAddress()
744 if (TIDReg == AMDGPU::NoRegister) in calculateLDSSpillAddress()
747 if (!AMDGPU::isShader(MF->getFunction()->getCallingConv()) && in calculateLDSSpillAddress()
765 unsigned STmp0 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0); in calculateLDSSpillAddress()
766 unsigned STmp1 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0); in calculateLDSSpillAddress()
767 BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp0) in calculateLDSSpillAddress()
770 BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp1) in calculateLDSSpillAddress()
775 BuildMI(Entry, Insert, DL, get(AMDGPU::S_MUL_I32), STmp1) in calculateLDSSpillAddress()
779 BuildMI(Entry, Insert, DL, get(AMDGPU::V_MUL_U32_U24_e32), TIDReg) in calculateLDSSpillAddress()
783 BuildMI(Entry, Insert, DL, get(AMDGPU::V_MAD_U32_U24), TIDReg) in calculateLDSSpillAddress()
788 BuildMI(Entry, Insert, DL, get(AMDGPU::V_ADD_I32_e32), TIDReg) in calculateLDSSpillAddress()
793 BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_LO_U32_B32_e64), in calculateLDSSpillAddress()
798 BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_HI_U32_B32_e64), in calculateLDSSpillAddress()
804 BuildMI(Entry, Insert, DL, get(AMDGPU::V_LSHLREV_B32_e32), in calculateLDSSpillAddress()
813 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), TmpReg) in calculateLDSSpillAddress()
831 BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP)) in insertWaitStates()
845 case AMDGPU::S_NOP: in getNumWaitStates()
856 case AMDGPU::V_MOV_B64_PSEUDO: { in expandPostRAPseudo()
858 unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0); in expandPostRAPseudo()
859 unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1); in expandPostRAPseudo()
866 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo) in expandPostRAPseudo()
869 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi) in expandPostRAPseudo()
874 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo) in expandPostRAPseudo()
875 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0)) in expandPostRAPseudo()
877 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi) in expandPostRAPseudo()
878 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1)) in expandPostRAPseudo()
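
The V_MOV_B64_PSEUDO expansion above has no 64-bit VALU move to lower to, so it becomes two V_MOV_B32_e32 over the sub0/sub1 halves. For the immediate form the split is just the two 32-bit halves of the constant; a sketch, with the struct standing in for the built machine instructions:

    #include <cstdint>

    struct Mov32 { unsigned DstSub; uint32_t Imm; };

    // Split a 64-bit immediate move into two 32-bit moves.
    void expandMovB64Imm(uint64_t Imm, Mov32 Out[2]) {
      Out[0] = Mov32{/*sub0*/ 0, uint32_t(Imm)};        // low half
      Out[1] = Mov32{/*sub1*/ 1, uint32_t(Imm >> 32)};  // high half
    }
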
885 case AMDGPU::V_CNDMASK_B64_PSEUDO: { in expandPostRAPseudo()
887 unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0); in expandPostRAPseudo()
888 unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1); in expandPostRAPseudo()
893 BuildMI(MBB, MI, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstLo) in expandPostRAPseudo()
894 .addReg(RI.getSubReg(Src0, AMDGPU::sub0)) in expandPostRAPseudo()
895 .addReg(RI.getSubReg(Src1, AMDGPU::sub0)) in expandPostRAPseudo()
898 BuildMI(MBB, MI, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstHi) in expandPostRAPseudo()
899 .addReg(RI.getSubReg(Src0, AMDGPU::sub1)) in expandPostRAPseudo()
900 .addReg(RI.getSubReg(Src1, AMDGPU::sub1)) in expandPostRAPseudo()
907 case AMDGPU::SI_PC_ADD_REL_OFFSET: { in expandPostRAPseudo()
912 unsigned RegLo = TRI->getSubReg(Reg, AMDGPU::sub0); in expandPostRAPseudo()
913 unsigned RegHi = TRI->getSubReg(Reg, AMDGPU::sub1); in expandPostRAPseudo()
918 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg)); in expandPostRAPseudo()
922 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo) in expandPostRAPseudo()
925 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi) in expandPostRAPseudo()
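
SI_PC_ADD_REL_OFFSET bundles three instructions: S_GETPC_B64 materializes the PC into an SGPR pair, then the offset is added with S_ADD_U32 (which sets the carry in SCC) and S_ADDC_U32 (which consumes it). The arithmetic, modeled on plain integers:

    #include <cstdint>

    // 64-bit add performed as two 32-bit halves with an explicit carry,
    // matching the S_ADD_U32 / S_ADDC_U32 pair appended above.
    void pcAddRelOffset(uint32_t &RegLo, uint32_t &RegHi, uint64_t Offset) {
      uint64_t Lo = uint64_t(RegLo) + uint32_t(Offset); // carry lands in bit 32
      RegLo = uint32_t(Lo);
      RegHi = RegHi + uint32_t(Offset >> 32) + uint32_t(Lo >> 32);
    }
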
953 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0); in commuteInstructionImpl()
959 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src1); in commuteInstructionImpl()
990 getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)) { in commuteInstructionImpl()
992 getNamedOperand(MI, AMDGPU::OpName::src1_modifiers); in commuteInstructionImpl()
1037 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); in findCommutedOpIndices()
1047 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); in findCommutedOpIndices()
1060 if (hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) || in findCommutedOpIndices()
1061 hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers)) in findCommutedOpIndices()
1072 return AMDGPU::S_CBRANCH_SCC1; in getBranchOpcode()
1074 return AMDGPU::S_CBRANCH_SCC0; in getBranchOpcode()
1076 return AMDGPU::S_CBRANCH_VCCNZ; in getBranchOpcode()
1078 return AMDGPU::S_CBRANCH_VCCZ; in getBranchOpcode()
1080 return AMDGPU::S_CBRANCH_EXECNZ; in getBranchOpcode()
1082 return AMDGPU::S_CBRANCH_EXECZ; in getBranchOpcode()
1090 case AMDGPU::S_CBRANCH_SCC0: in getBranchPredicate()
1092 case AMDGPU::S_CBRANCH_SCC1: in getBranchPredicate()
1094 case AMDGPU::S_CBRANCH_VCCNZ: in getBranchPredicate()
1096 case AMDGPU::S_CBRANCH_VCCZ: in getBranchPredicate()
1098 case AMDGPU::S_CBRANCH_EXECNZ: in getBranchPredicate()
1100 case AMDGPU::S_CBRANCH_EXECZ: in getBranchPredicate()
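
getBranchOpcode and getBranchPredicate are inverse maps over the same six conditions. One direction, restated as a self-contained switch (the predicate enum is a stand-in for the file's private BranchPredicate type):

    enum class BranchPred { SCC_TRUE, SCC_FALSE, VCCNZ, VCCZ, EXECNZ, EXECZ };
    enum class BranchOp  { S_CBRANCH_SCC1, S_CBRANCH_SCC0, S_CBRANCH_VCCNZ,
                           S_CBRANCH_VCCZ, S_CBRANCH_EXECNZ, S_CBRANCH_EXECZ };

    BranchOp getBranchOpcode(BranchPred P) {
      switch (P) {
      case BranchPred::SCC_TRUE:  return BranchOp::S_CBRANCH_SCC1;
      case BranchPred::SCC_FALSE: return BranchOp::S_CBRANCH_SCC0;
      case BranchPred::VCCNZ:     return BranchOp::S_CBRANCH_VCCNZ;
      case BranchPred::VCCZ:      return BranchOp::S_CBRANCH_VCCZ;
      case BranchPred::EXECNZ:    return BranchOp::S_CBRANCH_EXECNZ;
      case BranchPred::EXECZ:     return BranchOp::S_CBRANCH_EXECZ;
      }
      return BranchOp::S_CBRANCH_SCC1; // unreachable
    }
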
1116 if (I->getOpcode() == AMDGPU::S_BRANCH) { in analyzeBranch()
1137 if (I->getOpcode() == AMDGPU::S_BRANCH) { in analyzeBranch()
1167 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH)) in InsertBranch()
1187 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH)) in InsertBranch()
1202 int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, in removeModOperands()
1203 AMDGPU::OpName::src0_modifiers); in removeModOperands()
1204 int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, in removeModOperands()
1205 AMDGPU::OpName::src1_modifiers); in removeModOperands()
1206 int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc, in removeModOperands()
1207 AMDGPU::OpName::src2_modifiers); in removeModOperands()
1222 if (Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64) { in FoldImmediate()
1225 if (hasModifiersSet(UseMI, AMDGPU::OpName::src0_modifiers) || in FoldImmediate()
1226 hasModifiersSet(UseMI, AMDGPU::OpName::src1_modifiers) || in FoldImmediate()
1227 hasModifiersSet(UseMI, AMDGPU::OpName::src2_modifiers)) { in FoldImmediate()
1239 MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0); in FoldImmediate()
1240 MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1); in FoldImmediate()
1241 MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2); in FoldImmediate()
1261 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod)); in FoldImmediate()
1263 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp)); in FoldImmediate()
1271 if (Opc == AMDGPU::V_MAC_F32_e64) { in FoldImmediate()
1273 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)); in FoldImmediate()
1279 UseMI.setDesc(get(AMDGPU::V_MADMK_F32)); in FoldImmediate()
1306 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod)); in FoldImmediate()
1308 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp)); in FoldImmediate()
1310 if (Opc == AMDGPU::V_MAC_F32_e64) { in FoldImmediate()
1312 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)); in FoldImmediate()
1320 UseMI.setDesc(get(AMDGPU::V_MADAK_F32)); in FoldImmediate()
1422 case AMDGPU::V_MAC_F32_e64: in convertToThreeAddress()
1424 case AMDGPU::V_MAC_F32_e32: { in convertToThreeAddress()
1425 const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0); in convertToThreeAddress()
1432 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); in convertToThreeAddress()
1433 const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0); in convertToThreeAddress()
1434 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); in convertToThreeAddress()
1435 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); in convertToThreeAddress()
1437 return BuildMI(*MBB, MI, MI.getDebugLoc(), get(AMDGPU::V_MAD_F32)) in convertToThreeAddress()
1458 MI.modifiesRegister(AMDGPU::EXEC, &RI); in isSchedulingBoundary()
1556 int Op32 = AMDGPU::getVOPe32(Opcode); in hasVALU32BitEncoding()
1567 return AMDGPU::getNamedOperandIdx(Opcode, in hasModifiers()
1568 AMDGPU::OpName::src0_modifiers) != -1; in hasModifiers()
1591 if (!MO.isImplicit() && (MO.getReg() == AMDGPU::FLAT_SCR)) in usesConstantBus()
1595 if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC) in usesConstantBus()
1599 return (MO.getReg() == AMDGPU::VCC || MO.getReg() == AMDGPU::M0 || in usesConstantBus()
1601 (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) || in usesConstantBus()
1602 AMDGPU::SGPR_64RegClass.contains(MO.getReg())))); in usesConstantBus()
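
The registers singled out above (FLAT_SCR, EXEC, VCC, M0, plain SGPRs) all travel over the scalar constant bus, which a VALU instruction on these targets may read at most once per issue; literal constants count against the same limit. A toy checker for that rule (the operand-kind abstraction is mine, not the real MachineOperand API):

    #include <cstddef>

    enum class OpKind { VGPR, SGPR, Literal, InlineConst };

    // Count constant-bus reads across the operands; more than one means
    // legalization must move an operand into a VGPR.
    bool exceedsConstantBusLimit(const OpKind *Ops, size_t N) {
      unsigned Uses = 0;
      for (size_t i = 0; i < N; ++i)
        if (Ops[i] == OpKind::SGPR || Ops[i] == OpKind::Literal)
          ++Uses;
      return Uses > 1; // one constant-bus read per VALU instruction
    }
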
1612 case AMDGPU::VCC: in findImplicitSGPRRead()
1613 case AMDGPU::M0: in findImplicitSGPRRead()
1614 case AMDGPU::FLAT_SCR: in findImplicitSGPRRead()
1622 return AMDGPU::NoRegister; in findImplicitSGPRRead()
1628 case AMDGPU::V_READLANE_B32: in shouldReadExec()
1629 case AMDGPU::V_READLANE_B32_si: in shouldReadExec()
1630 case AMDGPU::V_READLANE_B32_vi: in shouldReadExec()
1631 case AMDGPU::V_WRITELANE_B32: in shouldReadExec()
1632 case AMDGPU::V_WRITELANE_B32_si: in shouldReadExec()
1633 case AMDGPU::V_WRITELANE_B32_vi: in shouldReadExec()
1652 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0); in verifyInstruction()
1653 int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1); in verifyInstruction()
1654 int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2); in verifyInstruction()
1681 case AMDGPU::OPERAND_REG_IMM32: in verifyInstruction()
1683 case AMDGPU::OPERAND_REG_INLINE_C: in verifyInstruction()
1691 case AMDGPU::OPERAND_KIMM32: in verifyInstruction()
1709 if (Reg == AMDGPU::NoRegister || in verifyInstruction()
1730 if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) in verifyInstruction()
1734 if (SGPRUsed != AMDGPU::NoRegister) in verifyInstruction()
1758 if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 || in verifyInstruction()
1759 Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) { in verifyInstruction()
1775 if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) { in verifyInstruction()
1786 default: return AMDGPU::INSTRUCTION_LIST_END; in getVALUOp()
1787 case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE; in getVALUOp()
1788 case AMDGPU::COPY: return AMDGPU::COPY; in getVALUOp()
1789 case AMDGPU::PHI: return AMDGPU::PHI; in getVALUOp()
1790 case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG; in getVALUOp()
1791 case AMDGPU::S_MOV_B32: in getVALUOp()
1793 AMDGPU::COPY : AMDGPU::V_MOV_B32_e32; in getVALUOp()
1794 case AMDGPU::S_ADD_I32: in getVALUOp()
1795 case AMDGPU::S_ADD_U32: return AMDGPU::V_ADD_I32_e32; in getVALUOp()
1796 case AMDGPU::S_ADDC_U32: return AMDGPU::V_ADDC_U32_e32; in getVALUOp()
1797 case AMDGPU::S_SUB_I32: in getVALUOp()
1798 case AMDGPU::S_SUB_U32: return AMDGPU::V_SUB_I32_e32; in getVALUOp()
1799 case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32; in getVALUOp()
1800 case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_I32; in getVALUOp()
1801 case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e32; in getVALUOp()
1802 case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e32; in getVALUOp()
1803 case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e32; in getVALUOp()
1804 case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e32; in getVALUOp()
1805 case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e32; in getVALUOp()
1806 case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e32; in getVALUOp()
1807 case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e32; in getVALUOp()
1808 case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32; in getVALUOp()
1809 case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64; in getVALUOp()
1810 case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32; in getVALUOp()
1811 case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64; in getVALUOp()
1812 case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32; in getVALUOp()
1813 case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64; in getVALUOp()
1814 case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32; in getVALUOp()
1815 case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32; in getVALUOp()
1816 case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32; in getVALUOp()
1817 case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32; in getVALUOp()
1818 case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64; in getVALUOp()
1819 case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32; in getVALUOp()
1820 case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32; in getVALUOp()
1821 case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32; in getVALUOp()
1822 case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32; in getVALUOp()
1823 case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32; in getVALUOp()
1824 case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32; in getVALUOp()
1825 case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32; in getVALUOp()
1826 case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32; in getVALUOp()
1827 case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32; in getVALUOp()
1828 case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e32; in getVALUOp()
1829 case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e32; in getVALUOp()
1830 case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e32; in getVALUOp()
1831 case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e32; in getVALUOp()
1832 case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e32; in getVALUOp()
1833 case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e32; in getVALUOp()
1834 case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64; in getVALUOp()
1835 case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32; in getVALUOp()
1836 case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32; in getVALUOp()
1837 case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64; in getVALUOp()
1838 case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ; in getVALUOp()
1839 case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ; in getVALUOp()
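
The getVALUOp switch is a one-way scalar-to-vector opcode map. The same information fits a table; shown here with a few of the pairs from above, and opcode names as strings purely for illustration:

    #include <cstring>

    struct SALUToVALU { const char *SALU, *VALU; };

    static const SALUToVALU Map[] = {
      {"S_ADD_I32",    "V_ADD_I32_e32"},
      {"S_AND_B32",    "V_AND_B32_e32"},
      {"S_CMP_EQ_I32", "V_CMP_EQ_I32_e32"},
    };

    // Look up the VALU equivalent of a SALU opcode; the sentinel plays
    // the role of AMDGPU::INSTRUCTION_LIST_END above.
    const char *getVALUOp(const char *SALU) {
      for (const auto &E : Map)
        if (std::strcmp(E.SALU, SALU) == 0)
          return E.VALU;
      return "INSTRUCTION_LIST_END"; // no VALU equivalent
    }
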
1844 return getVALUOp(MI) != AMDGPU::INSTRUCTION_LIST_END; in isSALUOpSupportedOnVALU()
1866 case AMDGPU::COPY: in canReadVGPR()
1867 case AMDGPU::REG_SEQUENCE: in canReadVGPR()
1868 case AMDGPU::PHI: in canReadVGPR()
1869 case AMDGPU::INSERT_SUBREG: in canReadVGPR()
1883 unsigned Opcode = AMDGPU::V_MOV_B32_e32; in legalizeOpWithMove()
1885 Opcode = AMDGPU::COPY; in legalizeOpWithMove()
1887 Opcode = AMDGPU::S_MOV_B32; in legalizeOpWithMove()
1890 if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC)) in legalizeOpWithMove()
1891 VRC = &AMDGPU::VReg_64RegClass; in legalizeOpWithMove()
1893 VRC = &AMDGPU::VGPR_32RegClass; in legalizeOpWithMove()
1912 if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) { in buildExtractSubReg()
1942 if (SubIdx == AMDGPU::sub0) in buildExtractSubRegOrImm()
1944 if (SubIdx == AMDGPU::sub1) in buildExtractSubRegOrImm()
2027 } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) { in isOperandLegal()
2054 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); in legalizeOperandsVOP2()
2063 bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister; in legalizeOperandsVOP2()
2065 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); in legalizeOperandsVOP2()
2086 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); in legalizeOperandsVOP2()
2133 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0), in legalizeOperandsVOP3()
2134 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1), in legalizeOperandsVOP3()
2135 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2) in legalizeOperandsVOP3()
2154 if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) { in legalizeOperandsVOP3()
2175 unsigned SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); in readlaneVGPRToSGPR()
2177 get(AMDGPU::V_READFIRSTLANE_B32), SGPR) in readlaneVGPRToSGPR()
2184 get(AMDGPU::REG_SEQUENCE), DstReg); in readlaneVGPRToSGPR()
2199 MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase); in legalizeOperandsSMRD()
2230 if (MI.getOpcode() == AMDGPU::PHI) { in legalizeOperands()
2269 BuildMI(*InsertBB, Insert, MI.getDebugLoc(), get(AMDGPU::COPY), DstReg) in legalizeOperands()
2278 if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) { in legalizeOperands()
2297 BuildMI(*MBB, MI, MI.getDebugLoc(), get(AMDGPU::COPY), DstReg) in legalizeOperands()
2310 if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) { in legalizeOperands()
2318 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::COPY), NewSrc0) in legalizeOperands()
2327 MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc); in legalizeOperands()
2333 MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp); in legalizeOperands()
2345 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc); in legalizeOperands()
2361 &AMDGPU::VReg_128RegClass, AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass); in legalizeOperands()
2364 unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); in legalizeOperands()
2365 unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); in legalizeOperands()
2366 unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); in legalizeOperands()
2367 unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass); in legalizeOperands()
2371 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B64), Zero64) in legalizeOperands()
2375 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B32), SRsrcFormatLo) in legalizeOperands()
2379 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B32), SRsrcFormatHi) in legalizeOperands()
2383 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewSRsrc) in legalizeOperands()
2385 .addImm(AMDGPU::sub0_sub1) in legalizeOperands()
2387 .addImm(AMDGPU::sub2) in legalizeOperands()
2389 .addImm(AMDGPU::sub3); in legalizeOperands()
2391 MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr); in legalizeOperands()
2392 unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); in legalizeOperands()
2396 unsigned NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); in legalizeOperands()
2397 unsigned NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); in legalizeOperands()
2401 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), NewVAddrLo) in legalizeOperands()
2402 .addReg(SRsrcPtr, 0, AMDGPU::sub0) in legalizeOperands()
2403 .addReg(VAddr->getReg(), 0, AMDGPU::sub0); in legalizeOperands()
2406 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e32), NewVAddrHi) in legalizeOperands()
2407 .addReg(SRsrcPtr, 0, AMDGPU::sub1) in legalizeOperands()
2408 .addReg(VAddr->getReg(), 0, AMDGPU::sub1); in legalizeOperands()
2411 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr) in legalizeOperands()
2413 .addImm(AMDGPU::sub0) in legalizeOperands()
2415 .addImm(AMDGPU::sub1); in legalizeOperands()
2423 MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata); in legalizeOperands()
2424 MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset); in legalizeOperands()
2425 MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset); in legalizeOperands()
2426 unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode()); in legalizeOperands()
2430 MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in); in legalizeOperands()
2438 .addReg(AMDGPU::NoRegister) // Dummy value for vaddr. in legalizeOperands()
2447 getNamedOperand(MI, AMDGPU::OpName::glc)) { in legalizeOperands()
2451 MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc)); in legalizeOperands()
2454 getNamedOperand(MI, AMDGPU::OpName::tfe)) { in legalizeOperands()
2465 .addReg(AMDGPU::NoRegister) // Dummy value for vaddr. in legalizeOperands()
2471 .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc)) in legalizeOperands()
2478 BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), in legalizeOperands()
2480 .addReg(SRsrcPtr, 0, AMDGPU::sub0) in legalizeOperands()
2481 .addImm(AMDGPU::sub0) in legalizeOperands()
2482 .addReg(SRsrcPtr, 0, AMDGPU::sub1) in legalizeOperands()
2483 .addImm(AMDGPU::sub1); in legalizeOperands()
2485 VAddr = getNamedOperand(*Addr64, AMDGPU::OpName::vaddr); in legalizeOperands()
2486 SRsrc = getNamedOperand(*Addr64, AMDGPU::OpName::srsrc); in legalizeOperands()
2512 case AMDGPU::S_AND_B64: in moveToVALU()
2513 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_AND_B32_e64); in moveToVALU()
2517 case AMDGPU::S_OR_B64: in moveToVALU()
2518 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_OR_B32_e64); in moveToVALU()
2522 case AMDGPU::S_XOR_B64: in moveToVALU()
2523 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_XOR_B32_e64); in moveToVALU()
2527 case AMDGPU::S_NOT_B64: in moveToVALU()
2528 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::V_NOT_B32_e32); in moveToVALU()
2532 case AMDGPU::S_BCNT1_I32_B64: in moveToVALU()
2537 case AMDGPU::S_BFE_I64: { in moveToVALU()
2543 case AMDGPU::S_LSHL_B32: in moveToVALU()
2545 NewOpcode = AMDGPU::V_LSHLREV_B32_e64; in moveToVALU()
2549 case AMDGPU::S_ASHR_I32: in moveToVALU()
2551 NewOpcode = AMDGPU::V_ASHRREV_I32_e64; in moveToVALU()
2555 case AMDGPU::S_LSHR_B32: in moveToVALU()
2557 NewOpcode = AMDGPU::V_LSHRREV_B32_e64; in moveToVALU()
2561 case AMDGPU::S_LSHL_B64: in moveToVALU()
2563 NewOpcode = AMDGPU::V_LSHLREV_B64; in moveToVALU()
2567 case AMDGPU::S_ASHR_I64: in moveToVALU()
2569 NewOpcode = AMDGPU::V_ASHRREV_I64; in moveToVALU()
2573 case AMDGPU::S_LSHR_B64: in moveToVALU()
2575 NewOpcode = AMDGPU::V_LSHRREV_B64; in moveToVALU()
2580 case AMDGPU::S_ABS_I32: in moveToVALU()
2585 case AMDGPU::S_CBRANCH_SCC0: in moveToVALU()
2586 case AMDGPU::S_CBRANCH_SCC1: in moveToVALU()
2588 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B64), in moveToVALU()
2589 AMDGPU::VCC) in moveToVALU()
2590 .addReg(AMDGPU::EXEC) in moveToVALU()
2591 .addReg(AMDGPU::VCC); in moveToVALU()
2594 case AMDGPU::S_BFE_U64: in moveToVALU()
2595 case AMDGPU::S_BFM_B64: in moveToVALU()
2599 if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) { in moveToVALU()
2615 if (Op.isReg() && Op.getReg() == AMDGPU::SCC) { in moveToVALU()
2621 if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) { in moveToVALU()
2624 unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16; in moveToVALU()
2628 } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) { in moveToVALU()
2636 if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) { in moveToVALU()
2652 unsigned NewDstReg = AMDGPU::NoRegister; in moveToVALU()
2681 unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); in lowerScalarAbs()
2682 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); in lowerScalarAbs()
2684 BuildMI(MBB, MII, DL, get(AMDGPU::V_SUB_I32_e32), TmpReg) in lowerScalarAbs()
2688 BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg) in lowerScalarAbs()
2711 &AMDGPU::SGPR_32RegClass; in splitScalar64BitUnaryOp()
2713 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); in splitScalar64BitUnaryOp()
2716 AMDGPU::sub0, Src0SubRC); in splitScalar64BitUnaryOp()
2720 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); in splitScalar64BitUnaryOp()
2727 AMDGPU::sub1, Src0SubRC); in splitScalar64BitUnaryOp()
2736 .addImm(AMDGPU::sub0) in splitScalar64BitUnaryOp()
2738 .addImm(AMDGPU::sub1); in splitScalar64BitUnaryOp()
2765 &AMDGPU::SGPR_32RegClass; in splitScalar64BitBinaryOp()
2767 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); in splitScalar64BitBinaryOp()
2770 &AMDGPU::SGPR_32RegClass; in splitScalar64BitBinaryOp()
2772 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0); in splitScalar64BitBinaryOp()
2775 AMDGPU::sub0, Src0SubRC); in splitScalar64BitBinaryOp()
2777 AMDGPU::sub0, Src1SubRC); in splitScalar64BitBinaryOp()
2781 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); in splitScalar64BitBinaryOp()
2789 AMDGPU::sub1, Src0SubRC); in splitScalar64BitBinaryOp()
2791 AMDGPU::sub1, Src1SubRC); in splitScalar64BitBinaryOp()
2801 .addImm(AMDGPU::sub0) in splitScalar64BitBinaryOp()
2803 .addImm(AMDGPU::sub1); in splitScalar64BitBinaryOp()
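
splitScalar64BitBinaryOp applies a 32-bit VALU opcode to the sub0 and sub1 halves independently and stitches the results back with the REG_SEQUENCE at lines 2801-2803, which is only sound for ops with no cross-half dataflow (the V_AND/V_OR/V_XOR users in moveToVALU above). The value-level equivalent:

    #include <cstdint>

    // Apply a 32-bit operation to each half of a 64-bit value and
    // recombine; the final OR models the REG_SEQUENCE step.
    template <typename Op32>
    uint64_t splitBinaryOp64(uint64_t Src0, uint64_t Src1, Op32 Op) {
      uint32_t DestSub0 = Op(uint32_t(Src0), uint32_t(Src1));
      uint32_t DestSub1 = Op(uint32_t(Src0 >> 32), uint32_t(Src1 >> 32));
      return (uint64_t(DestSub1) << 32) | DestSub0;
    }
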
2827 const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64); in splitScalar64BitBCNT()
2830 &AMDGPU::SGPR_32RegClass; in splitScalar64BitBCNT()
2832 unsigned MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); in splitScalar64BitBCNT()
2833 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); in splitScalar64BitBCNT()
2835 const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0); in splitScalar64BitBCNT()
2838 AMDGPU::sub0, SrcSubRC); in splitScalar64BitBCNT()
2840 AMDGPU::sub1, SrcSubRC); in splitScalar64BitBCNT()
2872 assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 && in splitScalar64BitBFE()
2876 unsigned MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); in splitScalar64BitBFE()
2877 unsigned MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); in splitScalar64BitBFE()
2878 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); in splitScalar64BitBFE()
2880 BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo) in splitScalar64BitBFE()
2881 .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0) in splitScalar64BitBFE()
2885 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi) in splitScalar64BitBFE()
2891 .addImm(AMDGPU::sub0) in splitScalar64BitBFE()
2893 .addImm(AMDGPU::sub1); in splitScalar64BitBFE()
2901 unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); in splitScalar64BitBFE()
2902 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); in splitScalar64BitBFE()
2904 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg) in splitScalar64BitBFE()
2906 .addReg(Src.getReg(), 0, AMDGPU::sub0); in splitScalar64BitBFE()
2909 .addReg(Src.getReg(), 0, AMDGPU::sub0) in splitScalar64BitBFE()
2910 .addImm(AMDGPU::sub0) in splitScalar64BitBFE()
2912 .addImm(AMDGPU::sub1); in splitScalar64BitBFE()
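
Both S_BFE_I64 paths above rely on sign replication: the low half comes from a 32-bit signed extract (or passes through unchanged when the width is exactly 32), and the high half is that value shifted right arithmetically by 31. A value-level sketch for a zero offset and width up to 32, which is what the assert at line 2872 appears to permit:

    #include <cstdint>

    // Low half: 32-bit signed bitfield extract (V_BFE_I32); high half:
    // sign bits via arithmetic shift (V_ASHRREV_I32). Width in 1..32.
    int64_t bfeI64(uint32_t SrcLo, unsigned Width) {
      int32_t Lo = int32_t(SrcLo << (32 - Width)) >> (32 - Width);
      int32_t Hi = Lo >> 31;
      return int64_t((uint64_t(uint32_t(Hi)) << 32) | uint32_t(Lo));
    }
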
2939 if (MI.findRegisterDefOperandIdx(AMDGPU::SCC) != -1) in addSCCDefUsersToVALUWorklist()
2942 if (MI.findRegisterUseOperandIdx(AMDGPU::SCC) != -1) in addSCCDefUsersToVALUWorklist()
2955 case AMDGPU::COPY: in getDestEquivalentVGPRClass()
2956 case AMDGPU::PHI: in getDestEquivalentVGPRClass()
2957 case AMDGPU::REG_SEQUENCE: in getDestEquivalentVGPRClass()
2958 case AMDGPU::INSERT_SUBREG: in getDestEquivalentVGPRClass()
2986 if (SGPRReg != AMDGPU::NoRegister) in findUsedSGPR()
2989 unsigned UsedSGPRs[3] = { AMDGPU::NoRegister }; in findUsedSGPR()
3028 if (UsedSGPRs[0] != AMDGPU::NoRegister) { in findUsedSGPR()
3033 if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) { in findUsedSGPR()
3043 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName); in getNamedOperand()
3051 uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT; in getDefaultRsrcDataFormat()
3065 AMDGPU::RSRC_TID_ENABLE | in getScratchRsrcWords23()
3070 Rsrc23 |= (EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT) | in getScratchRsrcWords23()
3072 (UINT64_C(3) << AMDGPU::RSRC_INDEX_STRIDE_SHIFT); in getScratchRsrcWords23()
3077 Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT; in getScratchRsrcWords23()
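
getScratchRsrcWords23 packs descriptor words 2-3: the default data format OR'd with a TID-enable bit, an element-size field, and an index stride of 3 (encoding 64). A sketch with placeholder bit positions; the real values live in the RSRC_* constants from SIDefines.h and are not shown in this listing:

    #include <cstdint>

    // Placeholder shift amounts; assumptions, not the real RSRC_* values.
    constexpr unsigned kEltSizeShift     = 59;
    constexpr unsigned kIndexStrideShift = 61;
    constexpr uint64_t kTidEnable        = uint64_t(1) << 55;

    uint64_t buildScratchRsrc23(uint64_t DefaultDataFormat,
                                uint64_t EltSizeValue) {
      uint64_t Rsrc23 = DefaultDataFormat | kTidEnable;
      Rsrc23 |= (EltSizeValue << kEltSizeShift) |
                (uint64_t(3) << kIndexStrideShift); // index stride = 64
      return Rsrc23;
    }
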
3109 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); in getInstSizeInBytes()
3116 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); in getInstSizeInBytes()
3146 {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"}, in getSerializableTargetIndices()
3147 {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"}, in getSerializableTargetIndices()
3148 {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"}, in getSerializableTargetIndices()
3149 {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"}, in getSerializableTargetIndices()
3150 {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}}; in getSerializableTargetIndices()