Lines Matching refs:AMDGPU

143 case AMDGPU::V_MAC_F32_e64: in isInlineConstantIfFolded()
144 case AMDGPU::V_MAC_F16_e64: in isInlineConstantIfFolded()
145 case AMDGPU::V_FMAC_F32_e64: in isInlineConstantIfFolded()
146 case AMDGPU::V_FMAC_F16_e64: { in isInlineConstantIfFolded()
149 int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2); in isInlineConstantIfFolded()
151 bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64 || in isInlineConstantIfFolded()
152 Opc == AMDGPU::V_FMAC_F16_e64; in isInlineConstantIfFolded()
153 bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64 || in isInlineConstantIfFolded()
154 Opc == AMDGPU::V_FMAC_F32_e64; in isInlineConstantIfFolded()
157 (IsF32 ? AMDGPU::V_FMA_F32 : AMDGPU::V_FMA_F16_gfx9) : in isInlineConstantIfFolded()
158 (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16); in isInlineConstantIfFolded()
177 OpNo == AMDGPU::getNamedOperandIdx(UseMI.getOpcode(), AMDGPU::OpName::vaddr); in frameIndexMayFold()
195 AMDGPU::isFoldableLiteralV216(Fold.ImmToFold, in updateOperand()
202 if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0)) in updateOperand()
203 ModIdx = AMDGPU::OpName::src0_modifiers; in updateOperand()
204 else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1)) in updateOperand()
205 ModIdx = AMDGPU::OpName::src1_modifiers; in updateOperand()
206 else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2)) in updateOperand()
207 ModIdx = AMDGPU::OpName::src2_modifiers; in updateOperand()
209 ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx); in updateOperand()
216 case AMDGPU::OPERAND_REG_IMM_V2FP16: in updateOperand()
217 case AMDGPU::OPERAND_REG_IMM_V2INT16: in updateOperand()
218 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: in updateOperand()
219 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16: in updateOperand()
242 auto Liveness = MBB->computeRegisterLiveness(&TRI, AMDGPU::VCC, MI, 16); in updateOperand()
262 BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), Dst1.getReg()) in updateOperand()
263 .addReg(AMDGPU::VCC, RegState::Kill); in updateOperand()
275 MI->setDesc(TII.get(AMDGPU::IMPLICIT_DEF)); in updateOperand()
335 if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 || in tryAddToFoldList()
336 Opc == AMDGPU::V_FMAC_F32_e64 || Opc == AMDGPU::V_FMAC_F16_e64) && in tryAddToFoldList()
337 (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) { in tryAddToFoldList()
338 bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64 || in tryAddToFoldList()
339 Opc == AMDGPU::V_FMAC_F16_e64; in tryAddToFoldList()
340 bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64 || in tryAddToFoldList()
341 Opc == AMDGPU::V_FMAC_F32_e64; in tryAddToFoldList()
343 (IsF32 ? AMDGPU::V_FMA_F32 : AMDGPU::V_FMA_F16_gfx9) : in tryAddToFoldList()
344 (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16); in tryAddToFoldList()
360 if (Opc == AMDGPU::S_SETREG_B32) in tryAddToFoldList()
361 ImmOpc = AMDGPU::S_SETREG_IMM32_B32; in tryAddToFoldList()
362 else if (Opc == AMDGPU::S_SETREG_B32_mode) in tryAddToFoldList()
363 ImmOpc = AMDGPU::S_SETREG_IMM32_B32_mode; in tryAddToFoldList()
406 if ((Opc == AMDGPU::V_ADD_CO_U32_e64 || in tryAddToFoldList()
407 Opc == AMDGPU::V_SUB_CO_U32_e64 || in tryAddToFoldList()
408 Opc == AMDGPU::V_SUBREV_CO_U32_e64) && // FIXME in tryAddToFoldList()
424 int Op32 = AMDGPU::getVOPe32(MaybeCommutedOpc); in tryAddToFoldList()
474 case AMDGPU::V_MOV_B32_e32: in isUseSafeToFold()
475 case AMDGPU::V_MOV_B32_e64: in isUseSafeToFold()
476 case AMDGPU::V_MOV_B64_PSEUDO: in isUseSafeToFold()
478 return !MI.hasRegisterImplicitUseOperand(AMDGPU::M0); in isUseSafeToFold()
532 if (OpTy < AMDGPU::OPERAND_REG_INLINE_AC_FIRST || in tryToFoldACImm()
533 OpTy > AMDGPU::OPERAND_REG_INLINE_AC_LAST) in tryToFoldACImm()
594 if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister) in foldOperand()
634 if (TII->getNamedOperand(*UseMI, AMDGPU::OpName::srsrc)->getReg() != in foldOperand()
640 *TII->getNamedOperand(*UseMI, AMDGPU::OpName::soffset); in foldOperand()
694 if (DestRC == &AMDGPU::AGPR_32RegClass && in foldOperand()
695 TII->isInlineConstant(OpToFold, AMDGPU::OPERAND_REG_INLINE_C_INT32)) { in foldOperand()
696 UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32)); in foldOperand()
707 if (MovOp == AMDGPU::COPY) in foldOperand()
739 getRegSeqInit(Defs, UseReg, AMDGPU::OPERAND_REG_INLINE_C_INT32, TII, in foldOperand()
744 UseMI->setDesc(TII->get(AMDGPU::REG_SEQUENCE)); in foldOperand()
755 TII->isInlineConstant(*Def, AMDGPU::OPERAND_REG_INLINE_C_INT32)) { in foldOperand()
758 auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass); in foldOperand()
760 TII->get(AMDGPU::V_ACCVGPR_WRITE_B32), Tmp).addImm(Imm); in foldOperand()
785 auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass); in foldOperand()
786 BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Tmp).add(*Def); in foldOperand()
796 Vgpr = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); in foldOperand()
797 BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Vgpr).add(*Def); in foldOperand()
800 auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass); in foldOperand()
802 TII->get(AMDGPU::V_ACCVGPR_WRITE_B32), Tmp).addReg(Vgpr); in foldOperand()
816 UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32)); in foldOperand()
819 UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_READ_B32)); in foldOperand()
824 if (UseOpc == AMDGPU::V_READFIRSTLANE_B32 || in foldOperand()
825 (UseOpc == AMDGPU::V_READLANE_B32 && in foldOperand()
827 AMDGPU::getNamedOperandIdx(UseOpc, AMDGPU::OpName::src0))) { in foldOperand()
839 UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32)); in foldOperand()
860 UseMI->setDesc(TII->get(AMDGPU::COPY)); in foldOperand()
894 if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) { in foldOperand()
898 if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64) in foldOperand()
902 if (UseOp.getSubReg() == AMDGPU::sub0) { in foldOperand()
905 assert(UseOp.getSubReg() == AMDGPU::sub1); in foldOperand()
922 case AMDGPU::V_AND_B32_e64: in evalBinaryInstruction()
923 case AMDGPU::V_AND_B32_e32: in evalBinaryInstruction()
924 case AMDGPU::S_AND_B32: in evalBinaryInstruction()
927 case AMDGPU::V_OR_B32_e64: in evalBinaryInstruction()
928 case AMDGPU::V_OR_B32_e32: in evalBinaryInstruction()
929 case AMDGPU::S_OR_B32: in evalBinaryInstruction()
932 case AMDGPU::V_XOR_B32_e64: in evalBinaryInstruction()
933 case AMDGPU::V_XOR_B32_e32: in evalBinaryInstruction()
934 case AMDGPU::S_XOR_B32: in evalBinaryInstruction()
937 case AMDGPU::S_XNOR_B32: in evalBinaryInstruction()
940 case AMDGPU::S_NAND_B32: in evalBinaryInstruction()
943 case AMDGPU::S_NOR_B32: in evalBinaryInstruction()
946 case AMDGPU::S_ANDN2_B32: in evalBinaryInstruction()
949 case AMDGPU::S_ORN2_B32: in evalBinaryInstruction()
952 case AMDGPU::V_LSHL_B32_e64: in evalBinaryInstruction()
953 case AMDGPU::V_LSHL_B32_e32: in evalBinaryInstruction()
954 case AMDGPU::S_LSHL_B32: in evalBinaryInstruction()
958 case AMDGPU::V_LSHLREV_B32_e64: in evalBinaryInstruction()
959 case AMDGPU::V_LSHLREV_B32_e32: in evalBinaryInstruction()
962 case AMDGPU::V_LSHR_B32_e64: in evalBinaryInstruction()
963 case AMDGPU::V_LSHR_B32_e32: in evalBinaryInstruction()
964 case AMDGPU::S_LSHR_B32: in evalBinaryInstruction()
967 case AMDGPU::V_LSHRREV_B32_e64: in evalBinaryInstruction()
968 case AMDGPU::V_LSHRREV_B32_e32: in evalBinaryInstruction()
971 case AMDGPU::V_ASHR_I32_e64: in evalBinaryInstruction()
972 case AMDGPU::V_ASHR_I32_e32: in evalBinaryInstruction()
973 case AMDGPU::S_ASHR_I32: in evalBinaryInstruction()
976 case AMDGPU::V_ASHRREV_I32_e64: in evalBinaryInstruction()
977 case AMDGPU::V_ASHRREV_I32_e32: in evalBinaryInstruction()
986 return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32; in getMovOpc()
1011 if (Op.getSubReg() != AMDGPU::NoSubRegister || !Op.getReg().isVirtual()) in getImmOrMaterializedImm()
1033 if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 || in tryConstantFoldOp()
1034 Opc == AMDGPU::S_NOT_B32) { in tryConstantFoldOp()
1036 mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32))); in tryConstantFoldOp()
1040 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); in tryConstantFoldOp()
1044 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); in tryConstantFoldOp()
1079 if (Opc == AMDGPU::V_OR_B32_e64 || in tryConstantFoldOp()
1080 Opc == AMDGPU::V_OR_B32_e32 || in tryConstantFoldOp()
1081 Opc == AMDGPU::S_OR_B32) { in tryConstantFoldOp()
1085 mutateCopyOp(*MI, TII->get(AMDGPU::COPY)); in tryConstantFoldOp()
1089 mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32))); in tryConstantFoldOp()
1096 if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 || in tryConstantFoldOp()
1097 MI->getOpcode() == AMDGPU::V_AND_B32_e32 || in tryConstantFoldOp()
1098 MI->getOpcode() == AMDGPU::S_AND_B32) { in tryConstantFoldOp()
1102 mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32))); in tryConstantFoldOp()
1106 mutateCopyOp(*MI, TII->get(AMDGPU::COPY)); in tryConstantFoldOp()
1114 if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 || in tryConstantFoldOp()
1115 MI->getOpcode() == AMDGPU::V_XOR_B32_e32 || in tryConstantFoldOp()
1116 MI->getOpcode() == AMDGPU::S_XOR_B32) { in tryConstantFoldOp()
1120 mutateCopyOp(*MI, TII->get(AMDGPU::COPY)); in tryConstantFoldOp()
1133 if (Opc == AMDGPU::V_CNDMASK_B32_e32 || in tryFoldInst()
1134 Opc == AMDGPU::V_CNDMASK_B32_e64 || in tryFoldInst()
1135 Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) { in tryFoldInst()
1136 const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0); in tryFoldInst()
1137 const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1); in tryFoldInst()
1138 int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers); in tryFoldInst()
1139 int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers); in tryFoldInst()
1145 TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false)); in tryFoldInst()
1146 int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2); in tryFoldInst()
1149 MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1)); in tryFoldInst()
1265 if (DefMI->readsRegister(AMDGPU::EXEC, TRI) && in foldInstOperand()
1294 case AMDGPU::V_MAX_F32_e64: in isClamp()
1295 case AMDGPU::V_MAX_F16_e64: in isClamp()
1296 case AMDGPU::V_MAX_F64: in isClamp()
1297 case AMDGPU::V_PK_MAX_F16: { in isClamp()
1298 if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm()) in isClamp()
1302 const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0); in isClamp()
1303 const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1); in isClamp()
1307 Src0->getSubReg() != AMDGPU::NoSubRegister) in isClamp()
1311 if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod)) in isClamp()
1315 = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm(); in isClamp()
1317 = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm(); in isClamp()
1321 unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1 in isClamp()
1357 MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp); in tryFoldClamp()
1373 case AMDGPU::V_MUL_F32_e64: { in getOModValue()
1385 case AMDGPU::V_MUL_F16_e64: { in getOModValue()
1409 case AMDGPU::V_MUL_F32_e64: in isOMod()
1410 case AMDGPU::V_MUL_F16_e64: { in isOMod()
1412 if ((Op == AMDGPU::V_MUL_F32_e64 && MFI->getMode().FP32OutputDenormals) || in isOMod()
1413 (Op == AMDGPU::V_MUL_F16_e64 && MFI->getMode().FP64FP16OutputDenormals)) in isOMod()
1418 const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0); in isOMod()
1419 const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1); in isOMod()
1431 TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) || in isOMod()
1432 TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) || in isOMod()
1433 TII->hasModifiersSet(MI, AMDGPU::OpName::omod) || in isOMod()
1434 TII->hasModifiersSet(MI, AMDGPU::OpName::clamp)) in isOMod()
1439 case AMDGPU::V_ADD_F32_e64: in isOMod()
1440 case AMDGPU::V_ADD_F16_e64: { in isOMod()
1442 if ((Op == AMDGPU::V_ADD_F32_e64 && MFI->getMode().FP32OutputDenormals) || in isOMod()
1443 (Op == AMDGPU::V_ADD_F16_e64 && MFI->getMode().FP64FP16OutputDenormals)) in isOMod()
1447 const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0); in isOMod()
1448 const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1); in isOMod()
1452 !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) && in isOMod()
1453 !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) && in isOMod()
1454 !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) && in isOMod()
1455 !TII->hasModifiersSet(MI, AMDGPU::OpName::omod)) in isOMod()
1471 RegOp->getSubReg() != AMDGPU::NoSubRegister || in tryFoldOMod()
1476 MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod); in tryFoldOMod()
1482 if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp)) in tryFoldOMod()
1522 if (CurrentKnownM0Val && MI.modifiesRegister(AMDGPU::M0, TRI)) in runOnMachineFunction()
1536 if (MI.getOperand(0).getReg() == AMDGPU::M0) { in runOnMachineFunction()