Lines Matching refs:AMDGPU
177 if (I->getOpcode() == AMDGPU::S_CBRANCH_VCCNZ || in shouldSkip()
178 I->getOpcode() == AMDGPU::S_CBRANCH_VCCZ) in shouldSkip()
207 BuildMI(*From.getParent(), &From, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ)) in Skip()
225 BuildMI(&MBB, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ)) in skipIfDead()
231 BuildMI(*SkipBB, Insert, DL, TII->get(AMDGPU::EXP)) in skipIfDead()
237 .addReg(AMDGPU::VGPR0, RegState::Undef) in skipIfDead()
238 .addReg(AMDGPU::VGPR0, RegState::Undef) in skipIfDead()
239 .addReg(AMDGPU::VGPR0, RegState::Undef) in skipIfDead()
240 .addReg(AMDGPU::VGPR0, RegState::Undef); in skipIfDead()
243 BuildMI(*SkipBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM)); in skipIfDead()
254 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), Reg) in If()
257 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), Reg) in If()
258 .addReg(AMDGPU::EXEC) in If()
264 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::SI_MASK_BRANCH)) in If()
278 TII->get(AMDGPU::S_OR_SAVEEXEC_B64), Dst) in Else()
285 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_B64), Dst) in Else()
286 .addReg(AMDGPU::EXEC) in Else()
290 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC) in Else()
291 .addReg(AMDGPU::EXEC) in Else()
297 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::SI_MASK_BRANCH)) in Else()
311 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst) in Break()
312 .addReg(AMDGPU::EXEC) in Break()
326 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst) in IfBreak()
341 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst) in ElseBreak()
353 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64), AMDGPU::EXEC) in Loop()
354 .addReg(AMDGPU::EXEC) in Loop()
357 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ)) in Loop()
369 TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC) in EndCf()
370 .addReg(AMDGPU::EXEC) in EndCf()
400 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC) in Kill()
404 BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMPX_LE_F32_e32)) in Kill()
423 if (const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val)) { in splitLoadM0BlockLiveIns()
435 const MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::src); in splitLoadM0BlockLiveIns()
452 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), AMDGPU::VCC_LO) in emitLoadM0FromVGPRLoop()
456 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) in emitLoadM0FromVGPRLoop()
457 .addReg(AMDGPU::VCC_LO); in emitLoadM0FromVGPRLoop()
460 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e32)) in emitLoadM0FromVGPRLoop()
461 .addReg(AMDGPU::M0) in emitLoadM0FromVGPRLoop()
465 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), AMDGPU::VCC) in emitLoadM0FromVGPRLoop()
466 .addReg(AMDGPU::VCC); in emitLoadM0FromVGPRLoop()
469 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) in emitLoadM0FromVGPRLoop()
470 .addReg(AMDGPU::M0) in emitLoadM0FromVGPRLoop()
478 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC) in emitLoadM0FromVGPRLoop()
479 .addReg(AMDGPU::EXEC) in emitLoadM0FromVGPRLoop()
480 .addReg(AMDGPU::VCC); in emitLoadM0FromVGPRLoop()
483 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ)) in emitLoadM0FromVGPRLoop()
531 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); in loadM0()
533 if (AMDGPU::SReg_32RegClass.contains(Idx->getReg())) { in loadM0()
535 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) in loadM0()
539 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) in loadM0()
548 MachineOperand *SaveOp = TII->getNamedOperand(MI, AMDGPU::OpName::sdst); in loadM0()
553 assert(AMDGPU::SReg_64RegClass.contains(Save) && in loadM0()
554 AMDGPU::VGPR_32RegClass.contains(Idx->getReg())); in loadM0()
557 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), Save) in loadM0()
558 .addReg(AMDGPU::EXEC); in loadM0()
582 BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC) in loadM0()
600 unsigned SubReg = TRI->getSubReg(VecReg, AMDGPU::sub0); in computeIndirectRegAndOffset()
633 const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src); in indirectSrc()
634 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); in indirectSrc()
639 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); in indirectSrc()
640 if (Idx->getReg() == AMDGPU::NoRegister) { in indirectSrc()
642 BuildMI(MBB, MI.getIterator(), DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst) in indirectSrc()
649 BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) in indirectSrc()
662 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); in indirectDst()
665 const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val); in indirectDst()
668 MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); in indirectDst()
669 if (Idx->getReg() == AMDGPU::NoRegister) { in indirectDst()
671 BuildMI(MBB, MI.getIterator(), DL, TII->get(AMDGPU::V_MOV_B32_e32), Reg) in indirectDst()
678 BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELD_B32_e32), Reg) in indirectDst()
716 if (I->modifiesRegister(AMDGPU::EXEC, TRI)) in runOnMachineFunction()
721 case AMDGPU::SI_IF: in runOnMachineFunction()
726 case AMDGPU::SI_ELSE: in runOnMachineFunction()
730 case AMDGPU::SI_BREAK: in runOnMachineFunction()
734 case AMDGPU::SI_IF_BREAK: in runOnMachineFunction()
738 case AMDGPU::SI_ELSE_BREAK: in runOnMachineFunction()
742 case AMDGPU::SI_LOOP: in runOnMachineFunction()
747 case AMDGPU::SI_END_CF: in runOnMachineFunction()
760 case AMDGPU::SI_KILL_TERMINATOR: in runOnMachineFunction()
772 case AMDGPU::S_BRANCH: in runOnMachineFunction()
776 case AMDGPU::SI_INDIRECT_SRC_V1: in runOnMachineFunction()
777 case AMDGPU::SI_INDIRECT_SRC_V2: in runOnMachineFunction()
778 case AMDGPU::SI_INDIRECT_SRC_V4: in runOnMachineFunction()
779 case AMDGPU::SI_INDIRECT_SRC_V8: in runOnMachineFunction()
780 case AMDGPU::SI_INDIRECT_SRC_V16: in runOnMachineFunction()
792 case AMDGPU::SI_INDIRECT_DST_V1: in runOnMachineFunction()
793 case AMDGPU::SI_INDIRECT_DST_V2: in runOnMachineFunction()
794 case AMDGPU::SI_INDIRECT_DST_V4: in runOnMachineFunction()
795 case AMDGPU::SI_INDIRECT_DST_V8: in runOnMachineFunction()
796 case AMDGPU::SI_INDIRECT_DST_V16: in runOnMachineFunction()
808 case AMDGPU::SI_RETURN: { in runOnMachineFunction()
822 BuildMI(*BI, I, MI.getDebugLoc(), TII->get(AMDGPU::S_BRANCH)) in runOnMachineFunction()
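
Taken together, the If(), Else() and EndCf() matches above trace the standard EXEC-mask lowering of structured control flow: save the live exec mask, AND it with the per-lane condition, XOR to remember the lanes that were switched off, branch over the region when no lanes remain, and OR the saved lanes back in at SI_END_CF. The fragment below is a minimal free-standing sketch of that SI_IF pattern, not the pass's own code; it assumes the SILowerControlFlow context (the SIInstrInfo pointer and the target-internal AMDGPU headers) and that the SI_IF pseudo carries the saved-mask destination, the condition register, and the branch target as its first three operands. The helper name lowerIfSketch is hypothetical.

#include "AMDGPU.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"

using namespace llvm;

// Hypothetical helper; in the pass itself this logic lives in SILowerControlFlow::If().
static void lowerIfSketch(const SIInstrInfo *TII, MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  unsigned SaveReg = MI.getOperand(0).getReg();           // saved exec mask (sdst)
  unsigned CondReg = MI.getOperand(1).getReg();           // per-lane condition
  MachineBasicBlock *SkipTo = MI.getOperand(2).getMBB();  // block after the "then" region

  // exec &= cond; the previous exec mask is written to SaveReg.
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), SaveReg)
      .addReg(CondReg);

  // SaveReg = exec ^ SaveReg: the lanes that were just switched off,
  // to be ORed back into exec by the matching SI_END_CF.
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), SaveReg)
      .addReg(AMDGPU::EXEC)
      .addReg(SaveReg);

  // Skip the "then" region entirely when no lanes survived the AND.
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::SI_MASK_BRANCH))
      .addMBB(SkipTo);

  MI.eraseFromParent();
}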
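
The emitLoadM0FromVGPRLoop() matches show the other recurring pattern in this file: a "waterfall" loop for the case where the V_MOVRELS/V_MOVRELD index lives in a VGPR and may differ between lanes. One lane's index is read with V_READFIRSTLANE_B32, every lane holding the same index is selected into EXEC, the indexed move runs for those lanes, and the loop repeats until EXEC is empty. The sketch below spells that loop body out under the same assumptions as the previous sketch; the helper name and parameters are hypothetical, and the optional index offset (the S_ADD_I32 on M0 above) is elided.

// Hypothetical helper sketching the waterfall-loop body built by
// emitLoadM0FromVGPRLoop(); IdxVGPR is the per-lane index register.
static void emitWaterfallLoopSketch(const SIInstrInfo *TII,
                                    MachineBasicBlock &LoopBB,
                                    MachineBasicBlock::iterator I,
                                    const DebugLoc &DL, unsigned IdxVGPR) {
  // Read the index held by the first active lane into a scalar register.
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), AMDGPU::VCC_LO)
      .addReg(IdxVGPR);

  // Move it into M0, the register that V_MOVRELS/V_MOVRELD index from.
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addReg(AMDGPU::VCC_LO);

  // Compare every lane's index against M0; matching lanes set their VCC bit.
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e32))
      .addReg(AMDGPU::M0)
      .addReg(IdxVGPR);

  // Restrict EXEC to the matching lanes, keeping the previous mask in VCC.
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), AMDGPU::VCC)
      .addReg(AMDGPU::VCC);

  // ... the indexed V_MOVRELS_B32_e32 / V_MOVRELD_B32_e32 is emitted here ...

  // Drop the lanes just handled from the saved mask and loop while any remain.
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
      .addReg(AMDGPU::EXEC)
      .addReg(AMDGPU::VCC);
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
      .addMBB(&LoopBB);
}

Each iteration handles every lane that shares the first active lane's index, so the loop executes at most once per distinct index value present in the wave.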