/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/ARM/ |
D | MVEVPTBlockPass.cpp |
    145 MachineInstr *VCMP = findVCMPToFoldIntoVPST(MI, RDA, NewOpcode); in InsertVPTBlocks() local
    146 if (VCMP) { in InsertVPTBlocks()
    147 LLVM_DEBUG(dbgs() << " folding VCMP into VPST: "; VCMP->dump()); in InsertVPTBlocks()
    150 MIBuilder.add(VCMP->getOperand(1)); in InsertVPTBlocks()
    151 MIBuilder.add(VCMP->getOperand(2)); in InsertVPTBlocks()
    152 MIBuilder.add(VCMP->getOperand(3)); in InsertVPTBlocks()
    157 RemovedVCMPs.insert(VCMP); in InsertVPTBlocks()
|
D | ARMScheduleM4.td | 130 def : M4UnitL1I<(instregex "VMOVS", "FCONSTS", "VCMP", "VNEG", "VABS")>;
|
D | ARMISelLowering.h | 137 VCMP, // Vector compare. enumerator
|
/external/llvm-project/llvm/lib/Target/ARM/ |
D | MVEVPTBlockPass.cpp |
    266 if (MachineInstr *VCMP = findVCMPToFoldIntoVPST(MI, TRI, NewOpcode)) { in InsertVPTBlocks() local
    267 LLVM_DEBUG(dbgs() << " folding VCMP into VPST: "; VCMP->dump()); in InsertVPTBlocks()
    270 MIBuilder.add(VCMP->getOperand(1)); in InsertVPTBlocks()
    271 MIBuilder.add(VCMP->getOperand(2)); in InsertVPTBlocks()
    272 MIBuilder.add(VCMP->getOperand(3)); in InsertVPTBlocks()
    277 make_range(VCMP->getIterator(), MI->getIterator())) { in InsertVPTBlocks()
    278 MII.clearRegisterKills(VCMP->getOperand(1).getReg(), TRI); in InsertVPTBlocks()
    279 MII.clearRegisterKills(VCMP->getOperand(2).getReg(), TRI); in InsertVPTBlocks()
    282 VCMP->eraseFromParent(); in InsertVPTBlocks()
|
D | ARMLowOverheadLoops.cpp |
    1488 MachineInstr *VCMP = in ConvertVPTBlocks() local
    1495 if (!VCMP) { in ConvertVPTBlocks()
    1507 ReplaceVCMPWithVPT(VCMP, VCMP); in ConvertVPTBlocks()
    1529 MachineInstr *VCMP = VprDef; in ConvertVPTBlocks() local
    1535 if (!std::any_of(++MachineBasicBlock::iterator(VCMP), in ConvertVPTBlocks()
    1537 RDA->hasSameReachingDef(VCMP, VPST, VCMP->getOperand(1).getReg()) && in ConvertVPTBlocks()
    1538 RDA->hasSameReachingDef(VCMP, VPST, VCMP->getOperand(2).getReg())) { in ConvertVPTBlocks()
    1539 ReplaceVCMPWithVPT(VCMP, VPST); in ConvertVPTBlocks()
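The MVEVPTBlockPass.cpp and ARMLowOverheadLoops.cpp hits above fold a standalone VCMP into the VPST/VPT predication that consumes its result. As a rough scalar model of the lane-wise behaviour involved (this is not the pass itself; the lane count and the predicated operation are illustrative assumptions), the compare produces a per-lane predicate and the predicated block only updates the active lanes:

    #include <array>
    #include <cstdint>

    // Scalar sketch of an MVE VCMP feeding a VPT block: the compare yields a
    // per-lane predicate, and the predicated add writes only the lanes whose
    // predicate bit is set; inactive lanes keep their previous value.
    std::array<int32_t, 4> predicatedAdd(const std::array<int32_t, 4> &A,
                                         const std::array<int32_t, 4> &B,
                                         std::array<int32_t, 4> Acc) {
      for (int Lane = 0; Lane < 4; ++Lane) {
        bool Pred = A[Lane] > B[Lane]; // VCMP: per-lane compare
        if (Pred)                      // VPT block: execute under the predicate
          Acc[Lane] = A[Lane] + B[Lane];
      }
      return Acc;
    }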
|
D | ARMScheduleM4.td | 130 def : M4UnitL1I<(instregex "VMOVS", "FCONSTS", "VCMP", "VNEG", "VABS")>;
|
D | ARMISelLowering.h | 140 VCMP, // Vector compare. enumerator
|
D | ARMScheduleM7.td | 418 // VCMP
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | atomic_cmp_swap_local.ll |
    10 ; GCN: v_mov_b32_e32 [[VCMP:v[0-9]+]], 7
    13 ; GCN: ds_cmpst_rtn_b32 [[RESULT:v[0-9]+]], [[VPTR]], [[VCMP]], [[VSWAP]] offset:16
    63 ; GCN-DAG: v_mov_b32_e32 [[VCMP:v[0-9]+]], 7
    66 ; GCN: ds_cmpst_b32 [[VPTR]], [[VCMP]], [[VSWAP]] offset:16
|
/external/llvm-project/llvm/test/CodeGen/AMDGPU/ |
D | atomic_cmp_swap_local.ll |
    14 ; GCN-DAG: v_mov_b32_e32 [[VCMP:v[0-9]+]], 7
    17 ; GCN: ds_cmpst_rtn_b32 [[RESULT:v[0-9]+]], [[VPTR]], [[VCMP]], [[VSWAP]] offset:16
    76 ; GCN-DAG: v_mov_b32_e32 [[VCMP:v[0-9]+]], 7
    79 ; GCN: ds_cmpst_b32 [[VPTR]], [[VCMP]], [[VSWAP]] offset:16
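Both copies of atomic_cmp_swap_local.ll check that a cmpxchg on local (LDS) memory lowers to ds_cmpst_rtn_b32 / ds_cmpst_b32 with the compare value 7 materialised into a VGPR. A minimal host-side sketch of the compare-and-swap semantics those tests exercise, using std::atomic as a stand-in for the LDS word (the swap value 9 is an arbitrary choice for the sketch):

    #include <atomic>
    #include <cstdint>

    // Compare-and-swap sketch: store the swap value only if the current value
    // equals the compare value (7). Returns the original value, as the _rtn
    // variant of ds_cmpst does.
    uint32_t cmpSwapLocal(std::atomic<uint32_t> &Word) {
      uint32_t Expected = 7;                     // compare value ([[VCMP]])
      Word.compare_exchange_strong(Expected, 9); // swap value ([[VSWAP]], arbitrary)
      return Expected; // updated to the old value when the compare fails
    }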
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/PowerPC/ |
D | PPCInstrAltivec.td |
    783 class VCMP<bits<10> xo, string asmstr, ValueType Ty>
    796 def VCMPBFP : VCMP <966, "vcmpbfp $vD, $vA, $vB" , v4f32>;
    798 def VCMPEQFP : VCMP <198, "vcmpeqfp $vD, $vA, $vB" , v4f32>;
    800 def VCMPGEFP : VCMP <454, "vcmpgefp $vD, $vA, $vB" , v4f32>;
    802 def VCMPGTFP : VCMP <710, "vcmpgtfp $vD, $vA, $vB" , v4f32>;
    806 def VCMPEQUB : VCMP < 6, "vcmpequb $vD, $vA, $vB" , v16i8>;
    808 def VCMPGTSB : VCMP <774, "vcmpgtsb $vD, $vA, $vB" , v16i8>;
    810 def VCMPGTUB : VCMP <518, "vcmpgtub $vD, $vA, $vB" , v16i8>;
    814 def VCMPEQUH : VCMP < 70, "vcmpequh $vD, $vA, $vB" , v8i16>;
    816 def VCMPGTSH : VCMP <838, "vcmpgtsh $vD, $vA, $vB" , v8i16>;
    [all …]
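The VCMP record class listed above instantiates Altivec lane-wise compares such as vcmpequb (16 x i8 equality). As a scalar sketch of that lane-wise semantics, each byte lane of the mask is all-ones when the corresponding input lanes compare equal and all-zeros otherwise (picking vcmpequb from the defs above is just for illustration):

    #include <array>
    #include <cstdint>

    // Scalar model of Altivec vcmpequb: compare 16 byte lanes for equality,
    // producing 0xFF for equal lanes and 0x00 otherwise.
    std::array<uint8_t, 16> vcmpequb(const std::array<uint8_t, 16> &A,
                                     const std::array<uint8_t, 16> &B) {
      std::array<uint8_t, 16> Mask{};
      for (int Lane = 0; Lane < 16; ++Lane)
        Mask[Lane] = (A[Lane] == B[Lane]) ? 0xFF : 0x00;
      return Mask;
    }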
|
D | README_P9.txt | 9 . Same as other VCMP*, use VCMP/VCMPo form (support intrinsic)
|
D | PPCISelLowering.h | 249 VCMP, enumerator
|
/external/llvm-project/llvm/lib/Target/PowerPC/ |
D | PPCInstrAltivec.td |
    783 class VCMP<bits<10> xo, string asmstr, ValueType Ty>
    796 def VCMPBFP : VCMP <966, "vcmpbfp $vD, $vA, $vB" , v4f32>;
    798 def VCMPEQFP : VCMP <198, "vcmpeqfp $vD, $vA, $vB" , v4f32>;
    800 def VCMPGEFP : VCMP <454, "vcmpgefp $vD, $vA, $vB" , v4f32>;
    802 def VCMPGTFP : VCMP <710, "vcmpgtfp $vD, $vA, $vB" , v4f32>;
    806 def VCMPEQUB : VCMP < 6, "vcmpequb $vD, $vA, $vB" , v16i8>;
    808 def VCMPGTSB : VCMP <774, "vcmpgtsb $vD, $vA, $vB" , v16i8>;
    810 def VCMPGTUB : VCMP <518, "vcmpgtub $vD, $vA, $vB" , v16i8>;
    814 def VCMPEQUH : VCMP < 70, "vcmpequh $vD, $vA, $vB" , v8i16>;
    816 def VCMPGTSH : VCMP <838, "vcmpgtsh $vD, $vA, $vB" , v8i16>;
    [all …]
|
D | README_P9.txt | 9 . Same as other VCMP*, use VCMP/VCMPo form (support intrinsic)
|
D | PPCISelLowering.h | 272 VCMP, enumerator
|
/external/llvm/lib/Target/PowerPC/ |
D | PPCInstrAltivec.td |
    772 class VCMP<bits<10> xo, string asmstr, ValueType Ty>
    785 def VCMPBFP : VCMP <966, "vcmpbfp $vD, $vA, $vB" , v4f32>;
    787 def VCMPEQFP : VCMP <198, "vcmpeqfp $vD, $vA, $vB" , v4f32>;
    789 def VCMPGEFP : VCMP <454, "vcmpgefp $vD, $vA, $vB" , v4f32>;
    791 def VCMPGTFP : VCMP <710, "vcmpgtfp $vD, $vA, $vB" , v4f32>;
    795 def VCMPEQUB : VCMP < 6, "vcmpequb $vD, $vA, $vB" , v16i8>;
    797 def VCMPGTSB : VCMP <774, "vcmpgtsb $vD, $vA, $vB" , v16i8>;
    799 def VCMPGTUB : VCMP <518, "vcmpgtub $vD, $vA, $vB" , v16i8>;
    803 def VCMPEQUH : VCMP < 70, "vcmpequh $vD, $vA, $vB" , v8i16>;
    805 def VCMPGTSH : VCMP <838, "vcmpgtsh $vD, $vA, $vB" , v8i16>;
    [all …]
|
D | README_P9.txt | 9 . Same as other VCMP*, use VCMP/VCMPo form (support intrinsic)
|
D | PPCISelLowering.h | 178 VCMP, enumerator
|
/external/llvm-project/llvm/test/CodeGen/Thumb2/ |
D | mve-vpt-optimisations.mir |
    276 ; There shouldn't be any exception for $zr, so the second VCMP should
    419 ; Tests that, if the result of the VCMP is killed before the
    420 ; second VCMP (that will be converted into a VPNOT) is found,
    428 ; Tests that, if the result of the VCMP that has been replaced with a
    574 ; Tests that a "VPNOT-like VCMP" with an opcode different from the previous VCMP
    600 ; Tests that a VCMP is not transformed into a VPNOT if its CondCode is not
    616 ; Tests that a "VPNOT-like VCMP" will not be transformed into a VPNOT if
    891 ; Tests that the first VPNOT is moved down when the result of the VCMP is used
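Several of the mve-vpt-optimisations.mir comments above concern rewriting a second, "VPNOT-like" VCMP (one whose condition inverts the previous compare on the same operands) into a VPNOT of the first result. The rewrite rests on a simple lane-wise identity, sketched here in scalar form with an illustrative greater-than condition:

    #include <cassert>
    #include <cstdint>

    // A compare with the inverted condition yields the negation of the original
    // predicate, so the second VCMP can be replaced by a predicate negation (VPNOT).
    void checkInvertedCompare(int32_t A, int32_t B) {
      bool Pred = A > B;        // first VCMP
      bool Inverse = A <= B;    // second, "VPNOT-like" VCMP
      assert(Inverse == !Pred); // equivalent to VPNOT of the first predicate
    }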
|
D | mve-vpt-block-kill.mir | 4 # Check we remove kill flags when combining VCMP into a VPST
|
/external/llvm-project/llvm/test/CodeGen/VE/VELIntrinsics/ |
D | vcmp.ll |
    6 ;;; We test VCMP*vvl, VCMP*vvl_v, VCMP*rvl, VCMP*rvl_v, VCMP*ivl, VCMP*ivl_v,
    7 ;;; VCMP*vvml_v, VCMP*rvml_v, VCMP*ivml_v, PVCMP*vvl, PVCMP*vvl_v, PVCMP*rvl,
|
/external/llvm-project/llvm/test/CodeGen/Thumb2/LowOverheadLoops/ |
D | iv-two-vcmp-reordered.mir | 4 # TODO: We should be able to handle the VCMP -> VPST -> VCMP -> VCTP case.
|
/external/llvm-project/llvm/test/MC/ARM/ |
D | mve-vcmp.s |
    282 # Ensure the scalar FP instructions VCMP and VCMPE are still correctly
    284 # version of VCMP with identical encoding.
|
/external/llvm-project/llvm/lib/Target/X86/ |
D | X86SchedSkylakeServer.td |
    839 "VCMP(SD|SS)Zrr",
    1713 def: InstRW<[SKXWriteResGroup136_2], (instregex "VCMP(PD|PS)Z128rm(b?)i",
    1714 "VCMP(SD|SS)Zrm",
|