/external/llvm-project/llvm/test/CodeGen/AArch64/GlobalISel/

select-frint-nofp16.mir
     52  ; CHECK: [[COPY1:%[0-9]+]]:fpr16 = COPY [[INSERT_SUBREG]].hsub
     69  ; CHECK: [[INSERT_SUBREG3:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF3]], [[FCVTHSr]], %subreg.hsub
     71  ; CHECK: [[INSERT_SUBREG4:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF4]], [[FCVTHSr1]], %subreg.hsub
     74  ; CHECK: [[INSERT_SUBREG5:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF5]], [[FCVTHSr2]], %subreg.hsub
     77  ; CHECK: [[INSERT_SUBREG6:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF6]], [[FCVTHSr3]], %subreg.hsub
    115  ; CHECK: [[COPY1:%[0-9]+]]:fpr16 = COPY [[COPY]].hsub
    148  ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[FCVTHSr]], %subreg.hsub
    150  ; CHECK: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[FCVTHSr1]], %subreg.hsub
    153  ; CHECK: [[INSERT_SUBREG2:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF2]], [[FCVTHSr2]], %subreg.hsub
    156  ; CHECK: [[INSERT_SUBREG3:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF3]], [[FCVTHSr3]], %subreg.hsub
    [all …]

fp16-copy-gpr.mir
     51  ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:fpr32 = SUBREG_TO_REG 0, [[COPY]], %subreg.hsub
     54  ; CHECK: [[SUBREG_TO_REG1:%[0-9]+]]:fpr32 = SUBREG_TO_REG 0, [[COPY1]], %subreg.hsub
     92  ; CHECK: [[COPY2:%[0-9]+]]:fpr16 = COPY [[COPY1]].hsub
    119  ; CHECK: [[COPY2:%[0-9]+]]:fpr16 = COPY [[COPY1]].hsub

select-insert-extract.mir
    101  ; CHECK: [[COPY2:%[0-9]+]]:fpr16 = COPY [[COPY1]].hsub
    104  ; CHECK: [[COPY4:%[0-9]+]]:fpr16 = COPY [[COPY3]].hsub

preselect-process-phis.mir
     30  ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:fpr32 = SUBREG_TO_REG 0, [[FCVTHSr]], %subreg.hsub
     81  ; CHECK: [[COPY2:%[0-9]+]]:fpr16 = COPY [[COPY1]].hsub

select-unmerge.mir
    117  ; CHECK: %2:fpr16 = COPY [[INS_SHARED]].hsub
    151  ; CHECK: %2:fpr16 = COPY %0.hsub

select-reduce-add.mir
     50  ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:fpr32 = SUBREG_TO_REG 0, [[ADDVv8i16v]], %subreg.hsub

select-dup.mir
    153  ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], %copy, %subreg.hsub
    196  ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], %copy, %subreg.hsub

opt-fold-ext-tbz-tbnz.mir
     80  ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:fpr32 = SUBREG_TO_REG 0, $h0, %subreg.hsub
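
In these AArch64 GlobalISel tests, hsub is a subregister index: it names the low 16-bit "h" slice of a wider FP/SIMD register, so COPY %x.hsub reads bits [15:0], INSERT_SUBREG ..., %subreg.hsub writes them, and SUBREG_TO_REG 0, ..., %subreg.hsub additionally asserts the remaining bits are zero. A minimal C++ sketch of the bit-level effect of the first two, modeled on plain integers (an illustration, not LLVM's API):

    #include <cstdint>
    #include <cstdio>

    // COPY %x.hsub: read bits [15:0] of a wider register value.
    static uint16_t copy_hsub(uint64_t wide) {
      return static_cast<uint16_t>(wide);
    }

    // INSERT_SUBREG %def, %h, %subreg.hsub: replace bits [15:0], keep the rest.
    static uint64_t insert_subreg_hsub(uint64_t wide, uint16_t h) {
      return (wide & ~0xFFFFull) | h;
    }

    int main() {
      uint64_t d = 0x1122334455667788ull;
      std::printf("%04x\n", copy_hsub(d));  // 7788
      std::printf("%016llx\n", static_cast<unsigned long long>(
                                   insert_subreg_hsub(d, 0xABCD)));  // ...abcd
    }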

/external/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/

CalcSpillWeights.cpp
     54  unsigned sub, hsub;                                               in copyHint() local
     59  hsub = mi->getOperand(1).getSubReg();                             in copyHint()
     63  hsub = mi->getOperand(0).getSubReg();                             in copyHint()
     70  return sub == hsub ? hreg : Register();                           in copyHint()
     73  Register CopiedPReg = (hsub ? tri.getSubReg(hreg, hsub) : hreg);  in copyHint()
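
In CalcSpillWeights.cpp, hsub holds the subregister index on the hint side of a copy instruction: copyHint() only accepts the far end of a copy as a register-allocation hint for a virtual register when both ends name the same subregister slice (line 70 above), and resolves a physical hint through tri.getSubReg(hreg, hsub) (line 73). A self-contained model of the virtual-register case (my paraphrase of the fragments shown, not the LLVM API):

    // One end of a "%dst.sub = COPY %src.hsub"-style instruction, reduced
    // to the three facts the snippet's decision needs.
    struct CopyHintQuery {
      unsigned sub;   // subreg index on the register being spill-weighted
      unsigned hsub;  // subreg index on the prospective hint register
      unsigned hreg;  // prospective hint register (0 = none)
    };

    // Mirrors "return sub == hsub ? hreg : Register();": a virtual hint is
    // only useful when both copy ends refer to the same slice.
    unsigned virtualCopyHint(const CopyHintQuery &q) {
      if (!q.hreg)
        return 0;
      return q.sub == q.hsub ? q.hreg : 0;
    }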

/external/igt-gpu-tools/lib/

igt_fb.c
    101  uint8_t hsub;  member
    110  .hsub = 1, .vsub = 1,
    118  .hsub = 1, .vsub = 1,
    126  .hsub = 1, .vsub = 1,
    134  .hsub = 1, .vsub = 1,
    142  .hsub = 1, .vsub = 1,
    150  .hsub = 1, .vsub = 1,
    158  .hsub = 1, .vsub = 1,
    165  .hsub = 1, .vsub = 1,
    173  .hsub = 1, .vsub = 1,
    [all …]
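
In igt_fb.c, hsub is unrelated to the LLVM uses above: it is a per-format member giving the horizontal chroma subsampling divisor, with vsub as its vertical counterpart. The visible entries are all 1 because they belong to non-subsampled formats; a 4:2:0 format such as NV12 would use hsub = 2, vsub = 2 (example values I am supplying, since those rows fall under the "[all …]" truncation). A sketch of how such divisors are typically applied, with names of my own choosing:

    #include <cstdio>

    // Per-format chroma subsampling divisors, modeled on the igt_fb
    // fields shown above (this struct name is hypothetical).
    struct FormatSubsampling {
      unsigned hsub;  // horizontal divisor for chroma planes
      unsigned vsub;  // vertical divisor for chroma planes
    };

    int main() {
      const FormatSubsampling nv12{2, 2};  // 4:2:0: chroma halved both ways
      unsigned width = 1920, height = 1080;
      std::printf("chroma plane: %ux%u\n",
                  width / nv12.hsub, height / nv12.vsub);  // 960x540
    }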

/external/llvm-project/llvm/test/CodeGen/Thumb2/mve-intrinsics/

vhsubq.ll
     50  …%2 = tail call <16 x i8> @llvm.arm.mve.hsub.predicated.v16i8.v16i1(<16 x i8> %a, <16 x i8> %b, i32…
     56  declare <16 x i8> @llvm.arm.mve.hsub.predicated.v16i8.v16i1(<16 x i8>, <16 x i8>, i32, <16 x i1>, <…
     68  …%2 = tail call <8 x i16> @llvm.arm.mve.hsub.predicated.v8i16.v8i1(<8 x i16> %a, <8 x i16> %b, i32 …
     74  declare <8 x i16> @llvm.arm.mve.hsub.predicated.v8i16.v8i1(<8 x i16>, <8 x i16>, i32, <8 x i1>, <8 …
     86  …%2 = tail call <4 x i32> @llvm.arm.mve.hsub.predicated.v4i32.v4i1(<4 x i32> %a, <4 x i32> %b, i32 …
     92  declare <4 x i32> @llvm.arm.mve.hsub.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, i32, <4 x i1>, <4 …
    142  …%2 = call <16 x i8> @llvm.arm.mve.hsub.predicated.v16i8.v16i1(<16 x i8> %a, <16 x i8> %.splat, i32…
    158  …%2 = call <8 x i16> @llvm.arm.mve.hsub.predicated.v8i16.v8i1(<8 x i16> %a, <8 x i16> %.splat, i32 …
    174  …%2 = call <4 x i32> @llvm.arm.mve.hsub.predicated.v4i32.v4i1(<4 x i32> %a, <4 x i32> %.splat, i32 …
    190  …%2 = call <16 x i8> @llvm.arm.mve.hsub.predicated.v16i8.v16i1(<16 x i8> %a, <16 x i8> %.splat, i32…
    [all …]
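
On MVE (the Arm Helium extension), hsub means "halving subtract": each lane computes (a - b) >> 1 in a widened type, so the intermediate difference cannot overflow, and the .predicated form of the intrinsic takes a predicate plus an "inactive" vector whose lanes pass through where the predicate bit is clear. A lane-by-lane C++ model of the v16i8 case, with the intrinsic's i32 signed/unsigned flag fixed to signed (an illustration, not the arm_mve.h API):

    #include <array>
    #include <cstdint>

    std::array<int8_t, 16> hsub_predicated(const std::array<int8_t, 16> &a,
                                           const std::array<int8_t, 16> &b,
                                           const std::array<bool, 16> &pred,
                                           const std::array<int8_t, 16> &inactive) {
      std::array<int8_t, 16> r{};
      for (int i = 0; i < 16; ++i)
        r[i] = pred[i]
                   // Widen, subtract, then arithmetic shift right by one.
                   ? static_cast<int8_t>((static_cast<int16_t>(a[i]) -
                                          static_cast<int16_t>(b[i])) >> 1)
                   : inactive[i];  // predicate-off lanes take the inactive value
      return r;
    }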

/external/llvm/test/CodeGen/X86/

sse3-intrinsics-x86.ll
     37  …%res = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double…
     40  declare <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double>, <2 x double>) nounwind readnone
     45  …%res = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> […
     48  declare <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float>, <4 x float>) nounwind readnone

sse_reload_fold.ll
     20  declare <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float>, <4 x float>)
     27  declare <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double>, <2 x double>)
     81  %t = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %y, <4 x float> %f)
    116  %t = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %y, <2 x double> %f)
    132  %t = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %f, <2 x double> %y)

sse3-intrinsics-fast-isel.ll
     77  %res = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %a0, <2 x double> %a1)
     80  declare <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double>, <2 x double>) nounwind readnone
     92  %res = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %a0, <4 x float> %a1)
     95  declare <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float>, <4 x float>) nounwind readnone
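
On x86, hsub is "horizontal subtract" (the SSE3 HSUBPS/HSUBPD instructions): instead of subtracting the two operands elementwise, it takes pairwise differences within each source and concatenates the results. The documented semantics, shown through the standard <pmmintrin.h> wrapper _mm_hsub_ps, the C-level counterpart of llvm.x86.sse3.hsub.ps (build with SSE3 enabled, e.g. -msse3):

    #include <pmmintrin.h>
    #include <cstdio>

    int main() {
      // r = { a0-a1, a2-a3, b0-b1, b2-b3 }
      __m128 a = _mm_setr_ps(5.0f, 1.0f, 10.0f, 3.0f);
      __m128 b = _mm_setr_ps(8.0f, 2.0f, 9.0f, 4.0f);
      __m128 r = _mm_hsub_ps(a, b);

      float out[4];
      _mm_storeu_ps(out, r);
      std::printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  // 4 7 6 5
    }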

/external/llvm/lib/CodeGen/

CalcSpillWeights.cpp
     49  unsigned sub, hreg, hsub;              in copyHint() local
     53  hsub = mi->getOperand(1).getSubReg();  in copyHint()
     57  hsub = mi->getOperand(0).getSubReg();  in copyHint()
     64  return sub == hsub ? hreg : 0;         in copyHint()

/external/llvm-project/llvm/test/CodeGen/X86/

haddsub-shuf-undef-operand.ll
     19  %t42 = tail call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %z, <4 x double> %t41)
     26  declare <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double>, <4 x double>)

horizontal-shuffle.ll
     51  %1 = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %a0, <2 x double> %a1)
     52  %2 = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %a2, <2 x double> %a3)
     67  %1 = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %a0, <4 x float> %a1)
     68  %2 = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %a2, <4 x float> %a3)
    247  %1 = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %a0, <4 x double> %a1)
    248  %2 = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %a2, <4 x double> %a3)
    263  %1 = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %a0, <8 x float> %a1)
    264  %2 = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %a2, <8 x float> %a3)
    398  declare <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float>, <4 x float>)
    400  declare <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double>, <2 x double>)
    [all …]

sse_reload_fold.ll
     20  declare <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float>, <4 x float>)
     27  declare <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double>, <2 x double>)
     81  %t = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %y, <4 x float> %f)
    116  %t = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %y, <2 x double> %f)
    132  %t = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %f, <2 x double> %y)

horizontal-shuffle-demanded.ll
     43  %2 = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %1, <4 x float> %a1)
     83  %2 = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %a0, <2 x double> %1)
    215  %2 = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %1, <8 x float> %a1)
    263  %2 = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %a0, <4 x double> %1)
    364  declare <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float>, <4 x float>)
    366  declare <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double>, <2 x double>)
    374  declare <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float>, <8 x float>)
    376  declare <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double>, <4 x double>)

sse3-intrinsics-fast-isel.ll
     81  %res = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %a0, <2 x double> %a1)
     84  declare <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double>, <2 x double>) nounwind readnone
     96  %res = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %a0, <4 x float> %a1)
     99  declare <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float>, <4 x float>) nounwind readnone

sse3-intrinsics-x86.ll
     83  …%res = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double…
     86  declare <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double>, <2 x double>) nounwind readnone
     99  …%res = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> […
    102  declare <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float>, <4 x float>) nounwind readnone
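
The .256 AVX variants appearing in this directory (llvm.x86.avx.hsub.ps.256 / .pd.256, i.e. VHSUBPS/VHSUBPD) perform the same pairwise subtraction independently within each 128-bit lane rather than across the whole 256-bit vector; for the pd form each lane holds one difference from each operand. Via the standard <immintrin.h> wrapper (build with AVX enabled, e.g. -mavx):

    #include <immintrin.h>
    #include <cstdio>

    int main() {
      // Low lane:  { a0-a1, b0-b1 }   High lane: { a2-a3, b2-b3 }
      __m256d a = _mm256_setr_pd(5.0, 1.0, 10.0, 3.0);
      __m256d b = _mm256_setr_pd(8.0, 2.0, 9.0, 4.0);
      __m256d r = _mm256_hsub_pd(a, b);

      double out[4];
      _mm256_storeu_pd(out, r);
      std::printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  // 4 6 7 5
    }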

/external/swiftshader/third_party/llvm-10.0/configs/common/lib/Target/AArch64/

AArch64GenRegisterInfo.inc
    793  hsub, // 7
   1122  { 0, 16 }, // hsub
   5191  …IndexNameTable[] = { "bsub", "dsub", "dsub0", "dsub1", "dsub2", "dsub3", "hsub", "qhisub", "qsub",…
   5202  LaneBitmask(0x00000001), // hsub
   5429  0x08010020, 0xffffffc0, 0xffffffff, 0x00000fff, // hsub
   8940  …Arch64::dsub, AArch64::dsub0, AArch64::dsub1, AArch64::dsub2, 0, AArch64::hsub, 0, 0, AArch64::qsu…
   8950  …1_then_dsub, AArch64::qsub2_then_dsub, AArch64::qsub3_then_dsub, AArch64::hsub, 0, 0, AArch64::zsu…
   8951  …1_then_dsub, AArch64::zsub2_then_dsub, AArch64::zsub3_then_dsub, AArch64::hsub, 0, 0, 0, 0, 0, 0, …
   9023  &LaneMaskComposeSequences[0], // to hsub
   9154  0, // hsub
    [all …]

/external/llvm-project/llvm/lib/Target/AArch64/

AArch64InstrInfo.td
   2265  defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v4i16, LDRHroW, LDRHroX, hsub>;
   2266  defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v8i16, LDRHroW, LDRHroX, hsub>;
   2268  defm : ScalToVecROLoadPat<ro16, load, i32, v4f16, LDRHroW, LDRHroX, hsub>;
   2269  defm : ScalToVecROLoadPat<ro16, load, i32, v8f16, LDRHroW, LDRHroX, hsub>;
   2429  (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
   2433  (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
   3026  defm : VecROStoreLane0Pat<ro16, truncstorei16, v8i16, i32, hsub, STRHroW, STRHroX>;
   3027  defm : VecROStoreLane0Pat<ro16, store, v8f16, f16, hsub, STRHroW, STRHroX>;
   3159  defm : VecStoreLane0Pat<am_indexed16, truncstorei16, v8i16, i32, hsub, uimm12s2, STRHui>;
   3160  defm : VecStoreLane0Pat<am_indexed16, store, v8f16, f16, hsub, uimm12s2, STRHui>;
    [all …]

/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/

AArch64RegisterInfo.cpp
    106  if (RC == &AArch64::GPR32allRegClass && Idx == AArch64::hsub)       in getSubClassWithSubReg()
    108  else if (RC == &AArch64::GPR64allRegClass && Idx == AArch64::hsub)  in getSubClassWithSubReg()

AArch64InstrInfo.td
   2092  defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v4i16, LDRHroW, LDRHroX, hsub>;
   2093  defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v8i16, LDRHroW, LDRHroX, hsub>;
   2095  defm : ScalToVecROLoadPat<ro16, load, i32, v4f16, LDRHroW, LDRHroX, hsub>;
   2096  defm : ScalToVecROLoadPat<ro16, load, i32, v8f16, LDRHroW, LDRHroX, hsub>;
   2250  (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
   2254  (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
   2837  defm : VecROStoreLane0Pat<ro16, truncstorei16, v8i16, i32, hsub, STRHroW, STRHroX>;
   2838  defm : VecROStoreLane0Pat<ro16, store, v8f16, f16, hsub, STRHroW, STRHroX>;
   2959  defm : VecStoreLane0Pat<am_indexed16, truncstorei16, v8i16, i32, hsub, uimm12s2, STRHui>;
   2960  defm : VecStoreLane0Pat<am_indexed16, store, v8f16, f16, hsub, uimm12s2, STRHui>;
    [all …]