/external/libvpx/libvpx/vpx_dsp/mips/ |
D | idct16x16_msa.c |
  266  v8i16 vec, res0, res1, res2, res3, res4, res5, res6, res7;  in vpx_idct16x16_1_add_msa() local
  280  UNPCK_UB_SH(dst3, res3, res7);  in vpx_idct16x16_1_add_msa()
  282  ADD4(res4, vec, res5, vec, res6, vec, res7, vec, res4, res5, res6, res7);  in vpx_idct16x16_1_add_msa()
  284  CLIP_SH4_0_255(res4, res5, res6, res7);  in vpx_idct16x16_1_add_msa()
  285  PCKEV_B4_UB(res4, res0, res5, res1, res6, res2, res7, res3, tmp0, tmp1,  in vpx_idct16x16_1_add_msa()
  330  v8i16 res0, res1, res2, res3, res4, res5, res6, res7;  in vpx_iadst16_1d_columns_addblk_msa() local
  444  ILVR_B2_SH(zero, dst6, zero, dst7, res6, res7);  in vpx_iadst16_1d_columns_addblk_msa()
  445  ADD2(res6, out6, res7, out7, res6, res7);  in vpx_iadst16_1d_columns_addblk_msa()
  446  CLIP_SH2_0_255(res6, res7);  in vpx_iadst16_1d_columns_addblk_msa()
  447  PCKEV_B2_SH(res6, res6, res7, res7, res6, res7);  in vpx_iadst16_1d_columns_addblk_msa()
  [all …]
|
D | vpx_convolve8_avg_horiz_msa.c |
  497  v8u16 res0, res1, res2, res3, res4, res5, res6, res7, filt;  in common_hz_2t_and_aver_dst_16w_msa() local
  516  res6, res7);  in common_hz_2t_and_aver_dst_16w_msa()
  518  SRARI_H4_UH(res4, res5, res6, res7, FILTER_BITS);  in common_hz_2t_and_aver_dst_16w_msa()
  526  PCKEV_AVG_ST_UB(res7, res6, dst3, dst);  in common_hz_2t_and_aver_dst_16w_msa()
  541  res6, res7);  in common_hz_2t_and_aver_dst_16w_msa()
  543  SRARI_H4_UH(res4, res5, res6, res7, FILTER_BITS);  in common_hz_2t_and_aver_dst_16w_msa()
  551  PCKEV_AVG_ST_UB(res7, res6, dst3, dst);  in common_hz_2t_and_aver_dst_16w_msa()
  564  v8u16 res0, res1, res2, res3, res4, res5, res6, res7, filt;  in common_hz_2t_and_aver_dst_32w_msa() local
  591  res6, res7);  in common_hz_2t_and_aver_dst_32w_msa()
  593  SRARI_H4_UH(res4, res5, res6, res7, FILTER_BITS);  in common_hz_2t_and_aver_dst_32w_msa()
  [all …]
|
D | idct32x32_msa.c |
  701  v8i16 res0, res1, res2, res3, res4, res5, res6, res7, vec;  in vpx_idct32x32_1_add_msa() local
  716  UNPCK_UB_SH(dst3, res3, res7);  in vpx_idct32x32_1_add_msa()
  718  ADD4(res4, vec, res5, vec, res6, vec, res7, vec, res4, res5, res6, res7);  in vpx_idct32x32_1_add_msa()
  720  CLIP_SH4_0_255(res4, res5, res6, res7);  in vpx_idct32x32_1_add_msa()
  721  PCKEV_B4_UB(res4, res0, res5, res1, res6, res2, res7, res3, tmp0, tmp1,  in vpx_idct32x32_1_add_msa()
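Both the idct16x16 and idct32x32 matches above are the DC-only (single nonzero coefficient) inverse-transform path: compute one rounded DC value, add it to every destination pixel, clip to [0, 255], and pack back to bytes (the UNPCK_UB_SH / ADD4 / CLIP_SH4_0_255 / PCKEV_B4_UB sequence). A scalar sketch of the same pattern, modeled on libvpx's C reference functions; this is not the MSA code itself and the function name is illustrative:

    #include <stdint.h>

    /* DC-only IDCT add: the whole transform collapses to adding one
     * rounded DC value to each pixel. 11585 is cospi_16_64 in libvpx's
     * Q14 cosine tables. */
    static uint8_t clip_pixel(int v) {
      return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
    }

    void idct_dc_only_add(int16_t dc, uint8_t *dst, int stride, int size) {
      int out = (11585 * dc + (1 << 13)) >> 14;  /* dct_const_round_shift */
      out = (11585 * out + (1 << 13)) >> 14;     /* once per dimension */
      const int a1 = (out + 32) >> 6;            /* final rounding shift */
      for (int r = 0; r < size; ++r, dst += stride)
        for (int c = 0; c < size; ++c)
          dst[c] = clip_pixel(dst[c] + a1);
    }

The MSA version does the same arithmetic eight or sixteen pixels at a time, which is why only the add/clip/pack macros show up in the matches.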
|
D | avg_msa.c |
  89   v8i16 res0, res1, res2, res3, res4, res5, res6, res7;  in vpx_hadamard_16x16_msa() local
  142  res1, res2, res3, res4, res5, res6, res7);  in vpx_hadamard_16x16_msa()
  153  ST_SH8(res0, res1, res2, res3, res4, res5, res6, res7, dst + 64, 8);  in vpx_hadamard_16x16_msa()
  198  res1, res2, res3, res4, res5, res6, res7);  in vpx_hadamard_16x16_msa()
  199  ST_SH8(res0, res1, res2, res3, res4, res5, res6, res7, dst + 3 * 64, 8);  in vpx_hadamard_16x16_msa()
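vpx_hadamard_16x16_msa computes a Hadamard transform with SIMD butterflies; the lane shuffling is hard to read from the matches alone. For orientation, a minimal scalar fast Walsh–Hadamard transform; note libvpx's vpx_hadamard_* functions use a specific 8x8 block structure and output ordering that this generic sketch does not reproduce:

    #include <stdint.h>

    /* In-place fast Walsh-Hadamard transform of length n (a power of
     * two): repeated add/subtract butterflies over growing strides.
     * Scalar reference only. */
    void fwht(int16_t *v, int n) {
      for (int half = 1; half < n; half <<= 1) {
        for (int i = 0; i < n; i += half << 1) {
          for (int j = i; j < i + half; ++j) {
            int16_t x = v[j], y = v[j + half];
            v[j] = (int16_t)(x + y);
            v[j + half] = (int16_t)(x - y);
          }
        }
      }
    }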
|
/external/swiftshader/third_party/subzero/crosstest/ |
D | test_vector_ops_ll.ll |
  96   %res7 = zext <8 x i1> %res7_i1 to <8 x i16>
  97   ret <8 x i16> %res7
  153  %res7 = zext <16 x i1> %res7_i1 to <16 x i8>
  154  ret <16 x i8> %res7
  257  %res7 = insertelement <8 x i16> %vec, i16 %elt, i32 7
  258  ret <8 x i16> %res7
  312  %res7 = insertelement <16 x i8> %vec, i8 %elt, i32 7
  313  ret <16 x i8> %res7
  441  %res7 = zext i1 %res7_i1 to i64
  442  ret i64 %res7
  [all …]
|
/external/libyuv/files/source/ |
D | rotate_msa.cc |
  86   v16u8 res0, res1, res2, res3, res4, res5, res6, res7, res8, res9;  in TransposeWx16_MSA() local
  111  ILVRL_W(reg2, reg6, reg3, reg7, res4, res5, res6, res7);  in TransposeWx16_MSA()
  149  ILVRL_D(res6, res8, res7, res9, dst0, dst1, dst2, dst3);  in TransposeWx16_MSA()
  167  v16u8 res0, res1, res2, res3, res4, res5, res6, res7, res8, res9;  in TransposeUVWx16_MSA() local
  192  ILVRL_W(reg2, reg6, reg3, reg7, res4, res5, res6, res7);  in TransposeUVWx16_MSA()
  236  ILVRL_D(res6, res8, res7, res9, dst0, dst1, dst2, dst3);  in TransposeUVWx16_MSA()
|
/external/llvm/test/Bitcode/ |
D | miscInstructions.3.2.ll |
  93   ; CHECK-NEXT: %res7 = icmp sgt i32 %x1, %x2
  94   %res7 = icmp sgt i32 %x1, %x2
  135  ; CHECK-NEXT: %res7 = fcmp ogt float %x1, %x2
  136  %res7 = fcmp ogt float %x1, %x2
|
D | memInstructions.3.2.ll |
  48   ; CHECK-NEXT: %res7 = load i8, i8* %ptr1, align 1, !nontemporal !0
  49   %res7 = load i8, i8* %ptr1, align 1, !nontemporal !0
  104  ; CHECK-NEXT: %res7 = load atomic volatile i8, i8* %ptr1 acquire, align 1
  105  %res7 = load atomic volatile i8, i8* %ptr1 acquire, align 1
  253  ; CHECK-NEXT: %res7 = extractvalue { i32, i1 } [[TMP]], 0
  254  %res7 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acquire acquire
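The matched cases cover a non-temporal plain load, an acquire atomic volatile load, and a cmpxchg whose { i32, i1 } result the %res7 extractvalue unpacks. A rough C11-atomics analogue of the latter two, with illustrative function names; this mirrors the semantics, not the bitcode test itself:

    #include <stdatomic.h>
    #include <stdint.h>

    /* Acquire atomic load, like "load atomic i8, ... acquire". */
    uint8_t load_acquire_u8(_Atomic uint8_t *p) {
      return atomic_load_explicit(p, memory_order_acquire);
    }

    /* Compare-exchange returning the old value, like extracting element
     * 0 of the { i32, i1 } pair that cmpxchg produces. */
    int32_t cmpxchg_i32(_Atomic int32_t *p, int32_t expected,
                        int32_t desired) {
      atomic_compare_exchange_strong_explicit(
          p, &expected, desired, memory_order_acquire,
          memory_order_acquire);
      /* On success "expected" still holds the old value; on failure the
       * call wrote the observed value into it. Either way it matches
       * element 0 of the cmpxchg result. */
      return expected;
    }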
|
D | binaryIntInstructions.3.2.ll |
  28  ; CHECK: %res7 = add nsw i1 %x1, %x1
  29  %res7 = add nsw i1 %x1, %x1
|
/external/llvm/test/CodeGen/X86/ |
D | merge-consecutive-loads-512.ll |
  102  %res7 = insertelement <8 x double> %res1, double %val7, i32 7
  103  ret <8 x double> %res7
  134  %res7 = insertelement <8 x double> %res6, double 0.0, i32 7
  135  ret <8 x double> %res7
  167  %res7 = insertelement <8 x double> %res5, double %val7, i32 7
  168  ret <8 x double> %res7
  221  %res7 = insertelement <8 x i64> %res6, i64 0, i32 7
  222  ret <8 x i64> %res7
  254  %res7 = insertelement <8 x i64> %res5, i64 %val7, i32 7
  255  ret <8 x i64> %res7
  [all …]
|
D | merge-consecutive-loads-128.ll |
  422  %res7 = insertelement <8 x i16> %res5, i16 %val7, i32 7
  423  ret <8 x i16> %res7
  479  %res7 = insertelement <8 x i16> %res6, i16 0, i32 7
  480  ret <8 x i16> %res7
  533  %res7 = insertelement <16 x i8> %res6, i8 %val7, i32 7
  534  %res8 = insertelement <16 x i8> %res7, i8 %val8, i32 8
  570  %res7 = insertelement <16 x i8> %res6, i8 0, i32 7
  571  %resD = insertelement <16 x i8> %res7, i8 0, i32 13
  610  %res7 = insertelement <16 x i8> %res6, i8 %val7, i32 7
  611  %resD = insertelement <16 x i8> %res7, i8 0, i32 13
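The merge-consecutive-loads tests build vectors lane by lane with insertelement chains so the backend can prove the lanes come from consecutive loads and merge them into one wide load. A rough SSE2 C analogue of the <8 x i16> chains; the helper name is hypothetical:

    #include <emmintrin.h>
    #include <stdint.h>

    /* Build an 8 x i16 vector one lane at a time, the intrinsics
     * analogue of the %res0..%res7 insertelement chains above. */
    __m128i build_vec_epi16(const int16_t v[8]) {
      __m128i r = _mm_setzero_si128();
      r = _mm_insert_epi16(r, v[0], 0);
      r = _mm_insert_epi16(r, v[1], 1);
      r = _mm_insert_epi16(r, v[2], 2);
      r = _mm_insert_epi16(r, v[3], 3);
      r = _mm_insert_epi16(r, v[4], 4);
      r = _mm_insert_epi16(r, v[5], 5);
      r = _mm_insert_epi16(r, v[6], 6);
      return _mm_insert_epi16(r, v[7], 7);  /* the "%res7" step */
    }

When v points at contiguous memory, an optimizing compiler folds this whole chain into a single unaligned 128-bit load, which is exactly the transformation these CodeGen tests verify.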
|
D | merge-consecutive-loads-256.ll |
  335  %res7 = insertelement <8 x float> %res6, float 0.0, i32 7
  336  ret <8 x float> %res7
  364  %res7 = insertelement <8 x float> %res5, float %val7, i32 7
  365  ret <8 x float> %res7
  428  %res7 = insertelement <8 x i32> %res6, i32 0, i32 7
  429  ret <8 x i32> %res7
  469  %res7 = insertelement <8 x i32> %res5, i32 %val7, i32 7
  470  ret <8 x i32> %res7
|
D | avx-intrinsics-fast-isel.ll |
  2003  %res7 = insertelement <32 x i8> %res6, i8 %a24, i32 7
  2004  %res8 = insertelement <32 x i8> %res7, i8 %a23, i32 8
  2107  %res7 = insertelement <16 x i16> %res6, i16 %a8 , i32 7
  2108  %res8 = insertelement <16 x i16> %res7, i16 %a7 , i32 8
  2153  %res7 = insertelement <8 x i32> %res6, i32 %a0, i32 7
  2154  %res = bitcast <8 x i32> %res7 to <4 x i64>
  2305  %res7 = insertelement <8 x float> %res6, float %a0, i32 7
  2306  ret <8 x float> %res7
  2334  %res7 = insertelement <32 x i8> %res6, i8 %a0, i32 7
  2335  %res8 = insertelement <32 x i8> %res7, i8 %a0, i32 8
  [all …]
|
D | avx512bw-intrinsics.ll |
  85   %res7 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 7, i64 -1)
  86   %ret7 = add i64 %ret6, %res7
  174  %res7 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 7, i64 %mask)
  175  %ret7 = add i64 %ret6, %res7
  261  %res7 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 7, i64 -1)
  262  %ret7 = add i64 %ret6, %res7
  350  %res7 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 7, i64 %mask)
  351  %ret7 = add i64 %ret6, %res7
  424  %res7 = call i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16> %a0, <32 x i16> %a1, i32 7, i32 -1)
  425  %ret7 = add i32 %ret6, %res7
  [all …]
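These tests run the same byte or word comparison with every predicate value from 0 (EQ) through 7 (always true) and sum the resulting masks. A C intrinsics sketch of the pattern, assuming an AVX-512BW target; the function name is illustrative:

    #include <immintrin.h>
    #include <stdint.h>

    /* Accumulate the 64-bit masks produced by all eight signed
     * byte-compare predicates, mirroring the %res0..%res7 / %ret7
     * chain in the test. Predicate 3 is always-false, 7 always-true. */
    uint64_t sum_cmp_b_512(__m512i a, __m512i b) {
      uint64_t ret = 0;
      ret += _mm512_cmp_epi8_mask(a, b, 0);  /* EQ */
      ret += _mm512_cmp_epi8_mask(a, b, 1);  /* LT (signed) */
      ret += _mm512_cmp_epi8_mask(a, b, 2);  /* LE */
      ret += _mm512_cmp_epi8_mask(a, b, 3);  /* FALSE: contributes 0 */
      ret += _mm512_cmp_epi8_mask(a, b, 4);  /* NE */
      ret += _mm512_cmp_epi8_mask(a, b, 5);  /* NLT, i.e. >= */
      ret += _mm512_cmp_epi8_mask(a, b, 6);  /* NLE, i.e. > */
      ret += _mm512_cmp_epi8_mask(a, b, 7);  /* TRUE: all-ones mask */
      return ret;
    }

The avx512bwvl and avx512vl entries below exercise the same pattern on 256- and 128-bit vectors, collecting the narrower masks into a result vector instead of summing them.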
|
D | sse2-intrinsics-fast-isel.ll |
  2148  %res7 = insertelement <16 x i8> %res6, i8 %a8 , i32 7
  2149  %res8 = insertelement <16 x i8> %res7, i8 %a7 , i32 8
  2216  %res7 = insertelement <8 x i16> %res6, i16 %a0, i32 7
  2217  %res = bitcast <8 x i16> %res7 to <2 x i64>
  2335  %res7 = insertelement <16 x i8> %res6, i8 %a0, i32 7
  2336  %res8 = insertelement <16 x i8> %res7, i8 %a0, i32 8
  2370  %res7 = insertelement <8 x i16> %res6, i16 %a0, i32 7
  2371  %res = bitcast <8 x i16> %res7 to <2 x i64>
  2542  %res7 = insertelement <16 x i8> %res6, i8 %a7 , i32 7
  2543  %res8 = insertelement <16 x i8> %res7, i8 %a8 , i32 8
  [all …]
|
D | avx512bwvl-intrinsics.ll |
  49   %res7 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 7, i32 -1)
  50   %vec7 = insertelement <8 x i32> %vec6, i32 %res7, i32 7
  98   %res7 = call i32 @llvm.x86.avx512.mask.cmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 7, i32 %mask)
  99   %vec7 = insertelement <8 x i32> %vec6, i32 %res7, i32 7
  148  %res7 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 7, i32 -1)
  149  %vec7 = insertelement <8 x i32> %vec6, i32 %res7, i32 7
  197  %res7 = call i32 @llvm.x86.avx512.mask.ucmp.b.256(<32 x i8> %a0, <32 x i8> %a1, i32 7, i32 %mask)
  198  %vec7 = insertelement <8 x i32> %vec6, i32 %res7, i32 7
  246  %res7 = call i16 @llvm.x86.avx512.mask.cmp.w.256(<16 x i16> %a0, <16 x i16> %a1, i32 7, i16 -1)
  247  %vec7 = insertelement <8 x i16> %vec6, i16 %res7, i32 7
  [all …]
|
D | avx512vl-intrinsics.ll |
  48   %res7 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 7, i8 -1)
  49   %vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
  96   %res7 = call i8 @llvm.x86.avx512.mask.cmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 7, i8 %mask)
  97   %vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
  145  %res7 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 7, i8 -1)
  146  %vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
  193  %res7 = call i8 @llvm.x86.avx512.mask.ucmp.d.256(<8 x i32> %a0, <8 x i32> %a1, i32 7, i8 %mask)
  194  %vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
  242  %res7 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 7, i8 -1)
  243  %vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
  [all …]
|
/external/libvpx/libvpx/vp9/encoder/x86/ |
D | vp9_dct_ssse3.c |
  83   __m128i res0, res1, res2, res3, res4, res5, res6, res7;  in vp9_fdct8x8_quant_ssse3() local
  188  res7 = _mm_packs_epi32(w2, w3);  in vp9_fdct8x8_quant_ssse3()
  207  const __m128i tr0_5 = _mm_unpacklo_epi16(res6, res7);  in vp9_fdct8x8_quant_ssse3()
  209  const __m128i tr0_7 = _mm_unpackhi_epi16(res6, res7);  in vp9_fdct8x8_quant_ssse3()
|
D | vp9_dct_intrin_sse2.c |
  246  __m128i res0, res1, res2, res3, res4, res5, res6, res7;  in vp9_fdct8x8_quant_sse2() local
  357  res7 = _mm_packs_epi32(w2, w3);  in vp9_fdct8x8_quant_sse2()
  376  const __m128i tr0_5 = _mm_unpacklo_epi16(res6, res7);  in vp9_fdct8x8_quant_sse2()
  378  const __m128i tr0_7 = _mm_unpackhi_epi16(res6, res7);  in vp9_fdct8x8_quant_sse2()
|
/external/llvm/test/CodeGen/ARM/ |
D | intrinsics-crypto.ll |
  35  %res7 = call <4 x i32> @llvm.arm.neon.sha256h(<4 x i32> %res6, <4 x i32> %tmp3, <4 x i32> %res1)
  37  %res8 = call <4 x i32> @llvm.arm.neon.sha256h2(<4 x i32> %res7, <4 x i32> %tmp3, <4 x i32> %res1)
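The two matched calls chain the SHA-256 hash-update intrinsics: SHA256H produces the new ABCD half of the state, which SHA256H2 then consumes. A minimal ACLE sketch of the same chaining, assuming a crypto-capable target (e.g. -march=armv8-a+crypto); variable names and the return convention are illustrative:

    #include <arm_neon.h>

    /* One SHA-256 round pair: update both halves of the hash state with
     * the schedule word wk, chaining SHA256H into SHA256H2 the way the
     * test's %res7 feeds %res8. */
    uint32x4_t sha256_round(uint32x4_t abcd, uint32x4_t efgh,
                            uint32x4_t wk) {
      uint32x4_t new_abcd = vsha256hq_u32(abcd, efgh, wk);   /* %res7 */
      uint32x4_t new_efgh = vsha256h2q_u32(efgh, abcd, wk);  /* %res8 */
      /* Combined only so the sketch returns a single value. */
      return vaddq_u32(new_abcd, new_efgh);
    }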
|
/external/clang/test/SemaCXX/ |
D | altivec.cpp |
  26  int res7[vec_step(vus) == 8 ? 1 : -1];  in test_vec_step() local
|
/external/clang/test/SemaOpenCL/ |
D | vec_step.cl |
  22  int res7[vec_step(int2) == 2 ? 1 : -1];
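Both the altivec.cpp and vec_step.cl matches use the same compile-time assertion idiom: an array whose size collapses to -1 when the predicate is false, which makes compilation fail. A minimal C sketch of the idiom; the macro and typedef names are made up for illustration:

    /* Negative-array-size compile-time assertion: if cond is false the
     * array size is -1 and the translation unit fails to compile. */
    #define STATIC_ASSERT(cond, name) \
      typedef char static_assert_##name[(cond) ? 1 : -1]

    STATIC_ASSERT(sizeof(short) == 2, short_is_two_bytes);

    int main(void) { return 0; }

In the tests the condition is vec_step(...) == N, so the declaration doubles as a check that the compiler reports the expected number of vector elements.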
|
/external/swiftshader/third_party/subzero/tests_lit/assembler/x86/ |
D | opcode_register_encodings.ll |
  28  %res7 = sub <8 x i16> %arg0, %arg7
  33  %res_acc4 = select <8 x i1> %cond, <8 x i16> %res7, <8 x i16> %res8
  68  %res7 = sub <4 x i32> %arg0, %arg7
  73  %res_acc4 = select <4 x i1> %cond, <4 x i32> %res7, <4 x i32> %res8
|
/external/libvpx/libvpx/vpx_dsp/x86/ |
D | fwd_txfm_impl_sse2.h |
  301  __m128i res0, res1, res2, res3, res4, res5, res6, res7;  in FDCT8x8_2D() local
  454  res7 = _mm_packs_epi32(w2, w3);  in FDCT8x8_2D()
  458  overflow = check_epi16_overflow_x4(&res1, &res7, &res5, &res3);  in FDCT8x8_2D()
  482  const __m128i tr0_5 = _mm_unpacklo_epi16(res6, res7);  in FDCT8x8_2D()
  484  const __m128i tr0_7 = _mm_unpackhi_epi16(res6, res7);  in FDCT8x8_2D()
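The tr0_* temporaries in FDCT8x8_2D (and in the vp9_fdct8x8_quant_* matches earlier) are the first stage of the standard unpack-based 8x8 16-bit transpose: interleave 16-bit lanes, then 32-bit pairs, then 64-bit halves. A standalone SSE2 sketch of the full three-stage idiom; this is the well-known pattern, not libvpx's exact code:

    #include <emmintrin.h>

    /* Transpose eight rows of eight int16 values in place. */
    static void transpose_8x8_epi16(__m128i r[8]) {
      /* Stage 1: interleave 16-bit lanes of adjacent row pairs. */
      __m128i a0 = _mm_unpacklo_epi16(r[0], r[1]);
      __m128i a1 = _mm_unpackhi_epi16(r[0], r[1]);
      __m128i a2 = _mm_unpacklo_epi16(r[2], r[3]);
      __m128i a3 = _mm_unpackhi_epi16(r[2], r[3]);
      __m128i a4 = _mm_unpacklo_epi16(r[4], r[5]);
      __m128i a5 = _mm_unpackhi_epi16(r[4], r[5]);
      __m128i a6 = _mm_unpacklo_epi16(r[6], r[7]);
      __m128i a7 = _mm_unpackhi_epi16(r[6], r[7]);
      /* Stage 2: interleave 32-bit pairs. */
      __m128i b0 = _mm_unpacklo_epi32(a0, a2);
      __m128i b1 = _mm_unpackhi_epi32(a0, a2);
      __m128i b2 = _mm_unpacklo_epi32(a1, a3);
      __m128i b3 = _mm_unpackhi_epi32(a1, a3);
      __m128i b4 = _mm_unpacklo_epi32(a4, a6);
      __m128i b5 = _mm_unpackhi_epi32(a4, a6);
      __m128i b6 = _mm_unpacklo_epi32(a5, a7);
      __m128i b7 = _mm_unpackhi_epi32(a5, a7);
      /* Stage 3: interleave 64-bit halves to finish the transpose. */
      r[0] = _mm_unpacklo_epi64(b0, b4);
      r[1] = _mm_unpackhi_epi64(b0, b4);
      r[2] = _mm_unpacklo_epi64(b1, b5);
      r[3] = _mm_unpackhi_epi64(b1, b5);
      r[4] = _mm_unpacklo_epi64(b2, b6);
      r[5] = _mm_unpackhi_epi64(b2, b6);
      r[6] = _mm_unpacklo_epi64(b3, b7);
      r[7] = _mm_unpackhi_epi64(b3, b7);
    }

In the FDCT the transpose is fused with the row/column passes, so only the stage-1 unpacks of res6/res7 show up in the matches.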
|
/external/clang/test/SemaObjCXX/ |
D | arc-templates.mm |
  139  identity<__unsafe_unretained id> res7 = accept_any_ref(ui);
  159  identity<__unsafe_unretained A *> res7 = accept_any_ref(ui);
|