Home
last modified time | relevance | path

Searched refs:res10 (Results 1 – 13 of 13) sorted by relevance

/external/libvpx/libvpx/vpx_dsp/mips/
idct16x16_msa.c:333 v8i16 res8, res9, res10, res11, res12, res13, res14, res15; in vpx_iadst16_1d_columns_addblk_msa() local
457 ILVR_B2_SH(zero, dst10, zero, dst11, res10, res11); in vpx_iadst16_1d_columns_addblk_msa()
458 ADD2(res10, out10, res11, out11, res10, res11); in vpx_iadst16_1d_columns_addblk_msa()
459 CLIP_SH2_0_255(res10, res11); in vpx_iadst16_1d_columns_addblk_msa()
460 PCKEV_B2_SH(res10, res10, res11, res11, res10, res11); in vpx_iadst16_1d_columns_addblk_msa()
461 ST8x1_UB(res10, dst + 6 * dst_stride); in vpx_iadst16_1d_columns_addblk_msa()
/external/llvm/test/Bitcode/
miscInstructions.3.2.ll:102 ; CHECK-NEXT: %res10 = icmp sle i32 %x1, %x2
103 %res10 = icmp sle i32 %x1, %x2
144 ; CHECK-NEXT: %res10 = fcmp ole float %x1, %x2
145 %res10 = fcmp ole float %x1, %x2
memInstructions.3.2.ll:57 ; CHECK-NEXT: %res10 = load volatile i8, i8* %ptr1, !invariant.load !1
58 %res10 = load volatile i8, i8* %ptr1, !invariant.load !1
113 ; CHECK-NEXT: %res10 = load atomic i8, i8* %ptr1 singlethread monotonic, align 1
114 %res10 = load atomic i8, i8* %ptr1 singlethread monotonic, align 1
266 ; CHECK-NEXT: %res10 = extractvalue { i32, i1 } [[TMP]], 0
267 %res10 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new release monotonic
/external/llvm/test/CodeGen/AMDGPU/
llvm.SI.imageload.ll:38 %res10 = call <4 x i32> @llvm.SI.imageload.(<4 x i32> %v10,
56 %t10 = extractelement <4 x i32> %res10, i32 2
57 %t11 = extractelement <4 x i32> %res10, i32 3
llvm.SI.resinfo.ll:32 %res10 = call <4 x i32> @llvm.SI.resinfo(i32 %a10, <32 x i8> undef, i32 10)
58 %t10 = extractelement <4 x i32> %res10, i32 2
59 %t11 = extractelement <4 x i32> %res10, i32 3
llvm.AMDGPU.tex.ll:31 %res10 = call <4 x float> @llvm.AMDGPU.tex(<4 x float> %res9, i32 0, i32 0, i32 10)
32 %res11 = call <4 x float> @llvm.AMDGPU.tex(<4 x float> %res10, i32 0, i32 0, i32 11)
llvm.SI.sampled.ll:56 %res10 = call <4 x float> @llvm.SI.sampled.(<4 x i32> %v10,
89 %t10 = extractelement <4 x float> %res10, i32 2
90 %t11 = extractelement <4 x float> %res10, i32 3
llvm.SI.sample.ll:56 %res10 = call <4 x float> @llvm.SI.sample.(<4 x i32> %v10,
89 %t10 = extractelement <4 x float> %res10, i32 2
90 %t11 = extractelement <4 x float> %res10, i32 3
fetch-limits.r700+.ll:48 %res10 = call <4 x float> @llvm.AMDGPU.tex(<4 x float> %10, i32 0, i32 0, i32 1)
60 %f = fadd <4 x float> %res10, %res11
/external/clang/test/SemaCXX/
altivec.cpp:29 int res10[vec_step(vi) == 4 ? 1 : -1]; in test_vec_step() local
/external/llvm/test/CodeGen/ARM/
intrinsics-crypto.ll:41 %res10 = call <4 x i32> @llvm.arm.neon.sha256su0(<4 x i32> %res9, <4 x i32> %tmp3)
43 ret <4 x i32> %res10
/external/clang/test/SemaOpenCL/
vec_step.cl:25 int res10[vec_step(int8) == 8 ? 1 : -1];
/external/libvpx/libvpx/vpx_dsp/x86/
fwd_txfm_impl_sse2.h:630 __m128i res08, res09, res10, res11, res12, res13, res14, res15; in FDCT16x16_2D() local
858 res10 = mult_round_shift(&t2, &t3, &k__cospi_p12_p20, in FDCT16x16_2D()
864 &res10, &res06); in FDCT16x16_2D()
1012 transpose_and_output8x8(&res08, &res09, &res10, &res11, in FDCT16x16_2D()