
Searched refs:in11 (Results 1 – 15 of 15) sorted by relevance

/external/libvpx/libvpx/vpx_dsp/mips/
fwd_txfm_msa.c:32 v8i16 in8, in9, in10, in11, in12, in13, in14, in15; in fdct8x16_1d_column() local
45 in10, in11, in12, in13, in14, in15); in fdct8x16_1d_column()
48 SLLI_4V(in8, in9, in10, in11, 2); in fdct8x16_1d_column()
51 ADD4(in4, in11, in5, in10, in6, in9, in7, in8, tmp4, tmp5, tmp6, tmp7); in fdct8x16_1d_column()
56 SUB4(in4, in11, in5, in10, in6, in9, in7, in8, in11, in10, in9, in8); in fdct8x16_1d_column()
61 ILVL_H2_SH(in10, in13, in11, in12, vec2, vec4); in fdct8x16_1d_column()
62 ILVR_H2_SH(in10, in13, in11, in12, vec3, vec5); in fdct8x16_1d_column()
134 SUB2(stp34, stp25, stp33, stp22, in12, in11); in fdct8x16_1d_column()
135 ILVRL_H2_SH(in12, in11, vec1, vec0); in fdct8x16_1d_column()
151 v8i16 in8, in9, in10, in11, in12, in13, in14, in15; in fdct16x8_1d_row() local
[all …]
fwd_dct32x32_msa.c:60 v8i16 in8, in9, in10, in11, in12, in13, in14, in15; in fdct8x32_1d_column_even_store() local
70 LD_SH4(input + 64, 8, in8, in9, in10, in11); in fdct8x32_1d_column_even_store()
71 BUTTERFLY_8(in4, in5, in6, in7, in8, in9, in10, in11, vec4, vec5, vec6, vec7, in fdct8x32_1d_column_even_store()
72 in8, in9, in10, in11); in fdct8x32_1d_column_even_store()
102 DOTP_CONST_PAIR(in12, in11, cospi_16_64, cospi_16_64, vec3, vec4); in fdct8x32_1d_column_even_store()
256 v8i16 in8, in9, in10, in11, in12, in13, in14, in15; in fdct8x32_1d_row_load_butterfly() local
260 LD_SH8(temp_buff + 24, 32, in8, in9, in10, in11, in12, in13, in14, in15); in fdct8x32_1d_row_load_butterfly()
263 TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9, in fdct8x32_1d_row_load_butterfly()
264 in10, in11, in12, in13, in14, in15); in fdct8x32_1d_row_load_butterfly()
265 BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11, in fdct8x32_1d_row_load_butterfly()
[all …]
deblock_msa.c:74 in10, in11, in12, in13, in14, in15) \ argument
85 ILVR_B2_SH(in9, in8, in11, in10, temp4, temp5); \
100 ILVL_B4_SH(in9, in8, in11, in10, in13, in12, in15, in14, temp2, temp3, \
109 in11 = (v16u8)__msa_ilvl_d((v2i64)temp3, (v2i64)temp2); \
113 in9, in10, in11) \ argument
140 in11 = (v16u8)__msa_ilvl_d((v2i64)temp7, (v2i64)temp7); \
macros_msa.h:1074 in11, in12, in13, in14, in15, out0, out1, out2, out3, out4, \ argument
1079 ILVR_B4(RTYPE, in8, in9, in10, in11, in12, in13, in14, in15, out4, out5, \
1691 in11, in12, in13, in14, in15, out0, out1, out2, out3, \ argument
1699 out4 = in4 + in11; \
1707 out11 = in4 - in11; \
1743 in10, in11, in12, in13, in14, in15, out0, out1, \ argument
1750 ILVEV_D2_UB(in2, in10, in3, in11, out5, out4); \
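
The macros_msa.h hits above show in11 inside the add/sub stage of the butterfly macro (BUTTERFLY_16 in the calls listed earlier), where it is paired with in4: out4 = in4 + in11 and out11 = in4 - in11. For orientation only, here is a minimal scalar sketch of that mirrored pairing in plain C; the function name and array layout are illustrative and are not taken from the library.

#include <stdint.h>

/* Scalar sketch of a 16-wide butterfly: each output pair is the sum and
 * difference of an input and its mirror (i = 4 gives out4/out11 from in4/in11). */
static void butterfly_16_sketch(const int16_t in[16], int16_t out[16]) {
  for (int i = 0; i < 8; ++i) {
    out[i] = in[i] + in[15 - i];      /* e.g. out4 = in4 + in11 */
    out[15 - i] = in[i] - in[15 - i]; /* e.g. out11 = in4 - in11 */
  }
}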
vpx_convolve8_msa.c:829 v16u8 in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11, in12; in transpose16x16_to_dst() local
834 LD_UB8(src + 16 * 8, 16, in8, in9, in10, in11, in12, in13, in14, in15); in transpose16x16_to_dst()
837 in11, in12, in13, in14, in15, out0, out1, out2, out3, in transpose16x16_to_dst()
844 SLDI_B4_0_UB(in8, in9, in10, in11, in8, in9, in10, in11, 8); in transpose16x16_to_dst()
848 in11, in12, in13, in14, in15, out8, out9, out10, out11, in transpose16x16_to_dst()
/external/libvpx/libvpx/vp9/encoder/mips/msa/
vp9_fdct16x16_msa.c:368 v8i16 in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11; in postproc_fdct16x8_1d_row() local
373 LD_SH8(temp, 16, in8, in9, in10, in11, in12, in13, in14, in15); in postproc_fdct16x8_1d_row()
376 TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9, in postproc_fdct16x8_1d_row()
377 in10, in11, in12, in13, in14, in15); in postproc_fdct16x8_1d_row()
383 FDCT_POSTPROC_2V_NEG_H(in10, in11); in postproc_fdct16x8_1d_row()
386 BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11, in postproc_fdct16x8_1d_row()
388 tmp7, in8, in9, in10, in11, in12, in13, in14, in15); in postproc_fdct16x8_1d_row()
390 ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, temp, 16); in postproc_fdct16x8_1d_row()
394 LD_SH8(temp, 16, in8, in9, in10, in11, in12, in13, in14, in15); in postproc_fdct16x8_1d_row()
395 FDCT8x16_ODD(in8, in9, in10, in11, in12, in13, in14, in15, in0, in1, in2, in3, in postproc_fdct16x8_1d_row()
/external/libvpx/libvpx/vpx_dsp/x86/
fwd_txfm_impl_sse2.h:613 __m128i in08, in09, in10, in11, in12, in13, in14, in15; in FDCT16x16_2D() local
635 in11 = _mm_load_si128((const __m128i *)(in + 11 * stride)); in FDCT16x16_2D()
652 in11 = _mm_slli_epi16(in11, 2); in FDCT16x16_2D()
669 in11 = _mm_load_si128((const __m128i *)(in + 11 * 16)); in FDCT16x16_2D()
686 in11 = _mm_add_epi16(in11, kOne); in FDCT16x16_2D()
702 in11 = _mm_srai_epi16(in11, 2); in FDCT16x16_2D()
715 input4 = ADD_EPI16(in04, in11); in FDCT16x16_2D()
733 step1_3 = SUB_EPI16(in04, in11); in FDCT16x16_2D()
fwd_dct32x32_impl_avx2.h:304 __m256i in11 = _mm256_loadu_si256((const __m256i *)(in + 11 * 32)); in FDCT32x32_2D_AVX2() local
312 step1[11] = _mm256_add_epi16(in11, in20); in FDCT32x32_2D_AVX2()
313 step1[20] = _mm256_sub_epi16(in11, in20); in FDCT32x32_2D_AVX2()
fwd_dct32x32_impl_sse2.h:313 __m128i in11 = _mm_loadu_si128((const __m128i *)(in + 11 * 32)); in FDCT32x32_2D() local
321 step1[11] = ADD_EPI16(in11, in20); in FDCT32x32_2D()
322 step1[20] = SUB_EPI16(in11, in20); in FDCT32x32_2D()
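
The x86 hits above show the scaling that brackets in11 in FDCT16x16_2D: the row is loaded and shifted left by 2 on the way in, and later reloaded, biased by one and arithmetic-shifted right by 2. The SSE2 sketch below only mirrors that load/scale/round pattern; the helper names and the 16-byte-aligned int16_t layout are assumptions, not the library's API.

#include <emmintrin.h>
#include <stdint.h>

/* Load one 8-lane row (16-byte aligned) and pre-scale it by 4, as in the
 * _mm_load_si128 / _mm_slli_epi16(in11, 2) hits above. */
static __m128i load_and_prescale(const int16_t *in, int row, int stride) {
  __m128i v = _mm_load_si128((const __m128i *)(in + row * stride));
  return _mm_slli_epi16(v, 2);
}

/* Bias by one and shift right by 2, matching the kOne add and srai hits above. */
static __m128i round_shift_2(__m128i v) {
  const __m128i kOne = _mm_set1_epi16(1);
  return _mm_srai_epi16(_mm_add_epi16(v, kOne), 2);
}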
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/Reassociate/
long-chains.ll:4 …%in3, i8 %in4, i8 %in5, i8 %in6, i8 %in7, i8 %in8, i8 %in9, i8 %in10, i8 %in11, i8 %in12, i8 %in13…
16 %tmp12 = add i8 %tmp11, %in11
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/
amdhsa-trap-num-sgprs.ll:19 i32 addrspace(1)* %out11, i32 %in11,
50 store i32 %in11, i32 addrspace(1)* %out11
/external/libaom/libaom/aom_dsp/mips/
macros_msa.h:1185 in11, in12, in13, in14, in15, out0, out1, out2, out3, out4, \ argument
1190 ILVR_B4(RTYPE, in8, in9, in10, in11, in12, in13, in14, in15, out4, out5, \
1776 in11, in12, in13, in14, in15, out0, out1, out2, out3, \ argument
1784 out4 = in4 + in11; \
1792 out11 = in4 - in11; \
1828 in10, in11, in12, in13, in14, in15, out0, out1, \ argument
1835 ILVEV_D2_UB(in2, in10, in3, in11, out5, out4); \
/external/webp/src/dsp/
msa_macro.h:1283 in8, in9, in10, in11, in12, in13, in14, in15, \ argument
1290 ILVEV_W2_SD(in3, in7, in11, in15, tmp0_m, tmp1_m); \
1305 in8, in9, in10, in11, in12, in13, in14, in15, \ argument
1311 ILVEV_D2_UB(in2, in10, in3, in11, out5, out4); \
/external/libvpx/libvpx/vp8/common/mips/msa/
vp8_macros_msa.h:1573 in10, in11, in12, in13, in14, in15, out0, out1, \ argument
1587 ILVEV_W2_SD(in3, in7, in11, in15, tmp0_m, tmp1_m); \
1607 in10, in11, in12, in13, in14, in15, out0, out1, \ argument
1614 ILVEV_D2_UB(in2, in10, in3, in11, out5, out4); \
/external/libvpx/libvpx/vpx_dsp/ppc/
inv_txfm_vsx.c:708 #define LOAD_8x32(load, in00, in01, in02, in03, in10, in11, in12, in13, in20, \ argument
719 in11 = load(offset + 5 * 16, input); \
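
The last hit fills in11 through a generic load callback at a fixed 16-element offset (offset + 5 * 16). Below is a rough plain-C sketch of that callback-plus-offset style; every name and the 16-int16_t row type are hypothetical stand-ins, not the VSX types used by inv_txfm_vsx.c.

#include <stdint.h>

typedef struct { int16_t v[16]; } row16;                     /* stand-in for a vector row */
typedef row16 (*load_fn)(int offset, const int16_t *input);  /* same shape as load(offset, input) */

static row16 load_row(int offset, const int16_t *input) {
  row16 r;
  for (int i = 0; i < 16; ++i) r.v[i] = input[offset + i];
  return r;
}

/* Each named slot reads from the same base at a fixed multiple-of-16 offset;
 * 5 * 16 matches the in11 hit above, the 4 * 16 slot is only for illustration. */
static void load_rows_sketch(load_fn load, const int16_t *input, int offset,
                             row16 *in10, row16 *in11) {
  *in10 = load(offset + 4 * 16, input);
  *in11 = load(offset + 5 * 16, input);
}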