
Searched refs:in10 (Results 1 – 15 of 15) sorted by relevance

/external/libvpx/libvpx/vpx_dsp/mips/
fwd_txfm_msa.c:32 v8i16 in8, in9, in10, in11, in12, in13, in14, in15; in fdct8x16_1d_column() local
45 in10, in11, in12, in13, in14, in15); in fdct8x16_1d_column()
48 SLLI_4V(in8, in9, in10, in11, 2); in fdct8x16_1d_column()
51 ADD4(in4, in11, in5, in10, in6, in9, in7, in8, tmp4, tmp5, tmp6, tmp7); in fdct8x16_1d_column()
56 SUB4(in4, in11, in5, in10, in6, in9, in7, in8, in11, in10, in9, in8); in fdct8x16_1d_column()
61 ILVL_H2_SH(in10, in13, in11, in12, vec2, vec4); in fdct8x16_1d_column()
62 ILVR_H2_SH(in10, in13, in11, in12, vec3, vec5); in fdct8x16_1d_column()
121 ADD2(stp34, stp25, stp33, stp22, in13, in10); in fdct8x16_1d_column()
123 ILVRL_H2_SH(in13, in10, vec1, vec0); in fdct8x16_1d_column()
151 v8i16 in8, in9, in10, in11, in12, in13, in14, in15; in fdct16x8_1d_row() local
[all …]
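
The fdct8x16_1d_column() hits above show the usual libvpx column-pass pattern: inputs are pre-scaled by SLLI_4V(..., 2), then split into sum/difference halves by the ADD4/SUB4 pairs. A plain-C model of that split is sketched below; it is scalar and illustrative only (the real code works on v8i16 vectors, the function name is made up, and extending the k / 15-k pairing to all sixteen lanes is an assumption based on the four pairs shown).

    #include <stdint.h>

    /* Scalar model of the ADD4/SUB4 split quoted above:
     * tmp[k] = in[k] + in[15-k] feeds the even half,
     * in[15-k] = in[k] - in[15-k] feeds the odd half.
     * Assumes all sixteen lanes are pre-scaled by << 2, as the
     * SLLI_4V(in8, in9, in10, in11, 2) line suggests for its lanes. */
    static void fdct16_column_split_model(int16_t in[16], int16_t tmp[8]) {
      for (int k = 0; k < 16; ++k) in[k] = (int16_t)(in[k] << 2);
      for (int k = 0; k < 8; ++k) {
        tmp[k] = (int16_t)(in[k] + in[15 - k]);          /* ADD4(...) lines */
        in[15 - k] = (int16_t)(in[k] - in[15 - k]);      /* SUB4(...) lines */
      }
    }
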
fwd_dct32x32_msa.c:61 v8i16 in8, in9, in10, in11, in12, in13, in14, in15; in fdct8x32_1d_column_even_store() local
71 LD_SH4(input + 64, 8, in8, in9, in10, in11); in fdct8x32_1d_column_even_store()
72 BUTTERFLY_8(in4, in5, in6, in7, in8, in9, in10, in11, vec4, vec5, vec6, vec7, in fdct8x32_1d_column_even_store()
73 in8, in9, in10, in11); in fdct8x32_1d_column_even_store()
102 DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5); in fdct8x32_1d_column_even_store()
257 v8i16 in8, in9, in10, in11, in12, in13, in14, in15; in fdct8x32_1d_row_load_butterfly() local
261 LD_SH8(temp_buff + 24, 32, in8, in9, in10, in11, in12, in13, in14, in15); in fdct8x32_1d_row_load_butterfly()
264 TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9, in fdct8x32_1d_row_load_butterfly()
265 in10, in11, in12, in13, in14, in15); in fdct8x32_1d_row_load_butterfly()
266 BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11, in fdct8x32_1d_row_load_butterfly()
[all …]
deblock_msa.c:75 in10, in11, in12, in13, in14, in15) \ argument
86 ILVR_B2_SH(in9, in8, in11, in10, temp4, temp5); \
101 ILVL_B4_SH(in9, in8, in11, in10, in13, in12, in15, in14, temp2, temp3, \
109 in10 = (v16u8)__msa_ilvr_d((v2i64)temp3, (v2i64)temp2); \
114 in9, in10, in11) \ argument
135 in10 = (v16u8)temp7; \
macros_msa.h:1073 #define ILVR_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, \ argument
1079 ILVR_B4(RTYPE, in8, in9, in10, in11, in12, in13, in14, in15, out4, out5, \
1690 #define BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, \ argument
1700 out5 = in5 + in10; \
1706 out10 = in5 - in10; \
1743 in10, in11, in12, in13, in14, in15, out0, out1, \ argument
1750 ILVEV_D2_UB(in2, in10, in3, in11, out5, out4); \
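
The BUTTERFLY_16 hits in macros_msa.h show the pairing directly (out5 = in5 + in10, out10 = in5 - in10). A scalar sketch of that pairing follows, generalized to all sixteen outputs on the assumption that the macro applies the same k / 15-k pattern throughout; the names are illustrative, this is not the MSA macro body.

    #include <stdint.h>

    /* out[k] = in[k] + in[15-k], out[15-k] = in[k] - in[15-k];
     * the two quoted lines are the k = 5 instance of this pattern. */
    static void butterfly16_model(const int16_t in[16], int16_t out[16]) {
      for (int k = 0; k < 8; ++k) {
        out[k] = (int16_t)(in[k] + in[15 - k]);
        out[15 - k] = (int16_t)(in[k] - in[15 - k]);
      }
    }
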
vpx_convolve8_msa.c:829 v16u8 in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11, in12; in transpose16x16_to_dst() local
834 LD_UB8(src + 16 * 8, 16, in8, in9, in10, in11, in12, in13, in14, in15); in transpose16x16_to_dst()
836 TRANSPOSE16x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in transpose16x16_to_dst()
844 SLDI_B4_0_UB(in8, in9, in10, in11, in8, in9, in10, in11, 8); in transpose16x16_to_dst()
847 TRANSPOSE16x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in transpose16x16_to_dst()
/external/libvpx/libvpx/vp9/encoder/mips/msa/
vp9_fdct16x16_msa.c:369 v8i16 in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11; in postproc_fdct16x8_1d_row() local
374 LD_SH8(temp, 16, in8, in9, in10, in11, in12, in13, in14, in15); in postproc_fdct16x8_1d_row()
377 TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9, in postproc_fdct16x8_1d_row()
378 in10, in11, in12, in13, in14, in15); in postproc_fdct16x8_1d_row()
384 FDCT_POSTPROC_2V_NEG_H(in10, in11); in postproc_fdct16x8_1d_row()
387 BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11, in postproc_fdct16x8_1d_row()
389 tmp7, in8, in9, in10, in11, in12, in13, in14, in15); in postproc_fdct16x8_1d_row()
391 ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, temp, 16); in postproc_fdct16x8_1d_row()
395 LD_SH8(temp, 16, in8, in9, in10, in11, in12, in13, in14, in15); in postproc_fdct16x8_1d_row()
396 FDCT8x16_ODD(in8, in9, in10, in11, in12, in13, in14, in15, in0, in1, in2, in3, in postproc_fdct16x8_1d_row()
/external/llvm-project/llvm/test/Transforms/Reassociate/
long-chains.ll:4 … %in2, i8 %in3, i8 %in4, i8 %in5, i8 %in6, i8 %in7, i8 %in8, i8 %in9, i8 %in10, i8 %in11, i8 %in12…
15 %tmp11 = add i8 %tmp10, %in10
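
This Reassociate test builds one long chain of i8 additions, each step folding in one more input (e.g. %tmp11 = add i8 %tmp10, %in10). Roughly the C shape of such a chain, shortened and with a hypothetical function name; the .ll file continues the chain over further inputs.

    /* Each partial sum adds exactly one more input, mirroring the
     * %tmpN = add i8 %tmpN-1, %inN chain in the test. */
    static signed char long_add_chain(signed char in8, signed char in9,
                                      signed char in10, signed char in11) {
      signed char t0 = (signed char)(in8 + in9);
      signed char t1 = (signed char)(t0 + in10);
      return (signed char)(t1 + in11);
    }
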
/external/libvpx/libvpx/vpx_dsp/x86/
fwd_txfm_impl_sse2.h:613 __m128i in08, in09, in10, in11, in12, in13, in14, in15; in FDCT16x16_2D() local
634 in10 = _mm_load_si128((const __m128i *)(in + 10 * stride)); in FDCT16x16_2D()
651 in10 = _mm_slli_epi16(in10, 2); in FDCT16x16_2D()
668 in10 = _mm_load_si128((const __m128i *)(in + 10 * 16)); in FDCT16x16_2D()
685 in10 = _mm_add_epi16(in10, kOne); in FDCT16x16_2D()
701 in10 = _mm_srai_epi16(in10, 2); in FDCT16x16_2D()
716 input5 = ADD_EPI16(in05, in10); in FDCT16x16_2D()
732 step1_2 = SUB_EPI16(in05, in10); in FDCT16x16_2D()
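
FDCT16x16_2D handles the in10 row the same way in both passes: the first pass scales the loaded samples by << 2, the second pass rounds the intermediates with (x + 1) >> 2 before forming the in05 +/- in10 butterfly pair. A small SSE2 sketch of those two treatments, with illustrative names and unaligned loads; it is not the libvpx function itself.

    #include <emmintrin.h>   /* SSE2 */
    #include <stdint.h>

    static void fdct16_row_prep_sketch(const int16_t *in, int stride,
                                       int16_t *sum, int16_t *diff) {
      __m128i in05 = _mm_loadu_si128((const __m128i *)(in + 5 * stride));
      __m128i in10 = _mm_loadu_si128((const __m128i *)(in + 10 * stride));
      const __m128i kOne = _mm_set1_epi16(1);

      in05 = _mm_slli_epi16(in05, 2);                        /* pass-1 scaling */
      in10 = _mm_slli_epi16(in10, 2);

      /* ...the column transform runs in between; pass 2 then rounds... */
      in05 = _mm_srai_epi16(_mm_add_epi16(in05, kOne), 2);   /* (x + 1) >> 2 */
      in10 = _mm_srai_epi16(_mm_add_epi16(in10, kOne), 2);

      _mm_storeu_si128((__m128i *)sum, _mm_add_epi16(in05, in10));   /* input5  */
      _mm_storeu_si128((__m128i *)diff, _mm_sub_epi16(in05, in10));  /* step1_2 */
    }
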
fwd_dct32x32_impl_avx2.h:303 __m256i in10 = _mm256_loadu_si256((const __m256i *)(in + 10 * 32)); in FDCT32x32_2D_AVX2() local
311 step1[10] = _mm256_add_epi16(in10, in21); in FDCT32x32_2D_AVX2()
314 step1[21] = _mm256_sub_epi16(in10, in21); in FDCT32x32_2D_AVX2()
fwd_dct32x32_impl_sse2.h:312 __m128i in10 = _mm_loadu_si128((const __m128i *)(in + 10 * 32)); in FDCT32x32_2D() local
320 step1[10] = ADD_EPI16(in10, in21); in FDCT32x32_2D()
323 step1[21] = SUB_EPI16(in10, in21); in FDCT32x32_2D()
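
Both 32x32 entries show the same first-stage pairing: row r is combined with row 31 - r, here r = 10 with r = 21 (step1[10] = in10 + in21, step1[21] = in10 - in21). An AVX2 sketch of just that pair; loading in21 from in + 21 * 32 is an assumption by analogy with the in10 load, and the names are illustrative, not the libvpx code.

    #include <immintrin.h>   /* AVX2 */
    #include <stdint.h>

    /* One add/sub pair of the first stage; the real FDCT32x32_2D_AVX2
     * does this for every row pair (r, 31 - r). */
    static void fdct32_stage1_pair_sketch(const int16_t *in,
                                          int16_t *step1_10, int16_t *step1_21) {
      const __m256i in10 = _mm256_loadu_si256((const __m256i *)(in + 10 * 32));
      const __m256i in21 = _mm256_loadu_si256((const __m256i *)(in + 21 * 32));
      _mm256_storeu_si256((__m256i *)step1_10, _mm256_add_epi16(in10, in21));
      _mm256_storeu_si256((__m256i *)step1_21, _mm256_sub_epi16(in10, in21));
    }
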
/external/llvm-project/llvm/test/CodeGen/AMDGPU/
amdhsa-trap-num-sgprs.ll:18 i32 addrspace(1)* %out10, i32 %in10,
49 store i32 %in10, i32 addrspace(1)* %out10
/external/libaom/libaom/aom_dsp/mips/
macros_msa.h:1184 #define ILVR_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, \ argument
1190 ILVR_B4(RTYPE, in8, in9, in10, in11, in12, in13, in14, in15, out4, out5, \
1775 #define BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, \ argument
1785 out5 = in5 + in10; \
1791 out10 = in5 - in10; \
1828 in10, in11, in12, in13, in14, in15, out0, out1, \ argument
1835 ILVEV_D2_UB(in2, in10, in3, in11, out5, out4); \
/external/webp/src/dsp/
msa_macro.h:1283 in8, in9, in10, in11, in12, in13, in14, in15, \ argument
1289 ILVEV_W2_SD(in2, in6, in10, in14, tmp4_m, tmp5_m); \
1305 in8, in9, in10, in11, in12, in13, in14, in15, \ argument
1311 ILVEV_D2_UB(in2, in10, in3, in11, out5, out4); \
/external/libvpx/libvpx/vp8/common/mips/msa/
vp8_macros_msa.h:1573 in10, in11, in12, in13, in14, in15, out0, out1, \ argument
1584 ILVEV_W2_SD(in2, in6, in10, in14, tmp0_m, tmp1_m); \
1607 in10, in11, in12, in13, in14, in15, out0, out1, \ argument
1614 ILVEV_D2_UB(in2, in10, in3, in11, out5, out4); \
/external/libvpx/libvpx/vpx_dsp/ppc/
inv_txfm_vsx.c:708 #define LOAD_8x32(load, in00, in01, in02, in03, in10, in11, in12, in13, in20, \ argument
718 in10 = load(offset + 4 * 16, input); \