Searched refs:in5 (Results 1 – 25 of 50) sorted by relevance

/external/libvpx/libvpx/vp9/encoder/mips/msa/
vp9_fdct8x8_msa.c 19 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in vp9_fht8x8_msa() local
21 LD_SH8(input, stride, in0, in1, in2, in3, in4, in5, in6, in7); in vp9_fht8x8_msa()
23 SLLI_4V(in4, in5, in6, in7, 2); in vp9_fht8x8_msa()
27 VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, in vp9_fht8x8_msa()
28 in5, in6, in7); in vp9_fht8x8_msa()
29 TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in vp9_fht8x8_msa()
30 in3, in4, in5, in6, in7); in vp9_fht8x8_msa()
31 VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, in vp9_fht8x8_msa()
32 in5, in6, in7); in vp9_fht8x8_msa()
35 VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, in vp9_fht8x8_msa()
[all …]
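Note: the vp9_fht8x8_msa() hits above follow the usual separable 8x8 transform structure: a 1-D 8-point pass (VP9_FDCT8 or VP9_ADST8), a TRANSPOSE8x8_SH_SH, then a second 1-D pass. A minimal scalar sketch of that two-pass skeleton is below; Transform8x8 and Stage1D are illustrative names, not libvpx APIs, and the MSA code additionally pre-scales the input with SLLI_4V and works on whole v8i16 vectors.

```cpp
#include <cstdint>

// Hypothetical 1-D 8-point stage (e.g. a DCT or ADST butterfly network).
typedef void (*Stage1D)(int16_t io[8]);

// Two-pass separable 8x8 transform: rows, transpose, rows again.
static void Transform8x8(int16_t blk[8][8], Stage1D stage) {
  for (int r = 0; r < 8; ++r) stage(blk[r]);   // first 1-D pass over rows
  for (int r = 0; r < 8; ++r)                  // transpose in place
    for (int c = r + 1; c < 8; ++c) {
      const int16_t t = blk[r][c];
      blk[r][c] = blk[c][r];
      blk[c][r] = t;
    }
  for (int r = 0; r < 8; ++r) stage(blk[r]);   // second 1-D pass (former columns)
}
```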
vp9_fdct_msa.h 18 #define VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, \ argument
47 ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \
51 cnst2_m, cnst3_m, in5, in2, in6, in1); \
52 BUTTERFLY_4(in7, in0, in2, in5, s1_m, s0_m, in2, in5); \
70 ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \
vp9_fdct16x16_msa.c 369 v8i16 in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11; in postproc_fdct16x8_1d_row() local
372 LD_SH8(temp, 16, in0, in1, in2, in3, in4, in5, in6, in7); in postproc_fdct16x8_1d_row()
375 TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in postproc_fdct16x8_1d_row()
376 in4, in5, in6, in7); in postproc_fdct16x8_1d_row()
381 FDCT_POSTPROC_2V_NEG_H(in4, in5); in postproc_fdct16x8_1d_row()
387 BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11, in postproc_fdct16x8_1d_row()
397 in4, in5, in6, in7); in postproc_fdct16x8_1d_row()
401 TRANSPOSE8x8_SH_SH(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, tmp4, in4, in postproc_fdct16x8_1d_row()
402 tmp5, in5, tmp6, in6, tmp7, in7); in postproc_fdct16x8_1d_row()
404 ST_SH8(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, out, 16); in postproc_fdct16x8_1d_row()
/external/libvpx/libvpx/vp9/common/mips/msa/
vp9_idct8x8_msa.c 19 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in vp9_iht8x8_64_add_msa() local
22 LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7); in vp9_iht8x8_64_add_msa()
24 TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in vp9_iht8x8_64_add_msa()
25 in4, in5, in6, in7); in vp9_iht8x8_64_add_msa()
30 VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in vp9_iht8x8_64_add_msa()
31 in4, in5, in6, in7); in vp9_iht8x8_64_add_msa()
33 TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in vp9_iht8x8_64_add_msa()
34 in3, in4, in5, in6, in7); in vp9_iht8x8_64_add_msa()
35 VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in vp9_iht8x8_64_add_msa()
36 in4, in5, in6, in7); in vp9_iht8x8_64_add_msa()
[all …]
/external/libvpx/libvpx/vpx_dsp/mips/
idct8x8_msa.c 16 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in vpx_idct8x8_64_add_msa() local
19 LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7); in vpx_idct8x8_64_add_msa()
22 TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in vpx_idct8x8_64_add_msa()
23 in4, in5, in6, in7); in vpx_idct8x8_64_add_msa()
25 VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in vpx_idct8x8_64_add_msa()
26 in4, in5, in6, in7); in vpx_idct8x8_64_add_msa()
28 TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in vpx_idct8x8_64_add_msa()
29 in4, in5, in6, in7); in vpx_idct8x8_64_add_msa()
31 VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in vpx_idct8x8_64_add_msa()
32 in4, in5, in6, in7); in vpx_idct8x8_64_add_msa()
[all …]
fwd_txfm_msa.c 15 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in vpx_fdct8x8_1_msa() local
18 LD_SH8(input, stride, in0, in1, in2, in3, in4, in5, in6, in7); in vpx_fdct8x8_1_msa()
19 ADD4(in0, in1, in2, in3, in4, in5, in6, in7, in0, in2, in4, in6); in vpx_fdct8x8_1_msa()
31 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in fdct8x16_1d_column() local
44 LD_SH16(input, src_stride, in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in fdct8x16_1d_column()
47 SLLI_4V(in4, in5, in6, in7, 2); in fdct8x16_1d_column()
51 ADD4(in4, in11, in5, in10, in6, in9, in7, in8, tmp4, tmp5, tmp6, tmp7); in fdct8x16_1d_column()
56 SUB4(in4, in11, in5, in10, in6, in9, in7, in8, in11, in10, in9, in8); in fdct8x16_1d_column()
150 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in fdct16x8_1d_row() local
153 LD_SH8(input, 16, in0, in1, in2, in3, in4, in5, in6, in7); in fdct16x8_1d_row()
[all …]
fwd_dct32x32_msa.c 17 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in fdct8x32_1d_column_load_butterfly() local
24 LD_SH4(input + (28 * src_stride), src_stride, in4, in5, in6, in7); in fdct8x32_1d_column_load_butterfly()
28 SLLI_4V(in4, in5, in6, in7, 2); in fdct8x32_1d_column_load_butterfly()
31 BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, step0, step1, step2, in fdct8x32_1d_column_load_butterfly()
32 step3, in4, in5, in6, in7); in fdct8x32_1d_column_load_butterfly()
36 ST_SH4(in4, in5, in6, in7, temp_buff + (28 * 8), 8); in fdct8x32_1d_column_load_butterfly()
42 LD_SH4(input + (20 * src_stride), src_stride, in4, in5, in6, in7); in fdct8x32_1d_column_load_butterfly()
46 SLLI_4V(in4, in5, in6, in7, 2); in fdct8x32_1d_column_load_butterfly()
49 BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, step0, step1, step2, in fdct8x32_1d_column_load_butterfly()
50 step3, in4, in5, in6, in7); in fdct8x32_1d_column_load_butterfly()
[all …]
macros_msa.h 331 #define ST_V8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \ argument
334 ST_V4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * stride, stride); \
488 #define AVER_UB4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ argument
492 AVER_UB2(RTYPE, in4, in5, in6, in7, out2, out3) \
1005 #define ILVL_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ argument
1009 ILVL_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
1062 #define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ argument
1066 ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
1073 #define ILVR_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, \ argument
1077 ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, \
[all …]
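Note: the macros_msa.h hits above show the library's composition pattern: each 8-input macro (ST_V8, AVER_UB4, ILVL_B4, ILVR_B4, ...) simply invokes its 4- or 2-input sibling twice, and the store variants offset the destination by 4 * stride for the second half. A rough scalar stand-in for that pattern follows; STORE2/STORE4/STORE8 are made-up names, and the real macros store whole v16u8/v8i16 vectors, not single elements.

```cpp
// 2-element store: one value per row.
#define STORE2(a, b, dst, stride) \
  do { (dst)[0] = (a); (dst)[(stride)] = (b); } while (0)

// 4-element store built from two 2-element stores.
#define STORE4(a, b, c, d, dst, stride) \
  do { STORE2(a, b, (dst), (stride)); \
       STORE2(c, d, (dst) + 2 * (stride), (stride)); } while (0)

// 8-element store built from two 4-element stores, second half at dst + 4 * stride,
// mirroring the ST_V8 / ST_B8 definitions above.
#define STORE8(a, b, c, d, e, f, g, h, dst, stride) \
  do { STORE4(a, b, c, d, (dst), (stride)); \
       STORE4(e, f, g, h, (dst) + 4 * (stride), (stride)); } while (0)
```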
inv_txfm_msa.h 18 #define VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, \ argument
47 ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \
51 cnst2_m, cnst3_m, in5, in2, in6, in1); \
52 BUTTERFLY_4(in7, in0, in2, in5, s1_m, s0_m, in2, in5); \
70 ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \
214 #define VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ argument
227 VP9_MADD(in1, in7, in3, in5, k0_m, k1_m, k2_m, k3_m, in1, in7, in3, in5); \
228 SUB2(in1, in3, in7, in5, res0_m, res1_m); \
238 tp7_m = in7 + in5; \
247 #define VP9_IADST8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ argument
[all …]
fwd_txfm_msa.h 46 #define SRLI_AVE_S_4V_H(in0, in1, in2, in3, in4, in5, in6, in7) \ argument
51 SRLI_H4_SH(in4, in5, in6, in7, vec4_m, vec5_m, vec6_m, vec7_m, 15); \
54 AVE_SH4_SH(vec4_m, in4, vec5_m, in5, vec6_m, in6, vec7_m, in7, in4, in5, \
58 #define VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, \ argument
67 BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, s0_m, s1_m, s2_m, \
118 #define FDCT8x16_EVEN(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ argument
127 BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, s0_m, s1_m, s2_m, \
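Note: VP9_FDCT8 and FDCT8x16_EVEN above both begin with BUTTERFLY_8, which pairs input i with input 7 - i: the first four outputs are the pairwise sums, the last four the pairwise differences. A scalar sketch of that step, assuming the usual sum/difference pairing; Butterfly8 is an illustrative name, and the MSA macro performs the same arithmetic on v8i16 lanes.

```cpp
#include <cstdint>

// Butterfly stage of an 8-point transform: sums land in out[0..3],
// differences in out[7..4] (out[7] = in[0] - in[7], and so on).
static void Butterfly8(const int16_t in[8], int16_t out[8]) {
  for (int i = 0; i < 4; ++i) {
    out[i]     = static_cast<int16_t>(in[i] + in[7 - i]);
    out[7 - i] = static_cast<int16_t>(in[i] - in[7 - i]);
  }
}
```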
txfm_macros_msa.h 40 #define DOT_ADD_SUB_SRARI_PCK(in0, in1, in2, in3, in4, in5, in6, in7, dst0, \ argument
46 DOTP_SH4_SW(in0, in1, in0, in1, in4, in4, in5, in5, tp0_m, tp2_m, tp3_m, \
deblock_msa.c 19 in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3, out4, \ argument
25 ILVR_B4_SH(in1, in0, in3, in2, in5, in4, in7, in6, temp0, temp1, temp2, \
31 ILVL_B4_SH(in1, in0, in3, in2, in5, in4, in7, in6, temp0, temp1, temp2, \
74 #define TRANSPOSE12x16_B(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, \ argument
82 ILVR_B2_SH(in5, in4, in7, in6, temp0, temp1); \
97 ILVL_B2_SH(in5, in4, in7, in6, temp0, temp1); \
99 in5 = (v16u8)__msa_ilvl_d((v2i64)temp6, (v2i64)temp2); \
113 #define VPX_TRANSPOSE12x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, in8, \ argument
121 ILVR_B2_SH(in5, in4, in7, in6, temp0, temp1); \
127 ILVL_B2_SH(in5, in4, in7, in6, temp6, temp7); \
[all …]
/external/tensorflow/tensorflow/core/kernels/
aggregate_ops_cpu.h 69 typename TTypes<T>::ConstFlat in5) {
70 Add5EigenImpl<CPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5);
80 typename TTypes<T>::ConstFlat in5,
82 Add6EigenImpl<CPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5, in6);
92 typename TTypes<T>::ConstFlat in5,
95 Add7EigenImpl<CPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5, in6,
106 typename TTypes<T>::ConstFlat in5, typename TTypes<T>::ConstFlat in6,
108 Add8EigenImpl<CPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5, in6,
119 typename TTypes<T>::ConstFlat in5, typename TTypes<T>::ConstFlat in6,
121 Add8pEigenImpl<CPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5, in6,
[all …]
aggregate_ops_gpu.cu.cc 70 typename TTypes<T>::ConstFlat in5) { in operator ()()
71 Add5EigenImpl<GPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5); in operator ()()
82 typename TTypes<T>::ConstFlat in5, in operator ()()
84 Add6EigenImpl<GPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5, in6); in operator ()()
95 typename TTypes<T>::ConstFlat in5, in operator ()()
98 Add7EigenImpl<GPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5, in6, in operator ()()
109 typename TTypes<T>::ConstFlat in5, typename TTypes<T>::ConstFlat in6, in operator ()()
111 Add8EigenImpl<GPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5, in6, in operator ()()
122 typename TTypes<T>::ConstFlat in5, typename TTypes<T>::ConstFlat in6, in operator ()()
124 Add8pEigenImpl<GPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5, in6, in operator ()()
[all …]
aggregate_ops.h 88 typename TTypes<T>::ConstFlat in5);
98 typename TTypes<T>::ConstFlat in5) { in Compute()
99 out.device(d) = in1 + in2 + in3 + in4 + in5; in Compute()
110 typename TTypes<T>::ConstFlat in5,
121 typename TTypes<T>::ConstFlat in5, in Compute()
123 out.device(d) = in1 + in2 + in3 + in4 + in5 + in6; in Compute()
134 typename TTypes<T>::ConstFlat in5,
146 typename TTypes<T>::ConstFlat in5, in Compute()
149 out.device(d) = in1 + in2 + in3 + in4 + in5 + in6 + in7; in Compute()
159 typename TTypes<T>::ConstFlat in5, typename TTypes<T>::ConstFlat in6,
[all …]
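Note: the aggregate_ops hits above are TensorFlow's AddN family; each AddKEigenImpl::Compute just evaluates the element-wise sum on the device, e.g. out.device(d) = in1 + in2 + in3 + in4 + in5 for five inputs. A minimal scalar sketch of that reduction, assuming equal-length buffers; Add5 here is an illustrative stand-in, not the TensorFlow functor.

```cpp
#include <cstddef>
#include <vector>

// Element-wise sum of five equally sized inputs, the scalar analogue of Add5EigenImpl.
static void Add5(std::vector<float>& out,
                 const std::vector<float>& in1, const std::vector<float>& in2,
                 const std::vector<float>& in3, const std::vector<float>& in4,
                 const std::vector<float>& in5) {
  for (std::size_t i = 0; i < out.size(); ++i) {
    out[i] = in1[i] + in2[i] + in3[i] + in4[i] + in5[i];
  }
}
```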
/external/boringssl/src/crypto/fipsmodule/aes/asm/
aesp8-ppc.pl 674 my ($in0, $in1, $in2, $in3, $in4, $in5, $in6, $in7 )=map("v$_",(0..3,10..13));
785 lvx_u $in5,$x50,$inp
793 le?vperm $in5,$in5,$in5,$inpperm
798 vxor $out5,$in5,$rndkey0
905 vxor $in5,$in5,v31
923 vncipherlast $out6,$out6,$in5
925 lvx_u $in5,$x50,$inp
930 le?vperm $in5,$in5,$in5,$inpperm
954 vxor $out5,$in5,$rndkey0
1050 vxor $in5,$in5,v31
[all …]
/external/libaom/libaom/aom_dsp/mips/
macros_msa.h 423 #define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \ argument
426 ST_B4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * stride, stride); \
449 #define ST_H8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \ argument
452 ST_H4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * stride, stride); \
616 #define AVER_UB4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ argument
620 AVER_UB2(RTYPE, in4, in5, in6, in7, out2, out3) \
1116 #define ILVL_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ argument
1120 ILVL_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
1173 #define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ argument
1177 ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
[all …]
/external/libvpx/libvpx/vp8/common/mips/msa/
vp8_macros_msa.h 360 #define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \ argument
363 ST_B4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * stride, stride); \
558 #define VSHF_B3(RTYPE, in0, in1, in2, in3, in4, in5, mask0, mask1, mask2, \ argument
562 out2 = (RTYPE)__msa_vshf_b((v16i8)mask2, (v16i8)in5, (v16i8)in4); \
953 #define ILVL_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ argument
957 ILVL_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
1008 #define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ argument
1012 ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
1035 #define ILVR_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ argument
1039 ILVR_H2(RTYPE, in4, in5, in6, in7, out2, out3); \
[all …]
/external/libaom/libaom/aom_dsp/x86/
fwd_txfm_impl_sse2.h 257 __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride)); in FDCT8x8_2D() local
266 in5 = _mm_slli_epi16(in5, 2); in FDCT8x8_2D()
280 const __m128i q2 = ADD_EPI16(in2, in5); in FDCT8x8_2D()
283 const __m128i q5 = SUB_EPI16(in2, in5); in FDCT8x8_2D()
489 in5 = _mm_unpackhi_epi64(tr1_1, tr1_5); in FDCT8x8_2D()
512 const __m128i sign_in5 = _mm_srai_epi16(in5, 15); in FDCT8x8_2D()
520 in5 = _mm_sub_epi16(in5, sign_in5); in FDCT8x8_2D()
528 in5 = _mm_srai_epi16(in5, 1); in FDCT8x8_2D()
537 store_output(&in5, (output + 5 * 8)); in FDCT8x8_2D()
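Note: the FDCT8x8_2D hits above end with a rounding step: _mm_srai_epi16(in5, 15) extracts the per-lane arithmetic sign (0 or -1), which is subtracted before the final shift so the halving rounds toward zero instead of toward negative infinity. A scalar sketch of that trick; halve_round_toward_zero is an illustrative name.

```cpp
#include <cstdint>

// Scalar equivalent of: sign = in >> 15; in = (in - sign) >> 1;
// Subtracting the sign adds 1 to negative values before the shift,
// which turns the floor division of >> 1 into round-toward-zero halving.
static int16_t halve_round_toward_zero(int16_t v) {
  const int16_t sign = static_cast<int16_t>(v >> 15);  // 0 or -1
  return static_cast<int16_t>(static_cast<int16_t>(v - sign) >> 1);
}
```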
/external/webp/src/dsp/
common_sse41.h 40 __m128i* const in3, __m128i* const in4, __m128i* const in5) { in VP8PlanarTo24b_SSE41() argument
77 WEBP_SSE41_SHUFF(B, in4, in5) in VP8PlanarTo24b_SSE41()
93 *in5 = _mm_or_si128(RG5, B5); in VP8PlanarTo24b_SSE41()
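Note: VP8PlanarTo24b_SSE41 above shuffles six 128-bit registers so that separate channel planes come out as interleaved 3-byte pixels (the RG5/B5 combine on the last output hints at this). Assuming that is the intent, a plain scalar version of the planar-to-packed conversion might look roughly like the following; PlanarTo24b and its parameters are illustrative, not the WebP API.

```cpp
#include <cstdint>

// Interleave three planar channels into packed r,g,b triplets.
static void PlanarTo24b(const uint8_t* r, const uint8_t* g, const uint8_t* b,
                        uint8_t* rgb, int num_pixels) {
  for (int i = 0; i < num_pixels; ++i) {
    rgb[3 * i + 0] = r[i];
    rgb[3 * i + 1] = g[i];
    rgb[3 * i + 2] = b[i];
  }
}
```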
msa_macro.h 310 #define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
313 ST_B4(RTYPE, in4, in5, in6, in7, pdst + 4 * stride, stride); \
873 #define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
876 ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
899 #define ILVR_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
902 ILVR_H2(RTYPE, in4, in5, in6, in7, out2, out3); \
923 #define ILVR_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
926 ILVR_D2(RTYPE, in4, in5, in6, in7, out2, out3); \
984 #define PCKEV_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
987 PCKEV_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
[all …]
/external/llvm/test/CodeGen/X86/
2006-05-02-InstrSched1.ll 9 @in5 = external global i8* ; <i8**> [#uses=1]
15 %tmp.upgrd.2 = load i8*, i8** @in5 ; <i8*> [#uses=2]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/
2006-05-02-InstrSched1.ll 9 @in5 = external global i8* ; <i8**> [#uses=1]
15 %tmp.upgrd.2 = load i8*, i8** @in5 ; <i8*> [#uses=2]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/Reassociate/
long-chains.ll 4 define i8 @longchain(i8 %in1, i8 %in2, i8 %in3, i8 %in4, i8 %in5, i8 %in6, i8 %in7, i8 %in8, i8 %in…
10 %tmp6 = add i8 %tmp5, %in5
/external/libvpx/libvpx/vpx_dsp/x86/
fwd_txfm_impl_sse2.h 282 __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride)); in FDCT8x8_2D() local
291 in5 = _mm_slli_epi16(in5, 2); in FDCT8x8_2D()
305 const __m128i q2 = ADD_EPI16(in2, in5); in FDCT8x8_2D()
308 const __m128i q5 = SUB_EPI16(in2, in5); in FDCT8x8_2D()
514 in5 = _mm_unpackhi_epi64(tr1_1, tr1_5); in FDCT8x8_2D()
537 const __m128i sign_in5 = _mm_srai_epi16(in5, 15); in FDCT8x8_2D()
545 in5 = _mm_sub_epi16(in5, sign_in5); in FDCT8x8_2D()
553 in5 = _mm_srai_epi16(in5, 1); in FDCT8x8_2D()
562 store_output(&in5, (output + 5 * 8)); in FDCT8x8_2D()
