
Searched refs: in5 (Results 1 – 25 of 48) sorted by relevance

/third_party/ffmpeg/libavcodec/mips/
idctdsp_msa.c
28 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in put_pixels_clamped_msa() local
30 LD_SH8(block, 8, in0, in1, in2, in3, in4, in5, in6, in7); in put_pixels_clamped_msa()
31 CLIP_SH8_0_255(in0, in1, in2, in3, in4, in5, in6, in7); in put_pixels_clamped_msa()
33 PCKEV_B4_SH(in4, in4, in5, in5, in6, in6, in7, in7, in4, in5, in6, in7); in put_pixels_clamped_msa()
40 in5_d = __msa_copy_u_d((v2i64) in5, 0); in put_pixels_clamped_msa()
52 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in put_signed_pixels_clamped_msa() local
54 LD_SH8(block, 8, in0, in1, in2, in3, in4, in5, in6, in7); in put_signed_pixels_clamped_msa()
61 in5 += 128; in put_signed_pixels_clamped_msa()
65 CLIP_SH8_0_255(in0, in1, in2, in3, in4, in5, in6, in7); in put_signed_pixels_clamped_msa()
67 PCKEV_B4_SH(in4, in4, in5, in5, in6, in6, in7, in7, in4, in5, in6, in7); in put_signed_pixels_clamped_msa()
[all …]
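
The idctdsp_msa.c hits above load eight rows of 16-bit coefficients, clamp them to the 0..255 range and pack them to bytes (LD_SH8 / CLIP_SH8_0_255 / PCKEV_B4_SH). A minimal scalar sketch of that clamp-and-store step, assuming the 8x8 block and byte destination of ffmpeg's generic put_pixels_clamped; an illustration only, not the MSA implementation:

#include <stddef.h>
#include <stdint.h>

/* Clamp each int16 coefficient to 0..255 and store it as a byte;
 * the MSA version above does the same eight rows at a time. */
static void put_pixels_clamped_scalar(const int16_t *block, uint8_t *pixels,
                                      ptrdiff_t line_size)
{
    for (int i = 0; i < 8; i++) {
        for (int j = 0; j < 8; j++) {
            int v = block[i * 8 + j];
            pixels[j] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
        pixels += line_size;
    }
}
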
hevc_idct_msa.c
94 #define HEVC_IDCT8x8_COL(in0, in1, in2, in3, in4, in5, in6, in7, shift) \ argument
104 ILVR_H4_SH(in4, in0, in6, in2, in5, in1, in3, in7, \
106 ILVL_H4_SH(in4, in0, in6, in2, in5, in1, in3, in7, \
185 PCKEV_H2_SH(sum1_l, sum1_r, sum2_l, sum2_r, in2, in5); \
334 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in hevc_idct_8x8_msa() local
336 LD_SH8(coeffs, 8, in0, in1, in2, in3, in4, in5, in6, in7); in hevc_idct_8x8_msa()
337 HEVC_IDCT8x8_COL(in0, in1, in2, in3, in4, in5, in6, in7, 7); in hevc_idct_8x8_msa()
338 TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in hevc_idct_8x8_msa()
339 in0, in1, in2, in3, in4, in5, in6, in7); in hevc_idct_8x8_msa()
340 HEVC_IDCT8x8_COL(in0, in1, in2, in3, in4, in5, in6, in7, 12); in hevc_idct_8x8_msa()
[all …]
simple_idct_msa.c
28 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in simple_idct_msa() local
41 LD_SH8(block, 8, in0, in1, in2, in3, in4, in5, in6, in7); in simple_idct_msa()
46 TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in simple_idct_msa()
47 in0, in1, in2, in3, in4, in5, in6, in7); in simple_idct_msa()
48 select_vec = in1 | in2 | in3 | in4 | in5 | in6 | in7; in simple_idct_msa()
81 ILVRL_H2_SW(in5, in7, temp0_r, temp0_l); in simple_idct_msa()
118 in5 = (v8i16) __msa_bmnz_v((v16u8) a2_r, (v16u8) temp, (v16u8) select_vec); in simple_idct_msa()
121 TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in simple_idct_msa()
122 in0, in1, in2, in3, in4, in5, in6, in7); in simple_idct_msa()
160 ILVRL_H2_SW(in5, in7, temp0_r, temp0_l); in simple_idct_msa()
[all …]
mpegvideoencdsp_msa.c
27 v16u8 in0, in1, in2, in3, in4, in5, in6, in7; in sum_u8src_16width_msa() local
30 LD_UB8(src, stride, in0, in1, in2, in3, in4, in5, in6, in7); in sum_u8src_16width_msa()
35 HADD_UB4_UB(in4, in5, in6, in7, in4, in5, in6, in7); in sum_u8src_16width_msa()
44 sum += HADD_UH_U32(in5); in sum_u8src_16width_msa()
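
sum_u8src_16width_msa above loads eight 16-byte rows per iteration and reduces them with horizontal adds (LD_UB8 / HADD_UB4_UB / HADD_UH_U32). A scalar sketch of that reduction, assuming the function simply sums every byte of a 16-pixel-wide block; the name and signature here are illustrative only:

#include <stddef.h>
#include <stdint.h>

/* Sum all bytes of a 16-pixel-wide block, row by row. */
static int sum_u8src_16width_scalar(const uint8_t *src, ptrdiff_t stride,
                                    int height)
{
    int sum = 0;
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < 16; x++)
            sum += src[x];
        src += stride;
    }
    return sum;
}
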
vc1dsp_msa.c
30 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in ff_vc1_inv_trans_8x8_msa() local
44 LD_SH8(block, 8, in0, in1, in2, in3, in4, in5, in6, in7); in ff_vc1_inv_trans_8x8_msa()
50 UNPCK_SH_SW(in5, in_r5, in_l5); in ff_vc1_inv_trans_8x8_msa()
136 in4, in5, in6, in7); in ff_vc1_inv_trans_8x8_msa()
137 ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, block, 8); in ff_vc1_inv_trans_8x8_msa()
142 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in ff_vc1_inv_trans_4x8_msa() local
159 LD_SH8(block, 8, in0, in1, in2, in3, in4, in5, in6, in7); in ff_vc1_inv_trans_4x8_msa()
165 UNPCK_R_SH_SW(in5, in_r5); in ff_vc1_inv_trans_4x8_msa()
233 v4i32 in0, in1, in2, in3, in4, in5, in6, in7; in ff_vc1_inv_trans_8x4_msa() local
250 UNPCK_SH_SW(t2, in1, in5); in ff_vc1_inv_trans_8x4_msa()
[all …]
compute_antialias_float.h
68 float in1, in2, in3, in4, in5, in6, in7, in8; in compute_antialias_mips_float() local
174 [in5] "=&f" (in5), [in6] "=&f" (in6), in compute_antialias_mips_float()
mpegaudiodsp_mips_float.c
74 float in1, in2, in3, in4, in5, in6, in7, in8; in ff_mpadsp_apply_window_mips_float() local
275 [in5] "=&f" (in5), [in6] "=&f" (in6), in ff_mpadsp_apply_window_mips_float()
798 float in1, in2, in3, in4, in5, in6; in imdct36_mips_float() local
875 [in5] "=&f" (in5), [in6] "=&f" (in6), in imdct36_mips_float()
1003 [in5] "=&f" (in5), [out1] "=&f" (out1), in imdct36_mips_float()
1212 [in5] "=&f" (in5), [in6] "=&f" (in6), in imdct36_mips_float()
vp9_idct_msa.c
86 #define VP9_DOT_ADD_SUB_SRARI_PCK(in0, in1, in2, in3, in4, in5, in6, in7, \ argument
92 DOTP_SH4_SW(in0, in1, in0, in1, in4, in4, in5, in5, \
116 #define VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, \ argument
146 ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \
150 cnst1_m, cnst2_m, cnst3_m, in5, in2, \
152 BUTTERFLY_4(in7, in0, in2, in5, s1_m, s0_m, in2, in5); \
172 ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \
323 #define TRANSPOSE4X8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, \ argument
330 ILVR_H4_SH(in1, in0, in3, in2, in5, in4, in7, in6, \
479 #define VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, \ argument
[all …]
hevc_mc_bi_msa.c
142 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in hevc_bi_copy_6w_msa() local
154 LD_SH8(src1_ptr, src2_stride, in0, in1, in2, in3, in4, in5, in6, in7); in hevc_bi_copy_6w_msa()
164 HEVC_BI_RND_CLIP4_MAX_SATU(in4, in5, in6, in7, dst4, dst5, dst6, dst7, in hevc_bi_copy_6w_msa()
193 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in hevc_bi_copy_8w_msa() local
227 LD_SH6(src1_ptr, src2_stride, in0, in1, in2, in3, in4, in5); in hevc_bi_copy_8w_msa()
232 HEVC_BI_RND_CLIP2_MAX_SATU(in4, in5, dst4, dst5, 7, dst4, dst5); in hevc_bi_copy_8w_msa()
252 LD_SH8(src1_ptr, src2_stride, in0, in1, in2, in3, in4, in5, in6, in hevc_bi_copy_8w_msa()
259 HEVC_BI_RND_CLIP4_MAX_SATU(in4, in5, in6, in7, dst4, dst5, dst6, in hevc_bi_copy_8w_msa()
281 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in hevc_bi_copy_12w_msa() local
289 LD_SH4(src1_ptr + 8, src2_stride, in4, in5, in6, in7); in hevc_bi_copy_12w_msa()
[all …]
h263dsp_msa.c
32 v16u8 in0, in1, in2, in3, in4, in5, in6, in7; in h263_h_loop_filter_msa() local
38 LD_UB8(src, stride, in0, in1, in2, in3, in4, in5, in6, in7); in h263_h_loop_filter_msa()
39 TRANSPOSE8x4_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, in h263_h_loop_filter_msa()
hevc_mc_biw_msa.c
243 v8i16 in0, in1, in2, in3, in4, in5; in hevc_biwgt_copy_8w_msa() local
278 LD_SH6(src1_ptr, src2_stride, in0, in1, in2, in3, in4, in5); in hevc_biwgt_copy_8w_msa()
284 HEVC_BIW_RND_CLIP2_MAX_SATU(dst4, dst5, in4, in5, weight_vec, rnd_vec, in hevc_biwgt_copy_8w_msa()
331 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in hevc_biwgt_copy_12w_msa() local
347 LD_SH4(src1_ptr + 8, src2_stride, in4, in5, in6, in7); in hevc_biwgt_copy_12w_msa()
350 ILVR_D2_SH(in5, in4, in7, in6, in4, in5); in hevc_biwgt_copy_12w_msa()
363 HEVC_BIW_RND_CLIP2_MAX_SATU(dst4, dst5, in4, in5, weight_vec, rnd_vec, in hevc_biwgt_copy_12w_msa()
390 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in hevc_biwgt_copy_16w_msa() local
406 LD_SH4(src1_ptr + 8, src2_stride, in4, in5, in6, in7); in hevc_biwgt_copy_16w_msa()
414 HEVC_BIW_RND_CLIP4_MAX_SATU(tmp0, tmp1, tmp4, tmp5, in0, in1, in4, in5, in hevc_biwgt_copy_16w_msa()
[all …]
/third_party/openh264/codec/common/inc/
msa_macros.h
237 #define MSA_ST_V8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \ argument
240 MSA_ST_V4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * (stride), stride); \
385 #define MSA_VSHF_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
389 MSA_VSHF_B2(RTYPE, in4, in5, in6, in7, mask2, mask3, out2, out3); \
411 #define MSA_VSHF_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
415 MSA_VSHF_H2(RTYPE, in4, in5, in6, in7, mask2, mask3, out2, out3); \
437 #define MSA_VSHF_W4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
441 MSA_VSHF_W2(RTYPE, in4, in5, in6, in7, mask2, mask3, out2, out3); \
462 #define MSA_ILVEV_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
466 MSA_ILVEV_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
[all …]
/third_party/ffmpeg/libavutil/mips/
generic_macros_msa.h
383 #define ST_V6(RTYPE, in0, in1, in2, in3, in4, in5, pdst, stride) \ argument
386 ST_V2(RTYPE, in4, in5, (pdst) + 4 * stride, stride); \
390 #define ST_V8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \ argument
393 ST_V4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * stride, stride); \
531 #define ST12x8_UB(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \ argument
544 out5_m = __msa_copy_u_d((v2i64) in5, 0); \
553 out13_m = __msa_copy_u_w((v4i32) in5, 2); \
601 #define AVER_UB4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
605 AVER_UB2(RTYPE, in4, in5, in6, in7, out2, out3) \
670 #define VSHF_B3(RTYPE, in0, in1, in2, in3, in4, in5, mask0, mask1, mask2, \ argument
[all …]
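
The generic_macros_msa.h hits show how the wider store macros are layered: ST_V8 stores the first four vectors with ST_V4 at pdst and the last four with a second ST_V4 at pdst + 4 * stride, and ST_V6 does the same with ST_V4 plus ST_V2. A plain-C sketch of that composition pattern, with a hypothetical store_vec standing in for the single-vector MSA store:

#include <stddef.h>
#include <string.h>

/* Stand-in for one 16-byte vector store. */
static void store_vec(const void *vec, unsigned char *dst)
{
    memcpy(dst, vec, 16);
}

/* ST_V2-style helper: two rows, one stride apart. */
static void store_2(const void *v0, const void *v1,
                    unsigned char *dst, ptrdiff_t stride)
{
    store_vec(v0, dst);
    store_vec(v1, dst + stride);
}

/* ST_V4 = two ST_V2 calls; ST_V8 = two ST_V4 calls, the second one
 * starting 4 * stride further down, mirroring the macros above. */
static void store_4(const void *const *v, unsigned char *dst, ptrdiff_t stride)
{
    store_2(v[0], v[1], dst, stride);
    store_2(v[2], v[3], dst + 2 * stride, stride);
}

static void store_8(const void *const *v, unsigned char *dst, ptrdiff_t stride)
{
    store_4(v, dst, stride);
    store_4(v + 4, dst + 4 * stride, stride);
}
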
/third_party/openssl/crypto/des/asm/
des_enc.m4
81 #define in5 %i5
1096 rounds_macro(in5, out5, 1, .des_enc.1, in3, in4, retl)
1109 rounds_macro(out5, in5, -1, .des_dec.1, in4, in3, retl)
1130 ld [in0], in5 ! left
1140 ip_macro(in5, out5, in5, out5, in3, 0, 1, 1)
1142 rounds_macro(in5, out5, 1, .des_encrypt1.1, in3, in4) ! in4 not used
1144 fp_macro(in5, out5, 1) ! 1 for store to [in0]
1157 ip_macro(in5, out5, out5, in5, in4, 2, 0, 1) ! include dec, ks in4
1159 fp_macro(out5, in5, 1) ! 1 for store to [in0]
1201 ld [in0+4], in5 ! left
[all …]
/third_party/openssl/crypto/aes/asm/
aesp8-ppc.pl
674 my ($in0, $in1, $in2, $in3, $in4, $in5, $in6, $in7 )=map("v$_",(0..3,10..13));
785 lvx_u $in5,$x50,$inp
793 le?vperm $in5,$in5,$in5,$inpperm
798 vxor $out5,$in5,$rndkey0
905 vxor $in5,$in5,v31
923 vncipherlast $out6,$out6,$in5
925 lvx_u $in5,$x50,$inp
930 le?vperm $in5,$in5,$in5,$inpperm
954 vxor $out5,$in5,$rndkey0
1050 vxor $in5,$in5,v31
[all …]
/third_party/flutter/skia/third_party/externals/libwebp/src/dsp/
common_sse41.h
40 __m128i* const in3, __m128i* const in4, __m128i* const in5) { in VP8PlanarTo24b_SSE41() argument
77 WEBP_SSE41_SHUFF(B, in4, in5) in VP8PlanarTo24b_SSE41()
93 *in5 = _mm_or_si128(RG5, B5); in VP8PlanarTo24b_SSE41()
msa_macro.h
310 #define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
313 ST_B4(RTYPE, in4, in5, in6, in7, pdst + 4 * stride, stride); \
873 #define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
876 ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
899 #define ILVR_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
902 ILVR_H2(RTYPE, in4, in5, in6, in7, out2, out3); \
923 #define ILVR_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
926 ILVR_D2(RTYPE, in4, in5, in6, in7, out2, out3); \
984 #define PCKEV_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
987 PCKEV_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
[all …]
/third_party/skia/third_party/externals/libwebp/src/dsp/
common_sse41.h
40 __m128i* const in3, __m128i* const in4, __m128i* const in5) { in VP8PlanarTo24b_SSE41() argument
77 WEBP_SSE41_SHUFF(B, in4, in5) in VP8PlanarTo24b_SSE41()
93 *in5 = _mm_or_si128(RG5, B5); in VP8PlanarTo24b_SSE41()
msa_macro.h
310 #define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
313 ST_B4(RTYPE, in4, in5, in6, in7, pdst + 4 * stride, stride); \
873 #define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
876 ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
899 #define ILVR_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
902 ILVR_H2(RTYPE, in4, in5, in6, in7, out2, out3); \
923 #define ILVR_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
926 ILVR_D2(RTYPE, in4, in5, in6, in7, out2, out3); \
984 #define PCKEV_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ argument
987 PCKEV_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
[all …]
/third_party/ffmpeg/libavcodec/aarch64/
hevcdsp_idct_neon.S
280 .macro tr_8x4 shift, in0,in0t, in1,in1t, in2,in2t, in3,in3t, in4,in4t, in5,in5t, in6,in6t, in7,in7t…
290 sum_sub v30.4s, \in5\in5t, v0.h[7], +, \p2
291 sum_sub v28.4s, \in5\in5t, v0.h[6], +, \p2
292 sum_sub v29.4s, \in5\in5t, v0.h[4], -, \p2
305 sum_sub v31.4s, \in5\in5t, v0.h[5], +, \p2
307 fixsqrshrn \in5,\in5t, v26, \shift
411 .macro butterfly16 in0, in1, in2, in3, in4, in5, in6, in7
416 add \in3, \in4, \in5
417 sub \in4, \in4, \in5
418 add \in5, \in6, \in7
[all …]
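
The butterfly16 macro in the hevcdsp_idct_neon.S hits chains add/sub pairs: each step turns two inputs into their sum and difference (add \in3, \in4, \in5 followed by sub \in4, \in4, \in5, and so on). A one-step scalar sketch of that butterfly, shown in C for clarity; the real macro works on whole NEON vectors and writes the sum into a separate register:

#include <stdint.h>

/* One butterfly step: replace the pair (a, b) with (a + b, a - b). */
static inline void butterfly(int32_t *a, int32_t *b)
{
    int32_t sum  = *a + *b;
    int32_t diff = *a - *b;
    *a = sum;
    *b = diff;
}
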
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/sse/
ConvDwFp32IndirectRow.c
56 __m256 in5 = _mm256_loadu_ps(in[k + 4]); in ConvDwFp32Avx5x5() local
65 out1 = _mm256_fmadd_ps(in5, w5, out1); in ConvDwFp32Avx5x5()
/third_party/ffmpeg/libavcodec/
mpegaudiodec_template.c
324 SUINTFLOAT in0, in1, in2, in3, in4, in5, t1, t2; in imdct12() local
331 in5 = in[5*3] + in[4*3]; in imdct12()
332 in5 += in3; in imdct12()
339 t2 = MULH3(in1 - in5, C4, 2); in imdct12()
348 in5 += 2*in1; in imdct12()
349 in1 = MULH3(in5 + in3, C5, 1); in imdct12()
356 in5 = MULH3(in5 - in3, C6, 2); in imdct12()
358 out[ 5] = in0 - in5; in imdct12()
360 out[11] = in0 + in5; in imdct12()
/third_party/libjpeg-turbo/simd/powerpc/
jquanti-altivec.c
56 __vector unsigned char in0, in1, in2, in3, in4, in5, in6, in7; in jsimd_convsamp_altivec() local
77 out5 = (__vector short)VEC_UNPACKHU(in5); in jsimd_convsamp_altivec()
/third_party/flutter/skia/third_party/externals/libjpeg-turbo/simd/powerpc/
jquanti-altivec.c
56 __vector unsigned char in0, in1, in2, in3, in4, in5, in6, in7; in jsimd_convsamp_altivec() local
77 out5 = (__vector short)VEC_UNPACKHU(in5); in jsimd_convsamp_altivec()
/third_party/ffmpeg/libavcodec/x86/
flacdsp.asm
185 cglobal flac_decorrelate_indep%2_%1, 2, %2+2, %3+1, out, in0, in1, len, in2, in3, in4, in5, in6, in7
188 DEFINE_ARGS out, in0, in1, in2, in3, in4, in5
