
Searched refs:dst1 (Results 1 – 25 of 194) sorted by relevance

/third_party/ffmpeg/tests/checkasm/
sbrdsp.c
39 LOCAL_ALIGNED_16(INTFLOAT, dst1, [64 + 256]); in test_sum64x5()
44 memcpy(dst1, dst0, (64 + 256) * sizeof(INTFLOAT)); in test_sum64x5()
46 call_new(dst1); in test_sum64x5()
47 if (!float_near_abs_eps_array(dst0, dst1, EPS, 64 + 256)) in test_sum64x5()
49 bench_new(dst1); in test_sum64x5()
72 LOCAL_ALIGNED_16(INTFLOAT, dst1, [64]); in test_neg_odd_64()
77 memcpy(dst1, dst0, (64) * sizeof(INTFLOAT)); in test_neg_odd_64()
79 call_new(dst1); in test_neg_odd_64()
80 if (!float_near_abs_eps_array(dst0, dst1, EPS, 64)) in test_neg_odd_64()
82 bench_new(dst1); in test_neg_odd_64()
[all …]
llviddsp.c
48 uint8_t *dst1 = av_mallocz(width); in check_add_bytes() local
55 if (!dst0 || !dst1) in check_add_bytes()
61 call_new(dst1, src1, width); in check_add_bytes()
62 if (memcmp(dst0, dst1, width)) in check_add_bytes()
64 bench_new(dst1, src1, width); in check_add_bytes()
70 av_free(dst1); in check_add_bytes()
76 uint8_t *dst1 = av_mallocz(width); in check_add_median_pred() local
96 call_new(dst1, src1, diff1, width, &A1, &B1); in check_add_median_pred()
97 if (memcmp(dst0, dst1, width) || (A0 != A1) || (B0 != B1)) in check_add_median_pred()
99 bench_new(dst1, src1, diff1, width, &A1, &B1); in check_add_median_pred()
[all …]
hevc_pel.c
51 AV_WN32A(dst1 + k, r); \
78 LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]); in checkasm_check_hevc_qpel()
100 int16_t *dstw0 = (int16_t *) dst0, *dstw1 = (int16_t *) dst1; in checkasm_check_hevc_qpel()
122 LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]); in checkasm_check_hevc_qpel_uni()
146 …call_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, si… in checkasm_check_hevc_qpel_uni()
147 if (memcmp(dst0, dst1, sizes[size] * sizes[size] * SIZEOF_PIXEL)) in checkasm_check_hevc_qpel_uni()
149 …bench_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, s… in checkasm_check_hevc_qpel_uni()
163 LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]); in checkasm_check_hevc_qpel_uni_w()
191 …call_new(dst1, sizes[size] * SIZEOF_PIXEL, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], *denom, … in checkasm_check_hevc_qpel_uni_w()
192 … if (memcmp(dst0, dst1, sizes[size] * sizes[size] * SIZEOF_PIXEL)) in checkasm_check_hevc_qpel_uni_w()
[all …]
fmtconvert.c
50 LOCAL_ALIGNED(32, float, dst1, [BUF_SIZE]); in checkasm_check_fmtconvert()
62 memset(dst1, 0, sizeof(*dst1) * BUF_SIZE); in checkasm_check_fmtconvert()
73 call_new(dst1, in, scale_arr[i], length[j]); in checkasm_check_fmtconvert()
75 if (!float_near_ulp_array(dst0, dst1, 3, length[j])) { in checkasm_check_fmtconvert()
80 bench_new(dst1, in, scale_arr[i], length[j]); in checkasm_check_fmtconvert()
94 call_new(&c, dst1, in, scale_arr, length[j]); in checkasm_check_fmtconvert()
96 if (!float_near_ulp_array(dst0, dst1, 3, length[j])) { in checkasm_check_fmtconvert()
102 bench_new(&c, dst1, in, scale_arr, length[j]); in checkasm_check_fmtconvert()
llviddspenc.c
49 LOCAL_ALIGNED_32(uint8_t, dst1, [MAX_STRIDE]); in check_diff_bytes()
59 memset(dst1, 0, MAX_STRIDE); in check_diff_bytes()
68 call_new(dst1, src1, src3, planes[i].w); in check_diff_bytes()
69 if (memcmp(dst0, dst1, planes[i].w)) in check_diff_bytes()
72 bench_new(dst1, src0, src2, planes[4].w); in check_diff_bytes()
80 LOCAL_ALIGNED_32(uint8_t, dst1, [MAX_STRIDE * MAX_HEIGHT]); in check_sub_left_pred()
88 memset(dst1, 0, MAX_STRIDE * MAX_HEIGHT); in check_sub_left_pred()
95 call_new(dst1, src1, planes[i].s, planes[i].w, planes[i].h); in check_sub_left_pred()
96 if (memcmp(dst0, dst1, planes[i].w * planes[i].h)) in check_sub_left_pred()
100 bench_new(dst1, src0, planes[4].s, planes[4].w, planes[4].h); in check_sub_left_pred()
vf_colorspace.c
72 uint8_t *dst0[3] = { dst0_y, dst0_u, dst0_v }, *dst1[3] = { dst1_y, dst1_u, dst1_v }; in check_yuv2yuv() local
106 call_new(dst1, (ptrdiff_t[3]) { y_dst_stride, uv_dst_stride, uv_dst_stride }, in check_yuv2yuv()
109 if (memcmp(dst0[0], dst1[0], y_dst_stride * H) || in check_yuv2yuv()
110 memcmp(dst0[1], dst1[1], uv_dst_stride * H >> ss_h) || in check_yuv2yuv()
111 memcmp(dst0[2], dst1[2], uv_dst_stride * H >> ss_h)) { in check_yuv2yuv()
140 int16_t *dst0[3] = { dst0_y, dst0_u, dst0_v }, *dst1[3] = { dst1_y, dst1_u, dst1_v }; in check_yuv2rgb() local
169 call_new(dst1, W, src, in check_yuv2rgb()
172 if (memcmp(dst0[0], dst1[0], W * H * sizeof(int16_t)) || in check_yuv2rgb()
173 memcmp(dst0[1], dst1[1], W * H * sizeof(int16_t)) || in check_yuv2rgb()
174 memcmp(dst0[2], dst1[2], W * H * sizeof(int16_t))) { in check_yuv2rgb()
[all …]
aacpsdsp.c
52 LOCAL_ALIGNED_16(INTFLOAT, dst1, [BUF_SIZE]); in test_add_squares()
60 memcpy(dst1, dst0, BUF_SIZE * sizeof(INTFLOAT)); in test_add_squares()
62 call_new(dst1, src, BUF_SIZE); in test_add_squares()
63 if (!float_near_abs_eps_array(dst0, dst1, EPS, BUF_SIZE)) in test_add_squares()
65 bench_new(dst1, src, BUF_SIZE); in test_add_squares()
71 LOCAL_ALIGNED_16(INTFLOAT, dst1, [BUF_SIZE], [2]); in test_mul_pair_single()
81 call_new(dst1, src0, src1, BUF_SIZE); in test_mul_pair_single()
82 if (!float_near_abs_eps_array((float *)dst0, (float *)dst1, EPS, BUF_SIZE * 2)) in test_mul_pair_single()
84 bench_new(dst1, src0, src1, BUF_SIZE); in test_mul_pair_single()
90 LOCAL_ALIGNED_16(INTFLOAT, dst1, [BUF_SIZE], [2]); in test_hybrid_analysis()
[all …]
hevc_sao.c
73 LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]); in check_sao_band()
92 memset(dst1, 0, BUF_SIZE); in check_sao_band()
95 call_new(dst1, src1, stride, stride, offset_val, left_class, w, block_size); in check_sao_band()
97 if (memcmp(dst0 + j*stride, dst1 + j*stride, w*SIZEOF_PIXEL)) in check_sao_band()
101 bench_new(dst1, src1, stride, stride, offset_val, left_class, block_size, block_size); in check_sao_band()
110 LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]); in check_sao_edge()
128 memset(dst1, 0, BUF_SIZE); in check_sao_edge()
132 call_new(dst1, src1 + offset, stride, offset_val, eo, w, block_size); in check_sao_edge()
134 if (memcmp(dst0 + j*stride, dst1 + j*stride, w*SIZEOF_PIXEL)) in check_sao_edge()
138 bench_new(dst1, src1 + offset, stride, offset_val, eo, block_size, block_size); in check_sao_edge()
h264dsp.c
209 uint8_t *dst1 = dst1_base + align; in check_idct() local
217 memcpy(dst1, dst, sz * PIXEL_STRIDE); in check_idct()
220 call_new(dst1, subcoef1, PIXEL_STRIDE); in check_idct()
221 if (memcmp(dst0, dst1, sz * PIXEL_STRIDE) || in check_idct()
224 bench_new(dst1, subcoef1, sz * SIZEOF_PIXEL); in check_idct()
237 LOCAL_ALIGNED_16(uint8_t, dst1, [16 * 16 * 2]); in check_idct_multiple()
306 memcpy(dst1, dst_full, 16 * 16 * SIZEOF_PIXEL); in check_idct_multiple()
308 call_new(dst1, block_offset, coef1, 16 * SIZEOF_PIXEL, nnzc); in check_idct_multiple()
309 if (memcmp(dst0, dst1, 16 * 16 * SIZEOF_PIXEL) || in check_idct_multiple()
312 bench_new(dst1, block_offset, coef1, 16 * SIZEOF_PIXEL, nnzc); in check_idct_multiple()
[all …]
huffyuvdsp.c
43 uint16_t *dst1 = av_mallocz(width * sizeof(uint16_t)); in check_add_int16() local
47 if (!src0 || !src1 || !dst0 || !dst1) in check_add_int16()
55 call_new(dst1, src1, mask, width); in check_add_int16()
56 if (memcmp(dst0, dst1, width * sizeof(uint16_t))) in check_add_int16()
58 bench_new(dst1, src1, mask, width); in check_add_int16()
64 av_free(dst1); in check_add_int16()
sw_scale.c
81 LOCAL_ALIGNED_16(uint8_t, dst1, [LARGEST_INPUT_SIZE]); in check_yuv2yuvX()
111 memset(dst1, 0, LARGEST_INPUT_SIZE * sizeof(dst1[0])); in check_yuv2yuvX()
120 … call_new((const int16_t*)vFilterData, filter_sizes[fsi], src, dst1, dstW - osi, dither, osi); in check_yuv2yuvX()
121 if (memcmp(dst0, dst1, LARGEST_INPUT_SIZE * sizeof(dst0[0]))) in check_yuv2yuvX()
124 … bench_new((const int16_t*)vFilterData, filter_sizes[fsi], src, dst1, dstW - osi, dither, osi); in check_yuv2yuvX()
161 LOCAL_ALIGNED_32(uint32_t, dst1, [SRC_PIXELS]); in check_hscale()
230 memset(dst1, 0, SRC_PIXELS * sizeof(dst1[0])); in check_hscale()
233 call_new(NULL, dst1, ctx->dstW, src, filterAvx2, filterPosAvx, width); in check_hscale()
234 if (memcmp(dst0, dst1, ctx->dstW * sizeof(dst0[0]))) in check_hscale()
audiodsp.c
89 LOCAL_ALIGNED(32, int32_t, dst1, [MAX_SIZE]); in checkasm_check_audiodsp()
110 call_new(dst1, src, min, max, len); in checkasm_check_audiodsp()
111 if (memcmp(dst0, dst1, len * sizeof(*dst0))) in checkasm_check_audiodsp()
113 bench_new(dst1, src, min, max, MAX_SIZE); in checkasm_check_audiodsp()
119 LOCAL_ALIGNED(32, float, dst1, [MAX_SIZE]); in checkasm_check_audiodsp()
138 call_new(dst1, src, len, min, max); in checkasm_check_audiodsp()
140 if (!float_near_ulp_array(dst0, dst1, 3, len)) in checkasm_check_audiodsp()
143 bench_new(dst1, src, MAX_SIZE, min, max); in checkasm_check_audiodsp()
pixblockdsp.c
58 call_new(dst1 + dst_offset, src11 + src_offset, 8); \
59 if (memcmp(src10, src11, BUF_SIZE)|| memcmp(dst0, dst1, BUF_SIZE)) \
61 bench_new(dst1 + dst_offset, src11 + src_offset, 8); \
75 call_new(dst1 + dst_offset, src11 + src_offset, src21 + src_offset, 8); \
76 …memcmp(src10, src11, BUF_SIZE) || memcmp(src20, src21, BUF_SIZE) || memcmp(dst0, dst1, BUF_SIZE)) \
78 bench_new(dst1 + dst_offset, src11 + src_offset, src21 + src_offset, 8); \
91 uint16_t *dst1 = (uint16_t *)dst1_; in checkasm_check_pixblockdsp() local
sw_rgb.c
51 LOCAL_ALIGNED_32(uint8_t, dst1, [MAX_STRIDE]); in check_shuffle_bytes()
56 memset(dst1, 0, MAX_STRIDE); in check_shuffle_bytes()
63 call_new(src1, dst1, width[i]); in check_shuffle_bytes()
64 if (memcmp(dst0, dst1, MAX_STRIDE)) in check_shuffle_bytes()
125 uint8_t *dst1 = dst1_buf + 2; in check_interleave_bytes() local
145 memset(dst1, 0, 2 * MAX_STRIDE * MAX_HEIGHT); in check_interleave_bytes()
163 call_new(src0 + src0_offset, src1 + src1_offset, dst1 + dst_offset, in check_interleave_bytes()
167 checkasm_check(uint8_t, dst0, 2*MAX_STRIDE, dst1, 2*MAX_STRIDE, in check_interleave_bytes()
171 bench_new(src0, src1, dst1, 127, MAX_HEIGHT, in check_interleave_bytes()
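
Across all of the checkasm hits above, dst1 plays the same role: dst0 holds the output of the reference C implementation, dst1 the output of the optimized routine under test, and the two buffers are compared either byte-exactly with memcmp or within a small tolerance for floating-point DSP code. The sketch below is a minimal, self-contained rendering of that pattern; it does not use checkasm's real call_ref/call_new/bench_new machinery, float_near_abs_eps_array is reimplemented locally, and scale_samples_ref/scale_samples_opt are hypothetical stand-ins for a C reference and its SIMD counterpart.

/* Minimal sketch of the dst0/dst1 verification pattern used throughout
 * tests/checkasm: run a reference and a candidate implementation into two
 * separate output buffers and compare them. */
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#define BUF_SIZE 1024
#define EPS      1e-6f

static void scale_samples_ref(float *dst, const float *src, float scale, int len)
{
    for (int i = 0; i < len; i++)
        dst[i] = src[i] * scale;
}

/* Stand-in for the hand-written SIMD version being checked. */
static void scale_samples_opt(float *dst, const float *src, float scale, int len)
{
    for (int i = 0; i < len; i++)
        dst[i] = src[i] * scale;
}

/* Local reimplementation of the tolerance compare seen in the hits. */
static int float_near_abs_eps_array(const float *a, const float *b, float eps, int len)
{
    for (int i = 0; i < len; i++)
        if (fabsf(a[i] - b[i]) > eps)
            return 0;
    return 1;
}

int main(void)
{
    float src[BUF_SIZE], dst0[BUF_SIZE], dst1[BUF_SIZE];

    for (int i = 0; i < BUF_SIZE; i++)
        src[i] = (float)rand() / RAND_MAX - 0.5f;

    scale_samples_ref(dst0, src, 0.75f, BUF_SIZE);  /* plays the role of call_ref(dst0, ...) */
    scale_samples_opt(dst1, src, 0.75f, BUF_SIZE);  /* plays the role of call_new(dst1, ...) */

    if (!float_near_abs_eps_array(dst0, dst1, EPS, BUF_SIZE)) {
        fprintf(stderr, "mismatch between reference and optimized output\n");
        return 1;
    }
    puts("ok");
    return 0;
}
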
/third_party/openh264/codec/encoder/core/loongarch/
dct_lasx.c
44 dst0, dst1, dst2, dst3) \ argument
52 dst1 = __lasx_xvslli_h(tms3, 1); \
53 dst1 = __lasx_xvadd_h(dst1, tms2); \
72 __m256i dst0, dst1, dst2, dst3; in WelsDctT4_lasx() local
101 dst0, dst1, dst2, dst3); in WelsDctT4_lasx()
102 LASX_TRANSPOSE4x4_H(dst0, dst1, dst2, dst3, in WelsDctT4_lasx()
105 dst0, dst1, dst2, dst3); in WelsDctT4_lasx()
106 dst0 = __lasx_xvpackev_d(dst1, dst0); in WelsDctT4_lasx()
127 __m256i tmp0, tmp1, tmp2, tmp3, dst0, dst1, dst2, dst3, dst4, in WelsDctFourT4_lasx() local
187 dst0, dst1, dst2, dst3); in WelsDctFourT4_lasx()
[all …]
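
In the openh264 LASX hit, dst0..dst3 are the four row vectors produced by the 4x4 forward-transform macro, and dst1 = __lasx_xvslli_h(tms3, 1); dst1 = __lasx_xvadd_h(dst1, tms2); has the shape (s3 << 1) + s2 from the standard H.264 core-transform butterfly. Below is a scalar sketch of that transform; it is the textbook 4x4 integer transform written for illustration, not a transcription of the LASX macro, so the variable naming and the pass order are assumptions.

/* Scalar sketch of the 4x4 H.264 forward core transform that
 * WelsDctT4_lasx() vectorizes four blocks at a time. */
#include <stdio.h>

static void core_dct4x4(const short in[4][4], short out[4][4])
{
    short tmp[4][4];
    int i, j;

    for (i = 0; i < 4; i++) {            /* horizontal butterfly per row */
        short s0 = in[i][0] + in[i][3];
        short s3 = in[i][0] - in[i][3];
        short s1 = in[i][1] + in[i][2];
        short s2 = in[i][1] - in[i][2];
        tmp[i][0] = s0 + s1;
        tmp[i][2] = s0 - s1;
        tmp[i][1] = (short)((s3 << 1) + s2);  /* same shape as dst1 in the hit */
        tmp[i][3] = (short)(s3 - (s2 << 1));
    }
    for (j = 0; j < 4; j++) {            /* vertical butterfly per column */
        short s0 = tmp[0][j] + tmp[3][j];
        short s3 = tmp[0][j] - tmp[3][j];
        short s1 = tmp[1][j] + tmp[2][j];
        short s2 = tmp[1][j] - tmp[2][j];
        out[0][j] = s0 + s1;
        out[2][j] = s0 - s1;
        out[1][j] = (short)((s3 << 1) + s2);
        out[3][j] = (short)(s3 - (s2 << 1));
    }
}

int main(void)
{
    const short diff[4][4] = { {  5, 11,  8, 10 },
                               {  9,  8,  4, 12 },
                               {  1, 10, 11,  4 },
                               { 19,  6, 15,  7 } };
    short coef[4][4];

    core_dct4x4(diff, coef);
    printf("DC coefficient: %d\n", coef[0][0]);  /* equals the sum of all 16 residuals */
    return 0;
}
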
/third_party/skia/third_party/externals/libwebp/src/dsp/
lossless_msa.c
25 v16u8 src0, src1, src2, src3, dst0, dst1, dst2; \
27 VSHF_B2_UB(src0, src1, src1, src2, m0, m1, dst0, dst1); \
29 ST_UB2(dst0, dst1, pdst, 16); \
35 v16u8 src0, src1, src2, dst0, dst1, dst2; \
37 VSHF_B2_UB(src0, src1, src1, src2, m0, m1, dst0, dst1); \
39 ST_UB2(dst0, dst1, pdst, 16); \
46 v16u8 src0, src1, src2 = { 0 }, dst0, dst1; \
48 VSHF_B2_UB(src0, src1, src1, src2, m0, m1, dst0, dst1); \
50 pix_d = __msa_copy_s_d((v2i64)dst1, 0); \
81 #define TRANSFORM_COLOR_INVERSE_8(src0, src1, dst0, dst1, \ argument
[all …]
lossless_enc_msa.c
21 #define TRANSFORM_COLOR_8(src0, src1, dst0, dst1, c0, c1, mask0, mask1) do { \ argument
34 VSHF_B2_UB(src0, t0, src1, t1, mask1, mask1, dst0, dst1); \
63 v16u8 src1, dst1; in TransformColor_MSA() local
65 TRANSFORM_COLOR_8(src0, src1, dst0, dst1, g2br, r2b, mask0, mask1); in TransformColor_MSA()
66 ST_UB2(dst0, dst1, data, 4); in TransformColor_MSA()
106 v16u8 src1, dst1, tmp1; in SubtractGreenFromBlueAndRed_MSA() local
109 SUB2(src0, tmp0, src1, tmp1, dst0, dst1); in SubtractGreenFromBlueAndRed_MSA()
110 ST_UB2(dst0, dst1, ptemp_data, 16); in SubtractGreenFromBlueAndRed_MSA()
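
In the libwebp MSA hits, src1/dst1 are simply the second 16-byte vector of pixels, so each loop iteration transforms 32 bytes at once; the per-pixel operation is the lossless subtract-green transform (and its color-transform relative). A scalar sketch of subtract-green follows, assuming libwebp's packed 0xAARRGGBB pixel layout; it mirrors the scalar reference path rather than the shuffle-and-subtract vector code above.

/* Scalar sketch of the subtract-green transform that
 * SubtractGreenFromBlueAndRed_MSA() vectorizes: the green channel is
 * subtracted (mod 256) from both the red and the blue channel of every
 * packed 0xAARRGGBB pixel.  The vector version shuffles the green bytes
 * into the red/blue lanes and subtracts whole registers instead. */
#include <stdint.h>
#include <stdio.h>

static void subtract_green_c(uint32_t *argb, int num_pixels)
{
    for (int i = 0; i < num_pixels; i++) {
        const uint32_t p     = argb[i];
        const uint32_t green = (p >> 8) & 0xff;
        const uint32_t new_r = (((p >> 16) & 0xff) - green) & 0xff;
        const uint32_t new_b = ((p & 0xff) - green) & 0xff;
        argb[i] = (p & 0xff00ff00u) | (new_r << 16) | new_b;
    }
}

int main(void)
{
    uint32_t px[2] = { 0xff804020u, 0xff102030u };

    subtract_green_c(px, 2);
    printf("%08x %08x\n", (unsigned)px[0], (unsigned)px[1]);  /* ff4040e0 fff02010 */
    return 0;
}
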
/third_party/ffmpeg/libavcodec/mips/
hevc_mc_bi_msa.c
74 v8i16 dst0, dst1, dst2, dst3; in hevc_bi_copy_4w_msa() local
96 ILVRL_B2_SH(zero, src0, dst0, dst1); in hevc_bi_copy_4w_msa()
97 SLLI_2V(dst0, dst1, 6); in hevc_bi_copy_4w_msa()
98 HEVC_BI_RND_CLIP2_MAX_SATU(in0, in1, dst0, dst1, 7, dst0, dst1); in hevc_bi_copy_4w_msa()
99 dst0 = (v8i16) __msa_pckev_b((v16i8) dst1, (v16i8) dst0); in hevc_bi_copy_4w_msa()
117 ILVRL_B2_SH(zero, src0, dst0, dst1); in hevc_bi_copy_4w_msa()
119 SLLI_4V(dst0, dst1, dst2, dst3, 6); in hevc_bi_copy_4w_msa()
120 HEVC_BI_RND_CLIP4_MAX_SATU(in0, in1, in2, in3, dst0, dst1, dst2, in hevc_bi_copy_4w_msa()
121 dst3, 7, dst0, dst1, dst2, dst3); in hevc_bi_copy_4w_msa()
122 PCKEV_B2_SH(dst1, dst0, dst3, dst2, dst0, dst1); in hevc_bi_copy_4w_msa()
[all …]
hevc_mc_uniw_msa.c
70 v8i16 dst0, dst1, dst2, dst3, offset_vec; in hevc_uniwgt_copy_4w_msa() local
97 ILVRL_B2_SH(zero, src0, dst0, dst1); in hevc_uniwgt_copy_4w_msa()
98 SLLI_2V(dst0, dst1, 6); in hevc_uniwgt_copy_4w_msa()
99 HEVC_UNIW_RND_CLIP2_MAX_SATU_H(dst0, dst1, weight_vec, offset_vec, in hevc_uniwgt_copy_4w_msa()
100 rnd_vec, dst0, dst1); in hevc_uniwgt_copy_4w_msa()
101 out0 = (v16u8) __msa_pckev_b((v16i8) dst1, (v16i8) dst0); in hevc_uniwgt_copy_4w_msa()
111 ILVRL_B2_SH(zero, src0, dst0, dst1); in hevc_uniwgt_copy_4w_msa()
113 SLLI_4V(dst0, dst1, dst2, dst3, 6); in hevc_uniwgt_copy_4w_msa()
114 HEVC_UNIW_RND_CLIP4_MAX_SATU_H(dst0, dst1, dst2, dst3, weight_vec, in hevc_uniwgt_copy_4w_msa()
115 offset_vec, rnd_vec, dst0, dst1, in hevc_uniwgt_copy_4w_msa()
[all …]
hevc_mc_biw_msa.c
102 v8i16 dst0, dst1, dst2, dst3, weight_vec; in hevc_biwgt_copy_4w_msa() local
136 ILVRL_B2_SH(zero, src0, dst0, dst1); in hevc_biwgt_copy_4w_msa()
137 SLLI_2V(dst0, dst1, 6); in hevc_biwgt_copy_4w_msa()
138 HEVC_BIW_RND_CLIP2_MAX_SATU(dst0, dst1, in0, in1, weight_vec, rnd_vec, in hevc_biwgt_copy_4w_msa()
139 offset_vec, dst0, dst1); in hevc_biwgt_copy_4w_msa()
140 out0 = (v16u8) __msa_pckev_b((v16i8) dst1, (v16i8) dst0); in hevc_biwgt_copy_4w_msa()
158 ILVRL_B2_SH(zero, src0, dst0, dst1); in hevc_biwgt_copy_4w_msa()
160 SLLI_4V(dst0, dst1, dst2, dst3, 6); in hevc_biwgt_copy_4w_msa()
161 HEVC_BIW_RND_CLIP4_MAX_SATU(dst0, dst1, dst2, dst3, in0, in1, in2, in hevc_biwgt_copy_4w_msa()
163 dst0, dst1, dst2, dst3); in hevc_biwgt_copy_4w_msa()
[all …]
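
In the MIPS MSA motion-compensation hits, dst0..dst3 are vectors of source pixels widened to 16 bits (ILVRL_B2_SH with zero), shifted up by 6 (SLLI_2V/SLLI_4V), combined with the second prediction in0..in3, then rounded, clipped and packed back to bytes. A scalar sketch of the plain bi-prediction copy path is below; the constants follow FFmpeg's 8-bit HEVC pipeline (down-shift of 7, matching the 7 passed to HEVC_BI_RND_CLIP2_MAX_SATU above), but the helper itself is illustrative, not a transcription of the MSA macros.

/* Scalar sketch of the 8-bit bi-prediction copy that hevc_bi_copy_*_msa()
 * vectorizes: widen the source bytes, shift them up by 6 so they sit at the
 * same intermediate precision as the second prediction, add, round, shift
 * down by 7 and clip back to 8 bits. */
#include <stdint.h>
#include <stdio.h>

static inline uint8_t clip_u8(int v)
{
    return (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
}

static void bi_copy_row_c(uint8_t *dst, const uint8_t *src,
                          const int16_t *in, int width)
{
    const int shift  = 7;                 /* 14 + 1 - bit_depth for 8-bit */
    const int offset = 1 << (shift - 1);  /* rounding term                */

    for (int x = 0; x < width; x++)
        dst[x] = clip_u8(((src[x] << 6) + in[x] + offset) >> shift);
}

int main(void)
{
    const uint8_t src[4] = { 0, 64, 128, 255 };
    const int16_t in[4]  = { 0, 64 << 6, 200 << 6, 255 << 6 };
    uint8_t dst[4];

    bi_copy_row_c(dst, src, in, 4);
    printf("%d %d %d %d\n", dst[0], dst[1], dst[2], dst[3]);  /* 0 64 164 255 */
    return 0;
}
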
/third_party/mesa3d/src/intel/compiler/
test_fs_saturate_propagation.cpp
117 fs_reg dst1 = v->vgrf(glsl_type::float_type); in TEST_F() local
121 set_saturate(true, bld.MOV(dst1, dst0)); in TEST_F()
152 fs_reg dst1 = v->vgrf(glsl_type::float_type); in TEST_F() local
157 set_saturate(true, bld.MOV(dst1, dst0)); in TEST_F()
190 fs_reg dst1 = v->vgrf(glsl_type::float_type); in TEST_F() local
195 set_saturate(true, bld.MOV(dst1, dst0)); in TEST_F()
225 fs_reg dst1 = v->vgrf(glsl_type::float_type); in TEST_F() local
229 set_saturate(true, bld.MOV(dst1, dst0)); in TEST_F()
259 fs_reg dst1 = v->vgrf(glsl_type::float_type); in TEST_F() local
264 set_saturate(true, bld.MOV(dst1, dst0)); in TEST_F()
[all …]
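
In the Mesa unit tests, dst1 is always the destination of a saturating MOV that copies dst0, the result of a preceding ALU instruction; the pass under test tries to fold that saturate modifier back onto the producing instruction. The toy pass below illustrates the idea on an invented two-opcode IR; it is not Mesa's fs_inst representation, and it only handles the adjacent-instruction case that most of these tests set up.

/* Toy illustration of saturate propagation: when an ALU result is copied by
 * a saturating MOV, fold the saturate onto the producing instruction and
 * leave a plain copy behind.  The IR here is invented for the sketch. */
#include <stdbool.h>
#include <stdio.h>

enum op { OP_ADD, OP_MOV };

struct inst {
    enum op op;
    int dst, src0, src1;   /* virtual register numbers (src1 unused by MOV) */
    bool saturate;
};

static void propagate_saturate(struct inst *prog, int n)
{
    for (int i = 1; i < n; i++) {
        if (prog[i].op == OP_MOV && prog[i].saturate &&
            prog[i - 1].dst == prog[i].src0) {
            prog[i - 1].saturate = true;   /* ADD dst0 becomes ADD.sat dst0        */
            prog[i].saturate     = false;  /* MOV dst1, dst0 becomes a plain copy  */
        }
    }
}

int main(void)
{
    /* Mirrors the test shape: ADD dst0, a, b; MOV.sat dst1, dst0 */
    struct inst prog[2] = {
        { OP_ADD, 0, 1,  2, false },
        { OP_MOV, 3, 0, -1, true  },
    };

    propagate_saturate(prog, 2);
    printf("add.sat=%d mov.sat=%d\n", prog[0].saturate, prog[1].saturate);  /* 1 0 */
    return 0;
}
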
/third_party/ffmpeg/libavcodec/loongarch/
hevc_mc_bi_lsx.c
65 __m128i dst0, dst1, dst2, dst3; in hevc_bi_copy_4w_lsx() local
96 DUP2_ARG2(__lsx_vilvh_b, zero, src0, zero, src1, dst1, dst3); in hevc_bi_copy_4w_lsx()
97 DUP2_ARG2(__lsx_vslli_h, dst1, 6, dst3, 6, dst1, dst3); in hevc_bi_copy_4w_lsx()
98 dst0 = hevc_bi_rnd_clip(in0, dst0, in1, dst1); in hevc_bi_copy_4w_lsx()
99 dst1 = hevc_bi_rnd_clip(in2, dst2, in3, dst3); in hevc_bi_copy_4w_lsx()
105 __lsx_vstelm_w(dst1, dst, 0, 0); in hevc_bi_copy_4w_lsx()
106 __lsx_vstelm_w(dst1, dst + dst_stride, 0, 1); in hevc_bi_copy_4w_lsx()
107 __lsx_vstelm_w(dst1, dst + dst_stride_2x, 0, 2); in hevc_bi_copy_4w_lsx()
108 __lsx_vstelm_w(dst1, dst + dst_stride_3x, 0, 3); in hevc_bi_copy_4w_lsx()
149 __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7; in hevc_bi_copy_6w_lsx() local
[all …]
h264idct_lasx.c
87 __m256i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7; in ff_h264_idct8_addblk_lasx() local
205 dst, dst_stride_3x, dst0, dst1, dst2, dst3); in ff_h264_idct8_addblk_lasx()
210 DUP4_ARG2(__lasx_xvilvl_b, zero, dst0, zero, dst1, zero, dst2, zero, dst3, in ff_h264_idct8_addblk_lasx()
211 dst0, dst1, dst2, dst3); in ff_h264_idct8_addblk_lasx()
214 DUP4_ARG3(__lasx_xvpermi_q, dst1, dst0, 0x20, dst3, dst2, 0x20, dst5, in ff_h264_idct8_addblk_lasx()
215 dst4, 0x20, dst7, dst6, 0x20, dst0, dst1, dst2, dst3); in ff_h264_idct8_addblk_lasx()
217 res1 = __lasx_xvadd_h(res1, dst1); in ff_h264_idct8_addblk_lasx()
266 __m256i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7; in ff_h264_idct8_dc_addblk_lasx() local
275 dst, dst_stride_3x, dst0, dst1, dst2, dst3); in ff_h264_idct8_dc_addblk_lasx()
280 DUP4_ARG1(__lasx_vext2xv_hu_bu, dst0, dst1, dst2, dst3, in ff_h264_idct8_dc_addblk_lasx()
[all …]
hevcdsp_lsx.c
285 int16_t* dst1 = dst + 8; in hevc_copy_16w_lsx() local
310 __lsx_vst(in0_l, dst1, 0); in hevc_copy_16w_lsx()
311 __lsx_vstx(in1_l, dst1, dst_stride_x); in hevc_copy_16w_lsx()
312 __lsx_vstx(in2_l, dst1, dst_stride_2x); in hevc_copy_16w_lsx()
313 __lsx_vstx(in3_l, dst1, dst_stride_3x); in hevc_copy_16w_lsx()
315 dst1 += dst_stride_2x; in hevc_copy_16w_lsx()
327 __lsx_vst(in0_l, dst1, 0); in hevc_copy_16w_lsx()
328 __lsx_vstx(in1_l, dst1, dst_stride_x); in hevc_copy_16w_lsx()
329 __lsx_vstx(in2_l, dst1, dst_stride_2x); in hevc_copy_16w_lsx()
330 __lsx_vstx(in3_l, dst1, dst_stride_3x); in hevc_copy_16w_lsx()
[all …]
/third_party/ffmpeg/libavfilter/x86/
vf_spp.c
37 #define REQUANT_CORE(dst0, dst1, dst2, dst3, src0, src1, src2, src3) \ in hardthresh_mmx() argument
76 "movq %%mm7, " #dst1 " \n" \ in hardthresh_mmx()
108 #define REQUANT_CORE(dst0, dst1, dst2, dst3, src0, src1, src2, src3) \ in softthresh_mmx() argument
156 "movq %%mm7, " #dst1 " \n" \ in softthresh_mmx()
185 uint8_t *dst1 = dst; in store_slice_mmx() local
210 : "+r" (src1), "+r"(dst1) in store_slice_mmx()
