
Searched refs:src0 (Results 1 – 25 of 431) sorted by relevance


/external/swiftshader/third_party/llvm-7.0/llvm/docs/
AMDGPUAsmGFX7.rst
21 …ds_add_rtn_u32 dst, src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
22 …ds_add_rtn_u64 dst, src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
23 …ds_add_src2_u32 src0 :ref:`ds_offset16<amdgpu_synid_ds_of…
24 …ds_add_src2_u64 src0 :ref:`ds_offset16<amdgpu_synid_ds_of…
25 …ds_add_u32 src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
26 …ds_add_u64 src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
27 …ds_and_b32 src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
28 …ds_and_b64 src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
29 …ds_and_rtn_b32 dst, src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
30 …ds_and_rtn_b64 dst, src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
[all …]
AMDGPUAsmGFX9.rst
21 …ds_add_f32 src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
22 …ds_add_rtn_f32 dst, src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
23 …ds_add_rtn_u32 dst, src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
24 …ds_add_rtn_u64 dst, src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
25 …ds_add_src2_f32 src0 :ref:`ds_offset16<amdgpu_synid_ds_of…
26 …ds_add_src2_u32 src0 :ref:`ds_offset16<amdgpu_synid_ds_of…
27 …ds_add_src2_u64 src0 :ref:`ds_offset16<amdgpu_synid_ds_of…
28 …ds_add_u32 src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
29 …ds_add_u64 src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
30 …ds_and_b32 src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
[all …]
AMDGPUAsmGFX8.rst
21 …ds_add_f32 src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
22 …ds_add_rtn_f32 dst, src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
23 …ds_add_rtn_u32 dst, src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
24 …ds_add_rtn_u64 dst, src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
25 …ds_add_src2_f32 src0 :ref:`ds_offset16<amdgpu_synid_ds_of…
26 …ds_add_src2_u32 src0 :ref:`ds_offset16<amdgpu_synid_ds_of…
27 …ds_add_src2_u64 src0 :ref:`ds_offset16<amdgpu_synid_ds_of…
28 …ds_add_u32 src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
29 …ds_add_u64 src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
30 …ds_and_b32 src0, src1 :ref:`ds_offset16<amdgpu_synid_ds_of…
[all …]
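
The three tables above (GFX7/8/9) enumerate the same family of LDS (DS) atomics. As a rough scalar model of their semantics — assuming the usual operand roles, where src0 addresses LDS memory, src1 supplies the data, and the _rtn variants return the pre-op value in dst; these roles are inferred from the tables, not quoted from the ISA manual — a C11 sketch:

```c
#include <stdatomic.h>
#include <stdint.h>

/* ds_add_rtn_u32 dst, src0, src1: atomic add returning the old value. */
static uint32_t ds_add_rtn_u32_model(_Atomic uint32_t *lds, uint32_t data)
{
    return atomic_fetch_add(lds, data); /* dst = value before the add */
}

/* ds_add_u32 src0, src1: same operation, nothing returned. */
static void ds_add_u32_model(_Atomic uint32_t *lds, uint32_t data)
{
    atomic_fetch_add(lds, data);
}
```
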
/external/mesa3d/prebuilt-intermediates/nir/
nir_builder_opcodes.h
29 nir_amul(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1) in nir_amul() argument
31 return nir_build_alu(build, nir_op_amul, src0, src1, NULL, NULL); in nir_amul()
34 nir_b16all_fequal16(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1) in nir_b16all_fequal16() argument
36 return nir_build_alu(build, nir_op_b16all_fequal16, src0, src1, NULL, NULL); in nir_b16all_fequal16()
39 nir_b16all_fequal2(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1) in nir_b16all_fequal2() argument
41 return nir_build_alu(build, nir_op_b16all_fequal2, src0, src1, NULL, NULL); in nir_b16all_fequal2()
44 nir_b16all_fequal3(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1) in nir_b16all_fequal3() argument
46 return nir_build_alu(build, nir_op_b16all_fequal3, src0, src1, NULL, NULL); in nir_b16all_fequal3()
49 nir_b16all_fequal4(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1) in nir_b16all_fequal4() argument
51 return nir_build_alu(build, nir_op_b16all_fequal4, src0, src1, NULL, NULL); in nir_b16all_fequal4()
[all …]
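
Every helper in this generated header has the same one-line shape; reconstructed from the snippet above (a sketch — it depends on Mesa's nir_builder.h for the types):

```c
#include "nir_builder.h" /* nir_builder, nir_ssa_def, nir_build_alu */

/* Each generated wrapper forwards its sources to nir_build_alu() with
 * the matching nir_op; unused source slots are passed as NULL. */
static inline nir_ssa_def *
nir_amul(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1)
{
   return nir_build_alu(build, nir_op_amul, src0, src1, NULL, NULL);
}
```
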
nir_constant_expressions.c
576 const int1_t src0 = -(int1_t)_src[0][_i].b; in evaluate_amul() local
580 int1_t dst = src0 * src1; in evaluate_amul()
595 const int8_t src0 = in evaluate_amul() local
600 int8_t dst = src0 * src1; in evaluate_amul()
614 const int16_t src0 = in evaluate_amul() local
619 int16_t dst = src0 * src1; in evaluate_amul()
633 const int32_t src0 = in evaluate_amul() local
638 int32_t dst = src0 * src1; in evaluate_amul()
652 const int64_t src0 = in evaluate_amul() local
657 int64_t dst = src0 * src1; in evaluate_amul()
[all …]
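
The constant folder repeats the same multiply once per bit size (1, 8, 16, 32, 64); the int1_t case negates the stored boolean bit to sign-extend it to 0/-1 before multiplying. A minimal model of the 32-bit step — nir_op_amul is a multiply used for address math, so folding it as a plain signed multiply matches the snippet:

```c
#include <stdint.h>

/* One per-component step of evaluate_amul at 32 bits (sketch): load
 * both sources, multiply, store the destination. */
static int32_t evaluate_amul_i32(int32_t src0, int32_t src1)
{
    return src0 * src1;
}
```
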
/external/mesa3d/prebuilt-intermediates/bifrost/
bi_generated_pack.h
35 unsigned src0 = bi_get_src(ins, regs, 0); in pan_pack_fma_rshift_and_i32() local
36 assert((1 << src0) & 0xfb); in pan_pack_fma_rshift_and_i32()
57 …return 0x301000 | (src0 << 0) | (src1 << 3) | (src2 << 6) | (lane2 << 9) | (not1 << 14) | (not_res… in pan_pack_fma_rshift_and_i32()
63 unsigned src0 = bi_get_src(ins, regs, 0); in pan_pack_add_iadd_u32() local
87 return 0xbc600 | (src0 << 0) | (src1 << 3) | (saturate << 8) | (derived_7 << 7); in pan_pack_add_iadd_u32()
99 …return 0xbec00 | (src0 << 0) | (src1 << 3) | (saturate << 8) | (derived_7 << 7) | (derived_9 << 9); in pan_pack_add_iadd_u32()
113 …return 0xbe000 | (src0 << 0) | (src1 << 3) | (saturate << 8) | (derived_7 << 7) | (derived_9 << 9); in pan_pack_add_iadd_u32()
122 unsigned src0 = bi_get_src(ins, regs, 0); in pan_pack_add_ld_var_flat() local
150 …return 0x538c0 | (src0 << 3) | (vecsize << 8) | (function << 0) | (derived_10 << 10) | (derived_19… in pan_pack_add_ld_var_flat()
152 return 0xcf8c0 | (src0 << 3) | (vecsize << 8) | (function << 0); in pan_pack_add_ld_var_flat()
[all …]
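
The generated Bifrost packers all follow one pattern: read source slots with bi_get_src(), assert the slot is encodable, then OR the fields into a fixed base word. A hedged reconstruction of the visible part of pan_pack_fma_rshift_and_i32 (fields truncated in the snippet are omitted, and the function name is illustrative):

```c
#include <assert.h>
#include <stdint.h>

static uint32_t pack_fma_rshift_and_i32_model(unsigned src0, unsigned src1,
                                              unsigned src2, unsigned lane2,
                                              unsigned not1)
{
    assert((1u << src0) & 0xfb); /* src0 must land in an encodable slot */
    /* 0x301000 is the opcode base; the remaining modifier fields are
     * truncated in the search snippet and left out here. */
    return 0x301000u | (src0 << 0) | (src1 << 3) | (src2 << 6) |
           (lane2 << 9) | (not1 << 14);
}
```
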
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/
mad-mix.ll
11 define float @v_mad_mix_f32_f16lo_f16lo_f16lo(half %src0, half %src1, half %src2) #0 {
12 %src0.ext = fpext half %src0 to float
15 %result = tail call float @llvm.fmuladd.f32(float %src0.ext, float %src1.ext, float %src2.ext)
23 define float @v_mad_mix_f32_f16hi_f16hi_f16hi_int(i32 %src0, i32 %src1, i32 %src2) #0 {
24 %src0.hi = lshr i32 %src0, 16
27 %src0.i16 = trunc i32 %src0.hi to i16
30 %src0.fp16 = bitcast i16 %src0.i16 to half
33 %src0.ext = fpext half %src0.fp16 to float
36 %result = tail call float @llvm.fmuladd.f32(float %src0.ext, float %src1.ext, float %src2.ext)
45 define float @v_mad_mix_f32_f16hi_f16hi_f16hi_elt(<2 x half> %src0, <2 x half> %src1, <2 x half> %s…
[all …]
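
The IR pattern this test exercises is "extend three f16 values to f32, then fused multiply-add", which the AMDGPU backend can select into a single v_mad_mix. The same computation in C (assuming compiler support for _Float16):

```c
#include <math.h>

/* fpext half -> float on each operand, then llvm.fmuladd.f32. */
static float mad_mix_model(_Float16 src0, _Float16 src1, _Float16 src2)
{
    return fmaf((float)src0, (float)src1, (float)src2);
}
```
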
/external/libvpx/libvpx/vpx_dsp/mips/
sum_squares_msa.c
22 uint64_t src0, src1, src2, src3; in vpx_sum_squares_2d_i16_msa() local
26 LD4(src, src_stride, src0, src1, src2, src3); in vpx_sum_squares_2d_i16_msa()
27 INSERT_D2_SH(src0, src1, diff0); in vpx_sum_squares_2d_i16_msa()
35 v8i16 src0, src1, src2, src3, src4, src5, src6, src7; in vpx_sum_squares_2d_i16_msa() local
37 LD_SH8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in vpx_sum_squares_2d_i16_msa()
38 DOTP_SH2_SW(src0, src1, src0, src1, mul0, mul1); in vpx_sum_squares_2d_i16_msa()
47 v8i16 src0, src1, src2, src3, src4, src5, src6, src7; in vpx_sum_squares_2d_i16_msa() local
49 LD_SH8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in vpx_sum_squares_2d_i16_msa()
50 DOTP_SH2_SW(src0, src1, src0, src1, mul0, mul1); in vpx_sum_squares_2d_i16_msa()
54 LD_SH8(src + 8, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in vpx_sum_squares_2d_i16_msa()
[all …]
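
A scalar reference for what the MSA kernel vectorizes — the sum of squared 16-bit samples over a size × size block (signature modeled on the snippet; treat it as a sketch):

```c
#include <stdint.h>

static uint64_t sum_squares_2d_i16_ref(const int16_t *src, int stride,
                                       int size)
{
    uint64_t ss = 0;
    for (int r = 0; r < size; ++r)
        for (int c = 0; c < size; ++c) {
            const int64_t v = src[r * stride + c];
            ss += (uint64_t)(v * v);
        }
    return ss;
}
```
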
vpx_convolve_copy_msa.c
19 v16u8 src0, src1, src2, src3, src4, src5, src6, src7; in copy_width8_msa() local
23 LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in copy_width8_msa()
26 out0 = __msa_copy_u_d((v2i64)src0, 0); in copy_width8_msa()
40 LD_UB4(src, src_stride, src0, src1, src2, src3); in copy_width8_msa()
43 out0 = __msa_copy_u_d((v2i64)src0, 0); in copy_width8_msa()
52 LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in copy_width8_msa()
55 out0 = __msa_copy_u_d((v2i64)src0, 0); in copy_width8_msa()
71 LD_UB4(src, src_stride, src0, src1, src2, src3); in copy_width8_msa()
73 out0 = __msa_copy_u_d((v2i64)src0, 0); in copy_width8_msa()
83 LD_UB2(src, src_stride, src0, src1); in copy_width8_msa()
[all …]
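
The copy kernels are straight block copies; the LD_UB8/__msa_copy_u_d pairs move eight rows of 8 bytes per iteration. Scalar equivalent (sketch):

```c
#include <stdint.h>
#include <string.h>

static void copy_width8_ref(const uint8_t *src, int src_stride,
                            uint8_t *dst, int dst_stride, int height)
{
    for (int r = 0; r < height; ++r)
        memcpy(dst + (size_t)r * dst_stride,
               src + (size_t)r * src_stride, 8);
}
```
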
variance_msa.c
45 uint32_t src0, src1, src2, src3; in sse_diff_4width_msa() local
54 LW4(src_ptr, src_stride, src0, src1, src2, src3); in sse_diff_4width_msa()
59 INSERT_W4_UB(src0, src1, src2, src3, src); in sse_diff_4width_msa()
74 v16u8 src0, src1, src2, src3; in sse_diff_8width_msa() local
80 LD_UB4(src_ptr, src_stride, src0, src1, src2, src3); in sse_diff_8width_msa()
85 PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2, src0, src1, in sse_diff_8width_msa()
87 CALC_MSE_AVG_B(src0, ref0, var, avg); in sse_diff_8width_msa()
141 v16u8 src0, src1, ref0, ref1; in sse_diff_32width_msa() local
146 LD_UB2(src_ptr, 16, src0, src1); in sse_diff_32width_msa()
150 CALC_MSE_AVG_B(src0, ref0, var, avg); in sse_diff_32width_msa()
[all …]
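
Each sse_diff_*width_msa variant accumulates two quantities per block — the sum of differences and the sum of squared differences — which the caller combines into a variance; that is what the CALC_MSE_AVG_B macro does per vector. Scalar model (sketch):

```c
#include <stdint.h>

static void sse_diff_ref(const uint8_t *src, int src_stride,
                         const uint8_t *ref, int ref_stride,
                         int w, int h, int32_t *sum, uint32_t *sse)
{
    *sum = 0;
    *sse = 0;
    for (int r = 0; r < h; ++r)
        for (int c = 0; c < w; ++c) {
            const int d = src[r * src_stride + c] - ref[r * ref_stride + c];
            *sum += d;
            *sse += (uint32_t)(d * d);
        }
}
```
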
vpx_convolve8_horiz_msa.c
19 v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3; in common_hz_8t_4x4_msa() local
33 LD_SB4(src, src_stride, src0, src1, src2, src3); in common_hz_8t_4x4_msa()
34 XORI_B4_128_SB(src0, src1, src2, src3); in common_hz_8t_4x4_msa()
35 HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3, in common_hz_8t_4x4_msa()
47 v16i8 src0, src1, src2, src3; in common_hz_8t_4x8_msa() local
62 LD_SB4(src, src_stride, src0, src1, src2, src3); in common_hz_8t_4x8_msa()
63 XORI_B4_128_SB(src0, src1, src2, src3); in common_hz_8t_4x8_msa()
65 HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3, in common_hz_8t_4x8_msa()
67 LD_SB4(src, src_stride, src0, src1, src2, src3); in common_hz_8t_4x8_msa()
68 XORI_B4_128_SB(src0, src1, src2, src3); in common_hz_8t_4x8_msa()
[all …]
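
HORIZ_8TAP_4WID_4VECS_FILT evaluates an 8-tap FIR filter across four pixels per vector. Per output pixel this is a rounded, clipped dot product; scalar sketch assuming libvpx's FILTER_BITS of 7 and taps centered three pixels to the left:

```c
#include <stdint.h>

static uint8_t filter8_horiz_ref(const uint8_t *src, const int16_t *filter)
{
    int sum = 0;
    for (int k = 0; k < 8; ++k)
        sum += src[k - 3] * filter[k];          /* 8-tap dot product */
    sum = (sum + 64) >> 7;                      /* round, FILTER_BITS = 7 */
    return (uint8_t)(sum < 0 ? 0 : sum > 255 ? 255 : sum); /* clip */
}
```
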
/external/swiftshader/src/Shader/
ShaderCore.hpp
253 void add(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
254 void iadd(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
255 void sub(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
256 void isub(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
257 void mad(Vector4f &dst, const Vector4f &src0, const Vector4f &src1, const Vector4f &src2);
258 void imad(Vector4f &dst, const Vector4f &src0, const Vector4f &src1, const Vector4f &src2);
259 void mul(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
260 void imul(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
262 void div(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
263 void idiv(Vector4f &dst, const Vector4f &src0, const Vector4f &src1);
[all …]
ShaderCore.cpp
752 void ShaderCore::add(Vector4f &dst, const Vector4f &src0, const Vector4f &src1) in add() argument
754 dst.x = src0.x + src1.x; in add()
755 dst.y = src0.y + src1.y; in add()
756 dst.z = src0.z + src1.z; in add()
757 dst.w = src0.w + src1.w; in add()
760 void ShaderCore::iadd(Vector4f &dst, const Vector4f &src0, const Vector4f &src1) in iadd() argument
762 dst.x = As<Float4>(As<Int4>(src0.x) + As<Int4>(src1.x)); in iadd()
763 dst.y = As<Float4>(As<Int4>(src0.y) + As<Int4>(src1.y)); in iadd()
764 dst.z = As<Float4>(As<Int4>(src0.z) + As<Int4>(src1.z)); in iadd()
765 dst.w = As<Float4>(As<Int4>(src0.w) + As<Int4>(src1.w)); in iadd()
[all …]
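
The iadd body is worth a note: SwiftShader keeps integer data in float-typed registers, so As<Int4>/As<Float4> are bit-casts around an integer add. Per-lane equivalent in C (sketch):

```c
#include <stdint.h>
#include <string.h>

/* Reinterpret each float's bits as a 32-bit integer, add with two's
 * complement wrap, and reinterpret the result back as a float. */
static float iadd_lane(float a, float b)
{
    uint32_t ia, ib;
    memcpy(&ia, &a, sizeof ia);
    memcpy(&ib, &b, sizeof ib);
    const uint32_t ir = ia + ib;
    float r;
    memcpy(&r, &ir, sizeof r);
    return r;
}
```
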
PixelPipeline.hpp
74 void MOV(Vector4s &dst, Vector4s &src0);
75 void ADD(Vector4s &dst, Vector4s &src0, Vector4s &src1);
76 void SUB(Vector4s &dst, Vector4s &src0, Vector4s &src1);
77 void MAD(Vector4s &dst, Vector4s &src0, Vector4s &src1, Vector4s &src2);
78 void MUL(Vector4s &dst, Vector4s &src0, Vector4s &src1);
79 void DP3(Vector4s &dst, Vector4s &src0, Vector4s &src1);
80 void DP4(Vector4s &dst, Vector4s &src0, Vector4s &src1);
81 void LRP(Vector4s &dst, Vector4s &src0, Vector4s &src1, Vector4s &src2);
85 void TEXDP3TEX(Vector4s &dst, Float4 &u, Float4 &v, Float4 &s, int stage, Vector4s &src0);
92 void TEXREG2AR(Vector4s &dst, Vector4s &src0, int stage);
[all …]
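
Most of these entries map one-to-one onto shader-model arithmetic; for example, LRP is the standard linear interpolate applied per component (sketch):

```c
/* lrp dst, src0, src1, src2  =>  dst = src2 + src0 * (src1 - src2) */
static float lrp(float s0, float s1, float s2)
{
    return s2 + s0 * (s1 - s2);
}
```
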
/external/libaom/libaom/av1/common/x86/
reconinter_avx2.c
31 const uint8_t *src0, int src0_stride, in av1_build_compound_diffwtd_mask_avx2() argument
39 const __m128i s0A = xx_loadl_32(src0); in av1_build_compound_diffwtd_mask_avx2()
40 const __m128i s0B = xx_loadl_32(src0 + src0_stride); in av1_build_compound_diffwtd_mask_avx2()
41 const __m128i s0C = xx_loadl_32(src0 + src0_stride * 2); in av1_build_compound_diffwtd_mask_avx2()
42 const __m128i s0D = xx_loadl_32(src0 + src0_stride * 3); in av1_build_compound_diffwtd_mask_avx2()
61 src0 += (src0_stride << 2); in av1_build_compound_diffwtd_mask_avx2()
68 const __m128i s0A = xx_loadl_64(src0); in av1_build_compound_diffwtd_mask_avx2()
69 const __m128i s0B = xx_loadl_64(src0 + src0_stride); in av1_build_compound_diffwtd_mask_avx2()
70 const __m128i s0C = xx_loadl_64(src0 + src0_stride * 2); in av1_build_compound_diffwtd_mask_avx2()
71 const __m128i s0D = xx_loadl_64(src0 + src0_stride * 3); in av1_build_compound_diffwtd_mask_avx2()
[all …]
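
The AVX2 kernel builds a per-pixel blend mask from the difference of two predictors. A scalar sketch of the usual DIFFWTD_38 construction — the base weight 38, the divisor 16, and the 64 cap are assumptions recalled from libaom's C reference path, not taken from this snippet:

```c
#include <stdint.h>
#include <stdlib.h>

static uint8_t diffwtd_mask_px(uint8_t s0, uint8_t s1)
{
    const int m = 38 + abs((int)s0 - (int)s1) / 16; /* assumed constants */
    return (uint8_t)(m > 64 ? 64 : m);
}
```
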
/external/libaom/libaom/aom_dsp/x86/
blend_a64_vmask_sse4.c
31 const uint8_t *src0, uint32_t src0_stride, in blend_a64_vmask_w4_sse4_1() argument
42 const __m128i v_res_w = blend_4(src0, src1, &v_m0_w, &v_m1_w); in blend_a64_vmask_w4_sse4_1()
49 src0 += src0_stride; in blend_a64_vmask_w4_sse4_1()
56 const uint8_t *src0, uint32_t src0_stride, in blend_a64_vmask_w8_sse4_1() argument
67 const __m128i v_res_w = blend_8(src0, src1, &v_m0_w, &v_m1_w); in blend_a64_vmask_w8_sse4_1()
74 src0 += src0_stride; in blend_a64_vmask_w8_sse4_1()
81 const uint8_t *src0, in blend_a64_vmask_w16n_sse4_1() argument
93 const __m128i v_resl_w = blend_8(src0 + c, src1 + c, &v_m0_w, &v_m1_w); in blend_a64_vmask_w16n_sse4_1()
95 blend_8(src0 + c + 8, src1 + c + 8, &v_m0_w, &v_m1_w); in blend_a64_vmask_w16n_sse4_1()
102 src0 += src0_stride; in blend_a64_vmask_w16n_sse4_1()
[all …]
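
All of the a64 blends in this directory compute the same per-pixel mix with a 6-bit alpha in [0, 64]; the SSE4 and AVX2 variants only change how many lanes run at once. Scalar form (sketch):

```c
#include <stdint.h>

static uint8_t blend_a64_px(uint8_t m, uint8_t v0, uint8_t v1)
{
    /* alpha m weights src0, 64 - m weights src1; +32 rounds the >> 6 */
    return (uint8_t)((m * v0 + (64 - m) * v1 + 32) >> 6);
}
```
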
blend_a64_mask_sse4.c
32 const uint8_t *src0, uint32_t src0_stride, in blend_a64_mask_w4_sse4_1() argument
42 const __m128i v_res_b = blend_4_u8(src0, src1, &v_m0_b, &v_m1_b, &_r); in blend_a64_mask_w4_sse4_1()
46 src0 += src0_stride; in blend_a64_mask_w4_sse4_1()
53 const uint8_t *src0, uint32_t src0_stride, in blend_a64_mask_w8_sse4_1() argument
63 const __m128i v_res_b = blend_8_u8(src0, src1, &v_m0_b, &v_m1_b, &_r); in blend_a64_mask_w8_sse4_1()
67 src0 += src0_stride; in blend_a64_mask_w8_sse4_1()
74 uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, in blend_a64_mask_w16n_sse4_1() argument
87 blend_16_u8(src0 + c, src1 + c, &v_m0_b, &v_m1_b, &_r); in blend_a64_mask_w16n_sse4_1()
92 src0 += src0_stride; in blend_a64_mask_w16n_sse4_1()
103 uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, in blend_a64_mask_sx_w4_sse4_1() argument
[all …]
blend_a64_mask_avx2.c
29 uint8_t *dst, const CONV_BUF_TYPE *src0, const CONV_BUF_TYPE *src1, in blend_a64_d16_mask_w16_avx2() argument
33 const __m256i s0_0 = yy_loadu_256(src0); in blend_a64_d16_mask_w16_avx2()
50 uint8_t *dst, const CONV_BUF_TYPE *src0, const CONV_BUF_TYPE *src1, in blend_a64_d16_mask_w32_avx2() argument
55 const __m256i s0_0 = yy_loadu_256(src0); in blend_a64_d16_mask_w32_avx2()
56 const __m256i s0_1 = yy_loadu_256(src0 + 16); in blend_a64_d16_mask_w32_avx2()
83 uint8_t *dst, uint32_t dst_stride, const CONV_BUF_TYPE *src0, in lowbd_blend_a64_d16_mask_subw0_subh0_w16_avx2() argument
92 blend_a64_d16_mask_w16_avx2(dst, src0, src1, &m0, round_offset, &v_maxval, in lowbd_blend_a64_d16_mask_subw0_subh0_w16_avx2()
96 src0 += src0_stride; in lowbd_blend_a64_d16_mask_subw0_subh0_w16_avx2()
102 uint8_t *dst, uint32_t dst_stride, const CONV_BUF_TYPE *src0, in lowbd_blend_a64_d16_mask_subw0_subh0_w32_avx2() argument
113 blend_a64_d16_mask_w32_avx2(dst + j, src0 + j, src1 + j, &m0, &m1, in lowbd_blend_a64_d16_mask_subw0_subh0_w32_avx2()
[all …]
/external/webp/src/dsp/
lossless_enc_msa.c
21 #define TRANSFORM_COLOR_8(src0, src1, dst0, dst1, c0, c1, mask0, mask1) do { \ argument
24 VSHF_B2_SH(src0, src0, src1, src1, mask0, mask0, g0, g1); \
27 t0 = __msa_subv_h((v8i16)src0, t0); \
29 t4 = __msa_srli_w((v4i32)src0, 16); \
34 VSHF_B2_UB(src0, t0, src1, t1, mask1, mask1, dst0, dst1); \
53 v16u8 src0, dst0; in TransformColor_MSA() local
64 LD_UB2(data, 4, src0, src1); in TransformColor_MSA()
65 TRANSFORM_COLOR_8(src0, src1, dst0, dst1, g2br, r2b, mask0, mask1); in TransformColor_MSA()
72 src0 = LD_UB(data); in TransformColor_MSA()
73 TRANSFORM_COLOR_4(src0, dst0, g2br, r2b, mask0, mask1); in TransformColor_MSA()
[all …]
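
TRANSFORM_COLOR_8 vectorizes WebP lossless's cross-color transform: signed deltas derived from the green (and red) channels are subtracted from red and blue. Scalar sketch after the spec's ColorTransformDelta ((int8_t)t * (int8_t)c >> 5); parameter names are illustrative:

```c
#include <stdint.h>

static uint8_t color_delta(int8_t t, int8_t c)
{
    return (uint8_t)((t * c) >> 5);
}

static uint32_t transform_color_px(uint32_t argb, int8_t g2r, int8_t g2b,
                                   int8_t r2b)
{
    const int8_t green = (int8_t)(argb >> 8);
    const int8_t red   = (int8_t)(argb >> 16);
    const uint32_t new_red  = (((argb >> 16) & 0xff)
                               - color_delta(g2r, green)) & 0xff;
    const uint32_t new_blue = ((argb & 0xff)
                               - color_delta(g2b, green)
                               - color_delta(r2b, red)) & 0xff;
    return (argb & 0xff00ff00u) | (new_red << 16) | new_blue;
}
```
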
lossless_msa.c
25 v16u8 src0, src1, src2, src3, dst0, dst1, dst2; \
26 LD_UB4(psrc, 16, src0, src1, src2, src3); \
27 VSHF_B2_UB(src0, src1, src1, src2, m0, m1, dst0, dst1); \
35 v16u8 src0, src1, src2, dst0, dst1, dst2; \
36 LD_UB3(psrc, 16, src0, src1, src2); \
37 VSHF_B2_UB(src0, src1, src1, src2, m0, m1, dst0, dst1); \
46 v16u8 src0, src1, src2 = { 0 }, dst0, dst1; \
47 LD_UB2(psrc, 16, src0, src1); \
48 VSHF_B2_UB(src0, src1, src1, src2, m0, m1, dst0, dst1); \
55 const v16u8 src0 = LD_UB(psrc); \
[all …]
/external/mesa3d/src/freedreno/ir3/
ir3_a4xx.c
43 struct ir3_instruction *ldgb, *src0, *src1, *byte_offset, *offset; in emit_intrinsic_load_ssbo() local
51 src0 = ir3_create_collect(ctx, (struct ir3_instruction*[]){ in emit_intrinsic_load_ssbo()
58 src0, 0, src1, 0); in emit_intrinsic_load_ssbo()
74 struct ir3_instruction *stgb, *src0, *src1, *src2, *byte_offset, *offset; in emit_intrinsic_store_ssbo() local
88 src0 = ir3_create_collect(ctx, ir3_get_src(ctx, &intr->src[0]), ncomp); in emit_intrinsic_store_ssbo()
95 stgb = ir3_STGB(b, ssbo, 0, src0, 0, src1, 0, src2, 0); in emit_intrinsic_store_ssbo()
126 struct ir3_instruction *atomic, *ssbo, *src0, *src1, *src2, *byte_offset, in emit_intrinsic_atomic_ssbo() local
141 src0 = ir3_get_src(ctx, &intr->src[2])[0]; in emit_intrinsic_atomic_ssbo()
150 atomic = ir3_ATOMIC_ADD_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0); in emit_intrinsic_atomic_ssbo()
153 atomic = ir3_ATOMIC_MIN_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0); in emit_intrinsic_atomic_ssbo()
[all …]
/external/libaom/libaom/aom_dsp/mips/
aom_convolve_copy_msa.c
19 v16u8 src0, src1, src2, src3, src4, src5, src6, src7; in copy_width8_msa() local
23 LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in copy_width8_msa()
26 out0 = __msa_copy_u_d((v2i64)src0, 0); in copy_width8_msa()
40 LD_UB4(src, src_stride, src0, src1, src2, src3); in copy_width8_msa()
43 out0 = __msa_copy_u_d((v2i64)src0, 0); in copy_width8_msa()
52 LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in copy_width8_msa()
55 out0 = __msa_copy_u_d((v2i64)src0, 0); in copy_width8_msa()
71 LD_UB4(src, src_stride, src0, src1, src2, src3); in copy_width8_msa()
73 out0 = __msa_copy_u_d((v2i64)src0, 0); in copy_width8_msa()
83 LD_UB2(src, src_stride, src0, src1); in copy_width8_msa()
[all …]
variance_msa.c
46 uint32_t src0, src1, src2, src3; in sse_diff_4width_msa() local
55 LW4(src_ptr, src_stride, src0, src1, src2, src3); in sse_diff_4width_msa()
60 INSERT_W4_UB(src0, src1, src2, src3, src); in sse_diff_4width_msa()
75 v16u8 src0, src1, src2, src3; in sse_diff_8width_msa() local
81 LD_UB4(src_ptr, src_stride, src0, src1, src2, src3); in sse_diff_8width_msa()
86 PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2, src0, src1, in sse_diff_8width_msa()
88 CALC_MSE_AVG_B(src0, ref0, var, avg); in sse_diff_8width_msa()
142 v16u8 src0, src1, ref0, ref1; in sse_diff_32width_msa() local
147 LD_UB2(src_ptr, 16, src0, src1); in sse_diff_32width_msa()
151 CALC_MSE_AVG_B(src0, ref0, var, avg); in sse_diff_32width_msa()
[all …]
sad_msa.c
29 uint32_t src0, src1, src2, src3, ref0, ref1, ref2, ref3; in sad_4width_msa() local
36 LW4(src_ptr, src_stride, src0, src1, src2, src3); in sad_4width_msa()
41 INSERT_W4_UB(src0, src1, src2, src3, src); in sad_4width_msa()
55 v16u8 src0, src1, src2, src3, ref0, ref1, ref2, ref3; in sad_8width_msa() local
59 LD_UB4(src, src_stride, src0, src1, src2, src3); in sad_8width_msa()
64 PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2, src0, src1, in sad_8width_msa()
66 sad += SAD_UB2_UH(src0, src1, ref0, ref1); in sad_8width_msa()
76 v16u8 src0, src1, ref0, ref1; in sad_16width_msa() local
80 LD_UB2(src, src_stride, src0, src1); in sad_16width_msa()
84 sad += SAD_UB2_UH(src0, src1, ref0, ref1); in sad_16width_msa()
[all …]
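
All the SAD kernels reduce to the same scalar definition — the sum of absolute differences between a source block and a reference block (sketch):

```c
#include <stdint.h>
#include <stdlib.h>

static uint32_t sad_ref(const uint8_t *src, int src_stride,
                        const uint8_t *ref, int ref_stride, int w, int h)
{
    uint32_t sad = 0;
    for (int r = 0; r < h; ++r)
        for (int c = 0; c < w; ++c)
            sad += (uint32_t)abs(src[r * src_stride + c]
                                 - ref[r * ref_stride + c]);
    return sad;
}
```
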
aom_convolve8_horiz_msa.c
22 v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3; in common_hz_8t_4x4_msa() local
36 LD_SB4(src, src_stride, src0, src1, src2, src3); in common_hz_8t_4x4_msa()
37 XORI_B4_128_SB(src0, src1, src2, src3); in common_hz_8t_4x4_msa()
38 HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3, in common_hz_8t_4x4_msa()
50 v16i8 src0, src1, src2, src3; in common_hz_8t_4x8_msa() local
65 LD_SB4(src, src_stride, src0, src1, src2, src3); in common_hz_8t_4x8_msa()
66 XORI_B4_128_SB(src0, src1, src2, src3); in common_hz_8t_4x8_msa()
68 HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3, in common_hz_8t_4x8_msa()
70 LD_SB4(src, src_stride, src0, src1, src2, src3); in common_hz_8t_4x8_msa()
71 XORI_B4_128_SB(src0, src1, src2, src3); in common_hz_8t_4x8_msa()
[all …]
