/*
 *  Copyright (c) 2022 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "./vp8_rtcd.h"
#include "vpx_util/loongson_intrinsics.h"
#include "vp8/encoder/block.h"

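/* Sum of squared differences between the 16 coefficients in coeff_ptr and
 * the 16 dequantized coefficients in dq_coeff_ptr, computed with LSX
 * vector intrinsics. */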
int32_t vp8_block_error_lsx(int16_t *coeff_ptr, int16_t *dq_coeff_ptr) {
  int32_t err = 0;
  __m128i dq_coeff0, dq_coeff1, coeff0, coeff1;
  __m128i reg0, reg1, reg2, reg3, error;

  /* Load 16 coefficients and 16 dequantized coefficients (two 128-bit
   * vectors each). */
  DUP4_ARG2(__lsx_vld, coeff_ptr, 0, coeff_ptr, 16, dq_coeff_ptr, 0,
            dq_coeff_ptr, 16, coeff0, coeff1, dq_coeff0, dq_coeff1);
  /* Widening subtract of the even and odd halfword lanes gives the
   * differences as 32-bit words. */
  DUP2_ARG2(__lsx_vsubwev_w_h, coeff0, dq_coeff0, coeff1, dq_coeff1, reg0,
            reg2);
  DUP2_ARG2(__lsx_vsubwod_w_h, coeff0, dq_coeff0, coeff1, dq_coeff1, reg1,
            reg3);
  /* Accumulate the squared differences, then reduce to a scalar. */
  error = __lsx_vmul_w(reg0, reg0);
  DUP2_ARG3(__lsx_vmadd_w, error, reg1, reg1, error, reg2, reg2, error, error);
  error = __lsx_vmadd_w(error, reg3, reg3);
  error = __lsx_vhaddw_d_w(error, error);
  err = __lsx_vpickve2gr_w(error, 0);
  err += __lsx_vpickve2gr_w(error, 2);
  return err;
}

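/* Sums the coefficient error over the macroblock's blocks 0..15, two 4x4
 * blocks per loop iteration. When dc == 1 the DC difference of each block
 * is masked to zero so it does not contribute to the total. */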
int32_t vp8_mbblock_error_lsx(MACROBLOCK *mb, int32_t dc) {
  BLOCK *be;
  BLOCKD *bd;
  int16_t *coeff, *dq_coeff;
  int32_t err = 0;
  uint32_t loop_cnt;
  __m128i src0, src1, src2, src3;
  __m128i tmp0, tmp1, tmp2, tmp3;
  __m128i reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, error;
  __m128i mask0 = __lsx_vldi(0xFF);
  __m128i zero = __lsx_vldi(0);

  /* Zero the mask lane that covers the DC difference so the DC term is
   * dropped from the sum below. */
  if (dc == 1) {
    mask0 = __lsx_vinsgr2vr_w(mask0, 0, 0);
  }

  for (loop_cnt = 0; loop_cnt < 8; loop_cnt++) {
    int32_t loop_tmp = loop_cnt << 1;
    /* Load the coefficients and dequantized coefficients of two
     * consecutive 4x4 blocks. */
    be = &mb->block[loop_tmp];
    bd = &mb->e_mbd.block[loop_tmp];
    coeff = be->coeff;
    dq_coeff = bd->dqcoeff;
    DUP4_ARG2(__lsx_vld, coeff, 0, coeff, 16, dq_coeff, 0, dq_coeff, 16, src0,
              src1, tmp0, tmp1);
    be = &mb->block[loop_tmp + 1];
    bd = &mb->e_mbd.block[loop_tmp + 1];
    coeff = be->coeff;
    dq_coeff = bd->dqcoeff;
    DUP4_ARG2(__lsx_vld, coeff, 0, coeff, 16, dq_coeff, 0, dq_coeff, 16, src2,
              src3, tmp2, tmp3);
    /* Widening subtract of even/odd halfword lanes, then apply the DC mask
     * to the two vectors that hold each block's DC difference. */
    DUP4_ARG2(__lsx_vsubwev_w_h, src0, tmp0, src1, tmp1, src2, tmp2, src3, tmp3,
              reg0, reg2, reg4, reg6);
    DUP4_ARG2(__lsx_vsubwod_w_h, src0, tmp0, src1, tmp1, src2, tmp2, src3, tmp3,
              reg1, reg3, reg5, reg7);
    DUP2_ARG3(__lsx_vbitsel_v, zero, reg0, mask0, zero, reg4, mask0, reg0,
              reg4);
    /* Accumulate the squared differences and reduce to a scalar added to
     * the running total. */
    error = __lsx_vmul_w(reg0, reg0);
    DUP4_ARG3(__lsx_vmadd_w, error, reg1, reg1, error, reg2, reg2, error, reg3,
              reg3, error, reg4, reg4, error, error, error, error);
    DUP2_ARG3(__lsx_vmadd_w, error, reg5, reg5, error, reg6, reg6, error,
              error);
    error = __lsx_vmadd_w(error, reg7, reg7);
    error = __lsx_vhaddw_d_w(error, error);
    error = __lsx_vhaddw_q_d(error, error);
    err += __lsx_vpickve2gr_w(error, 0);
  }
  return err;
}