/*
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 * Contributed by Hao Chen <chenhao@loongson.cn>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/loongarch/loongson_intrinsics.h"
#include "h264_intrapred_lasx.h"

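/* PRED16X16_PLANE computes the two gradient sums of H.264 16x16 plane
 * prediction: res0 (H) over the row above the block and res1 (V) over the
 * column to its left, each of the form sum(i * (edge[7 + i] - edge[7 - i]))
 * for i = 1..8. The top-row sum is vectorized; the left-column sum is done
 * with scalar loads. */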
#define PRED16X16_PLANE                                                        \
    ptrdiff_t stride_1, stride_2, stride_3, stride_4, stride_5, stride_6;      \
    ptrdiff_t stride_8, stride_15;                                             \
    int32_t res0, res1, res2, res3, cnt;                                       \
    uint8_t *src0, *src1;                                                      \
    __m256i reg0, reg1, reg2, reg3, reg4;                                      \
    __m256i tmp0, tmp1, tmp2, tmp3;                                            \
    __m256i shuff = {0x0B040A0509060807, 0x0F000E010D020C03, 0, 0};            \
    __m256i mult = {0x0004000300020001, 0x0008000700060005, 0, 0};             \
    __m256i int_mult1 = {0x0000000100000000, 0x0000000300000002,               \
                         0x0000000500000004, 0x0000000700000006};              \
                                                                               \
    stride_1 = -stride;                                                        \
    stride_2 = stride << 1;                                                    \
    stride_3 = stride_2 + stride;                                              \
    stride_4 = stride_2 << 1;                                                  \
    stride_5 = stride_4 + stride;                                              \
    stride_6 = stride_3 << 1;                                                  \
    stride_8 = stride_4 << 1;                                                  \
    stride_15 = (stride_8 << 1) - stride;                                      \
    src0 = src - 1;                                                            \
    src1 = src0 + stride_8;                                                    \
                                                                               \
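    /* Top-row gradient: the shuffle pairs top[7 + i] with top[7 - i],        \
     * xvhsubw takes their differences and xvmul weights them by i = 1..8. */ \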
    reg0 = __lasx_xvldx(src0, -stride);                                        \
    reg1 = __lasx_xvldx(src, (8 - stride));                                    \
    reg0 = __lasx_xvilvl_d(reg1, reg0);                                        \
    reg0 = __lasx_xvshuf_b(reg0, reg0, shuff);                                 \
    reg0 = __lasx_xvhsubw_hu_bu(reg0, reg0);                                   \
    reg0 = __lasx_xvmul_h(reg0, mult);                                         \
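    /* Left-column gradient: these pixels are strided in memory, so sum       \
     * i * (left[7 + i] - left[7 - i]) with plain scalar loads. */            \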
    res1 = (src1[0] - src0[stride_6]) +                                        \
        2 * (src1[stride] - src0[stride_5]) +                                  \
        3 * (src1[stride_2] - src0[stride_4]) +                                \
        4 * (src1[stride_3] - src0[stride_3]) +                                \
        5 * (src1[stride_4] - src0[stride_2]) +                                \
        6 * (src1[stride_5] - src0[stride]) +                                  \
        7 * (src1[stride_6] - src0[0]) +                                       \
        8 * (src0[stride_15] - src0[stride_1]);                                \
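    /* Three widening horizontal adds collapse the eight weighted             \
     * differences into word 0 of reg0. */                                    \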
    reg0 = __lasx_xvhaddw_w_h(reg0, reg0);                                     \
    reg0 = __lasx_xvhaddw_d_w(reg0, reg0);                                     \
    reg0 = __lasx_xvhaddw_q_d(reg0, reg0);                                     \
    res0 = __lasx_xvpickve2gr_w(reg0, 0);

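/* PRED16X16_PLANE_END expects res0/res1 already rescaled to the plane slopes
 * b (horizontal) and c (vertical). It folds the rounding bias into the DC
 * term a = 16 * (left[15] + top[15] + 1), rewinds it to the top-left sample
 * (res2 = a - 7 * (b + c)) and fills the block from there; the saturating
 * narrowing shifts (xvssrani_*) implement the final >> 5 together with the
 * clip to [0, 255]. */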
#define PRED16X16_PLANE_END                                                    \
    res2 = (src0[stride_15] + src[15 - stride] + 1) << 4;                      \
    res3 = 7 * (res0 + res1);                                                  \
    res2 -= res3;                                                              \
    reg0 = __lasx_xvreplgr2vr_w(res0);                                         \
    reg1 = __lasx_xvreplgr2vr_w(res1);                                         \
    reg2 = __lasx_xvreplgr2vr_w(res2);                                         \
    reg3 = __lasx_xvmul_w(reg0, int_mult1);                                    \
    reg4 = __lasx_xvslli_w(reg0, 3);                                           \
    reg4 = __lasx_xvadd_w(reg4, reg3);                                         \
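    /* reg3/reg4 hold b * {0..7} and b * {8..15}; each iteration adds the     \
     * per-row step c (reg1) twice and stores two finished rows. */           \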
    for (cnt = 8; cnt--;) {                                                    \
        tmp0 = __lasx_xvadd_w(reg2, reg3);                                     \
        tmp1 = __lasx_xvadd_w(reg2, reg4);                                     \
        tmp0 = __lasx_xvssrani_hu_w(tmp1, tmp0, 5);                            \
        tmp0 = __lasx_xvpermi_d(tmp0, 0xD8);                                   \
        reg2 = __lasx_xvadd_w(reg2, reg1);                                     \
        tmp2 = __lasx_xvadd_w(reg2, reg3);                                     \
        tmp3 = __lasx_xvadd_w(reg2, reg4);                                     \
        tmp1 = __lasx_xvssrani_hu_w(tmp3, tmp2, 5);                            \
        tmp1 = __lasx_xvpermi_d(tmp1, 0xD8);                                   \
        tmp0 = __lasx_xvssrani_bu_h(tmp1, tmp0, 0);                            \
        reg2 = __lasx_xvadd_w(reg2, reg1);                                     \
        __lasx_xvstelm_d(tmp0, src, 0, 0);                                     \
        __lasx_xvstelm_d(tmp0, src, 8, 2);                                     \
        src += stride;                                                         \
        __lasx_xvstelm_d(tmp0, src, 0, 1);                                     \
        __lasx_xvstelm_d(tmp0, src, 8, 3);                                     \
        src += stride;                                                         \
    }
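/* H.264: b = (5 * H + 32) >> 6 and c = (5 * V + 32) >> 6, the Intra_16x16
 * plane scaling from the H.264 specification. */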
void ff_h264_pred16x16_plane_h264_8_lasx(uint8_t *src, ptrdiff_t stride)
{
    PRED16X16_PLANE
    res0 = (5 * res0 + 32) >> 6;
    res1 = (5 * res1 + 32) >> 6;
    PRED16X16_PLANE_END
}

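/* RV40 approximates the same 5/64 slope scaling without the rounding bias:
 * (H + (H >> 2)) >> 4 equals 5 * H / 64 for non-negative H. */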
void ff_h264_pred16x16_plane_rv40_8_lasx(uint8_t *src, ptrdiff_t stride)
{
    PRED16X16_PLANE
    res0 = (res0 + (res0 >> 2)) >> 4;
    res1 = (res1 + (res1 >> 2)) >> 4;
    PRED16X16_PLANE_END
}

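/* SVQ3 scales each gradient as (5 * (x / 4)) / 16 and then swaps the
 * horizontal and vertical slopes, as in FFmpeg's C reference
 * implementation. */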
void ff_h264_pred16x16_plane_svq3_8_lasx(uint8_t *src, ptrdiff_t stride)
{
    PRED16X16_PLANE
    cnt  = (5 * (res0 / 4)) / 16;
    res0 = (5 * (res1 / 4)) / 16;
    res1 = cnt;
    PRED16X16_PLANE_END
}