/*
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 * Contributed by Hao Chen <chenhao@loongson.cn>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "idctdsp_loongarch.h"
#include "libavutil/loongarch/loongson_intrinsics.h"

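/* Clamp 64 int16_t coefficients from block to [0, 255] and store them
 * as an 8x8 block of bytes at pixels, rows stride bytes apart. */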
void ff_put_pixels_clamped_lasx(const int16_t *block,
                                uint8_t *av_restrict pixels,
                                ptrdiff_t stride)
{
    __m256i b0, b1, b2, b3;
    __m256i temp0, temp1;
    ptrdiff_t stride_2x = stride << 1;
    ptrdiff_t stride_4x = stride << 2;
    ptrdiff_t stride_3x = stride_2x + stride;

    DUP4_ARG2(__lasx_xvld, block, 0, block, 32, block, 64, block, 96,
              b0, b1, b2, b3);
    DUP4_ARG1(__lasx_xvclip255_h, b0, b1, b2, b3, b0, b1, b2, b3);
    DUP2_ARG2(__lasx_xvpickev_b, b1, b0, b3, b2, temp0, temp1);
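    /* xvpickev_b packs within each 128-bit lane, so the doubleword
     * elements of each result hold rows in the order 0, 2, 1, 3; the
     * stores below index the elements accordingly to write the rows
     * out in memory order. */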
    __lasx_xvstelm_d(temp0, pixels, 0, 0);
    __lasx_xvstelm_d(temp0, pixels + stride, 0, 2);
    __lasx_xvstelm_d(temp0, pixels + stride_2x, 0, 1);
    __lasx_xvstelm_d(temp0, pixels + stride_3x, 0, 3);
    pixels += stride_4x;
    __lasx_xvstelm_d(temp1, pixels, 0, 0);
    __lasx_xvstelm_d(temp1, pixels + stride, 0, 2);
    __lasx_xvstelm_d(temp1, pixels + stride_2x, 0, 1);
    __lasx_xvstelm_d(temp1, pixels + stride_3x, 0, 3);
}

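/* Same as ff_put_pixels_clamped_lasx(), but the coefficients are signed
 * values centered on zero: a bias of 128 is added to each halfword
 * before clamping to [0, 255]. */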
void ff_put_signed_pixels_clamped_lasx(const int16_t *block,
                                       uint8_t *av_restrict pixels,
                                       ptrdiff_t stride)
{
    __m256i b0, b1, b2, b3;
    __m256i temp0, temp1;
    __m256i const_128 = {0x0080008000800080, 0x0080008000800080,
                         0x0080008000800080, 0x0080008000800080};
    ptrdiff_t stride_2x = stride << 1;
    ptrdiff_t stride_4x = stride << 2;
    ptrdiff_t stride_3x = stride_2x + stride;

    DUP4_ARG2(__lasx_xvld, block, 0, block, 32, block, 64, block, 96,
              b0, b1, b2, b3);
    DUP4_ARG2(__lasx_xvadd_h, b0, const_128, b1, const_128, b2, const_128,
              b3, const_128, b0, b1, b2, b3);
    DUP4_ARG1(__lasx_xvclip255_h, b0, b1, b2, b3, b0, b1, b2, b3);
    DUP2_ARG2(__lasx_xvpickev_b, b1, b0, b3, b2, temp0, temp1);
    __lasx_xvstelm_d(temp0, pixels, 0, 0);
    __lasx_xvstelm_d(temp0, pixels + stride, 0, 2);
    __lasx_xvstelm_d(temp0, pixels + stride_2x, 0, 1);
    __lasx_xvstelm_d(temp0, pixels + stride_3x, 0, 3);
    pixels += stride_4x;
    __lasx_xvstelm_d(temp1, pixels, 0, 0);
    __lasx_xvstelm_d(temp1, pixels + stride, 0, 2);
    __lasx_xvstelm_d(temp1, pixels + stride_2x, 0, 1);
    __lasx_xvstelm_d(temp1, pixels + stride_3x, 0, 3);
}

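/* Add the 64 int16_t residuals from block to the existing 8x8 block of
 * bytes at pixels, clamping each sum to [0, 255] before storing. */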
void ff_add_pixels_clamped_lasx(const int16_t *block,
                                uint8_t *av_restrict pixels,
                                ptrdiff_t stride)
{
    __m256i b0, b1, b2, b3;
    __m256i p0, p1, p2, p3, p4, p5, p6, p7;
    __m256i temp0, temp1, temp2, temp3;
    uint8_t *pix = pixels;
    ptrdiff_t stride_2x = stride << 1;
    ptrdiff_t stride_4x = stride << 2;
    ptrdiff_t stride_3x = stride_2x + stride;

    DUP4_ARG2(__lasx_xvld, block, 0, block, 32, block, 64, block, 96,
              b0, b1, b2, b3);
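    /* Load the eight 8-byte destination rows one by one. */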
    p0 = __lasx_xvldrepl_d(pix, 0);
    pix += stride;
    p1 = __lasx_xvldrepl_d(pix, 0);
    pix += stride;
    p2 = __lasx_xvldrepl_d(pix, 0);
    pix += stride;
    p3 = __lasx_xvldrepl_d(pix, 0);
    pix += stride;
    p4 = __lasx_xvldrepl_d(pix, 0);
    pix += stride;
    p5 = __lasx_xvldrepl_d(pix, 0);
    pix += stride;
    p6 = __lasx_xvldrepl_d(pix, 0);
    pix += stride;
    p7 = __lasx_xvldrepl_d(pix, 0);
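    /* Combine each pair of consecutive rows into one 256-bit register,
     * matching the row layout of the coefficient vectors b0..b3. */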
    DUP4_ARG3(__lasx_xvpermi_q, p1, p0, 0x20, p3, p2, 0x20, p5, p4, 0x20,
              p7, p6, 0x20, temp0, temp1, temp2, temp3);
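    /* Widen the unsigned pixel bytes to halfwords, add the residuals
     * and clamp the sums to [0, 255]. */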
    DUP4_ARG2(__lasx_xvaddw_h_h_bu, b0, temp0, b1, temp1, b2, temp2, b3, temp3,
              temp0, temp1, temp2, temp3);
    DUP4_ARG1(__lasx_xvclip255_h, temp0, temp1, temp2, temp3,
              temp0, temp1, temp2, temp3);
    DUP2_ARG2(__lasx_xvpickev_b, temp1, temp0, temp3, temp2, temp0, temp1);
    __lasx_xvstelm_d(temp0, pixels, 0, 0);
    __lasx_xvstelm_d(temp0, pixels + stride, 0, 2);
    __lasx_xvstelm_d(temp0, pixels + stride_2x, 0, 1);
    __lasx_xvstelm_d(temp0, pixels + stride_3x, 0, 3);
    pixels += stride_4x;
    __lasx_xvstelm_d(temp1, pixels, 0, 0);
    __lasx_xvstelm_d(temp1, pixels + stride, 0, 2);
    __lasx_xvstelm_d(temp1, pixels + stride_2x, 0, 1);
    __lasx_xvstelm_d(temp1, pixels + stride_3x, 0, 3);
}