/*
 * Copyright (c) 2015 Shivraj Patil (Shivraj.Patil@imgtec.com)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/mips/generic_macros_msa.h"
#include "pixblockdsp_mips.h"

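/* Element-wise difference of two 8x8 blocks of 8-bit pixels:
 * block[i] = src1[i] - src2[i], widened to signed 16-bit values. */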
static void diff_pixels_msa(int16_t *block, const uint8_t *src1,
                            const uint8_t *src2, int32_t stride)
{
    v16u8 in10, in11, in12, in13, in14, in15, in16, in17;
    v16u8 in20, in21, in22, in23, in24, in25, in26, in27;
    v8i16 out0, out1, out2, out3, out4, out5, out6, out7;

    /* Load eight rows from each source. */
    LD_UB8(src1, stride, in10, in11, in12, in13, in14, in15, in16, in17);
    LD_UB8(src2, stride, in20, in21, in22, in23, in24, in25, in26, in27);
    /* Interleave the low 8 bytes of each src1/src2 row pair. */
    ILVR_B4_SH(in10, in20, in11, in21, in12, in22, in13, in23,
               out0, out1, out2, out3);
    ILVR_B4_SH(in14, in24, in15, in25, in16, in26, in17, in27,
               out4, out5, out6, out7);
    /* Horizontal subtract of each interleaved pair: src1 - src2, as 16-bit. */
    HSUB_UB4_SH(out0, out1, out2, out3, out0, out1, out2, out3);
    HSUB_UB4_SH(out4, out5, out6, out7, out4, out5, out6, out7);
    ST_SH8(out0, out1, out2, out3, out4, out5, out6, out7, block, 8);
}

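/* Zero-extend an 8-pixel-wide block of 8-bit samples to 16-bit values,
 * four rows per iteration. dst_stride is in int16_t units. */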
static void copy_8bit_to_16bit_width8_msa(const uint8_t *src, int32_t src_stride,
                                          int16_t *dst, int32_t dst_stride,
                                          int32_t height)
{
    uint8_t *dst_ptr;
    int32_t cnt;
    v16u8 src0, src1, src2, src3;
    v16i8 zero = { 0 };

    dst_ptr = (uint8_t *) dst;

    for (cnt = (height >> 2); cnt--;) {
        LD_UB4(src, src_stride, src0, src1, src2, src3);
        src += (4 * src_stride);

        /* Interleave with zero to widen each byte to 16 bits. */
        ILVR_B4_UB(zero, src0, zero, src1, zero, src2, zero, src3,
                   src0, src1, src2, src3);

        /* dst_stride counts int16_t elements, so the byte stride is doubled. */
        ST_UB4(src0, src1, src2, src3, dst_ptr, (dst_stride * 2));
        dst_ptr += (4 * 2 * dst_stride);
    }
}

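/* Plain byte copy for blocks whose width is a multiple of 16 and whose
 * height is a multiple of 8, processed one 16-byte column at a time. */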
static void copy_16multx8mult_msa(const uint8_t *src, int32_t src_stride,
                                  uint8_t *dst, int32_t dst_stride,
                                  int32_t height, int32_t width)
{
    int32_t cnt, loop_cnt;
    const uint8_t *src_tmp;
    uint8_t *dst_tmp;
    v16u8 src0, src1, src2, src3, src4, src5, src6, src7;

    for (cnt = (width >> 4); cnt--;) {
        src_tmp = src;
        dst_tmp = dst;

        /* Copy eight rows of the current 16-byte column per iteration. */
        for (loop_cnt = (height >> 3); loop_cnt--;) {
            LD_UB8(src_tmp, src_stride,
                   src0, src1, src2, src3, src4, src5, src6, src7);
            src_tmp += (8 * src_stride);

            ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7,
                   dst_tmp, dst_stride);
            dst_tmp += (8 * dst_stride);
        }

        src += 16;
        dst += 16;
    }
}

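/* Byte copy of a 16-byte-wide block, with unrolled paths for heights that
 * are multiples of 12, 8 or 4. */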
static void copy_width16_msa(const uint8_t *src, int32_t src_stride,
                             uint8_t *dst, int32_t dst_stride,
                             int32_t height)
{
    int32_t cnt;
    v16u8 src0, src1, src2, src3, src4, src5, src6, src7;

    if (0 == height % 12) {
        /* Twelve rows per iteration: eight rows, then four. */
        for (cnt = (height / 12); cnt--;) {
            LD_UB8(src, src_stride,
                   src0, src1, src2, src3, src4, src5, src6, src7);
            src += (8 * src_stride);
            ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7,
                   dst, dst_stride);
            dst += (8 * dst_stride);

            LD_UB4(src, src_stride, src0, src1, src2, src3);
            src += (4 * src_stride);
            ST_UB4(src0, src1, src2, src3, dst, dst_stride);
            dst += (4 * dst_stride);
        }
    } else if (0 == height % 8) {
        copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 16);
    } else if (0 == height % 4) {
        /* Four rows per iteration. */
        for (cnt = (height >> 2); cnt--;) {
            LD_UB4(src, src_stride, src0, src1, src2, src3);
            src += (4 * src_stride);

            ST_UB4(src0, src1, src2, src3, dst, dst_stride);
            dst += (4 * dst_stride);
        }
    }
}

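/* get_pixels for bit depths above 8: the source samples are already 16 bits
 * wide, so each 8-sample row occupies 16 bytes and is copied verbatim into
 * the 8x8 block (stride and the destination stride of 16 are in bytes). */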
void ff_get_pixels_16_msa(int16_t *av_restrict dest, const uint8_t *src,
                          ptrdiff_t stride)
{
    copy_width16_msa(src, stride, (uint8_t *) dest, 16, 8);
}

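/* get_pixels for 8-bit content: widen an 8x8 block of 8-bit pixels into the
 * 16-bit block layout (eight int16_t per row). */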
void ff_get_pixels_8_msa(int16_t *av_restrict dest, const uint8_t *src,
                         ptrdiff_t stride)
{
    copy_8bit_to_16bit_width8_msa(src, stride, dest, 8, 8);
}

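/* Public wrapper: 8x8 block of 16-bit differences, block = src1 - src2. */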
void ff_diff_pixels_msa(int16_t *av_restrict block, const uint8_t *src1,
                        const uint8_t *src2, ptrdiff_t stride)
{
    diff_pixels_msa(block, src1, src2, stride);
}
144
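/*
 * Minimal wiring sketch (illustration only; the init hook name and signature
 * below are assumptions, not defined in this file). The MSA versions would
 * typically be selected from a per-architecture init function roughly like:
 *
 *     void ff_pixblockdsp_init_mips(PixblockDSPContext *c, AVCodecContext *avctx,
 *                                   unsigned high_bit_depth)
 *     {
 *         c->diff_pixels = ff_diff_pixels_msa;
 *         c->get_pixels  = high_bit_depth ? ff_get_pixels_16_msa
 *                                         : ff_get_pixels_8_msa;
 *     }
 */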