/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>
#include <assert.h>

#include "./vp9_rtcd.h"
#include "./vpx_config.h"
#include "vp9/common/vp9_common.h"
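
// 14-bit fixed-point transform constants, matching the C reference
// implementation:
//   cospi_k_64 = round(16384 * cos(k * Pi / 64))
//   sinpi_k_9  = round(16384 * sqrt(2) * sin(k * Pi / 9) * 2 / 3)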
static const int16_t sinpi_1_9 = 0x14a3;
static const int16_t sinpi_2_9 = 0x26c9;
static const int16_t sinpi_3_9 = 0x3441;
static const int16_t sinpi_4_9 = 0x3b6c;
static const int16_t cospi_8_64 = 0x3b21;
static const int16_t cospi_16_64 = 0x2d41;
static const int16_t cospi_24_64 = 0x187e;
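
// Transpose a 4x4 block of 16-bit coefficients held as two rows per 128-bit
// register: q8 = rows 0-1, q9 = rows 2-3.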
static INLINE void TRANSPOSE4X4(int16x8_t *q8s16, int16x8_t *q9s16) {
  int32x4_t q8s32, q9s32;
  int16x4x2_t d0x2s16, d1x2s16;
  int32x4x2_t q0x2s32;

  d0x2s16 = vtrn_s16(vget_low_s16(*q8s16), vget_high_s16(*q8s16));
  d1x2s16 = vtrn_s16(vget_low_s16(*q9s16), vget_high_s16(*q9s16));

  q8s32 = vreinterpretq_s32_s16(vcombine_s16(d0x2s16.val[0], d0x2s16.val[1]));
  q9s32 = vreinterpretq_s32_s16(vcombine_s16(d1x2s16.val[0], d1x2s16.val[1]));
  q0x2s32 = vtrnq_s32(q8s32, q9s32);

  *q8s16 = vreinterpretq_s16_s32(q0x2s32.val[0]);
  *q9s16 = vreinterpretq_s16_s32(q0x2s32.val[1]);
}
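
// Broadcast the three cosine constants used by the 4-point IDCT pass.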
static INLINE void GENERATE_COSINE_CONSTANTS(int16x4_t *d0s16,
                                             int16x4_t *d1s16,
                                             int16x4_t *d2s16) {
  *d0s16 = vdup_n_s16(cospi_8_64);
  *d1s16 = vdup_n_s16(cospi_16_64);
  *d2s16 = vdup_n_s16(cospi_24_64);
}
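
// Broadcast the four sine constants used by the 4-point IADST pass.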
static INLINE void GENERATE_SINE_CONSTANTS(int16x4_t *d3s16, int16x4_t *d4s16,
                                           int16x4_t *d5s16,
                                           int16x8_t *q3s16) {
  *d3s16 = vdup_n_s16(sinpi_1_9);
  *d4s16 = vdup_n_s16(sinpi_2_9);
  *q3s16 = vdupq_n_s16(sinpi_3_9);
  *d5s16 = vdup_n_s16(sinpi_4_9);
}
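
// One 4-point IDCT pass over the 4x4 block (q8 = rows 0-1, q9 = rows 2-3 on
// entry and exit). Products are accumulated in 32 bits and rounded back to
// 16 bits with a 14-bit shift, matching dct_const_round_shift() in the C
// reference.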
static INLINE void IDCT4x4_1D(int16x4_t *d0s16, int16x4_t *d1s16,
                              int16x4_t *d2s16, int16x8_t *q8s16,
                              int16x8_t *q9s16) {
  int16x4_t d16s16, d17s16, d18s16, d19s16, d23s16, d24s16;
  int16x4_t d26s16, d27s16, d28s16, d29s16;
  int32x4_t q10s32, q13s32, q14s32, q15s32;
  int16x8_t q13s16, q14s16;

  d16s16 = vget_low_s16(*q8s16);
  d17s16 = vget_high_s16(*q8s16);
  d18s16 = vget_low_s16(*q9s16);
  d19s16 = vget_high_s16(*q9s16);

  d23s16 = vadd_s16(d16s16, d18s16);
  d24s16 = vsub_s16(d16s16, d18s16);

  q15s32 = vmull_s16(d17s16, *d2s16);
  q10s32 = vmull_s16(d17s16, *d0s16);
  q13s32 = vmull_s16(d23s16, *d1s16);
  q14s32 = vmull_s16(d24s16, *d1s16);
  q15s32 = vmlsl_s16(q15s32, d19s16, *d0s16);
  q10s32 = vmlal_s16(q10s32, d19s16, *d2s16);

  d26s16 = vrshrn_n_s32(q13s32, 14);
  d27s16 = vrshrn_n_s32(q14s32, 14);
  d29s16 = vrshrn_n_s32(q15s32, 14);
  d28s16 = vrshrn_n_s32(q10s32, 14);

  q13s16 = vcombine_s16(d26s16, d27s16);
  q14s16 = vcombine_s16(d28s16, d29s16);
  *q8s16 = vaddq_s16(q13s16, q14s16);
  *q9s16 = vsubq_s16(q13s16, q14s16);
  *q9s16 = vcombine_s16(vget_high_s16(*q9s16), vget_low_s16(*q9s16));  // vswp
}
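
// One 4-point IADST pass over the 4x4 block; register layout matches
// IDCT4x4_1D.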
static INLINE void IADST4x4_1D(int16x4_t *d3s16, int16x4_t *d4s16,
                               int16x4_t *d5s16, int16x8_t *q3s16,
                               int16x8_t *q8s16, int16x8_t *q9s16) {
  int16x4_t d6s16, d16s16, d17s16, d18s16, d19s16;
  int32x4_t q8s32, q9s32, q10s32, q11s32, q12s32, q13s32, q14s32, q15s32;

  d6s16 = vget_low_s16(*q3s16);

  d16s16 = vget_low_s16(*q8s16);
  d17s16 = vget_high_s16(*q8s16);
  d18s16 = vget_low_s16(*q9s16);
  d19s16 = vget_high_s16(*q9s16);

  q10s32 = vmull_s16(*d3s16, d16s16);
  q11s32 = vmull_s16(*d4s16, d16s16);
  q12s32 = vmull_s16(d6s16, d17s16);
  q13s32 = vmull_s16(*d5s16, d18s16);
  q14s32 = vmull_s16(*d3s16, d18s16);
  q15s32 = vmovl_s16(d16s16);
  q15s32 = vaddw_s16(q15s32, d19s16);
  q8s32 = vmull_s16(*d4s16, d19s16);
  q15s32 = vsubw_s16(q15s32, d18s16);
  q9s32 = vmull_s16(*d5s16, d19s16);

  q10s32 = vaddq_s32(q10s32, q13s32);
  q10s32 = vaddq_s32(q10s32, q8s32);
  q11s32 = vsubq_s32(q11s32, q14s32);
  q8s32 = vdupq_n_s32(sinpi_3_9);
  q11s32 = vsubq_s32(q11s32, q9s32);
  q15s32 = vmulq_s32(q15s32, q8s32);

  q13s32 = vaddq_s32(q10s32, q12s32);
  q10s32 = vaddq_s32(q10s32, q11s32);
  q14s32 = vaddq_s32(q11s32, q12s32);
  q10s32 = vsubq_s32(q10s32, q12s32);

  d16s16 = vrshrn_n_s32(q13s32, 14);
  d17s16 = vrshrn_n_s32(q14s32, 14);
  d18s16 = vrshrn_n_s32(q15s32, 14);
  d19s16 = vrshrn_n_s32(q10s32, 14);

  *q8s16 = vcombine_s16(d16s16, d17s16);
  *q9s16 = vcombine_s16(d18s16, d19s16);
}
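
// 4x4 inverse hybrid transform. tx_type selects IDCT or IADST for the row
// and column passes; the result is added to the prediction in dest with
// saturation.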
void vp9_iht4x4_16_add_neon(const tran_low_t *input, uint8_t *dest, int stride,
                            int tx_type) {
  uint8x8_t d26u8, d27u8;
  int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16;
  uint32x2_t d26u32, d27u32;
  int16x8_t q3s16, q8s16, q9s16;
  uint16x8_t q8u16, q9u16;

  d26u32 = d27u32 = vdup_n_u32(0);

  q8s16 = vld1q_s16(input);
  q9s16 = vld1q_s16(input + 8);

  TRANSPOSE4X4(&q8s16, &q9s16);

  switch (tx_type) {
    case 0:  // idct_idct is not supported. Fall back to C.
      vp9_iht4x4_16_add_c(input, dest, stride, tx_type);
      return;
    case 1:  // iadst_idct
      // generate constants
      GENERATE_COSINE_CONSTANTS(&d0s16, &d1s16, &d2s16);
      GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16);

      // first transform rows
      IDCT4x4_1D(&d0s16, &d1s16, &d2s16, &q8s16, &q9s16);

      // transpose the matrix
      TRANSPOSE4X4(&q8s16, &q9s16);

      // then transform columns
      IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16);
      break;
    case 2:  // idct_iadst
      // generate constants
      GENERATE_COSINE_CONSTANTS(&d0s16, &d1s16, &d2s16);
      GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16);

      // first transform rows
      IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16);

      // transpose the matrix
      TRANSPOSE4X4(&q8s16, &q9s16);

      // then transform columns
      IDCT4x4_1D(&d0s16, &d1s16, &d2s16, &q8s16, &q9s16);
      break;
    case 3:  // iadst_iadst
      // generate constants
      GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16);

      // first transform rows
      IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16);

      // transpose the matrix
      TRANSPOSE4X4(&q8s16, &q9s16);

      // then transform columns
      IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16);
      break;
    default:  // unreachable: tx_type is always in [0, 3]
      assert(0);
      break;
  }
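
  // Final rounding: scale the 2-D transform output down by (x + 8) >> 4, as
  // in the C reference.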
  q8s16 = vrshrq_n_s16(q8s16, 4);
  q9s16 = vrshrq_n_s16(q9s16, 4);
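
  // Load the four 4-byte rows of the predictor, two rows per 64-bit
  // register.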
  d26u32 = vld1_lane_u32((const uint32_t *)dest, d26u32, 0);
  dest += stride;
  d26u32 = vld1_lane_u32((const uint32_t *)dest, d26u32, 1);
  dest += stride;
  d27u32 = vld1_lane_u32((const uint32_t *)dest, d27u32, 0);
  dest += stride;
  d27u32 = vld1_lane_u32((const uint32_t *)dest, d27u32, 1);
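
  // Widen the predictor to 16 bits, add the residual, and saturate back to
  // 8 bits.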
  q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u32(d26u32));
  q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u32(d27u32));

  d26u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16));
  d27u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16));
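
  // Store the rows back in reverse order; dest still points at the last row
  // after the loads above.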
  vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d27u8), 1);
  dest -= stride;
  vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d27u8), 0);
  dest -= stride;
  vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d26u8), 1);
  dest -= stride;
  vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d26u8), 0);
}