/*
 *  Copyright (c) 2018 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>
#include <assert.h>

#include "./vp9_rtcd.h"
#include "./vpx_config.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/arm/neon/vp9_iht_neon.h"
#include "vpx_dsp/arm/idct_neon.h"
#include "vpx_dsp/arm/mem_neon.h"
#include "vpx_dsp/arm/transpose_neon.h"

// One pass, over half of the block, of the 16-point inverse ADST used by the
// 16x16 hybrid inverse transform.
//
// The same function serves both passes of the 2-D transform:
//  * Pass 1 (rows):    |output| != NULL. |input| points at tran_low_t
//    coefficients; the result is stored to |output| via
//    idct16x16_store_pass1() and |dest|/|stride|/|highbd_flag| are not used.
//  * Pass 2 (columns): |output| == NULL. |input| points at the int16_t
//    intermediate produced by pass 1; the final result is added into the
//    destination buffer |dest| with row pitch |stride|, using the
//    high-bitdepth store when |highbd_flag| is non-zero.
void vpx_iadst16x16_256_add_half1d(const void *const input, int16_t *output,
                                   void *const dest, const int stride,
                                   const int highbd_flag) {
  int16x8_t in[16], out[16];
  // Cosine constants packed four per vector so the butterfly helpers can
  // select them by lane.
  const int16x4_t c_1_31_5_27 =
      create_s16x4_neon(cospi_1_64, cospi_31_64, cospi_5_64, cospi_27_64);
  const int16x4_t c_9_23_13_19 =
      create_s16x4_neon(cospi_9_64, cospi_23_64, cospi_13_64, cospi_19_64);
  const int16x4_t c_17_15_21_11 =
      create_s16x4_neon(cospi_17_64, cospi_15_64, cospi_21_64, cospi_11_64);
  const int16x4_t c_25_7_29_3 =
      create_s16x4_neon(cospi_25_64, cospi_7_64, cospi_29_64, cospi_3_64);
  const int16x4_t c_4_28_20_12 =
      create_s16x4_neon(cospi_4_64, cospi_28_64, cospi_20_64, cospi_12_64);
  const int16x4_t c_16_n16_8_24 =
      create_s16x4_neon(cospi_16_64, -cospi_16_64, cospi_8_64, cospi_24_64);
  int16x8_t x[16], t[12];
  int32x4_t s0[2], s1[2], s2[2], s3[2], s4[2], s5[2], s6[2], s7[2];
  int32x4_t s8[2], s9[2], s10[2], s11[2], s12[2], s13[2], s14[2], s15[2];
  int i;

  // Load input (16x8). Consecutive 8-sample rows are de-interleaved into
  // in[0..7] and in[8..15] (row 2*k -> in[k], row 2*k+1 -> in[8+k]) so each
  // of the two 8x8 transposes below operates on a contiguous half.
  if (output) {
    const tran_low_t *inputT = (const tran_low_t *)input;
    for (i = 0; i < 16; i++) {
      in[(i & 1) * 8 + (i >> 1)] = load_tran_low_to_s16q(inputT);
      inputT += 8;
    }
  } else {
    const int16_t *inputT = (const int16_t *)input;
    for (i = 0; i < 16; i++) {
      in[(i & 1) * 8 + (i >> 1)] = vld1q_s16(inputT);
      inputT += 8;
    }
  }

  // Transpose
  transpose_s16_8x8(&in[0], &in[1], &in[2], &in[3], &in[4], &in[5], &in[6],
                    &in[7]);
  transpose_s16_8x8(&in[8], &in[9], &in[10], &in[11], &in[12], &in[13], &in[14],
                    &in[15]);

  // iadst16 input permutation.
  x[0] = in[15];
  x[1] = in[0];
  x[2] = in[13];
  x[3] = in[2];
  x[4] = in[11];
  x[5] = in[4];
  x[6] = in[9];
  x[7] = in[6];
  x[8] = in[7];
  x[9] = in[8];
  x[10] = in[5];
  x[11] = in[10];
  x[12] = in[3];
  x[13] = in[12];
  x[14] = in[1];
  x[15] = in[14];

  // stage 1
  iadst_butterfly_lane_0_1_neon(x[0], x[1], c_1_31_5_27, s0, s1);
  iadst_butterfly_lane_2_3_neon(x[2], x[3], c_1_31_5_27, s2, s3);
  iadst_butterfly_lane_0_1_neon(x[4], x[5], c_9_23_13_19, s4, s5);
  iadst_butterfly_lane_2_3_neon(x[6], x[7], c_9_23_13_19, s6, s7);
  iadst_butterfly_lane_0_1_neon(x[8], x[9], c_17_15_21_11, s8, s9);
  iadst_butterfly_lane_2_3_neon(x[10], x[11], c_17_15_21_11, s10, s11);
  iadst_butterfly_lane_0_1_neon(x[12], x[13], c_25_7_29_3, s12, s13);
  iadst_butterfly_lane_2_3_neon(x[14], x[15], c_25_7_29_3, s14, s15);

  x[0] = add_dct_const_round_shift_low_8(s0, s8);
  x[1] = add_dct_const_round_shift_low_8(s1, s9);
  x[2] = add_dct_const_round_shift_low_8(s2, s10);
  x[3] = add_dct_const_round_shift_low_8(s3, s11);
  x[4] = add_dct_const_round_shift_low_8(s4, s12);
  x[5] = add_dct_const_round_shift_low_8(s5, s13);
  x[6] = add_dct_const_round_shift_low_8(s6, s14);
  x[7] = add_dct_const_round_shift_low_8(s7, s15);
  x[8] = sub_dct_const_round_shift_low_8(s0, s8);
  x[9] = sub_dct_const_round_shift_low_8(s1, s9);
  x[10] = sub_dct_const_round_shift_low_8(s2, s10);
  x[11] = sub_dct_const_round_shift_low_8(s3, s11);
  x[12] = sub_dct_const_round_shift_low_8(s4, s12);
  x[13] = sub_dct_const_round_shift_low_8(s5, s13);
  x[14] = sub_dct_const_round_shift_low_8(s6, s14);
  x[15] = sub_dct_const_round_shift_low_8(s7, s15);

  // stage 2
  t[0] = x[0];
  t[1] = x[1];
  t[2] = x[2];
  t[3] = x[3];
  t[4] = x[4];
  t[5] = x[5];
  t[6] = x[6];
  t[7] = x[7];
  iadst_butterfly_lane_0_1_neon(x[8], x[9], c_4_28_20_12, s8, s9);
  iadst_butterfly_lane_2_3_neon(x[10], x[11], c_4_28_20_12, s10, s11);
  iadst_butterfly_lane_1_0_neon(x[13], x[12], c_4_28_20_12, s13, s12);
  iadst_butterfly_lane_3_2_neon(x[15], x[14], c_4_28_20_12, s15, s14);

  x[0] = vaddq_s16(t[0], t[4]);
  x[1] = vaddq_s16(t[1], t[5]);
  x[2] = vaddq_s16(t[2], t[6]);
  x[3] = vaddq_s16(t[3], t[7]);
  x[4] = vsubq_s16(t[0], t[4]);
  x[5] = vsubq_s16(t[1], t[5]);
  x[6] = vsubq_s16(t[2], t[6]);
  x[7] = vsubq_s16(t[3], t[7]);
  x[8] = add_dct_const_round_shift_low_8(s8, s12);
  x[9] = add_dct_const_round_shift_low_8(s9, s13);
  x[10] = add_dct_const_round_shift_low_8(s10, s14);
  x[11] = add_dct_const_round_shift_low_8(s11, s15);
  x[12] = sub_dct_const_round_shift_low_8(s8, s12);
  x[13] = sub_dct_const_round_shift_low_8(s9, s13);
  x[14] = sub_dct_const_round_shift_low_8(s10, s14);
  x[15] = sub_dct_const_round_shift_low_8(s11, s15);

  // stage 3
  t[0] = x[0];
  t[1] = x[1];
  t[2] = x[2];
  t[3] = x[3];
  iadst_butterfly_lane_2_3_neon(x[4], x[5], c_16_n16_8_24, s4, s5);
  iadst_butterfly_lane_3_2_neon(x[7], x[6], c_16_n16_8_24, s7, s6);
  t[8] = x[8];
  t[9] = x[9];
  t[10] = x[10];
  t[11] = x[11];
  iadst_butterfly_lane_2_3_neon(x[12], x[13], c_16_n16_8_24, s12, s13);
  iadst_butterfly_lane_3_2_neon(x[15], x[14], c_16_n16_8_24, s15, s14);

  x[0] = vaddq_s16(t[0], t[2]);
  x[1] = vaddq_s16(t[1], t[3]);
  x[2] = vsubq_s16(t[0], t[2]);
  x[3] = vsubq_s16(t[1], t[3]);
  x[4] = add_dct_const_round_shift_low_8(s4, s6);
  x[5] = add_dct_const_round_shift_low_8(s5, s7);
  x[6] = sub_dct_const_round_shift_low_8(s4, s6);
  x[7] = sub_dct_const_round_shift_low_8(s5, s7);
  x[8] = vaddq_s16(t[8], t[10]);
  x[9] = vaddq_s16(t[9], t[11]);
  x[10] = vsubq_s16(t[8], t[10]);
  x[11] = vsubq_s16(t[9], t[11]);
  x[12] = add_dct_const_round_shift_low_8(s12, s14);
  x[13] = add_dct_const_round_shift_low_8(s13, s15);
  x[14] = sub_dct_const_round_shift_low_8(s12, s14);
  x[15] = sub_dct_const_round_shift_low_8(s13, s15);

  // stage 4
  iadst_half_butterfly_neg_neon(&x[3], &x[2], c_16_n16_8_24);
  iadst_half_butterfly_pos_neon(&x[7], &x[6], c_16_n16_8_24);
  iadst_half_butterfly_pos_neon(&x[11], &x[10], c_16_n16_8_24);
  iadst_half_butterfly_neg_neon(&x[15], &x[14], c_16_n16_8_24);

  // Output permutation with sign flips on outputs 1, 3, 13 and 15.
  out[0] = x[0];
  out[1] = vnegq_s16(x[8]);
  out[2] = x[12];
  out[3] = vnegq_s16(x[4]);
  out[4] = x[6];
  out[5] = x[14];
  out[6] = x[10];
  out[7] = x[2];
  out[8] = x[3];
  out[9] = x[11];
  out[10] = x[15];
  out[11] = x[7];
  out[12] = x[5];
  out[13] = vnegq_s16(x[13]);
  out[14] = x[9];
  out[15] = vnegq_s16(x[1]);

  if (output) {
    // Pass 1: store the intermediate result for the column pass.
    idct16x16_store_pass1(out, output);
  } else {
    // Pass 2: add the final residual into the destination frame buffer.
    if (highbd_flag) {
      idct16x16_add_store_bd8(out, dest, stride);
    } else {
      idct16x16_add_store(out, dest, stride);
    }
  }
}
256 
vp9_iht16x16_256_add_neon(const tran_low_t * input,uint8_t * dest,int stride,int tx_type)257 void vp9_iht16x16_256_add_neon(const tran_low_t *input, uint8_t *dest,
258                                int stride, int tx_type) {
259   static const iht_2d IHT_16[] = {
260     { vpx_idct16x16_256_add_half1d,
261       vpx_idct16x16_256_add_half1d },  // DCT_DCT  = 0
262     { vpx_iadst16x16_256_add_half1d,
263       vpx_idct16x16_256_add_half1d },  // ADST_DCT = 1
264     { vpx_idct16x16_256_add_half1d,
265       vpx_iadst16x16_256_add_half1d },  // DCT_ADST = 2
266     { vpx_iadst16x16_256_add_half1d,
267       vpx_iadst16x16_256_add_half1d }  // ADST_ADST = 3
268   };
269   const iht_2d ht = IHT_16[tx_type];
270   int16_t row_output[16 * 16];
271 
272   // pass 1
273   ht.rows(input, row_output, dest, stride, 0);               // upper 8 rows
274   ht.rows(input + 8 * 16, row_output + 8, dest, stride, 0);  // lower 8 rows
275 
276   // pass 2
277   ht.cols(row_output, NULL, dest, stride, 0);               // left 8 columns
278   ht.cols(row_output + 16 * 8, NULL, dest + 8, stride, 0);  // right 8 columns
279 }
280