/*
 *  Copyright (c) 2018 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>
#include <assert.h>

#include "./vp9_rtcd.h"
#include "./vpx_config.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/arm/neon/vp9_iht_neon.h"
#include "vpx_dsp/arm/highbd_idct_neon.h"
#include "vpx_dsp/arm/idct_neon.h"
#include "vpx_dsp/arm/mem_neon.h"
#include "vpx_dsp/txfm_common.h"

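// 4-point inverse ADST applied to four columns at once, one column per
// 32-bit lane, following the same seven-product sinpi flow as the scalar
// iadst4_c() reference. The products are widened to 64 bits so that the
// 10- and 12-bit intermediates cannot overflow.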
static INLINE void highbd_iadst4(int32x4_t *const io) {
  const int32_t sinpis[4] = { sinpi_1_9, sinpi_2_9, sinpi_3_9, sinpi_4_9 };
  const int32x4_t sinpi = vld1q_s32(sinpis);
  int64x2x2_t s[7], t[4];
  int32x4_t s7;

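  // Seven widening multiplies by the sinpi constants (the scalar code's
  // s0..s6); s7 = x0 - x2 + x3 stays within 32 bits.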
  s[0].val[0] = vmull_lane_s32(vget_low_s32(io[0]), vget_low_s32(sinpi), 0);
  s[0].val[1] = vmull_lane_s32(vget_high_s32(io[0]), vget_low_s32(sinpi), 0);
  s[1].val[0] = vmull_lane_s32(vget_low_s32(io[0]), vget_low_s32(sinpi), 1);
  s[1].val[1] = vmull_lane_s32(vget_high_s32(io[0]), vget_low_s32(sinpi), 1);
  s[2].val[0] = vmull_lane_s32(vget_low_s32(io[1]), vget_high_s32(sinpi), 0);
  s[2].val[1] = vmull_lane_s32(vget_high_s32(io[1]), vget_high_s32(sinpi), 0);
  s[3].val[0] = vmull_lane_s32(vget_low_s32(io[2]), vget_high_s32(sinpi), 1);
  s[3].val[1] = vmull_lane_s32(vget_high_s32(io[2]), vget_high_s32(sinpi), 1);
  s[4].val[0] = vmull_lane_s32(vget_low_s32(io[2]), vget_low_s32(sinpi), 0);
  s[4].val[1] = vmull_lane_s32(vget_high_s32(io[2]), vget_low_s32(sinpi), 0);
  s[5].val[0] = vmull_lane_s32(vget_low_s32(io[3]), vget_low_s32(sinpi), 1);
  s[5].val[1] = vmull_lane_s32(vget_high_s32(io[3]), vget_low_s32(sinpi), 1);
  s[6].val[0] = vmull_lane_s32(vget_low_s32(io[3]), vget_high_s32(sinpi), 1);
  s[6].val[1] = vmull_lane_s32(vget_high_s32(io[3]), vget_high_s32(sinpi), 1);
  s7 = vsubq_s32(io[0], io[2]);
  s7 = vaddq_s32(s7, io[3]);

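  // Combine stage: s0 += s3 + s5, s1 -= s4 + s6, save s2 into s3, then
  // recompute s2 = sinpi_3_9 * s7.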
  s[0].val[0] = vaddq_s64(s[0].val[0], s[3].val[0]);
  s[0].val[1] = vaddq_s64(s[0].val[1], s[3].val[1]);
  s[0].val[0] = vaddq_s64(s[0].val[0], s[5].val[0]);
  s[0].val[1] = vaddq_s64(s[0].val[1], s[5].val[1]);
  s[1].val[0] = vsubq_s64(s[1].val[0], s[4].val[0]);
  s[1].val[1] = vsubq_s64(s[1].val[1], s[4].val[1]);
  s[1].val[0] = vsubq_s64(s[1].val[0], s[6].val[0]);
  s[1].val[1] = vsubq_s64(s[1].val[1], s[6].val[1]);
  s[3] = s[2];
  s[2].val[0] = vmull_lane_s32(vget_low_s32(s7), vget_high_s32(sinpi), 0);
  s[2].val[1] = vmull_lane_s32(vget_high_s32(s7), vget_high_s32(sinpi), 0);

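  // Output stage: t0 = s0 + s3, t1 = s1 + s3, t2 = s2, t3 = s0 + s1 - s3,
  // each rounded back down by DCT_CONST_BITS into 32-bit lanes.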
  t[0].val[0] = vaddq_s64(s[0].val[0], s[3].val[0]);
  t[0].val[1] = vaddq_s64(s[0].val[1], s[3].val[1]);
  t[1].val[0] = vaddq_s64(s[1].val[0], s[3].val[0]);
  t[1].val[1] = vaddq_s64(s[1].val[1], s[3].val[1]);
  t[2] = s[2];
  t[3].val[0] = vaddq_s64(s[0].val[0], s[1].val[0]);
  t[3].val[1] = vaddq_s64(s[0].val[1], s[1].val[1]);
  t[3].val[0] = vsubq_s64(t[3].val[0], s[3].val[0]);
  t[3].val[1] = vsubq_s64(t[3].val[1], s[3].val[1]);
  io[0] = vcombine_s32(vrshrn_n_s64(t[0].val[0], DCT_CONST_BITS),
                       vrshrn_n_s64(t[0].val[1], DCT_CONST_BITS));
  io[1] = vcombine_s32(vrshrn_n_s64(t[1].val[0], DCT_CONST_BITS),
                       vrshrn_n_s64(t[1].val[1], DCT_CONST_BITS));
  io[2] = vcombine_s32(vrshrn_n_s64(t[2].val[0], DCT_CONST_BITS),
                       vrshrn_n_s64(t[2].val[1], DCT_CONST_BITS));
  io[3] = vcombine_s32(vrshrn_n_s64(t[3].val[0], DCT_CONST_BITS),
                       vrshrn_n_s64(t[3].val[1], DCT_CONST_BITS));
}

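// 2-D 4x4 inverse hybrid transform. tx_type selects IDCT or IADST for each
// of the two 1-D passes; the result is added to dest and clamped to the
// range implied by bd (8, 10 or 12).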
void vp9_highbd_iht4x4_16_add_neon(const tran_low_t *input, uint16_t *dest,
                                   int stride, int tx_type, int bd) {
  const int16x8_t max = vdupq_n_s16((1 << bd) - 1);
  int16x8_t a[2];
  int32x4_t c[4];

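  // Load all 16 coefficients as four rows of four 32-bit values.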
  c[0] = vld1q_s32(input);
  c[1] = vld1q_s32(input + 4);
  c[2] = vld1q_s32(input + 8);
  c[3] = vld1q_s32(input + 12);

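  // At 8-bit depth every intermediate fits in 16 bits, so narrow the
  // coefficients and run the 16-bit bd8 kernels.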
  if (bd == 8) {
    a[0] = vcombine_s16(vmovn_s32(c[0]), vmovn_s32(c[1]));
    a[1] = vcombine_s16(vmovn_s32(c[2]), vmovn_s32(c[3]));
    transpose_s16_4x4q(&a[0], &a[1]);

    switch (tx_type) {
      case DCT_DCT:
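        // Row pass, then swap the halves of a[1] (the bd8 IDCT kernel
        // returns rows 2 and 3 in reverse order), transpose, column pass,
        // swap again.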
        idct4x4_16_kernel_bd8(a);
        a[1] = vcombine_s16(vget_high_s16(a[1]), vget_low_s16(a[1]));
        transpose_s16_4x4q(&a[0], &a[1]);
        idct4x4_16_kernel_bd8(a);
        a[1] = vcombine_s16(vget_high_s16(a[1]), vget_low_s16(a[1]));
        break;

      case ADST_DCT:
        idct4x4_16_kernel_bd8(a);
        a[1] = vcombine_s16(vget_high_s16(a[1]), vget_low_s16(a[1]));
        transpose_s16_4x4q(&a[0], &a[1]);
        iadst4(a);
        break;

      case DCT_ADST:
        iadst4(a);
        transpose_s16_4x4q(&a[0], &a[1]);
        idct4x4_16_kernel_bd8(a);
        a[1] = vcombine_s16(vget_high_s16(a[1]), vget_low_s16(a[1]));
        break;

      default:
        assert(tx_type == ADST_ADST);
        iadst4(a);
        transpose_s16_4x4q(&a[0], &a[1]);
        iadst4(a);
        break;
    }
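    // Final rounding shift by 4 removes the transform's remaining scale.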
    a[0] = vrshrq_n_s16(a[0], 4);
    a[1] = vrshrq_n_s16(a[1], 4);
  } else {
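    // 10- and 12-bit depths: keep every intermediate in 32-bit lanes.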
    switch (tx_type) {
      case DCT_DCT: {
        const int32x4_t cospis = vld1q_s32(kCospi32);

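        // The bd10/bd12 IDCT kernels transpose internally, so two
        // back-to-back calls perform both passes; highbd_iadst4() does
        // not, hence the explicit transpose_s32_4x4() calls in the ADST
        // cases below.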
        if (bd == 10) {
          idct4x4_16_kernel_bd10(cospis, c);
          idct4x4_16_kernel_bd10(cospis, c);
        } else {
          idct4x4_16_kernel_bd12(cospis, c);
          idct4x4_16_kernel_bd12(cospis, c);
        }
        break;
      }

      case ADST_DCT: {
        const int32x4_t cospis = vld1q_s32(kCospi32);

        if (bd == 10) {
          idct4x4_16_kernel_bd10(cospis, c);
        } else {
          idct4x4_16_kernel_bd12(cospis, c);
        }
        transpose_s32_4x4(&c[0], &c[1], &c[2], &c[3]);
        highbd_iadst4(c);
        break;
      }

      case DCT_ADST: {
        const int32x4_t cospis = vld1q_s32(kCospi32);

        transpose_s32_4x4(&c[0], &c[1], &c[2], &c[3]);
        highbd_iadst4(c);
        if (bd == 10) {
          idct4x4_16_kernel_bd10(cospis, c);
        } else {
          idct4x4_16_kernel_bd12(cospis, c);
        }
        break;
      }

      default: {
        assert(tx_type == ADST_ADST);
        transpose_s32_4x4(&c[0], &c[1], &c[2], &c[3]);
        highbd_iadst4(c);
        transpose_s32_4x4(&c[0], &c[1], &c[2], &c[3]);
        highbd_iadst4(c);
        break;
      }
    }
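    // Narrow back to 16 bits with a saturating rounding shift by 4, the
    // final scale of the 2-D transform.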
    a[0] = vcombine_s16(vqrshrn_n_s32(c[0], 4), vqrshrn_n_s32(c[1], 4));
    a[1] = vcombine_s16(vqrshrn_n_s32(c[2], 4), vqrshrn_n_s32(c[3], 4));
  }

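  // Each call adds one 8-lane vector (two rows of four pixels) to dest and
  // clamps the result to [0, (1 << bd) - 1].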
  highbd_idct4x4_1_add_kernel1(&dest, stride, a[0], max);
  highbd_idct4x4_1_add_kernel1(&dest, stride, a[1], max);
}