/*
 * Copyright (c) 2018, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */
#ifndef AOM_AV1_COMMON_X86_AV1_INV_TXFM_AVX2_H_
#define AOM_AV1_COMMON_X86_AV1_INV_TXFM_AVX2_H_

#include <immintrin.h>

#include "config/aom_config.h"
#include "config/av1_rtcd.h"

#include "aom/aom_integer.h"
#include "aom_dsp/x86/transpose_sse2.h"
#include "aom_dsp/x86/txfm_common_sse2.h"
#include "aom_dsp/x86/txfm_common_avx2.h"

#ifdef __cplusplus
extern "C" {
#endif

// Butterfly for the case where half of the input is zero, so a single input
// vector produces both outputs.
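// The weights are pre-scaled by 8 so that the rounded >> 15 performed by
// _mm256_mulhrs_epi16 leaves a net rounded shift of 12, matching the 12-bit
// cospi constants used by the inverse transforms:
//   out0 ~= round(in * w0 / 2^12),  out1 ~= round(in * w1 / 2^12).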
#define btf_16_w16_0_avx2(w0, w1, in, out0, out1)  \
  do {                                             \
    const __m256i _w0 = _mm256_set1_epi16(w0 * 8); \
    const __m256i _w1 = _mm256_set1_epi16(w1 * 8); \
    const __m256i _in = in;                        \
    out0 = _mm256_mulhrs_epi16(_in, _w0);          \
    out1 = _mm256_mulhrs_epi16(_in, _w1);          \
  } while (0)

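// Multiplies each 16-bit lane of input[0..size-1] by 1/sqrt(2) with rounding:
// NewInvSqrt2 is 1/sqrt(2) in Q12, and the pre-scale by 8 together with the
// rounded >> 15 of _mm256_mulhrs_epi16 gives a net rounded shift of 12.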
static INLINE void round_shift_avx2(const __m256i *input, __m256i *output,
                                    int size) {
  const __m256i scale = _mm256_set1_epi16(NewInvSqrt2 * 8);
  for (int i = 0; i < size; ++i) {
    output[i] = _mm256_mulhrs_epi16(input[i], scale);
  }
}

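// Reconstructs one row of 16 pixels: loads the 16 predicted pixels at output,
// widens them to 16 bits, adds the 16-bit residuals in res with saturation,
// packs back to unsigned 8-bit (clamping to [0, 255]) and stores in place.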
static INLINE void write_recon_w16_avx2(__m256i res, uint8_t *output) {
  __m128i pred = _mm_loadu_si128((__m128i const *)(output));
  __m256i u = _mm256_adds_epi16(_mm256_cvtepu8_epi16(pred), res);
  __m128i y = _mm256_castsi256_si128(
      _mm256_permute4x64_epi64(_mm256_packus_epi16(u, u), 168));
  _mm_storeu_si128((__m128i *)(output), y);
}

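// Adds height rows of 16-wide residuals from in[] to the prediction rows at
// output. When flipud is nonzero the rows of in[] are consumed from bottom to
// top (vertical flip, as used by the flipped-ADST transform types).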
static INLINE void lowbd_write_buffer_16xn_avx2(__m256i *in, uint8_t *output,
                                                int stride, int flipud,
                                                int height) {
  int j = flipud ? (height - 1) : 0;
  const int step = flipud ? -1 : 1;
  for (int i = 0; i < height; ++i, j += step) {
    write_recon_w16_avx2(in[j], output + i * stride);
  }
}

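// AVX2 entry point for the low-bitdepth 2D inverse transform: inverse
// transforms the coefficients in input according to tx_type/tx_size and adds
// the resulting residual to the 8-bit prediction at output (row pitch
// stride). eob marks the last nonzero coefficient in scan order, allowing
// faster paths for sparsely populated blocks.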
void av1_lowbd_inv_txfm2d_add_avx2(const int32_t *input, uint8_t *output,
                                   int stride, TX_TYPE tx_type, TX_SIZE tx_size,
                                   int eob);
#ifdef __cplusplus
}
#endif

#endif  // AOM_AV1_COMMON_X86_AV1_INV_TXFM_AVX2_H_