/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <immintrin.h>  // AVX2

#include "config/av1_rtcd.h"

#include "aom/aom_integer.h"

static INLINE void read_coeff(const tran_low_t *coeff, intptr_t offset,
                              __m256i *c) {
  const tran_low_t *addr = coeff + offset;

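  // tran_low_t may be 32-bit (e.g. in high bit-depth builds); in that case
  // two 256-bit loads of int32 are saturating-packed down to 16 int16 lanes.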
  if (sizeof(tran_low_t) == 4) {
    const __m256i x0 = _mm256_loadu_si256((const __m256i *)addr);
    const __m256i x1 = _mm256_loadu_si256((const __m256i *)addr + 1);
    const __m256i y = _mm256_packs_epi32(x0, x1);
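    // _mm256_packs_epi32 interleaves the two 128-bit lanes; selecting the
    // 64-bit quarters in order 0, 2, 1, 3 (0xD8) restores sequential order.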
    *c = _mm256_permute4x64_epi64(y, 0xD8);
  } else {
    *c = _mm256_loadu_si256((const __m256i *)addr);
  }
}

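// Illustrative scalar equivalent of read_coeff() (a sketch for reference
// only; read_coeff_scalar_sketch is not part of the library). It mirrors the
// saturating narrow that _mm256_packs_epi32 performs above.
static INLINE void read_coeff_scalar_sketch(const tran_low_t *coeff,
                                            intptr_t offset, int16_t *out) {
  const tran_low_t *addr = coeff + offset;
  for (int j = 0; j < 16; ++j) {
    const tran_low_t v = addr[j];
    // Saturate to the int16 range exactly as _mm256_packs_epi32 does.
    out[j] = (int16_t)(v > 32767 ? 32767 : (v < -32768 ? -32768 : v));
  }
}
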
int64_t av1_block_error_lp_avx2(const int16_t *coeff, const int16_t *dqcoeff,
                                intptr_t block_size) {
  const __m256i zero = _mm256_setzero_si256();
  __m256i sse_256 = zero;
  __m256i sse_hi;
  __m128i sse_128;
  int64_t sse;

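  // A 4x4 block has exactly 16 coefficients, so it needs no accumulation
  // loop.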
  if (block_size == 16) {
    // Load 16 elements for coeff and dqcoeff.
    const __m256i _coeff = _mm256_loadu_si256((const __m256i *)coeff);
    const __m256i _dqcoeff = _mm256_loadu_si256((const __m256i *)dqcoeff);
    // dqcoeff - coeff
    const __m256i diff = _mm256_sub_epi16(_dqcoeff, _coeff);
    // madd (dqcoeff - coeff)
    const __m256i error_lo = _mm256_madd_epi16(diff, diff);
    // Save the higher 64 bit of each 128 bit lane.
    const __m256i error_hi = _mm256_srli_si256(error_lo, 8);
    // Add the higher 64 bit to the low 64 bit.
    const __m256i error = _mm256_add_epi32(error_lo, error_hi);
    // Expand each double word in the lower 64 bits to quad word.
    sse_256 = _mm256_unpacklo_epi32(error, zero);
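    // (Widening to 64 bits here lets this branch share the 64-bit reduction
    // tail below with the loop path.)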
  } else {
    for (int i = 0; i < block_size; i += 16) {
      // Load 16 elements for coeff and dqcoeff.
      const __m256i _coeff = _mm256_loadu_si256((const __m256i *)coeff);
      const __m256i _dqcoeff = _mm256_loadu_si256((const __m256i *)dqcoeff);
      const __m256i diff = _mm256_sub_epi16(_dqcoeff, _coeff);
      const __m256i error = _mm256_madd_epi16(diff, diff);
      // Expand each double word of madd (dqcoeff - coeff) to quad word.
      const __m256i exp_error_lo = _mm256_unpacklo_epi32(error, zero);
      const __m256i exp_error_hi = _mm256_unpackhi_epi32(error, zero);
      // Add each quad word of madd (dqcoeff - coeff).
      sse_256 = _mm256_add_epi64(sse_256, exp_error_lo);
      sse_256 = _mm256_add_epi64(sse_256, exp_error_hi);
      coeff += 16;
      dqcoeff += 16;
    }
  }
  // Save the higher 64 bit of each 128 bit lane.
  sse_hi = _mm256_srli_si256(sse_256, 8);
  // Add the higher 64 bit to the low 64 bit.
  sse_256 = _mm256_add_epi64(sse_256, sse_hi);

  // Add each 64 bit from each of the 128 bit lane of the 256 bit.
  sse_128 = _mm_add_epi64(_mm256_castsi256_si128(sse_256),
                          _mm256_extractf128_si256(sse_256, 1));

  // Store the results.
  _mm_storel_epi64((__m128i *)&sse, sse_128);
  return sse;
}
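
// Illustrative scalar reference for av1_block_error_lp_avx2() (a sketch;
// block_error_lp_scalar_sketch is not part of the library). The vector code
// above computes the same sum of squared differences.
static INLINE int64_t block_error_lp_scalar_sketch(const int16_t *coeff,
                                                   const int16_t *dqcoeff,
                                                   intptr_t block_size) {
  int64_t sse = 0;
  for (intptr_t i = 0; i < block_size; ++i) {
    const int diff = dqcoeff[i] - coeff[i];
    // Accumulate in 64 bits, matching the widened lanes in the SIMD path.
    sse += (int64_t)diff * diff;
  }
  return sse;
}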

int64_t av1_block_error_avx2(const tran_low_t *coeff, const tran_low_t *dqcoeff,
                             intptr_t block_size, int64_t *ssz) {
  __m256i sse_reg, ssz_reg, coeff_reg, dqcoeff_reg;
  __m256i exp_dqcoeff_lo, exp_dqcoeff_hi, exp_coeff_lo, exp_coeff_hi;
  __m256i sse_reg_64hi, ssz_reg_64hi;
  __m128i sse_reg128, ssz_reg128;
  int64_t sse;
  int i;
  const __m256i zero_reg = _mm256_setzero_si256();

  // init sse and ssz registers to zero
  sse_reg = _mm256_setzero_si256();
  ssz_reg = _mm256_setzero_si256();

  for (i = 0; i < block_size; i += 16) {
    // load 32 bytes from coeff and dqcoeff
    read_coeff(coeff, i, &coeff_reg);
    read_coeff(dqcoeff, i, &dqcoeff_reg);
    // dqcoeff - coeff
    dqcoeff_reg = _mm256_sub_epi16(dqcoeff_reg, coeff_reg);
    // madd (dqcoeff - coeff)
    dqcoeff_reg = _mm256_madd_epi16(dqcoeff_reg, dqcoeff_reg);
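    // (Each 32-bit lane now holds the sum of two squared 16-bit diffs.)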
    // madd coeff
    coeff_reg = _mm256_madd_epi16(coeff_reg, coeff_reg);
    // expand each double word of madd (dqcoeff - coeff) to quad word
    exp_dqcoeff_lo = _mm256_unpacklo_epi32(dqcoeff_reg, zero_reg);
    exp_dqcoeff_hi = _mm256_unpackhi_epi32(dqcoeff_reg, zero_reg);
    // expand each double word of madd (coeff) to quad word
    exp_coeff_lo = _mm256_unpacklo_epi32(coeff_reg, zero_reg);
    exp_coeff_hi = _mm256_unpackhi_epi32(coeff_reg, zero_reg);
    // add each quad word of madd (dqcoeff - coeff) and madd (coeff)
    sse_reg = _mm256_add_epi64(sse_reg, exp_dqcoeff_lo);
    ssz_reg = _mm256_add_epi64(ssz_reg, exp_coeff_lo);
    sse_reg = _mm256_add_epi64(sse_reg, exp_dqcoeff_hi);
    ssz_reg = _mm256_add_epi64(ssz_reg, exp_coeff_hi);
  }
  // save the higher 64 bit of each 128 bit lane
  sse_reg_64hi = _mm256_srli_si256(sse_reg, 8);
  ssz_reg_64hi = _mm256_srli_si256(ssz_reg, 8);
  // add the higher 64 bit to the low 64 bit
  sse_reg = _mm256_add_epi64(sse_reg, sse_reg_64hi);
  ssz_reg = _mm256_add_epi64(ssz_reg, ssz_reg_64hi);

  // add each 64 bit from each of the 128 bit lane of the 256 bit
  sse_reg128 = _mm_add_epi64(_mm256_castsi256_si128(sse_reg),
                             _mm256_extractf128_si256(sse_reg, 1));

  ssz_reg128 = _mm_add_epi64(_mm256_castsi256_si128(ssz_reg),
                             _mm256_extractf128_si256(ssz_reg, 1));

  // store the results
  _mm_storel_epi64((__m128i *)(&sse), sse_reg128);

  _mm_storel_epi64((__m128i *)(ssz), ssz_reg128);
  _mm256_zeroupper();
  return sse;
}
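
// Illustrative scalar reference for av1_block_error_avx2() (a sketch;
// block_error_scalar_sketch is not part of the library). Assuming the
// coefficients fit in 16 bits, as the saturating pack in read_coeff()
// requires for exactness, the vector code above computes the same sse/ssz.
static INLINE int64_t block_error_scalar_sketch(const tran_low_t *coeff,
                                                const tran_low_t *dqcoeff,
                                                intptr_t block_size,
                                                int64_t *ssz) {
  int64_t sse = 0, sqcoeff = 0;
  for (intptr_t i = 0; i < block_size; ++i) {
    const int diff = (int)(dqcoeff[i] - coeff[i]);
    sse += (int64_t)diff * diff;              // sum of squared errors
    sqcoeff += (int64_t)coeff[i] * coeff[i];  // sum of squared coefficients
  }
  *ssz = sqcoeff;
  return sse;
}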