1 // Copyright 2019 The libgav1 Authors
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //      http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 #include "src/dsp/intrapred.h"
16 #include "src/utils/cpu.h"
17 
18 #if LIBGAV1_ENABLE_SSE4_1
19 
20 #include <xmmintrin.h>
21 
22 #include <cassert>
23 #include <cstddef>
24 #include <cstdint>
25 #include <cstring>  // memcpy
26 
27 #include "src/dsp/constants.h"
28 #include "src/dsp/dsp.h"
29 #include "src/dsp/x86/common_sse4.h"
30 #include "src/utils/common.h"
31 
32 namespace libgav1 {
33 namespace dsp {
34 namespace low_bitdepth {
35 namespace {
36 
37 // Note these constants are duplicated from intrapred.cc to give the compiler
38 // visibility of the values. This helps reduce loads and aids the creation of
39 // the inverse weights (see the worked example after the table).
40 constexpr uint8_t kSmoothWeights[] = {
41     // block dimension = 4
42     255, 149, 85, 64,
43     // block dimension = 8
44     255, 197, 146, 105, 73, 50, 37, 32,
45     // block dimension = 16
46     255, 225, 196, 170, 145, 123, 102, 84, 68, 54, 43, 33, 26, 20, 17, 16,
47     // block dimension = 32
48     255, 240, 225, 210, 196, 182, 169, 157, 145, 133, 122, 111, 101, 92, 83, 74,
49     66, 59, 52, 45, 39, 34, 29, 25, 21, 17, 14, 12, 10, 9, 8, 8,
50     // block dimension = 64
51     255, 248, 240, 233, 225, 218, 210, 203, 196, 189, 182, 176, 169, 163, 156,
52     150, 144, 138, 133, 127, 121, 116, 111, 106, 101, 96, 91, 86, 82, 77, 73,
53     69, 65, 61, 57, 54, 50, 47, 44, 41, 38, 35, 32, 29, 27, 25, 22, 20, 18, 16,
54     15, 13, 12, 10, 9, 8, 7, 6, 6, 5, 5, 4, 4, 4};
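// Worked example of the inverse weights these functions derive: with
// scale = 256, the inverse of a weight w is (256 - w), so the block
// dimension = 4 row {255, 149, 85, 64} pairs with {1, 107, 171, 192}. The
// weight scales the near edge pixel (left or top) and the inverse scales the
// far corner pixel (top-right or bottom-left).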
55 
56 template <int y_mask>
57 inline void WriteSmoothHorizontalSum4(void* const dest, const __m128i& left,
58                                       const __m128i& weights,
59                                       const __m128i& scaled_top_right,
60                                       const __m128i& round) {
61   const __m128i left_y = _mm_shuffle_epi32(left, y_mask);
62   const __m128i weighted_left_y = _mm_mullo_epi16(left_y, weights);
63   const __m128i pred_sum = _mm_add_epi32(scaled_top_right, weighted_left_y);
64   // Equivalent to RightShiftWithRounding(pred[x][y], 8).
65   const __m128i pred = _mm_srli_epi32(_mm_add_epi32(pred_sum, round), 8);
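  // The shuffle control 0x0C080400 selects byte 0 of each 32-bit lane (source
  // bytes 0, 4, 8, 12), packing the four results into the low 4 bytes for
  // Store4.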
66   const __m128i cvtepi32_epi8 = _mm_set1_epi32(0x0C080400);
67   Store4(dest, _mm_shuffle_epi8(pred, cvtepi32_epi8));
68 }
69 
70 template <int y_mask>
71 inline __m128i SmoothVerticalSum4(const __m128i& top, const __m128i& weights,
72                                   const __m128i& scaled_bottom_left) {
73   const __m128i weights_y = _mm_shuffle_epi32(weights, y_mask);
74   const __m128i weighted_top_y = _mm_mullo_epi16(top, weights_y);
75   const __m128i scaled_bottom_left_y =
76       _mm_shuffle_epi32(scaled_bottom_left, y_mask);
77   return _mm_add_epi32(scaled_bottom_left_y, weighted_top_y);
78 }
79 
80 template <int y_mask>
81 inline void WriteSmoothVerticalSum4(uint8_t* dest, const __m128i& top,
82                                     const __m128i& weights,
83                                     const __m128i& scaled_bottom_left,
84                                     const __m128i& round) {
85   __m128i pred_sum =
86       SmoothVerticalSum4<y_mask>(top, weights, scaled_bottom_left);
87   // Equivalent to RightShiftWithRounding(pred[x][y], 8).
88   pred_sum = _mm_srli_epi32(_mm_add_epi32(pred_sum, round), 8);
89   const __m128i cvtepi32_epi8 = _mm_set1_epi32(0x0C080400);
90   Store4(dest, _mm_shuffle_epi8(pred_sum, cvtepi32_epi8));
91 }
92 
93 // For SMOOTH_H, |pixels| is the repeated left value for the row. For SMOOTH_V,
94 // |pixels| is a segment of the top row or the whole top row, and |weights| is
95 // repeated.
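// In scalar terms this evaluates pixel * weight + scaled_corner per lane,
// e.g. for SMOOTH_H: left[y] * weights_w[x] + top_right * (256 - weights_w[x]),
// where the caller passes the second term precomputed as |scaled_corner|.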
96 inline __m128i SmoothDirectionalSum8(const __m128i& pixels,
97                                      const __m128i& weights,
98                                      const __m128i& scaled_corner) {
99   const __m128i weighted_px = _mm_mullo_epi16(pixels, weights);
100   return _mm_add_epi16(scaled_corner, weighted_px);
101 }
102 
103 inline void WriteSmoothDirectionalSum8(uint8_t* dest, const __m128i& pixels,
104                                        const __m128i& weights,
105                                        const __m128i& scaled_corner,
106                                        const __m128i& round) {
107   const __m128i pred_sum =
108       SmoothDirectionalSum8(pixels, weights, scaled_corner);
109   // Equivalent to RightShiftWithRounding(pred[x][y], 8).
110   const __m128i pred = _mm_srli_epi16(_mm_add_epi16(pred_sum, round), 8);
111   StoreLo8(dest, _mm_packus_epi16(pred, pred));
112 }
113 
114 // For Horizontal, pixels1 and pixels2 are the same repeated value. For
115 // Vertical, weights1 and weights2 are the same, and scaled_corner1 and
116 // scaled_corner2 are the same.
117 inline void WriteSmoothDirectionalSum16(uint8_t* dest, const __m128i& pixels1,
118                                         const __m128i& pixels2,
119                                         const __m128i& weights1,
120                                         const __m128i& weights2,
121                                         const __m128i& scaled_corner1,
122                                         const __m128i& scaled_corner2,
123                                         const __m128i& round) {
124   const __m128i weighted_px1 = _mm_mullo_epi16(pixels1, weights1);
125   const __m128i weighted_px2 = _mm_mullo_epi16(pixels2, weights2);
126   const __m128i pred_sum1 = _mm_add_epi16(scaled_corner1, weighted_px1);
127   const __m128i pred_sum2 = _mm_add_epi16(scaled_corner2, weighted_px2);
128   // Equivalent to RightShiftWithRounding(pred[x][y], 8).
129   const __m128i pred1 = _mm_srli_epi16(_mm_add_epi16(pred_sum1, round), 8);
130   const __m128i pred2 = _mm_srli_epi16(_mm_add_epi16(pred_sum2, round), 8);
131   StoreUnaligned16(dest, _mm_packus_epi16(pred1, pred2));
132 }
133 
134 template <int y_mask>
135 inline void WriteSmoothPredSum4(uint8_t* const dest, const __m128i& top,
136                                 const __m128i& left, const __m128i& weights_x,
137                                 const __m128i& weights_y,
138                                 const __m128i& scaled_bottom_left,
139                                 const __m128i& scaled_top_right,
140                                 const __m128i& round) {
141   const __m128i left_y = _mm_shuffle_epi32(left, y_mask);
142   const __m128i weighted_left_y = _mm_mullo_epi32(left_y, weights_x);
143   const __m128i weight_y = _mm_shuffle_epi32(weights_y, y_mask);
144   const __m128i weighted_top = _mm_mullo_epi32(weight_y, top);
145   const __m128i scaled_bottom_left_y =
146       _mm_shuffle_epi32(scaled_bottom_left, y_mask);
147   const __m128i col_pred = _mm_add_epi32(scaled_bottom_left_y, weighted_left_y);
148   const __m128i row_pred = _mm_add_epi32(scaled_top_right, weighted_top);
149   const __m128i pred_sum = _mm_add_epi32(row_pred, col_pred);
150 
151   // Equivalent to RightShiftWithRounding(pred[x][y], 9).
152   const __m128i pred = _mm_srli_epi32(_mm_add_epi32(pred_sum, round), 9);
153 
154   const __m128i cvtepi32_epi8 = _mm_set1_epi32(0x0C080400);
155   Store4(dest, _mm_shuffle_epi8(pred, cvtepi32_epi8));
156 }
157 
158 // pixels[0]: above and below_pred interleave vector
159 // pixels[1]: left vector
160 // pixels[2]: right_pred vector
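// Interleaving the top row with the repeated bottom-left pixel lets a single
// _mm_madd_epi16 against interleaved (weight_y, 256 - weight_y) pairs compute
// top[x] * weight_y + bottom_left * (256 - weight_y) in one step; see
// WriteSmoothPred4x8.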
161 inline void LoadSmoothPixels4(const uint8_t* above, const uint8_t* left,
162                               const int height, __m128i* pixels) {
163   if (height == 4) {
164     pixels[1] = Load4(left);
165   } else if (height == 8) {
166     pixels[1] = LoadLo8(left);
167   } else {
168     pixels[1] = LoadUnaligned16(left);
169   }
170 
171   const __m128i bottom_left = _mm_set1_epi16(left[height - 1]);
172   const __m128i top = _mm_cvtepu8_epi16(Load4(above));
173   pixels[0] = _mm_unpacklo_epi16(top, bottom_left);
174   pixels[2] = _mm_set1_epi16(above[3]);
175 }
176 
177 // weight_h[0]: weight_h vector
178 // weight_h[1]: scale - weight_h vector
179 // weight_h[2]: same as [0], second half for height = 16 only
180 // weight_h[3]: same as [1], second half for height = 16 only
181 // weight_w[0]: weights_w and scale - weights_w interleave vector
182 inline void LoadSmoothWeights4(const uint8_t* weight_array, const int height,
183                                __m128i* weight_h, __m128i* weight_w) {
184   const __m128i scale = _mm_set1_epi16(256);
185   const __m128i x_weights = Load4(weight_array);
186   weight_h[0] = _mm_cvtepu8_epi16(x_weights);
187   weight_h[1] = _mm_sub_epi16(scale, weight_h[0]);
188   weight_w[0] = _mm_unpacklo_epi16(weight_h[0], weight_h[1]);
189 
190   if (height == 8) {
191     const __m128i y_weights = LoadLo8(weight_array + 4);
192     weight_h[0] = _mm_cvtepu8_epi16(y_weights);
193     weight_h[1] = _mm_sub_epi16(scale, weight_h[0]);
194   } else if (height == 16) {
195     const __m128i zero = _mm_setzero_si128();
196     const __m128i y_weights = LoadUnaligned16(weight_array + 12);
197     weight_h[0] = _mm_cvtepu8_epi16(y_weights);
198     weight_h[1] = _mm_sub_epi16(scale, weight_h[0]);
199     weight_h[2] = _mm_unpackhi_epi8(y_weights, zero);
200     weight_h[3] = _mm_sub_epi16(scale, weight_h[2]);
201   }
202 }
203 
204 inline void WriteSmoothPred4x8(const __m128i* pixel, const __m128i* weights_y,
205                                const __m128i* weight_x, uint8_t* dst,
206                                const ptrdiff_t stride,
207                                const bool use_second_half) {
208   const __m128i round = _mm_set1_epi32(256);
209   const __m128i mask_increment = _mm_set1_epi16(0x0202);
210   const __m128i cvtepi32_epi8 = _mm_set1_epi32(0x0C080400);
211   const __m128i zero = _mm_setzero_si128();
212   const __m128i left = use_second_half ? _mm_unpackhi_epi8(pixel[1], zero)
213                                        : _mm_unpacklo_epi8(pixel[1], zero);
214   __m128i y_select = _mm_set1_epi16(0x0100);
215 
216   for (int i = 0; i < 8; ++i) {
217     const __m128i weight_y = _mm_shuffle_epi8(weights_y[0], y_select);
218     const __m128i inverted_weight_y = _mm_shuffle_epi8(weights_y[1], y_select);
219     const __m128i interleaved_weights =
220         _mm_unpacklo_epi16(weight_y, inverted_weight_y);
221     __m128i vertical_pred = _mm_madd_epi16(pixel[0], interleaved_weights);
222 
223     __m128i horizontal_vect = _mm_shuffle_epi8(left, y_select);
224     horizontal_vect = _mm_unpacklo_epi16(horizontal_vect, pixel[2]);
225     __m128i sum = _mm_madd_epi16(horizontal_vect, weight_x[0]);
226 
227     sum = _mm_add_epi32(vertical_pred, sum);
228     sum = _mm_add_epi32(sum, round);
229     sum = _mm_srai_epi32(sum, 9);
230 
231     sum = _mm_shuffle_epi8(sum, cvtepi32_epi8);
232     Store4(dst, sum);
233     dst += stride;
234 
235     y_select = _mm_add_epi16(y_select, mask_increment);
236   }
237 }
238 
239 // The interleaving approach has some overhead that causes it to underperform in
240 // the 4x4 case.
241 void Smooth4x4_SSE4_1(void* const dest, const ptrdiff_t stride,
242                       const void* top_row, const void* left_column) {
243   const __m128i top = _mm_cvtepu8_epi32(Load4(top_row));
244   const __m128i left = _mm_cvtepu8_epi32(Load4(left_column));
245   const __m128i weights = _mm_cvtepu8_epi32(Load4(kSmoothWeights));
246   const __m128i scale = _mm_set1_epi32(256);
247   // Fourth short is top_row[3].
248   const __m128i top_right = _mm_shuffle_epi32(top, 0xFF);
249   // Fourth short is left_column[3].
250   const __m128i bottom_left = _mm_shuffle_epi32(left, 0xFF);
251   const __m128i inverted_weights = _mm_sub_epi32(scale, weights);
252   const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right);
253   const __m128i scaled_bottom_left =
254       _mm_mullo_epi16(inverted_weights, bottom_left);
255   auto* dst = static_cast<uint8_t*>(dest);
256 // AV1 spec 7.11.2.6 (3) describes the sum:
257 // smoothPred[y][x:x+3] = weighted_top + scaled_right + weighted_left[y] +
258 // scaled_bottom[y]. This could be a loop, but the shuffle immediates must
259 // be compile-time constants, so each row is written out explicitly.
260   WriteSmoothPredSum4<0>(dst, top, left, weights, weights, scaled_bottom_left,
261                          scaled_top_right, scale);
262   dst += stride;
263   WriteSmoothPredSum4<0x55>(dst, top, left, weights, weights,
264                             scaled_bottom_left, scaled_top_right, scale);
265   dst += stride;
266   WriteSmoothPredSum4<0xAA>(dst, top, left, weights, weights,
267                             scaled_bottom_left, scaled_top_right, scale);
268   dst += stride;
269   WriteSmoothPredSum4<0xFF>(dst, top, left, weights, weights,
270                             scaled_bottom_left, scaled_top_right, scale);
271 }
272 
273 void Smooth4x8_SSE4_1(void* const dest, const ptrdiff_t stride,
274                       const void* top_row, const void* left_column) {
275   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
276   const auto* const top_ptr = static_cast<const uint8_t*>(top_row);
277   __m128i weights_x[1];
278   __m128i weights_y[2];
279   LoadSmoothWeights4(kSmoothWeights, 8, weights_y, weights_x);
280   __m128i pixels[3];
281   LoadSmoothPixels4(top_ptr, left_ptr, 8, pixels);
282   auto* dst = static_cast<uint8_t*>(dest);
283   WriteSmoothPred4x8(pixels, weights_y, weights_x, dst, stride, false);
284 }
285 
286 void Smooth4x16_SSE4_1(void* const dest, const ptrdiff_t stride,
287                        const void* top_row, const void* left_column) {
288   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
289   const auto* const top_ptr = static_cast<const uint8_t*>(top_row);
290   __m128i weights_x[1];
291   __m128i weights_y[4];
292   LoadSmoothWeights4(kSmoothWeights, 16, weights_y, weights_x);
293   __m128i pixels[3];
294   LoadSmoothPixels4(top_ptr, left_ptr, 16, pixels);
295   auto* dst = static_cast<uint8_t*>(dest);
296   WriteSmoothPred4x8(pixels, weights_y, weights_x, dst, stride, false);
297   dst += stride << 3;
298   WriteSmoothPred4x8(pixels, &weights_y[2], weights_x, dst, stride, true);
299 }
300 
301 // pixels[0]: above and below_pred interleave vector, first half
302 // pixels[1]: above and below_pred interleave vector, second half
303 // pixels[2]: left vector
304 // pixels[3]: right_pred vector
305 // pixels[4]: same as pixels[0] (populated for height == 32 only)
306 // pixels[5]: same as pixels[1] (populated for height == 32 only)
307 // pixels[6]: left vector + 16 (height == 32 only)
308 // pixels[7]: same as pixels[3] (height == 32 only)
309 inline void LoadSmoothPixels8(const uint8_t* above, const uint8_t* left,
310                               const int height, __m128i* pixels) {
311   const __m128i bottom_left = _mm_set1_epi16(left[height - 1]);
312   __m128i top_row = _mm_cvtepu8_epi16(LoadLo8(above));
313   pixels[0] = _mm_unpacklo_epi16(top_row, bottom_left);
314   pixels[1] = _mm_unpackhi_epi16(top_row, bottom_left);
315 
316   pixels[3] = _mm_set1_epi16(above[7]);
317 
318   if (height == 4) {
319     pixels[2] = Load4(left);
320   } else if (height == 8) {
321     pixels[2] = LoadLo8(left);
322   } else if (height == 16) {
323     pixels[2] = LoadUnaligned16(left);
324   } else {
325     pixels[2] = LoadUnaligned16(left);
326     pixels[4] = pixels[0];
327     pixels[5] = pixels[1];
328     pixels[6] = LoadUnaligned16(left + 16);
329     pixels[7] = pixels[3];
330   }
331 }
332 
333 // weight_h[0]: weight_h vector
334 // weight_h[1]: scale - weight_h vector
335 // weight_h[2]: same as [0], offset 8
336 // weight_h[3]: same as [1], offset 8
337 // weight_h[4]: same as [0], offset 16
338 // weight_h[5]: same as [1], offset 16
339 // weight_h[6]: same as [0], offset 24
340 // weight_h[7]: same as [1], offset 24
341 // weight_w[0]: weights_w and scale - weights_w interleave vector, first half
342 // weight_w[1]: weights_w and scale - weights_w interleave vector, second half
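// The weights for a block dimension d start at kSmoothWeights + (d - 4), i.e.
// at offsets 0, 4, 12 and 28 for d = 4, 8, 16 and 32; the loads below at
// offsets 4, 12, 28 and 44 index that table (44 is the second half of d = 32).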
343 inline void LoadSmoothWeights8(const uint8_t* weight_array, const int height,
344                                __m128i* weight_w, __m128i* weight_h) {
345   const int offset = (height < 8) ? 0 : 4;
346   __m128i loaded_weights = LoadUnaligned16(&weight_array[offset]);
347   weight_h[0] = _mm_cvtepu8_epi16(loaded_weights);
348   const __m128i inverter = _mm_set1_epi16(256);
349   weight_h[1] = _mm_sub_epi16(inverter, weight_h[0]);
350 
351   if (height == 4) {
352     loaded_weights = _mm_srli_si128(loaded_weights, 4);
353     __m128i weights_x = _mm_cvtepu8_epi16(loaded_weights);
354     __m128i inverted_weights_x = _mm_sub_epi16(inverter, weights_x);
355     weight_w[0] = _mm_unpacklo_epi16(weights_x, inverted_weights_x);
356     weight_w[1] = _mm_unpackhi_epi16(weights_x, inverted_weights_x);
357   } else {
358     weight_w[0] = _mm_unpacklo_epi16(weight_h[0], weight_h[1]);
359     weight_w[1] = _mm_unpackhi_epi16(weight_h[0], weight_h[1]);
360   }
361 
362   if (height == 16) {
363     const __m128i zero = _mm_setzero_si128();
364     loaded_weights = LoadUnaligned16(weight_array + 12);
365     weight_h[0] = _mm_cvtepu8_epi16(loaded_weights);
366     weight_h[1] = _mm_sub_epi16(inverter, weight_h[0]);
367     weight_h[2] = _mm_unpackhi_epi8(loaded_weights, zero);
368     weight_h[3] = _mm_sub_epi16(inverter, weight_h[2]);
369   } else if (height == 32) {
370     const __m128i zero = _mm_setzero_si128();
371     const __m128i weight_lo = LoadUnaligned16(weight_array + 28);
372     weight_h[0] = _mm_cvtepu8_epi16(weight_lo);
373     weight_h[1] = _mm_sub_epi16(inverter, weight_h[0]);
374     weight_h[2] = _mm_unpackhi_epi8(weight_lo, zero);
375     weight_h[3] = _mm_sub_epi16(inverter, weight_h[2]);
376     const __m128i weight_hi = LoadUnaligned16(weight_array + 44);
377     weight_h[4] = _mm_cvtepu8_epi16(weight_hi);
378     weight_h[5] = _mm_sub_epi16(inverter, weight_h[4]);
379     weight_h[6] = _mm_unpackhi_epi8(weight_hi, zero);
380     weight_h[7] = _mm_sub_epi16(inverter, weight_h[6]);
381   }
382 }
383 
384 inline void WriteSmoothPred8xH(const __m128i* pixels, const __m128i* weights_x,
385                                const __m128i* weights_y, const int height,
386                                uint8_t* dst, const ptrdiff_t stride,
387                                const bool use_second_half) {
388   const __m128i round = _mm_set1_epi32(256);
389   const __m128i mask_increment = _mm_set1_epi16(0x0202);
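  // The shuffle control bytes {0, 2, 4, ..., 14} keep the low byte of each
  // 16-bit pair, collapsing the zero-interleaved pack result down to the 8
  // prediction bytes written by StoreLo8.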
390   const __m128i cvt_epu16_epi8 = _mm_set_epi32(0, 0, 0xe0c0a08, 0x6040200);
391 
392   const __m128i zero = _mm_setzero_si128();
393   const __m128i left = use_second_half ? _mm_unpackhi_epi8(pixels[2], zero)
394                                        : _mm_unpacklo_epi8(pixels[2], zero);
395   __m128i y_select = _mm_set1_epi16(0x100);
396 
397   for (int i = 0; i < height; ++i) {
398     const __m128i weight_y = _mm_shuffle_epi8(weights_y[0], y_select);
399     const __m128i inverted_weight_y = _mm_shuffle_epi8(weights_y[1], y_select);
400     const __m128i interleaved_weights =
401         _mm_unpacklo_epi16(weight_y, inverted_weight_y);
402     const __m128i vertical_sum0 =
403         _mm_madd_epi16(pixels[0], interleaved_weights);
404     const __m128i vertical_sum1 =
405         _mm_madd_epi16(pixels[1], interleaved_weights);
406 
407     __m128i horizontal_pixels = _mm_shuffle_epi8(left, y_select);
408     horizontal_pixels = _mm_unpacklo_epi16(horizontal_pixels, pixels[3]);
409     const __m128i horizontal_sum0 =
410         _mm_madd_epi16(horizontal_pixels, weights_x[0]);
411     const __m128i horizontal_sum1 =
412         _mm_madd_epi16(horizontal_pixels, weights_x[1]);
413 
414     __m128i sum0 = _mm_add_epi32(vertical_sum0, horizontal_sum0);
415     sum0 = _mm_add_epi32(sum0, round);
416     sum0 = _mm_srai_epi32(sum0, 9);
417 
418     __m128i sum1 = _mm_add_epi32(vertical_sum1, horizontal_sum1);
419     sum1 = _mm_add_epi32(sum1, round);
420     sum1 = _mm_srai_epi32(sum1, 9);
421 
422     sum0 = _mm_packus_epi16(sum0, sum1);
423     sum0 = _mm_shuffle_epi8(sum0, cvt_epu16_epi8);
424     StoreLo8(dst, sum0);
425     dst += stride;
426 
427     y_select = _mm_add_epi16(y_select, mask_increment);
428   }
429 }
430 
431 void Smooth8x4_SSE4_1(void* const dest, const ptrdiff_t stride,
432                       const void* top_row, const void* left_column) {
433   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
434   const auto* const top_ptr = static_cast<const uint8_t*>(top_row);
435   __m128i pixels[4];
436   LoadSmoothPixels8(top_ptr, left_ptr, 4, pixels);
437 
438   __m128i weights_x[2], weights_y[2];
439   LoadSmoothWeights8(kSmoothWeights, 4, weights_x, weights_y);
440 
441   auto* dst = static_cast<uint8_t*>(dest);
442   WriteSmoothPred8xH(pixels, weights_x, weights_y, 4, dst, stride, false);
443 }
444 
445 void Smooth8x8_SSE4_1(void* const dest, const ptrdiff_t stride,
446                       const void* top_row, const void* left_column) {
447   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
448   const auto* const top_ptr = static_cast<const uint8_t*>(top_row);
449 
450   __m128i pixels[4];
451   LoadSmoothPixels8(top_ptr, left_ptr, 8, pixels);
452 
453   __m128i weights_x[2], weights_y[2];
454   LoadSmoothWeights8(kSmoothWeights, 8, weights_x, weights_y);
455 
456   auto* dst = static_cast<uint8_t*>(dest);
457   WriteSmoothPred8xH(pixels, weights_x, weights_y, 8, dst, stride, false);
458 }
459 
460 void Smooth8x16_SSE4_1(void* const dest, const ptrdiff_t stride,
461                        const void* top_row, const void* left_column) {
462   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
463   const auto* const top_ptr = static_cast<const uint8_t*>(top_row);
464   __m128i pixels[4];
465   LoadSmoothPixels8(top_ptr, left_ptr, 16, pixels);
466 
467   __m128i weights_x[2], weights_y[4];
468   LoadSmoothWeights8(kSmoothWeights, 16, weights_x, weights_y);
469 
470   auto* dst = static_cast<uint8_t*>(dest);
471   WriteSmoothPred8xH(pixels, weights_x, weights_y, 8, dst, stride, false);
472   dst += stride << 3;
473   WriteSmoothPred8xH(pixels, weights_x, &weights_y[2], 8, dst, stride, true);
474 }
475 
476 void Smooth8x32_SSE4_1(void* const dest, const ptrdiff_t stride,
477                        const void* top_row, const void* left_column) {
478   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
479   const auto* const top_ptr = static_cast<const uint8_t*>(top_row);
480   __m128i pixels[8];
481   LoadSmoothPixels8(top_ptr, left_ptr, 32, pixels);
482 
483   __m128i weights_x[2], weights_y[8];
484   LoadSmoothWeights8(kSmoothWeights, 32, weights_x, weights_y);
485 
486   auto* dst = static_cast<uint8_t*>(dest);
487   WriteSmoothPred8xH(pixels, weights_x, weights_y, 8, dst, stride, false);
488   dst += stride << 3;
489   WriteSmoothPred8xH(pixels, weights_x, &weights_y[2], 8, dst, stride, true);
490   dst += stride << 3;
491   WriteSmoothPred8xH(&pixels[4], weights_x, &weights_y[4], 8, dst, stride,
492                      false);
493   dst += stride << 3;
494   WriteSmoothPred8xH(&pixels[4], weights_x, &weights_y[6], 8, dst, stride,
495                      true);
496 }
497 
498 template <int width, int height>
499 void SmoothWxH(void* const dest, const ptrdiff_t stride,
500                const void* const top_row, const void* const left_column) {
501   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
502   const auto* const top_ptr = static_cast<const uint8_t*>(top_row);
503   const uint8_t* const sm_weights_h = kSmoothWeights + height - 4;
504   const uint8_t* const sm_weights_w = kSmoothWeights + width - 4;
505   const __m128i zero = _mm_setzero_si128();
506   const __m128i scale_value = _mm_set1_epi16(256);
507   const __m128i bottom_left = _mm_cvtsi32_si128(left_ptr[height - 1]);
508   const __m128i top_right = _mm_set1_epi16(top_ptr[width - 1]);
509   const __m128i round = _mm_set1_epi32(256);
510   auto* dst = static_cast<uint8_t*>(dest);
511   for (int y = 0; y < height; ++y) {
512     const __m128i weights_y = _mm_cvtsi32_si128(sm_weights_h[y]);
513     const __m128i left_y = _mm_cvtsi32_si128(left_ptr[y]);
514     const __m128i scale_m_weights_y = _mm_sub_epi16(scale_value, weights_y);
515     __m128i scaled_bottom_left =
516         _mm_mullo_epi16(scale_m_weights_y, bottom_left);
517     const __m128i weight_left_y =
518         _mm_shuffle_epi32(_mm_unpacklo_epi16(weights_y, left_y), 0);
519     scaled_bottom_left = _mm_add_epi32(scaled_bottom_left, round);
520     scaled_bottom_left = _mm_shuffle_epi32(scaled_bottom_left, 0);
521     for (int x = 0; x < width; x += 8) {
522       const __m128i top_x = LoadLo8(top_ptr + x);
523       const __m128i weights_x = LoadLo8(sm_weights_w + x);
524       const __m128i top_weights_x = _mm_unpacklo_epi8(top_x, weights_x);
525       const __m128i top_weights_x_lo = _mm_cvtepu8_epi16(top_weights_x);
526       const __m128i top_weights_x_hi = _mm_unpackhi_epi8(top_weights_x, zero);
527 
528       // Each madd multiplies pixels by the weights of the opposite direction
529       // (top_x * weights_y and weights_x * left_y), as the names indicate.
530       __m128i pred_lo = _mm_madd_epi16(top_weights_x_lo, weight_left_y);
531       __m128i pred_hi = _mm_madd_epi16(top_weights_x_hi, weight_left_y);
532 
533       // |scaled_bottom_left| is always scaled by the same weight each row, so
534       // we only derive |scaled_top_right| values here.
535       const __m128i inverted_weights_x =
536           _mm_sub_epi16(scale_value, _mm_cvtepu8_epi16(weights_x));
537       const __m128i scaled_top_right =
538           _mm_mullo_epi16(inverted_weights_x, top_right);
539       const __m128i scaled_top_right_lo = _mm_cvtepu16_epi32(scaled_top_right);
540       const __m128i scaled_top_right_hi =
541           _mm_unpackhi_epi16(scaled_top_right, zero);
542       pred_lo = _mm_add_epi32(pred_lo, scaled_bottom_left);
543       pred_hi = _mm_add_epi32(pred_hi, scaled_bottom_left);
544       pred_lo = _mm_add_epi32(pred_lo, scaled_top_right_lo);
545       pred_hi = _mm_add_epi32(pred_hi, scaled_top_right_hi);
546 
547       // The round value for RightShiftWithRounding was added with
548       // |scaled_bottom_left|.
549       pred_lo = _mm_srli_epi32(pred_lo, 9);
550       pred_hi = _mm_srli_epi32(pred_hi, 9);
551       const __m128i pred = _mm_packus_epi16(pred_lo, pred_hi);
552       StoreLo8(dst + x, _mm_packus_epi16(pred, pred));
553     }
554     dst += stride;
555   }
556 }
557 
558 void SmoothHorizontal4x4_SSE4_1(void* dest, const ptrdiff_t stride,
559                                 const void* top_row, const void* left_column) {
560   const auto* const top_ptr = static_cast<const uint8_t*>(top_row);
561   const __m128i top_right = _mm_set1_epi32(top_ptr[3]);
562   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
563   const __m128i left = _mm_cvtepu8_epi32(Load4(left_ptr));
564   const __m128i weights = _mm_cvtepu8_epi32(Load4(kSmoothWeights));
565   __m128i scale = _mm_set1_epi32(256);
566   const __m128i inverted_weights = _mm_sub_epi32(scale, weights);
567   const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right);
568   scale = _mm_set1_epi32(128);
569   auto* dst = static_cast<uint8_t*>(dest);
570   WriteSmoothHorizontalSum4<0>(dst, left, weights, scaled_top_right, scale);
571   dst += stride;
572   WriteSmoothHorizontalSum4<0x55>(dst, left, weights, scaled_top_right, scale);
573   dst += stride;
574   WriteSmoothHorizontalSum4<0xAA>(dst, left, weights, scaled_top_right, scale);
575   dst += stride;
576   WriteSmoothHorizontalSum4<0xFF>(dst, left, weights, scaled_top_right, scale);
577 }
578 
579 void SmoothHorizontal4x8_SSE4_1(void* const dest, const ptrdiff_t stride,
580                                 const void* const top_row,
581                                 const void* const left_column) {
582   const auto* const top = static_cast<const uint8_t*>(top_row);
583   const __m128i top_right = _mm_set1_epi32(top[3]);
584   const __m128i weights = _mm_cvtepu8_epi32(Load4(kSmoothWeights));
585   __m128i scale = _mm_set1_epi32(256);
586   const __m128i inverted_weights = _mm_sub_epi32(scale, weights);
587   const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right);
588   scale = _mm_set1_epi32(128);
589   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
590   __m128i left = _mm_cvtepu8_epi32(Load4(left_column));
591   auto* dst = static_cast<uint8_t*>(dest);
592   WriteSmoothHorizontalSum4<0>(dst, left, weights, scaled_top_right, scale);
593   dst += stride;
594   WriteSmoothHorizontalSum4<0x55>(dst, left, weights, scaled_top_right, scale);
595   dst += stride;
596   WriteSmoothHorizontalSum4<0xAA>(dst, left, weights, scaled_top_right, scale);
597   dst += stride;
598   WriteSmoothHorizontalSum4<0xFF>(dst, left, weights, scaled_top_right, scale);
599   dst += stride;
600 
601   left = _mm_cvtepu8_epi32(Load4(left_ptr + 4));
602   WriteSmoothHorizontalSum4<0>(dst, left, weights, scaled_top_right, scale);
603   dst += stride;
604   WriteSmoothHorizontalSum4<0x55>(dst, left, weights, scaled_top_right, scale);
605   dst += stride;
606   WriteSmoothHorizontalSum4<0xAA>(dst, left, weights, scaled_top_right, scale);
607   dst += stride;
608   WriteSmoothHorizontalSum4<0xFF>(dst, left, weights, scaled_top_right, scale);
609 }
610 
611 void SmoothHorizontal4x16_SSE4_1(void* const dest, const ptrdiff_t stride,
612                                  const void* const top_row,
613                                  const void* const left_column) {
614   const auto* const top = static_cast<const uint8_t*>(top_row);
615   const __m128i top_right = _mm_set1_epi32(top[3]);
616   const __m128i weights = _mm_cvtepu8_epi32(Load4(kSmoothWeights));
617   __m128i scale = _mm_set1_epi32(256);
618   const __m128i inverted_weights = _mm_sub_epi32(scale, weights);
619   const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right);
620   scale = _mm_set1_epi32(128);
621   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
622   __m128i left = _mm_cvtepu8_epi32(Load4(left_column));
623   auto* dst = static_cast<uint8_t*>(dest);
624   WriteSmoothHorizontalSum4<0>(dst, left, weights, scaled_top_right, scale);
625   dst += stride;
626   WriteSmoothHorizontalSum4<0x55>(dst, left, weights, scaled_top_right, scale);
627   dst += stride;
628   WriteSmoothHorizontalSum4<0xAA>(dst, left, weights, scaled_top_right, scale);
629   dst += stride;
630   WriteSmoothHorizontalSum4<0xFF>(dst, left, weights, scaled_top_right, scale);
631   dst += stride;
632 
633   left = _mm_cvtepu8_epi32(Load4(left_ptr + 4));
634   WriteSmoothHorizontalSum4<0>(dst, left, weights, scaled_top_right, scale);
635   dst += stride;
636   WriteSmoothHorizontalSum4<0x55>(dst, left, weights, scaled_top_right, scale);
637   dst += stride;
638   WriteSmoothHorizontalSum4<0xAA>(dst, left, weights, scaled_top_right, scale);
639   dst += stride;
640   WriteSmoothHorizontalSum4<0xFF>(dst, left, weights, scaled_top_right, scale);
641   dst += stride;
642 
643   left = _mm_cvtepu8_epi32(Load4(left_ptr + 8));
644   WriteSmoothHorizontalSum4<0>(dst, left, weights, scaled_top_right, scale);
645   dst += stride;
646   WriteSmoothHorizontalSum4<0x55>(dst, left, weights, scaled_top_right, scale);
647   dst += stride;
648   WriteSmoothHorizontalSum4<0xAA>(dst, left, weights, scaled_top_right, scale);
649   dst += stride;
650   WriteSmoothHorizontalSum4<0xFF>(dst, left, weights, scaled_top_right, scale);
651   dst += stride;
652 
653   left = _mm_cvtepu8_epi32(Load4(left_ptr + 12));
654   WriteSmoothHorizontalSum4<0>(dst, left, weights, scaled_top_right, scale);
655   dst += stride;
656   WriteSmoothHorizontalSum4<0x55>(dst, left, weights, scaled_top_right, scale);
657   dst += stride;
658   WriteSmoothHorizontalSum4<0xAA>(dst, left, weights, scaled_top_right, scale);
659   dst += stride;
660   WriteSmoothHorizontalSum4<0xFF>(dst, left, weights, scaled_top_right, scale);
661 }
662 
663 void SmoothHorizontal8x4_SSE4_1(void* const dest, const ptrdiff_t stride,
664                                 const void* const top_row,
665                                 const void* const left_column) {
666   const auto* const top = static_cast<const uint8_t*>(top_row);
667   const __m128i top_right = _mm_set1_epi16(top[7]);
668   const __m128i left = _mm_cvtepu8_epi16(Load4(left_column));
669   const __m128i weights = _mm_cvtepu8_epi16(LoadLo8(kSmoothWeights + 4));
670   __m128i scale = _mm_set1_epi16(256);
671   const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
672   const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right);
673   scale = _mm_set1_epi16(128);
674   __m128i y_select = _mm_set1_epi32(0x01000100);
675   __m128i left_y = _mm_shuffle_epi8(left, y_select);
676   auto* dst = static_cast<uint8_t*>(dest);
677   WriteSmoothDirectionalSum8(dst, left_y, weights, scaled_top_right, scale);
678   dst += stride;
679   y_select = _mm_set1_epi32(0x03020302);
680   left_y = _mm_shuffle_epi8(left, y_select);
681   WriteSmoothDirectionalSum8(dst, left_y, weights, scaled_top_right, scale);
682   dst += stride;
683   y_select = _mm_set1_epi32(0x05040504);
684   left_y = _mm_shuffle_epi8(left, y_select);
685   WriteSmoothDirectionalSum8(dst, left_y, weights, scaled_top_right, scale);
686   dst += stride;
687   y_select = _mm_set1_epi32(0x07060706);
688   left_y = _mm_shuffle_epi8(left, y_select);
689   WriteSmoothDirectionalSum8(dst, left_y, weights, scaled_top_right, scale);
690 }
691 
692 void SmoothHorizontal8x8_SSE4_1(void* const dest, const ptrdiff_t stride,
693                                 const void* const top_row,
694                                 const void* const left_column) {
695   const auto* const top = static_cast<const uint8_t*>(top_row);
696   const __m128i top_right = _mm_set1_epi16(top[7]);
697   const __m128i left = _mm_cvtepu8_epi16(LoadLo8(left_column));
698   const __m128i weights = _mm_cvtepu8_epi16(LoadLo8(kSmoothWeights + 4));
699   __m128i scale = _mm_set1_epi16(256);
700   const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
701   const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right);
702   scale = _mm_set1_epi16(128);
703   auto* dst = static_cast<uint8_t*>(dest);
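  // Each |y_mask| value repeats the byte indices 2*y and 2*y + 1 across the
  // register, so the shuffle below broadcasts the y-th 16-bit left pixel to
  // every lane of |left_y|.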
704   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
705     const __m128i y_select = _mm_set1_epi32(y_mask);
706     const __m128i left_y = _mm_shuffle_epi8(left, y_select);
707     WriteSmoothDirectionalSum8(dst, left_y, weights, scaled_top_right, scale);
708     dst += stride;
709   }
710 }
711 
712 void SmoothHorizontal8x16_SSE4_1(void* const dest, const ptrdiff_t stride,
713                                  const void* const top_row,
714                                  const void* const left_column) {
715   const auto* const top = static_cast<const uint8_t*>(top_row);
716   const __m128i top_right = _mm_set1_epi16(top[7]);
717   const __m128i weights = _mm_cvtepu8_epi16(LoadLo8(kSmoothWeights + 4));
718   __m128i scale = _mm_set1_epi16(256);
719   const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
720   const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right);
721   scale = _mm_set1_epi16(128);
722   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
723   __m128i left = _mm_cvtepu8_epi16(LoadLo8(left_column));
724   auto* dst = static_cast<uint8_t*>(dest);
725   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
726     const __m128i y_select = _mm_set1_epi32(y_mask);
727     const __m128i left_y = _mm_shuffle_epi8(left, y_select);
728     WriteSmoothDirectionalSum8(dst, left_y, weights, scaled_top_right, scale);
729     dst += stride;
730   }
731   left = _mm_cvtepu8_epi16(LoadLo8(left_ptr + 8));
732   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
733     const __m128i y_select = _mm_set1_epi32(y_mask);
734     const __m128i left_y = _mm_shuffle_epi8(left, y_select);
735     WriteSmoothDirectionalSum8(dst, left_y, weights, scaled_top_right, scale);
736     dst += stride;
737   }
738 }
739 
740 void SmoothHorizontal8x32_SSE4_1(void* const dest, const ptrdiff_t stride,
741                                  const void* const top_row,
742                                  const void* const left_column) {
743   const auto* const top = static_cast<const uint8_t*>(top_row);
744   const __m128i top_right = _mm_set1_epi16(top[7]);
745   const __m128i weights = _mm_cvtepu8_epi16(LoadLo8(kSmoothWeights + 4));
746   __m128i scale = _mm_set1_epi16(256);
747   const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
748   const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right);
749   scale = _mm_set1_epi16(128);
750   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
751   __m128i left = _mm_cvtepu8_epi16(LoadLo8(left_column));
752   auto* dst = static_cast<uint8_t*>(dest);
753   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
754     const __m128i y_select = _mm_set1_epi32(y_mask);
755     const __m128i left_y = _mm_shuffle_epi8(left, y_select);
756     WriteSmoothDirectionalSum8(dst, left_y, weights, scaled_top_right, scale);
757     dst += stride;
758   }
759   left = _mm_cvtepu8_epi16(LoadLo8(left_ptr + 8));
760   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
761     const __m128i y_select = _mm_set1_epi32(y_mask);
762     const __m128i left_y = _mm_shuffle_epi8(left, y_select);
763     WriteSmoothDirectionalSum8(dst, left_y, weights, scaled_top_right, scale);
764     dst += stride;
765   }
766   left = _mm_cvtepu8_epi16(LoadLo8(left_ptr + 16));
767   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
768     const __m128i y_select = _mm_set1_epi32(y_mask);
769     const __m128i left_y = _mm_shuffle_epi8(left, y_select);
770     WriteSmoothDirectionalSum8(dst, left_y, weights, scaled_top_right, scale);
771     dst += stride;
772   }
773   left = _mm_cvtepu8_epi16(LoadLo8(left_ptr + 24));
774   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
775     const __m128i y_select = _mm_set1_epi32(y_mask);
776     const __m128i left_y = _mm_shuffle_epi8(left, y_select);
777     WriteSmoothDirectionalSum8(dst, left_y, weights, scaled_top_right, scale);
778     dst += stride;
779   }
780 }
781 
782 void SmoothHorizontal16x4_SSE4_1(void* const dest, const ptrdiff_t stride,
783                                  const void* const top_row,
784                                  const void* const left_column) {
785   const auto* const top = static_cast<const uint8_t*>(top_row);
786   const __m128i top_right = _mm_set1_epi16(top[15]);
787   const __m128i left = _mm_cvtepu8_epi16(Load4(left_column));
788   const __m128i weights = LoadUnaligned16(kSmoothWeights + 12);
789   __m128i scale = _mm_set1_epi16(256);
790   const __m128i weights1 = _mm_cvtepu8_epi16(weights);
791   const __m128i weights2 = _mm_cvtepu8_epi16(_mm_srli_si128(weights, 8));
792   const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
793   const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
794   const __m128i scaled_top_right1 =
795       _mm_mullo_epi16(inverted_weights1, top_right);
796   const __m128i scaled_top_right2 =
797       _mm_mullo_epi16(inverted_weights2, top_right);
798   scale = _mm_set1_epi16(128);
799   __m128i y_mask = _mm_set1_epi32(0x01000100);
800   __m128i left_y = _mm_shuffle_epi8(left, y_mask);
801   auto* dst = static_cast<uint8_t*>(dest);
802   WriteSmoothDirectionalSum16(dst, left_y, left_y, weights1, weights2,
803                               scaled_top_right1, scaled_top_right2, scale);
804   dst += stride;
805   y_mask = _mm_set1_epi32(0x03020302);
806   left_y = _mm_shuffle_epi8(left, y_mask);
807   WriteSmoothDirectionalSum16(dst, left_y, left_y, weights1, weights2,
808                               scaled_top_right1, scaled_top_right2, scale);
809   dst += stride;
810   y_mask = _mm_set1_epi32(0x05040504);
811   left_y = _mm_shuffle_epi8(left, y_mask);
812   WriteSmoothDirectionalSum16(dst, left_y, left_y, weights1, weights2,
813                               scaled_top_right1, scaled_top_right2, scale);
814   dst += stride;
815   y_mask = _mm_set1_epi32(0x07060706);
816   left_y = _mm_shuffle_epi8(left, y_mask);
817   WriteSmoothDirectionalSum16(dst, left_y, left_y, weights1, weights2,
818                               scaled_top_right1, scaled_top_right2, scale);
819 }
820 
821 void SmoothHorizontal16x8_SSE4_1(void* const dest, const ptrdiff_t stride,
822                                  const void* const top_row,
823                                  const void* const left_column) {
824   const auto* const top = static_cast<const uint8_t*>(top_row);
825   const __m128i top_right = _mm_set1_epi16(top[15]);
826   const __m128i left = _mm_cvtepu8_epi16(LoadLo8(left_column));
827   const __m128i weights = LoadUnaligned16(kSmoothWeights + 12);
828   __m128i scale = _mm_set1_epi16(256);
829   const __m128i weights1 = _mm_cvtepu8_epi16(weights);
830   const __m128i weights2 = _mm_cvtepu8_epi16(_mm_srli_si128(weights, 8));
831   const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
832   const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
833   const __m128i scaled_top_right1 =
834       _mm_mullo_epi16(inverted_weights1, top_right);
835   const __m128i scaled_top_right2 =
836       _mm_mullo_epi16(inverted_weights2, top_right);
837   scale = _mm_set1_epi16(128);
838   auto* dst = static_cast<uint8_t*>(dest);
839   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
840     const __m128i y_select = _mm_set1_epi32(y_mask);
841     const __m128i left_y = _mm_shuffle_epi8(left, y_select);
842     WriteSmoothDirectionalSum16(dst, left_y, left_y, weights1, weights2,
843                                 scaled_top_right1, scaled_top_right2, scale);
844     dst += stride;
845   }
846 }
847 
848 void SmoothHorizontal16x16_SSE4_1(void* const dest, const ptrdiff_t stride,
849                                   const void* const top_row,
850                                   const void* const left_column) {
851   const auto* const top = static_cast<const uint8_t*>(top_row);
852   const __m128i top_right = _mm_set1_epi16(top[15]);
853   const __m128i weights = LoadUnaligned16(kSmoothWeights + 12);
854   __m128i scale = _mm_set1_epi16(256);
855   const __m128i weights1 = _mm_cvtepu8_epi16(weights);
856   const __m128i weights2 = _mm_cvtepu8_epi16(_mm_srli_si128(weights, 8));
857   const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
858   const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
859   const __m128i scaled_top_right1 =
860       _mm_mullo_epi16(inverted_weights1, top_right);
861   const __m128i scaled_top_right2 =
862       _mm_mullo_epi16(inverted_weights2, top_right);
863   scale = _mm_set1_epi16(128);
864   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
865   __m128i left = _mm_cvtepu8_epi16(LoadLo8(left_column));
866   auto* dst = static_cast<uint8_t*>(dest);
867   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
868     const __m128i y_select = _mm_set1_epi32(y_mask);
869     const __m128i left_y = _mm_shuffle_epi8(left, y_select);
870     WriteSmoothDirectionalSum16(dst, left_y, left_y, weights1, weights2,
871                                 scaled_top_right1, scaled_top_right2, scale);
872     dst += stride;
873   }
874   left = _mm_cvtepu8_epi16(LoadLo8(left_ptr + 8));
875   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
876     const __m128i y_select = _mm_set1_epi32(y_mask);
877     const __m128i left_y = _mm_shuffle_epi8(left, y_select);
878     WriteSmoothDirectionalSum16(dst, left_y, left_y, weights1, weights2,
879                                 scaled_top_right1, scaled_top_right2, scale);
880     dst += stride;
881   }
882 }
883 
884 void SmoothHorizontal16x32_SSE4_1(void* const dest, const ptrdiff_t stride,
885                                   const void* const top_row,
886                                   const void* const left_column) {
887   const auto* const top = static_cast<const uint8_t*>(top_row);
888   const __m128i top_right = _mm_set1_epi16(top[15]);
889   const __m128i weights = LoadUnaligned16(kSmoothWeights + 12);
890   __m128i scale = _mm_set1_epi16(256);
891   const __m128i weights1 = _mm_cvtepu8_epi16(weights);
892   const __m128i weights2 = _mm_cvtepu8_epi16(_mm_srli_si128(weights, 8));
893   const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
894   const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
895   const __m128i scaled_top_right1 =
896       _mm_mullo_epi16(inverted_weights1, top_right);
897   const __m128i scaled_top_right2 =
898       _mm_mullo_epi16(inverted_weights2, top_right);
899   scale = _mm_set1_epi16(128);
900   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
901   __m128i left = _mm_cvtepu8_epi16(LoadLo8(left_column));
902   auto* dst = static_cast<uint8_t*>(dest);
903   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
904     const __m128i y_select = _mm_set1_epi32(y_mask);
905     const __m128i left_y = _mm_shuffle_epi8(left, y_select);
906     WriteSmoothDirectionalSum16(dst, left_y, left_y, weights1, weights2,
907                                 scaled_top_right1, scaled_top_right2, scale);
908     dst += stride;
909   }
910   left = _mm_cvtepu8_epi16(LoadLo8(left_ptr + 8));
911   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
912     const __m128i y_select = _mm_set1_epi32(y_mask);
913     const __m128i left_y = _mm_shuffle_epi8(left, y_select);
914     WriteSmoothDirectionalSum16(dst, left_y, left_y, weights1, weights2,
915                                 scaled_top_right1, scaled_top_right2, scale);
916     dst += stride;
917   }
918   left = _mm_cvtepu8_epi16(LoadLo8(left_ptr + 16));
919   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
920     const __m128i y_select = _mm_set1_epi32(y_mask);
921     const __m128i left_y = _mm_shuffle_epi8(left, y_select);
922     WriteSmoothDirectionalSum16(dst, left_y, left_y, weights1, weights2,
923                                 scaled_top_right1, scaled_top_right2, scale);
924     dst += stride;
925   }
926   left = _mm_cvtepu8_epi16(LoadLo8(left_ptr + 24));
927   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
928     const __m128i y_select = _mm_set1_epi32(y_mask);
929     const __m128i left_y = _mm_shuffle_epi8(left, y_select);
930     WriteSmoothDirectionalSum16(dst, left_y, left_y, weights1, weights2,
931                                 scaled_top_right1, scaled_top_right2, scale);
932     dst += stride;
933   }
934 }
935 
936 void SmoothHorizontal16x64_SSE4_1(void* const dest, const ptrdiff_t stride,
937                                   const void* const top_row,
938                                   const void* const left_column) {
939   const auto* const top = static_cast<const uint8_t*>(top_row);
940   const __m128i top_right = _mm_set1_epi16(top[15]);
941   const __m128i weights = LoadUnaligned16(kSmoothWeights + 12);
942   __m128i scale = _mm_set1_epi16(256);
943   const __m128i weights1 = _mm_cvtepu8_epi16(weights);
944   const __m128i weights2 = _mm_cvtepu8_epi16(_mm_srli_si128(weights, 8));
945   const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
946   const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
947   const __m128i scaled_top_right1 =
948       _mm_mullo_epi16(inverted_weights1, top_right);
949   const __m128i scaled_top_right2 =
950       _mm_mullo_epi16(inverted_weights2, top_right);
951   scale = _mm_set1_epi16(128);
952   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
953   auto* dst = static_cast<uint8_t*>(dest);
954   for (int left_offset = 0; left_offset < 64; left_offset += 8) {
955     const __m128i left = _mm_cvtepu8_epi16(LoadLo8(left_ptr + left_offset));
956     for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
957       const __m128i y_select = _mm_set1_epi32(y_mask);
958       const __m128i left_y = _mm_shuffle_epi8(left, y_select);
959       WriteSmoothDirectionalSum16(dst, left_y, left_y, weights1, weights2,
960                                   scaled_top_right1, scaled_top_right2, scale);
961       dst += stride;
962     }
963   }
964 }
965 
966 void SmoothHorizontal32x8_SSE4_1(void* const dest, const ptrdiff_t stride,
967                                  const void* const top_row,
968                                  const void* const left_column) {
969   const auto* const top = static_cast<const uint8_t*>(top_row);
970   const __m128i top_right = _mm_set1_epi16(top[31]);
971   const __m128i left = _mm_cvtepu8_epi16(LoadLo8(left_column));
972   const __m128i weights_lo = LoadUnaligned16(kSmoothWeights + 28);
973   const __m128i weights_hi = LoadUnaligned16(kSmoothWeights + 44);
974   __m128i scale = _mm_set1_epi16(256);
975   const __m128i weights1 = _mm_cvtepu8_epi16(weights_lo);
976   const __m128i weights2 = _mm_cvtepu8_epi16(_mm_srli_si128(weights_lo, 8));
977   const __m128i weights3 = _mm_cvtepu8_epi16(weights_hi);
978   const __m128i weights4 = _mm_cvtepu8_epi16(_mm_srli_si128(weights_hi, 8));
979   const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
980   const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
981   const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
982   const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
983   const __m128i scaled_top_right1 =
984       _mm_mullo_epi16(inverted_weights1, top_right);
985   const __m128i scaled_top_right2 =
986       _mm_mullo_epi16(inverted_weights2, top_right);
987   const __m128i scaled_top_right3 =
988       _mm_mullo_epi16(inverted_weights3, top_right);
989   const __m128i scaled_top_right4 =
990       _mm_mullo_epi16(inverted_weights4, top_right);
991   scale = _mm_set1_epi16(128);
992   auto* dst = static_cast<uint8_t*>(dest);
993   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
994     __m128i y_select = _mm_set1_epi32(y_mask);
995     __m128i left_y = _mm_shuffle_epi8(left, y_select);
996     WriteSmoothDirectionalSum16(dst, left_y, left_y, weights1, weights2,
997                                 scaled_top_right1, scaled_top_right2, scale);
998     WriteSmoothDirectionalSum16(dst + 16, left_y, left_y, weights3, weights4,
999                                 scaled_top_right3, scaled_top_right4, scale);
1000     dst += stride;
1001   }
1002 }
1003 
1004 void SmoothHorizontal32x16_SSE4_1(void* const dest, const ptrdiff_t stride,
1005                                   const void* const top_row,
1006                                   const void* const left_column) {
1007   const auto* const top = static_cast<const uint8_t*>(top_row);
1008   const __m128i top_right = _mm_set1_epi16(top[31]);
1009   const __m128i left1 = _mm_cvtepu8_epi16(LoadLo8(left_column));
1010   const __m128i weights_lo = LoadUnaligned16(kSmoothWeights + 28);
1011   const __m128i weights_hi = LoadUnaligned16(kSmoothWeights + 44);
1012   __m128i scale = _mm_set1_epi16(256);
1013   const __m128i weights1 = _mm_cvtepu8_epi16(weights_lo);
1014   const __m128i weights2 = _mm_cvtepu8_epi16(_mm_srli_si128(weights_lo, 8));
1015   const __m128i weights3 = _mm_cvtepu8_epi16(weights_hi);
1016   const __m128i weights4 = _mm_cvtepu8_epi16(_mm_srli_si128(weights_hi, 8));
1017   const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
1018   const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
1019   const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
1020   const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
1021   const __m128i scaled_top_right1 =
1022       _mm_mullo_epi16(inverted_weights1, top_right);
1023   const __m128i scaled_top_right2 =
1024       _mm_mullo_epi16(inverted_weights2, top_right);
1025   const __m128i scaled_top_right3 =
1026       _mm_mullo_epi16(inverted_weights3, top_right);
1027   const __m128i scaled_top_right4 =
1028       _mm_mullo_epi16(inverted_weights4, top_right);
1029   scale = _mm_set1_epi16(128);
1030   auto* dst = static_cast<uint8_t*>(dest);
1031   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1032     __m128i y_select = _mm_set1_epi32(y_mask);
1033     __m128i left_y = _mm_shuffle_epi8(left1, y_select);
1034     WriteSmoothDirectionalSum16(dst, left_y, left_y, weights1, weights2,
1035                                 scaled_top_right1, scaled_top_right2, scale);
1036     WriteSmoothDirectionalSum16(dst + 16, left_y, left_y, weights3, weights4,
1037                                 scaled_top_right3, scaled_top_right4, scale);
1038     dst += stride;
1039   }
1040   const __m128i left2 =
1041       _mm_cvtepu8_epi16(LoadLo8(static_cast<const uint8_t*>(left_column) + 8));
1042   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1043     __m128i y_select = _mm_set1_epi32(y_mask);
1044     __m128i left_y = _mm_shuffle_epi8(left2, y_select);
1045     WriteSmoothDirectionalSum16(dst, left_y, left_y, weights1, weights2,
1046                                 scaled_top_right1, scaled_top_right2, scale);
1047     WriteSmoothDirectionalSum16(dst + 16, left_y, left_y, weights3, weights4,
1048                                 scaled_top_right3, scaled_top_right4, scale);
1049     dst += stride;
1050   }
1051 }
1052 
1053 void SmoothHorizontal32x32_SSE4_1(void* const dest, const ptrdiff_t stride,
1054                                   const void* const top_row,
1055                                   const void* const left_column) {
1056   const auto* const top = static_cast<const uint8_t*>(top_row);
1057   const __m128i top_right = _mm_set1_epi16(top[31]);
1058   const __m128i weights_lo = LoadUnaligned16(kSmoothWeights + 28);
1059   const __m128i weights_hi = LoadUnaligned16(kSmoothWeights + 44);
1060   __m128i scale = _mm_set1_epi16(256);
1061   const __m128i weights1 = _mm_cvtepu8_epi16(weights_lo);
1062   const __m128i weights2 = _mm_cvtepu8_epi16(_mm_srli_si128(weights_lo, 8));
1063   const __m128i weights3 = _mm_cvtepu8_epi16(weights_hi);
1064   const __m128i weights4 = _mm_cvtepu8_epi16(_mm_srli_si128(weights_hi, 8));
1065   const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
1066   const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
1067   const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
1068   const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
1069   const __m128i scaled_top_right1 =
1070       _mm_mullo_epi16(inverted_weights1, top_right);
1071   const __m128i scaled_top_right2 =
1072       _mm_mullo_epi16(inverted_weights2, top_right);
1073   const __m128i scaled_top_right3 =
1074       _mm_mullo_epi16(inverted_weights3, top_right);
1075   const __m128i scaled_top_right4 =
1076       _mm_mullo_epi16(inverted_weights4, top_right);
1077   scale = _mm_set1_epi16(128);
1078   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
1079   __m128i left = _mm_cvtepu8_epi16(LoadLo8(left_column));
1080   auto* dst = static_cast<uint8_t*>(dest);
1081   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1082     __m128i y_select = _mm_set1_epi32(y_mask);
1083     __m128i left_y = _mm_shuffle_epi8(left, y_select);
1084     WriteSmoothDirectionalSum16(dst, left_y, left_y, weights1, weights2,
1085                                 scaled_top_right1, scaled_top_right2, scale);
1086     WriteSmoothDirectionalSum16(dst + 16, left_y, left_y, weights3, weights4,
1087                                 scaled_top_right3, scaled_top_right4, scale);
1088     dst += stride;
1089   }
1090   left = _mm_cvtepu8_epi16(LoadLo8(left_ptr + 8));
1091   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1092     __m128i y_select = _mm_set1_epi32(y_mask);
1093     __m128i left_y = _mm_shuffle_epi8(left, y_select);
1094     WriteSmoothDirectionalSum16(dst, left_y, left_y, weights1, weights2,
1095                                 scaled_top_right1, scaled_top_right2, scale);
1096     WriteSmoothDirectionalSum16(dst + 16, left_y, left_y, weights3, weights4,
1097                                 scaled_top_right3, scaled_top_right4, scale);
1098     dst += stride;
1099   }
1100   left = _mm_cvtepu8_epi16(LoadLo8(left_ptr + 16));
1101   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1102     __m128i y_select = _mm_set1_epi32(y_mask);
1103     __m128i left_y = _mm_shuffle_epi8(left, y_select);
1104     WriteSmoothDirectionalSum16(dst, left_y, left_y, weights1, weights2,
1105                                 scaled_top_right1, scaled_top_right2, scale);
1106     WriteSmoothDirectionalSum16(dst + 16, left_y, left_y, weights3, weights4,
1107                                 scaled_top_right3, scaled_top_right4, scale);
1108     dst += stride;
1109   }
1110   left = _mm_cvtepu8_epi16(LoadLo8(left_ptr + 24));
1111   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1112     __m128i y_select = _mm_set1_epi32(y_mask);
1113     __m128i left_y = _mm_shuffle_epi8(left, y_select);
1114     WriteSmoothDirectionalSum16(dst, left_y, left_y, weights1, weights2,
1115                                 scaled_top_right1, scaled_top_right2, scale);
1116     WriteSmoothDirectionalSum16(dst + 16, left_y, left_y, weights3, weights4,
1117                                 scaled_top_right3, scaled_top_right4, scale);
1118     dst += stride;
1119   }
1120 }
1121 
1122 void SmoothHorizontal32x64_SSE4_1(void* const dest, const ptrdiff_t stride,
1123                                   const void* const top_row,
1124                                   const void* const left_column) {
1125   const auto* const top = static_cast<const uint8_t*>(top_row);
1126   const __m128i top_right = _mm_set1_epi16(top[31]);
1127   const __m128i weights_lo = LoadUnaligned16(kSmoothWeights + 28);
1128   const __m128i weights_hi = LoadUnaligned16(kSmoothWeights + 44);
1129   __m128i scale = _mm_set1_epi16(256);
1130   const __m128i weights1 = _mm_cvtepu8_epi16(weights_lo);
1131   const __m128i weights2 = _mm_cvtepu8_epi16(_mm_srli_si128(weights_lo, 8));
1132   const __m128i weights3 = _mm_cvtepu8_epi16(weights_hi);
1133   const __m128i weights4 = _mm_cvtepu8_epi16(_mm_srli_si128(weights_hi, 8));
1134   const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
1135   const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
1136   const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
1137   const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
1138   const __m128i scaled_top_right1 =
1139       _mm_mullo_epi16(inverted_weights1, top_right);
1140   const __m128i scaled_top_right2 =
1141       _mm_mullo_epi16(inverted_weights2, top_right);
1142   const __m128i scaled_top_right3 =
1143       _mm_mullo_epi16(inverted_weights3, top_right);
1144   const __m128i scaled_top_right4 =
1145       _mm_mullo_epi16(inverted_weights4, top_right);
1146   scale = _mm_set1_epi16(128);
1147   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
1148   auto* dst = static_cast<uint8_t*>(dest);
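  // Produce the 64 rows in eight groups of eight, reloading the next eight
  // left-column pixels for each group.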
1149   for (int left_offset = 0; left_offset < 64; left_offset += 8) {
1150     const __m128i left = _mm_cvtepu8_epi16(LoadLo8(left_ptr + left_offset));
1151     for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1152       const __m128i y_select = _mm_set1_epi32(y_mask);
1153       const __m128i left_y = _mm_shuffle_epi8(left, y_select);
1154       WriteSmoothDirectionalSum16(dst, left_y, left_y, weights1, weights2,
1155                                   scaled_top_right1, scaled_top_right2, scale);
1156       WriteSmoothDirectionalSum16(dst + 16, left_y, left_y, weights3, weights4,
1157                                   scaled_top_right3, scaled_top_right4, scale);
1158       dst += stride;
1159     }
1160   }
1161 }
1162 
1163 void SmoothHorizontal64x16_SSE4_1(void* const dest, const ptrdiff_t stride,
1164                                   const void* const top_row,
1165                                   const void* const left_column) {
1166   const auto* const top = static_cast<const uint8_t*>(top_row);
1167   const __m128i top_right = _mm_set1_epi16(top[63]);
1168   const __m128i left1 = _mm_cvtepu8_epi16(LoadLo8(left_column));
1169   const __m128i weights_lolo = LoadUnaligned16(kSmoothWeights + 60);
1170   const __m128i weights_lohi = LoadUnaligned16(kSmoothWeights + 76);
1171   __m128i scale = _mm_set1_epi16(256);
1172   const __m128i weights1 = _mm_cvtepu8_epi16(weights_lolo);
1173   const __m128i weights2 = _mm_cvtepu8_epi16(_mm_srli_si128(weights_lolo, 8));
1174   const __m128i weights3 = _mm_cvtepu8_epi16(weights_lohi);
1175   const __m128i weights4 = _mm_cvtepu8_epi16(_mm_srli_si128(weights_lohi, 8));
1176   const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
1177   const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
1178   const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
1179   const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
1180   const __m128i scaled_top_right1 =
1181       _mm_mullo_epi16(inverted_weights1, top_right);
1182   const __m128i scaled_top_right2 =
1183       _mm_mullo_epi16(inverted_weights2, top_right);
1184   const __m128i scaled_top_right3 =
1185       _mm_mullo_epi16(inverted_weights3, top_right);
1186   const __m128i scaled_top_right4 =
1187       _mm_mullo_epi16(inverted_weights4, top_right);
1188   const __m128i weights_hilo = LoadUnaligned16(kSmoothWeights + 92);
1189   const __m128i weights_hihi = LoadUnaligned16(kSmoothWeights + 108);
1190   const __m128i weights5 = _mm_cvtepu8_epi16(weights_hilo);
1191   const __m128i weights6 = _mm_cvtepu8_epi16(_mm_srli_si128(weights_hilo, 8));
1192   const __m128i weights7 = _mm_cvtepu8_epi16(weights_hihi);
1193   const __m128i weights8 = _mm_cvtepu8_epi16(_mm_srli_si128(weights_hihi, 8));
1194   const __m128i inverted_weights5 = _mm_sub_epi16(scale, weights5);
1195   const __m128i inverted_weights6 = _mm_sub_epi16(scale, weights6);
1196   const __m128i inverted_weights7 = _mm_sub_epi16(scale, weights7);
1197   const __m128i inverted_weights8 = _mm_sub_epi16(scale, weights8);
1198   const __m128i scaled_top_right5 =
1199       _mm_mullo_epi16(inverted_weights5, top_right);
1200   const __m128i scaled_top_right6 =
1201       _mm_mullo_epi16(inverted_weights6, top_right);
1202   const __m128i scaled_top_right7 =
1203       _mm_mullo_epi16(inverted_weights7, top_right);
1204   const __m128i scaled_top_right8 =
1205       _mm_mullo_epi16(inverted_weights8, top_right);
1206   scale = _mm_set1_epi16(128);
1207   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
1208   auto* dst = static_cast<uint8_t*>(dest);
1209   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1210     __m128i y_select = _mm_set1_epi32(y_mask);
1211     __m128i left_y = _mm_shuffle_epi8(left1, y_select);
1212     WriteSmoothDirectionalSum16(dst, left_y, left_y, weights1, weights2,
1213                                 scaled_top_right1, scaled_top_right2, scale);
1214     WriteSmoothDirectionalSum16(dst + 16, left_y, left_y, weights3, weights4,
1215                                 scaled_top_right3, scaled_top_right4, scale);
1216     WriteSmoothDirectionalSum16(dst + 32, left_y, left_y, weights5, weights6,
1217                                 scaled_top_right5, scaled_top_right6, scale);
1218     WriteSmoothDirectionalSum16(dst + 48, left_y, left_y, weights7, weights8,
1219                                 scaled_top_right7, scaled_top_right8, scale);
1220     dst += stride;
1221   }
1222   const __m128i left2 = _mm_cvtepu8_epi16(LoadLo8(left_ptr + 8));
1223   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1224     __m128i y_select = _mm_set1_epi32(y_mask);
1225     __m128i left_y = _mm_shuffle_epi8(left2, y_select);
1226     WriteSmoothDirectionalSum16(dst, left_y, left_y, weights1, weights2,
1227                                 scaled_top_right1, scaled_top_right2, scale);
1228     WriteSmoothDirectionalSum16(dst + 16, left_y, left_y, weights3, weights4,
1229                                 scaled_top_right3, scaled_top_right4, scale);
1230     WriteSmoothDirectionalSum16(dst + 32, left_y, left_y, weights5, weights6,
1231                                 scaled_top_right5, scaled_top_right6, scale);
1232     WriteSmoothDirectionalSum16(dst + 48, left_y, left_y, weights7, weights8,
1233                                 scaled_top_right7, scaled_top_right8, scale);
1234     dst += stride;
1235   }
1236 }
1237 
1238 void SmoothHorizontal64x32_SSE4_1(void* const dest, const ptrdiff_t stride,
1239                                   const void* const top_row,
1240                                   const void* const left_column) {
1241   const auto* const top = static_cast<const uint8_t*>(top_row);
1242   const __m128i top_right = _mm_set1_epi16(top[63]);
1243   const __m128i left1 = _mm_cvtepu8_epi16(LoadLo8(left_column));
1244   const __m128i weights_lolo = LoadUnaligned16(kSmoothWeights + 60);
1245   const __m128i weights_lohi = LoadUnaligned16(kSmoothWeights + 76);
1246   __m128i scale = _mm_set1_epi16(256);
1247   const __m128i weights1 = _mm_cvtepu8_epi16(weights_lolo);
1248   const __m128i weights2 = _mm_cvtepu8_epi16(_mm_srli_si128(weights_lolo, 8));
1249   const __m128i weights3 = _mm_cvtepu8_epi16(weights_lohi);
1250   const __m128i weights4 = _mm_cvtepu8_epi16(_mm_srli_si128(weights_lohi, 8));
1251   const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
1252   const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
1253   const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
1254   const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
1255   const __m128i scaled_top_right1 =
1256       _mm_mullo_epi16(inverted_weights1, top_right);
1257   const __m128i scaled_top_right2 =
1258       _mm_mullo_epi16(inverted_weights2, top_right);
1259   const __m128i scaled_top_right3 =
1260       _mm_mullo_epi16(inverted_weights3, top_right);
1261   const __m128i scaled_top_right4 =
1262       _mm_mullo_epi16(inverted_weights4, top_right);
1263   const __m128i weights_hilo = LoadUnaligned16(kSmoothWeights + 92);
1264   const __m128i weights_hihi = LoadUnaligned16(kSmoothWeights + 108);
1265   const __m128i weights5 = _mm_cvtepu8_epi16(weights_hilo);
1266   const __m128i weights6 = _mm_cvtepu8_epi16(_mm_srli_si128(weights_hilo, 8));
1267   const __m128i weights7 = _mm_cvtepu8_epi16(weights_hihi);
1268   const __m128i weights8 = _mm_cvtepu8_epi16(_mm_srli_si128(weights_hihi, 8));
1269   const __m128i inverted_weights5 = _mm_sub_epi16(scale, weights5);
1270   const __m128i inverted_weights6 = _mm_sub_epi16(scale, weights6);
1271   const __m128i inverted_weights7 = _mm_sub_epi16(scale, weights7);
1272   const __m128i inverted_weights8 = _mm_sub_epi16(scale, weights8);
1273   const __m128i scaled_top_right5 =
1274       _mm_mullo_epi16(inverted_weights5, top_right);
1275   const __m128i scaled_top_right6 =
1276       _mm_mullo_epi16(inverted_weights6, top_right);
1277   const __m128i scaled_top_right7 =
1278       _mm_mullo_epi16(inverted_weights7, top_right);
1279   const __m128i scaled_top_right8 =
1280       _mm_mullo_epi16(inverted_weights8, top_right);
1281   scale = _mm_set1_epi16(128);
1282   auto* dst = static_cast<uint8_t*>(dest);
1283   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1284     const __m128i y_select = _mm_set1_epi32(y_mask);
1285     const __m128i left_y = _mm_shuffle_epi8(left1, y_select);
1286     WriteSmoothDirectionalSum16(dst, left_y, left_y, weights1, weights2,
1287                                 scaled_top_right1, scaled_top_right2, scale);
1288     WriteSmoothDirectionalSum16(dst + 16, left_y, left_y, weights3, weights4,
1289                                 scaled_top_right3, scaled_top_right4, scale);
1290     WriteSmoothDirectionalSum16(dst + 32, left_y, left_y, weights5, weights6,
1291                                 scaled_top_right5, scaled_top_right6, scale);
1292     WriteSmoothDirectionalSum16(dst + 48, left_y, left_y, weights7, weights8,
1293                                 scaled_top_right7, scaled_top_right8, scale);
1294     dst += stride;
1295   }
1296   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
1297   const __m128i left2 = _mm_cvtepu8_epi16(LoadLo8(left_ptr + 8));
1298   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1299     const __m128i y_select = _mm_set1_epi32(y_mask);
1300     const __m128i left_y = _mm_shuffle_epi8(left2, y_select);
1301     WriteSmoothDirectionalSum16(dst, left_y, left_y, weights1, weights2,
1302                                 scaled_top_right1, scaled_top_right2, scale);
1303     WriteSmoothDirectionalSum16(dst + 16, left_y, left_y, weights3, weights4,
1304                                 scaled_top_right3, scaled_top_right4, scale);
1305     WriteSmoothDirectionalSum16(dst + 32, left_y, left_y, weights5, weights6,
1306                                 scaled_top_right5, scaled_top_right6, scale);
1307     WriteSmoothDirectionalSum16(dst + 48, left_y, left_y, weights7, weights8,
1308                                 scaled_top_right7, scaled_top_right8, scale);
1309     dst += stride;
1310   }
1311   const __m128i left3 = _mm_cvtepu8_epi16(LoadLo8(left_ptr + 16));
1312   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1313     const __m128i y_select = _mm_set1_epi32(y_mask);
1314     const __m128i left_y = _mm_shuffle_epi8(left3, y_select);
1315     WriteSmoothDirectionalSum16(dst, left_y, left_y, weights1, weights2,
1316                                 scaled_top_right1, scaled_top_right2, scale);
1317     WriteSmoothDirectionalSum16(dst + 16, left_y, left_y, weights3, weights4,
1318                                 scaled_top_right3, scaled_top_right4, scale);
1319     WriteSmoothDirectionalSum16(dst + 32, left_y, left_y, weights5, weights6,
1320                                 scaled_top_right5, scaled_top_right6, scale);
1321     WriteSmoothDirectionalSum16(dst + 48, left_y, left_y, weights7, weights8,
1322                                 scaled_top_right7, scaled_top_right8, scale);
1323     dst += stride;
1324   }
1325   const __m128i left4 = _mm_cvtepu8_epi16(LoadLo8(left_ptr + 24));
1326   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1327     const __m128i y_select = _mm_set1_epi32(y_mask);
1328     const __m128i left_y = _mm_shuffle_epi8(left4, y_select);
1329     WriteSmoothDirectionalSum16(dst, left_y, left_y, weights1, weights2,
1330                                 scaled_top_right1, scaled_top_right2, scale);
1331     WriteSmoothDirectionalSum16(dst + 16, left_y, left_y, weights3, weights4,
1332                                 scaled_top_right3, scaled_top_right4, scale);
1333     WriteSmoothDirectionalSum16(dst + 32, left_y, left_y, weights5, weights6,
1334                                 scaled_top_right5, scaled_top_right6, scale);
1335     WriteSmoothDirectionalSum16(dst + 48, left_y, left_y, weights7, weights8,
1336                                 scaled_top_right7, scaled_top_right8, scale);
1337     dst += stride;
1338   }
1339 }
1340 
1341 void SmoothHorizontal64x64_SSE4_1(void* const dest, const ptrdiff_t stride,
1342                                   const void* const top_row,
1343                                   const void* const left_column) {
1344   const auto* const top = static_cast<const uint8_t*>(top_row);
1345   const __m128i top_right = _mm_set1_epi16(top[63]);
1346   const __m128i weights_lolo = LoadUnaligned16(kSmoothWeights + 60);
1347   const __m128i weights_lohi = LoadUnaligned16(kSmoothWeights + 76);
1348   __m128i scale = _mm_set1_epi16(256);
1349   const __m128i weights1 = _mm_cvtepu8_epi16(weights_lolo);
1350   const __m128i weights2 = _mm_cvtepu8_epi16(_mm_srli_si128(weights_lolo, 8));
1351   const __m128i weights3 = _mm_cvtepu8_epi16(weights_lohi);
1352   const __m128i weights4 = _mm_cvtepu8_epi16(_mm_srli_si128(weights_lohi, 8));
1353   const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
1354   const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
1355   const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
1356   const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
1357   const __m128i scaled_top_right1 =
1358       _mm_mullo_epi16(inverted_weights1, top_right);
1359   const __m128i scaled_top_right2 =
1360       _mm_mullo_epi16(inverted_weights2, top_right);
1361   const __m128i scaled_top_right3 =
1362       _mm_mullo_epi16(inverted_weights3, top_right);
1363   const __m128i scaled_top_right4 =
1364       _mm_mullo_epi16(inverted_weights4, top_right);
1365   const __m128i weights_hilo = LoadUnaligned16(kSmoothWeights + 92);
1366   const __m128i weights_hihi = LoadUnaligned16(kSmoothWeights + 108);
1367   const __m128i weights5 = _mm_cvtepu8_epi16(weights_hilo);
1368   const __m128i weights6 = _mm_cvtepu8_epi16(_mm_srli_si128(weights_hilo, 8));
1369   const __m128i weights7 = _mm_cvtepu8_epi16(weights_hihi);
1370   const __m128i weights8 = _mm_cvtepu8_epi16(_mm_srli_si128(weights_hihi, 8));
1371   const __m128i inverted_weights5 = _mm_sub_epi16(scale, weights5);
1372   const __m128i inverted_weights6 = _mm_sub_epi16(scale, weights6);
1373   const __m128i inverted_weights7 = _mm_sub_epi16(scale, weights7);
1374   const __m128i inverted_weights8 = _mm_sub_epi16(scale, weights8);
1375   const __m128i scaled_top_right5 =
1376       _mm_mullo_epi16(inverted_weights5, top_right);
1377   const __m128i scaled_top_right6 =
1378       _mm_mullo_epi16(inverted_weights6, top_right);
1379   const __m128i scaled_top_right7 =
1380       _mm_mullo_epi16(inverted_weights7, top_right);
1381   const __m128i scaled_top_right8 =
1382       _mm_mullo_epi16(inverted_weights8, top_right);
1383   scale = _mm_set1_epi16(128);
1384   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
1385   auto* dst = static_cast<uint8_t*>(dest);
1386   for (int left_offset = 0; left_offset < 64; left_offset += 8) {
1387     const __m128i left = _mm_cvtepu8_epi16(LoadLo8(left_ptr + left_offset));
1388     for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1389       const __m128i y_select = _mm_set1_epi32(y_mask);
1390       const __m128i left_y = _mm_shuffle_epi8(left, y_select);
1391       WriteSmoothDirectionalSum16(dst, left_y, left_y, weights1, weights2,
1392                                   scaled_top_right1, scaled_top_right2, scale);
1393       WriteSmoothDirectionalSum16(dst + 16, left_y, left_y, weights3, weights4,
1394                                   scaled_top_right3, scaled_top_right4, scale);
1395       WriteSmoothDirectionalSum16(dst + 32, left_y, left_y, weights5, weights6,
1396                                   scaled_top_right5, scaled_top_right6, scale);
1397       WriteSmoothDirectionalSum16(dst + 48, left_y, left_y, weights7, weights8,
1398                                   scaled_top_right7, scaled_top_right8, scale);
1399       dst += stride;
1400     }
1401   }
1402 }
1403 
1404 inline void LoadSmoothVerticalPixels4(const uint8_t* above, const uint8_t* left,
1405                                       const int height, __m128i* pixels) {
1406   __m128i top = Load4(above);
1407   const __m128i bottom_left = _mm_set1_epi16(left[height - 1]);
1408   top = _mm_cvtepu8_epi16(top);
1409   pixels[0] = _mm_unpacklo_epi16(top, bottom_left);
1410 }
1411 
1412 // |weight_array| alternates weight vectors from the table with their inverted
1413 // (256-w) counterparts. This is precomputed by the compiler when the weights
1414 // table is visible to this module. Removing this visibility can cut speed by up
1415 // to half in both 4xH and 8xH transforms.
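// For height 4 or 8, weights[0] holds the weights widened to 16 bits and
// weights[1] holds their 256 - w counterparts; for height 16, weights[0] and
// weights[1] cover rows 0-7 while weights[2] and weights[3] cover rows 8-15.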
1416 inline void LoadSmoothVerticalWeights4(const uint8_t* weight_array,
1417                                        const int height, __m128i* weights) {
1418   const __m128i inverter = _mm_set1_epi16(256);
1419 
1420   if (height == 4) {
1421     const __m128i weight = Load4(weight_array);
1422     weights[0] = _mm_cvtepu8_epi16(weight);
1423     weights[1] = _mm_sub_epi16(inverter, weights[0]);
1424   } else if (height == 8) {
1425     const __m128i weight = LoadLo8(weight_array + 4);
1426     weights[0] = _mm_cvtepu8_epi16(weight);
1427     weights[1] = _mm_sub_epi16(inverter, weights[0]);
1428   } else {
1429     const __m128i weight = LoadUnaligned16(weight_array + 12);
1430     const __m128i zero = _mm_setzero_si128();
1431     weights[0] = _mm_cvtepu8_epi16(weight);
1432     weights[1] = _mm_sub_epi16(inverter, weights[0]);
1433     weights[2] = _mm_unpackhi_epi8(weight, zero);
1434     weights[3] = _mm_sub_epi16(inverter, weights[2]);
1435   }
1436 }
1437 
1438 inline void WriteSmoothVertical4xH(const __m128i* pixel, const __m128i* weight,
1439                                    const int height, uint8_t* dst,
1440                                    const ptrdiff_t stride) {
1441   const __m128i pred_round = _mm_set1_epi32(128);
1442   const __m128i mask_increment = _mm_set1_epi16(0x0202);
1443   const __m128i cvtepu8_epi32 = _mm_set1_epi32(0xC080400);
1444   __m128i y_select = _mm_set1_epi16(0x0100);
1445 
1446   for (int y = 0; y < height; ++y) {
1447     const __m128i weight_y = _mm_shuffle_epi8(weight[0], y_select);
1448     const __m128i inverted_weight_y = _mm_shuffle_epi8(weight[1], y_select);
1449     const __m128i alternate_weights =
1450         _mm_unpacklo_epi16(weight_y, inverted_weight_y);
1451     // Here the pixel vector is top_row[0], corner, top_row[1], corner, ...
1452     // The madd instruction yields four results of the form:
1453     // (top_row[x] * weight[y] + corner * inverted_weight[y])
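    // Since weight[y] + inverted_weight[y] == 256, adding 128 and shifting
    // right by 8 below rounds this sum to the weighted average of the top
    // pixel and the bottom-left corner.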
1454     __m128i sum = _mm_madd_epi16(pixel[0], alternate_weights);
1455     sum = _mm_add_epi32(sum, pred_round);
1456     sum = _mm_srai_epi32(sum, 8);
1457     sum = _mm_shuffle_epi8(sum, cvtepu8_epi32);
1458     Store4(dst, sum);
1459     dst += stride;
1460     y_select = _mm_add_epi16(y_select, mask_increment);
1461   }
1462 }
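// A scalar sketch of what the helpers above compute, for reference only: for
// each row y and column x of a 4-wide block,
//   pred[y][x] = (top[x] * w[y] + left[height - 1] * (256 - w[y]) + 128) >> 8
// where w[] is the kSmoothWeights row matching the block height.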
1463 
1464 void SmoothVertical4x4_SSE4_1(void* const dest, const ptrdiff_t stride,
1465                               const void* const top_row,
1466                               const void* const left_column) {
1467   const auto* const left = static_cast<const uint8_t*>(left_column);
1468   const auto* const above = static_cast<const uint8_t*>(top_row);
1469   auto* dst = static_cast<uint8_t*>(dest);
1470   __m128i pixels;
1471   LoadSmoothVerticalPixels4(above, left, 4, &pixels);
1472 
1473   __m128i weights[2];
1474   LoadSmoothVerticalWeights4(kSmoothWeights, 4, weights);
1475 
1476   WriteSmoothVertical4xH(&pixels, weights, 4, dst, stride);
1477 }
1478 
1479 void SmoothVertical4x8_SSE4_1(void* const dest, const ptrdiff_t stride,
1480                               const void* const top_row,
1481                               const void* const left_column) {
1482   const auto* const left = static_cast<const uint8_t*>(left_column);
1483   const auto* const above = static_cast<const uint8_t*>(top_row);
1484   auto* dst = static_cast<uint8_t*>(dest);
1485   __m128i pixels;
1486   LoadSmoothVerticalPixels4(above, left, 8, &pixels);
1487 
1488   __m128i weights[2];
1489   LoadSmoothVerticalWeights4(kSmoothWeights, 8, weights);
1490 
1491   WriteSmoothVertical4xH(&pixels, weights, 8, dst, stride);
1492 }
1493 
1494 void SmoothVertical4x16_SSE4_1(void* const dest, const ptrdiff_t stride,
1495                                const void* const top_row,
1496                                const void* const left_column) {
1497   const auto* const left = static_cast<const uint8_t*>(left_column);
1498   const auto* const above = static_cast<const uint8_t*>(top_row);
1499   auto* dst = static_cast<uint8_t*>(dest);
1500   __m128i pixels;
1501   LoadSmoothVerticalPixels4(above, left, 16, &pixels);
1502 
1503   __m128i weights[4];
1504   LoadSmoothVerticalWeights4(kSmoothWeights, 16, weights);
1505 
1506   WriteSmoothVertical4xH(&pixels, weights, 8, dst, stride);
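  // Skip the 8 rows just written; the second call covers rows 8-15 using
  // weights[2] and weights[3].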
1507   dst += stride << 3;
1508   WriteSmoothVertical4xH(&pixels, &weights[2], 8, dst, stride);
1509 }
1510 
1511 void SmoothVertical8x4_SSE4_1(void* const dest, const ptrdiff_t stride,
1512                               const void* const top_row,
1513                               const void* const left_column) {
1514   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
1515   const __m128i bottom_left = _mm_set1_epi16(left_ptr[3]);
1516   const __m128i weights = _mm_cvtepu8_epi16(Load4(kSmoothWeights));
1517   __m128i scale = _mm_set1_epi16(256);
1518   const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
1519   const __m128i scaled_bottom_left =
1520       _mm_mullo_epi16(inverted_weights, bottom_left);
1521   scale = _mm_set1_epi16(128);
1522 
1523   auto* dst = static_cast<uint8_t*>(dest);
1524   __m128i y_select = _mm_set1_epi32(0x01000100);
1525   const __m128i top = _mm_cvtepu8_epi16(LoadLo8(top_row));
1526   __m128i weights_y = _mm_shuffle_epi8(weights, y_select);
1527   __m128i scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1528   WriteSmoothDirectionalSum8(dst, top, weights_y, scaled_bottom_left_y, scale);
1529   dst += stride;
1530   y_select = _mm_set1_epi32(0x03020302);
1531   weights_y = _mm_shuffle_epi8(weights, y_select);
1532   scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1533   WriteSmoothDirectionalSum8(dst, top, weights_y, scaled_bottom_left_y, scale);
1534   dst += stride;
1535   y_select = _mm_set1_epi32(0x05040504);
1536   weights_y = _mm_shuffle_epi8(weights, y_select);
1537   scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1538   WriteSmoothDirectionalSum8(dst, top, weights_y, scaled_bottom_left_y, scale);
1539   dst += stride;
1540   y_select = _mm_set1_epi32(0x07060706);
1541   weights_y = _mm_shuffle_epi8(weights, y_select);
1542   scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1543   WriteSmoothDirectionalSum8(dst, top, weights_y, scaled_bottom_left_y, scale);
1544 }
1545 
1546 void SmoothVertical8x8_SSE4_1(void* const dest, const ptrdiff_t stride,
1547                               const void* const top_row,
1548                               const void* const left_column) {
1549   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
1550   const __m128i bottom_left = _mm_set1_epi16(left_ptr[7]);
1551   const __m128i weights = _mm_cvtepu8_epi16(LoadLo8(kSmoothWeights + 4));
1552   __m128i scale = _mm_set1_epi16(256);
1553   const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
1554   const __m128i scaled_bottom_left =
1555       _mm_mullo_epi16(inverted_weights, bottom_left);
1556   scale = _mm_set1_epi16(128);
1557   const __m128i top = _mm_cvtepu8_epi16(LoadLo8(top_row));
1558   auto* dst = static_cast<uint8_t*>(dest);
1559   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1560     const __m128i y_select = _mm_set1_epi32(y_mask);
1561     const __m128i weights_y = _mm_shuffle_epi8(weights, y_select);
1562     const __m128i scaled_bottom_left_y =
1563         _mm_shuffle_epi8(scaled_bottom_left, y_select);
1564     WriteSmoothDirectionalSum8(dst, top, weights_y, scaled_bottom_left_y,
1565                                scale);
1566     dst += stride;
1567   }
1568 }
1569 
1570 void SmoothVertical8x16_SSE4_1(void* const dest, const ptrdiff_t stride,
1571                                const void* const top_row,
1572                                const void* const left_column) {
1573   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
1574   const __m128i bottom_left = _mm_set1_epi16(left_ptr[15]);
1575   const __m128i weights = LoadUnaligned16(kSmoothWeights + 12);
1576 
1577   const __m128i weights1 = _mm_cvtepu8_epi16(weights);
1578   const __m128i weights2 = _mm_cvtepu8_epi16(_mm_srli_si128(weights, 8));
1579   __m128i scale = _mm_set1_epi16(256);
1580   const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
1581   const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
1582   const __m128i scaled_bottom_left1 =
1583       _mm_mullo_epi16(inverted_weights1, bottom_left);
1584   const __m128i scaled_bottom_left2 =
1585       _mm_mullo_epi16(inverted_weights2, bottom_left);
1586   scale = _mm_set1_epi16(128);
1587   const __m128i top = _mm_cvtepu8_epi16(LoadLo8(top_row));
1588   auto* dst = static_cast<uint8_t*>(dest);
1589   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1590     const __m128i y_select = _mm_set1_epi32(y_mask);
1591     const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select);
1592     const __m128i scaled_bottom_left_y =
1593         _mm_shuffle_epi8(scaled_bottom_left1, y_select);
1594     WriteSmoothDirectionalSum8(dst, top, weights_y, scaled_bottom_left_y,
1595                                scale);
1596     dst += stride;
1597   }
1598   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1599     const __m128i y_select = _mm_set1_epi32(y_mask);
1600     const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select);
1601     const __m128i scaled_bottom_left_y =
1602         _mm_shuffle_epi8(scaled_bottom_left2, y_select);
1603     WriteSmoothDirectionalSum8(dst, top, weights_y, scaled_bottom_left_y,
1604                                scale);
1605     dst += stride;
1606   }
1607 }
1608 
1609 void SmoothVertical8x32_SSE4_1(void* const dest, const ptrdiff_t stride,
1610                                const void* const top_row,
1611                                const void* const left_column) {
1612   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
1613   const __m128i zero = _mm_setzero_si128();
1614   const __m128i bottom_left = _mm_set1_epi16(left_ptr[31]);
1615   const __m128i weights_lo = LoadUnaligned16(kSmoothWeights + 28);
1616   const __m128i weights_hi = LoadUnaligned16(kSmoothWeights + 44);
1617   const __m128i weights1 = _mm_cvtepu8_epi16(weights_lo);
1618   const __m128i weights2 = _mm_unpackhi_epi8(weights_lo, zero);
1619   const __m128i weights3 = _mm_cvtepu8_epi16(weights_hi);
1620   const __m128i weights4 = _mm_unpackhi_epi8(weights_hi, zero);
1621   __m128i scale = _mm_set1_epi16(256);
1622   const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
1623   const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
1624   const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
1625   const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
1626   const __m128i scaled_bottom_left1 =
1627       _mm_mullo_epi16(inverted_weights1, bottom_left);
1628   const __m128i scaled_bottom_left2 =
1629       _mm_mullo_epi16(inverted_weights2, bottom_left);
1630   const __m128i scaled_bottom_left3 =
1631       _mm_mullo_epi16(inverted_weights3, bottom_left);
1632   const __m128i scaled_bottom_left4 =
1633       _mm_mullo_epi16(inverted_weights4, bottom_left);
1634   scale = _mm_set1_epi16(128);
1635   auto* dst = static_cast<uint8_t*>(dest);
1636   const __m128i top = _mm_cvtepu8_epi16(LoadLo8(top_row));
1637   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1638     const __m128i y_select = _mm_set1_epi32(y_mask);
1639     const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select);
1640     const __m128i scaled_bottom_left_y =
1641         _mm_shuffle_epi8(scaled_bottom_left1, y_select);
1642     WriteSmoothDirectionalSum8(dst, top, weights_y, scaled_bottom_left_y,
1643                                scale);
1644     dst += stride;
1645   }
1646   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1647     const __m128i y_select = _mm_set1_epi32(y_mask);
1648     const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select);
1649     const __m128i scaled_bottom_left_y =
1650         _mm_shuffle_epi8(scaled_bottom_left2, y_select);
1651     WriteSmoothDirectionalSum8(dst, top, weights_y, scaled_bottom_left_y,
1652                                scale);
1653     dst += stride;
1654   }
1655   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1656     const __m128i y_select = _mm_set1_epi32(y_mask);
1657     const __m128i weights_y = _mm_shuffle_epi8(weights3, y_select);
1658     const __m128i scaled_bottom_left_y =
1659         _mm_shuffle_epi8(scaled_bottom_left3, y_select);
1660     WriteSmoothDirectionalSum8(dst, top, weights_y, scaled_bottom_left_y,
1661                                scale);
1662     dst += stride;
1663   }
1664   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1665     const __m128i y_select = _mm_set1_epi32(y_mask);
1666     const __m128i weights_y = _mm_shuffle_epi8(weights4, y_select);
1667     const __m128i scaled_bottom_left_y =
1668         _mm_shuffle_epi8(scaled_bottom_left4, y_select);
1669     WriteSmoothDirectionalSum8(dst, top, weights_y, scaled_bottom_left_y,
1670                                scale);
1671     dst += stride;
1672   }
1673 }
1674 
1675 void SmoothVertical16x4_SSE4_1(void* const dest, const ptrdiff_t stride,
1676                                const void* const top_row,
1677                                const void* const left_column) {
1678   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
1679   auto* dst = static_cast<uint8_t*>(dest);
1680   const __m128i bottom_left = _mm_set1_epi16(left_ptr[3]);
1681   const __m128i weights = _mm_cvtepu8_epi16(Load4(kSmoothWeights));
1682   __m128i scale = _mm_set1_epi16(256);
1683   const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
1684   const __m128i scaled_bottom_left =
1685       _mm_mullo_epi16(inverted_weights, bottom_left);
1686   scale = _mm_set1_epi16(128);
1687   const __m128i top = LoadUnaligned16(top_row);
1688   const __m128i top_lo = _mm_cvtepu8_epi16(top);
1689   const __m128i top_hi = _mm_cvtepu8_epi16(_mm_srli_si128(top, 8));
1690 
1691   __m128i y_select = _mm_set1_epi32(0x01000100);
1692   __m128i weights_y = _mm_shuffle_epi8(weights, y_select);
1693   __m128i scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1694   WriteSmoothDirectionalSum16(dst, top_lo, top_hi, weights_y, weights_y,
1695                               scaled_bottom_left_y, scaled_bottom_left_y,
1696                               scale);
1697   dst += stride;
1698   y_select = _mm_set1_epi32(0x03020302);
1699   weights_y = _mm_shuffle_epi8(weights, y_select);
1700   scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1701   WriteSmoothDirectionalSum16(dst, top_lo, top_hi, weights_y, weights_y,
1702                               scaled_bottom_left_y, scaled_bottom_left_y,
1703                               scale);
1704   dst += stride;
1705   y_select = _mm_set1_epi32(0x05040504);
1706   weights_y = _mm_shuffle_epi8(weights, y_select);
1707   scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1708   WriteSmoothDirectionalSum16(dst, top_lo, top_hi, weights_y, weights_y,
1709                               scaled_bottom_left_y, scaled_bottom_left_y,
1710                               scale);
1711   dst += stride;
1712   y_select = _mm_set1_epi32(0x07060706);
1713   weights_y = _mm_shuffle_epi8(weights, y_select);
1714   scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1715   WriteSmoothDirectionalSum16(dst, top_lo, top_hi, weights_y, weights_y,
1716                               scaled_bottom_left_y, scaled_bottom_left_y,
1717                               scale);
1718 }
1719 
1720 void SmoothVertical16x8_SSE4_1(void* const dest, const ptrdiff_t stride,
1721                                const void* const top_row,
1722                                const void* const left_column) {
1723   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
1724   auto* dst = static_cast<uint8_t*>(dest);
1725   const __m128i bottom_left = _mm_set1_epi16(left_ptr[7]);
1726   const __m128i weights = _mm_cvtepu8_epi16(LoadLo8(kSmoothWeights + 4));
1727   __m128i scale = _mm_set1_epi16(256);
1728   const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
1729   const __m128i scaled_bottom_left =
1730       _mm_mullo_epi16(inverted_weights, bottom_left);
1731   scale = _mm_set1_epi16(128);
1732 
1733   const __m128i top = LoadUnaligned16(top_row);
1734   const __m128i top_lo = _mm_cvtepu8_epi16(top);
1735   const __m128i top_hi = _mm_cvtepu8_epi16(_mm_srli_si128(top, 8));
1736   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1737     const __m128i y_select = _mm_set1_epi32(y_mask);
1738     const __m128i weights_y = _mm_shuffle_epi8(weights, y_select);
1739     const __m128i scaled_bottom_left_y =
1740         _mm_shuffle_epi8(scaled_bottom_left, y_select);
1741     WriteSmoothDirectionalSum16(dst, top_lo, top_hi, weights_y, weights_y,
1742                                 scaled_bottom_left_y, scaled_bottom_left_y,
1743                                 scale);
1744     dst += stride;
1745   }
1746 }
1747 
1748 void SmoothVertical16x16_SSE4_1(void* const dest, const ptrdiff_t stride,
1749                                 const void* const top_row,
1750                                 const void* const left_column) {
1751   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
1752   auto* dst = static_cast<uint8_t*>(dest);
1753   const __m128i bottom_left = _mm_set1_epi16(left_ptr[15]);
1754   const __m128i zero = _mm_setzero_si128();
1755   __m128i scale = _mm_set1_epi16(256);
1756   const __m128i weights = LoadUnaligned16(kSmoothWeights + 12);
1757   const __m128i weights_lo = _mm_cvtepu8_epi16(weights);
1758   const __m128i weights_hi = _mm_unpackhi_epi8(weights, zero);
1759   const __m128i inverted_weights_lo = _mm_sub_epi16(scale, weights_lo);
1760   const __m128i inverted_weights_hi = _mm_sub_epi16(scale, weights_hi);
1761   const __m128i scaled_bottom_left_lo =
1762       _mm_mullo_epi16(inverted_weights_lo, bottom_left);
1763   const __m128i scaled_bottom_left_hi =
1764       _mm_mullo_epi16(inverted_weights_hi, bottom_left);
1765   scale = _mm_set1_epi16(128);
1766 
1767   const __m128i top = LoadUnaligned16(top_row);
1768   const __m128i top_lo = _mm_cvtepu8_epi16(top);
1769   const __m128i top_hi = _mm_unpackhi_epi8(top, zero);
1770   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1771     const __m128i y_select = _mm_set1_epi32(y_mask);
1772     const __m128i weights_y = _mm_shuffle_epi8(weights_lo, y_select);
1773     const __m128i scaled_bottom_left_y =
1774         _mm_shuffle_epi8(scaled_bottom_left_lo, y_select);
1775     WriteSmoothDirectionalSum16(dst, top_lo, top_hi, weights_y, weights_y,
1776                                 scaled_bottom_left_y, scaled_bottom_left_y,
1777                                 scale);
1778     dst += stride;
1779   }
1780   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1781     const __m128i y_select = _mm_set1_epi32(y_mask);
1782     const __m128i weights_y = _mm_shuffle_epi8(weights_hi, y_select);
1783     const __m128i scaled_bottom_left_y =
1784         _mm_shuffle_epi8(scaled_bottom_left_hi, y_select);
1785     WriteSmoothDirectionalSum16(dst, top_lo, top_hi, weights_y, weights_y,
1786                                 scaled_bottom_left_y, scaled_bottom_left_y,
1787                                 scale);
1788     dst += stride;
1789   }
1790 }
1791 
1792 void SmoothVertical16x32_SSE4_1(void* const dest, const ptrdiff_t stride,
1793                                 const void* const top_row,
1794                                 const void* const left_column) {
1795   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
1796   auto* dst = static_cast<uint8_t*>(dest);
1797   const __m128i bottom_left = _mm_set1_epi16(left_ptr[31]);
1798   const __m128i weights_lo = LoadUnaligned16(kSmoothWeights + 28);
1799   const __m128i weights_hi = LoadUnaligned16(kSmoothWeights + 44);
1800   __m128i scale = _mm_set1_epi16(256);
1801   const __m128i zero = _mm_setzero_si128();
1802   const __m128i weights1 = _mm_cvtepu8_epi16(weights_lo);
1803   const __m128i weights2 = _mm_unpackhi_epi8(weights_lo, zero);
1804   const __m128i weights3 = _mm_cvtepu8_epi16(weights_hi);
1805   const __m128i weights4 = _mm_unpackhi_epi8(weights_hi, zero);
1806   const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
1807   const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
1808   const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
1809   const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
1810   const __m128i scaled_bottom_left1 =
1811       _mm_mullo_epi16(inverted_weights1, bottom_left);
1812   const __m128i scaled_bottom_left2 =
1813       _mm_mullo_epi16(inverted_weights2, bottom_left);
1814   const __m128i scaled_bottom_left3 =
1815       _mm_mullo_epi16(inverted_weights3, bottom_left);
1816   const __m128i scaled_bottom_left4 =
1817       _mm_mullo_epi16(inverted_weights4, bottom_left);
1818   scale = _mm_set1_epi16(128);
1819 
1820   const __m128i top = LoadUnaligned16(top_row);
1821   const __m128i top_lo = _mm_cvtepu8_epi16(top);
1822   const __m128i top_hi = _mm_unpackhi_epi8(top, zero);
1823   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1824     const __m128i y_select = _mm_set1_epi32(y_mask);
1825     const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select);
1826     const __m128i scaled_bottom_left_y =
1827         _mm_shuffle_epi8(scaled_bottom_left1, y_select);
1828     WriteSmoothDirectionalSum16(dst, top_lo, top_hi, weights_y, weights_y,
1829                                 scaled_bottom_left_y, scaled_bottom_left_y,
1830                                 scale);
1831     dst += stride;
1832   }
1833   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1834     const __m128i y_select = _mm_set1_epi32(y_mask);
1835     const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select);
1836     const __m128i scaled_bottom_left_y =
1837         _mm_shuffle_epi8(scaled_bottom_left2, y_select);
1838     WriteSmoothDirectionalSum16(dst, top_lo, top_hi, weights_y, weights_y,
1839                                 scaled_bottom_left_y, scaled_bottom_left_y,
1840                                 scale);
1841     dst += stride;
1842   }
1843   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1844     const __m128i y_select = _mm_set1_epi32(y_mask);
1845     const __m128i weights_y = _mm_shuffle_epi8(weights3, y_select);
1846     const __m128i scaled_bottom_left_y =
1847         _mm_shuffle_epi8(scaled_bottom_left3, y_select);
1848     WriteSmoothDirectionalSum16(dst, top_lo, top_hi, weights_y, weights_y,
1849                                 scaled_bottom_left_y, scaled_bottom_left_y,
1850                                 scale);
1851     dst += stride;
1852   }
1853   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1854     const __m128i y_select = _mm_set1_epi32(y_mask);
1855     const __m128i weights_y = _mm_shuffle_epi8(weights4, y_select);
1856     const __m128i scaled_bottom_left_y =
1857         _mm_shuffle_epi8(scaled_bottom_left4, y_select);
1858     WriteSmoothDirectionalSum16(dst, top_lo, top_hi, weights_y, weights_y,
1859                                 scaled_bottom_left_y, scaled_bottom_left_y,
1860                                 scale);
1861     dst += stride;
1862   }
1863 }
1864 
1865 void SmoothVertical16x64_SSE4_1(void* const dest, const ptrdiff_t stride,
1866                                 const void* const top_row,
1867                                 const void* const left_column) {
1868   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
1869   auto* dst = static_cast<uint8_t*>(dest);
1870   const __m128i bottom_left = _mm_set1_epi16(left_ptr[63]);
1871   const __m128i scale = _mm_set1_epi16(256);
1872   const __m128i round = _mm_set1_epi16(128);
1873   const __m128i zero = _mm_setzero_si128();
1874 
1875   const __m128i top = LoadUnaligned16(top_row);
1876   const __m128i top_lo = _mm_cvtepu8_epi16(top);
1877   const __m128i top_hi = _mm_unpackhi_epi8(top, zero);
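  // Offset 60 (= 4 + 8 + 16 + 32) is the start of the 64-entry weight row.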
1878   const uint8_t* weights_base_ptr = kSmoothWeights + 60;
1879   for (int left_offset = 0; left_offset < 64; left_offset += 16) {
1880     const __m128i weights = LoadUnaligned16(weights_base_ptr + left_offset);
1881     const __m128i weights_lo = _mm_cvtepu8_epi16(weights);
1882     const __m128i weights_hi = _mm_unpackhi_epi8(weights, zero);
1883     const __m128i inverted_weights_lo = _mm_sub_epi16(scale, weights_lo);
1884     const __m128i inverted_weights_hi = _mm_sub_epi16(scale, weights_hi);
1885     const __m128i scaled_bottom_left_lo =
1886         _mm_mullo_epi16(inverted_weights_lo, bottom_left);
1887     const __m128i scaled_bottom_left_hi =
1888         _mm_mullo_epi16(inverted_weights_hi, bottom_left);
1889 
1890     for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1891       const __m128i y_select = _mm_set1_epi32(y_mask);
1892       const __m128i weights_y = _mm_shuffle_epi8(weights_lo, y_select);
1893       const __m128i scaled_bottom_left_y =
1894           _mm_shuffle_epi8(scaled_bottom_left_lo, y_select);
1895       WriteSmoothDirectionalSum16(dst, top_lo, top_hi, weights_y, weights_y,
1896                                   scaled_bottom_left_y, scaled_bottom_left_y,
1897                                   round);
1898       dst += stride;
1899     }
1900     for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1901       const __m128i y_select = _mm_set1_epi32(y_mask);
1902       const __m128i weights_y = _mm_shuffle_epi8(weights_hi, y_select);
1903       const __m128i scaled_bottom_left_y =
1904           _mm_shuffle_epi8(scaled_bottom_left_hi, y_select);
1905       WriteSmoothDirectionalSum16(dst, top_lo, top_hi, weights_y, weights_y,
1906                                   scaled_bottom_left_y, scaled_bottom_left_y,
1907                                   round);
1908       dst += stride;
1909     }
1910   }
1911 }
1912 
1913 void SmoothVertical32x8_SSE4_1(void* const dest, const ptrdiff_t stride,
1914                                const void* const top_row,
1915                                const void* const left_column) {
1916   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
1917   const auto* const top_ptr = static_cast<const uint8_t*>(top_row);
1918   auto* dst = static_cast<uint8_t*>(dest);
1919   const __m128i zero = _mm_setzero_si128();
1920   const __m128i bottom_left = _mm_set1_epi16(left_ptr[7]);
1921   const __m128i top_lo = LoadUnaligned16(top_ptr);
1922   const __m128i top_hi = LoadUnaligned16(top_ptr + 16);
1923   const __m128i top1 = _mm_cvtepu8_epi16(top_lo);
1924   const __m128i top2 = _mm_unpackhi_epi8(top_lo, zero);
1925   const __m128i top3 = _mm_cvtepu8_epi16(top_hi);
1926   const __m128i top4 = _mm_unpackhi_epi8(top_hi, zero);
1927   __m128i scale = _mm_set1_epi16(256);
1928   const __m128i weights = _mm_cvtepu8_epi16(LoadLo8(kSmoothWeights + 4));
1929   const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
1930   const __m128i scaled_bottom_left =
1931       _mm_mullo_epi16(inverted_weights, bottom_left);
1932   scale = _mm_set1_epi16(128);
1933   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1934     __m128i y_select = _mm_set1_epi32(y_mask);
1935     const __m128i weights_y = _mm_shuffle_epi8(weights, y_select);
1936     const __m128i scaled_bottom_left_y =
1937         _mm_shuffle_epi8(scaled_bottom_left, y_select);
1938     WriteSmoothDirectionalSum16(dst, top1, top2, weights_y, weights_y,
1939                                 scaled_bottom_left_y, scaled_bottom_left_y,
1940                                 scale);
1941     WriteSmoothDirectionalSum16(dst + 16, top3, top4, weights_y, weights_y,
1942                                 scaled_bottom_left_y, scaled_bottom_left_y,
1943                                 scale);
1944     dst += stride;
1945   }
1946 }
1947 
1948 void SmoothVertical32x16_SSE4_1(void* const dest, const ptrdiff_t stride,
1949                                 const void* const top_row,
1950                                 const void* const left_column) {
1951   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
1952   const auto* const top_ptr = static_cast<const uint8_t*>(top_row);
1953   auto* dst = static_cast<uint8_t*>(dest);
1954   const __m128i zero = _mm_setzero_si128();
1955   const __m128i bottom_left = _mm_set1_epi16(left_ptr[15]);
1956   const __m128i top_lo = LoadUnaligned16(top_ptr);
1957   const __m128i top_hi = LoadUnaligned16(top_ptr + 16);
1958   const __m128i top1 = _mm_cvtepu8_epi16(top_lo);
1959   const __m128i top2 = _mm_unpackhi_epi8(top_lo, zero);
1960   const __m128i top3 = _mm_cvtepu8_epi16(top_hi);
1961   const __m128i top4 = _mm_unpackhi_epi8(top_hi, zero);
1962   const __m128i weights = LoadUnaligned16(kSmoothWeights + 12);
1963   const __m128i weights1 = _mm_cvtepu8_epi16(weights);
1964   const __m128i weights2 = _mm_unpackhi_epi8(weights, zero);
1965   __m128i scale = _mm_set1_epi16(256);
1966   const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
1967   const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
1968   const __m128i scaled_bottom_left1 =
1969       _mm_mullo_epi16(inverted_weights1, bottom_left);
1970   const __m128i scaled_bottom_left2 =
1971       _mm_mullo_epi16(inverted_weights2, bottom_left);
1972   scale = _mm_set1_epi16(128);
1973   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1974     __m128i y_select = _mm_set1_epi32(y_mask);
1975     const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select);
1976     const __m128i scaled_bottom_left_y =
1977         _mm_shuffle_epi8(scaled_bottom_left1, y_select);
1978     WriteSmoothDirectionalSum16(dst, top1, top2, weights_y, weights_y,
1979                                 scaled_bottom_left_y, scaled_bottom_left_y,
1980                                 scale);
1981     WriteSmoothDirectionalSum16(dst + 16, top3, top4, weights_y, weights_y,
1982                                 scaled_bottom_left_y, scaled_bottom_left_y,
1983                                 scale);
1984     dst += stride;
1985   }
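  // Rows 8..15 use the upper eight of the sixteen weights.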
1986   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1987     __m128i y_select = _mm_set1_epi32(y_mask);
1988     const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select);
1989     const __m128i scaled_bottom_left_y =
1990         _mm_shuffle_epi8(scaled_bottom_left2, y_select);
1991     WriteSmoothDirectionalSum16(dst, top1, top2, weights_y, weights_y,
1992                                 scaled_bottom_left_y, scaled_bottom_left_y,
1993                                 scale);
1994     WriteSmoothDirectionalSum16(dst + 16, top3, top4, weights_y, weights_y,
1995                                 scaled_bottom_left_y, scaled_bottom_left_y,
1996                                 scale);
1997     dst += stride;
1998   }
1999 }
2000 
2001 void SmoothVertical32x32_SSE4_1(void* const dest, const ptrdiff_t stride,
2002                                 const void* const top_row,
2003                                 const void* const left_column) {
2004   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
2005   auto* dst = static_cast<uint8_t*>(dest);
2006   const auto* const top_ptr = static_cast<const uint8_t*>(top_row);
2007   const __m128i bottom_left = _mm_set1_epi16(left_ptr[31]);
2008   const __m128i weights_lo = LoadUnaligned16(kSmoothWeights + 28);
2009   const __m128i weights_hi = LoadUnaligned16(kSmoothWeights + 44);
2010   const __m128i zero = _mm_setzero_si128();
2011   __m128i scale = _mm_set1_epi16(256);
2012   const __m128i top_lo = LoadUnaligned16(top_ptr);
2013   const __m128i top_hi = LoadUnaligned16(top_ptr + 16);
2014   const __m128i top1 = _mm_cvtepu8_epi16(top_lo);
2015   const __m128i top2 = _mm_unpackhi_epi8(top_lo, zero);
2016   const __m128i top3 = _mm_cvtepu8_epi16(top_hi);
2017   const __m128i top4 = _mm_unpackhi_epi8(top_hi, zero);
2018   const __m128i weights1 = _mm_cvtepu8_epi16(weights_lo);
2019   const __m128i weights2 = _mm_unpackhi_epi8(weights_lo, zero);
2020   const __m128i weights3 = _mm_cvtepu8_epi16(weights_hi);
2021   const __m128i weights4 = _mm_unpackhi_epi8(weights_hi, zero);
2022   const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2023   const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2024   const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
2025   const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
2026   const __m128i scaled_bottom_left1 =
2027       _mm_mullo_epi16(inverted_weights1, bottom_left);
2028   const __m128i scaled_bottom_left2 =
2029       _mm_mullo_epi16(inverted_weights2, bottom_left);
2030   const __m128i scaled_bottom_left3 =
2031       _mm_mullo_epi16(inverted_weights3, bottom_left);
2032   const __m128i scaled_bottom_left4 =
2033       _mm_mullo_epi16(inverted_weights4, bottom_left);
2034   scale = _mm_set1_epi16(128);
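  // The four loops below cover row groups 0..7, 8..15, 16..23 and 24..31,
  // each reusing the same top pixels with its own eight weights.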
2035   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2036     const __m128i y_select = _mm_set1_epi32(y_mask);
2037     const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select);
2038     const __m128i scaled_bottom_left_y =
2039         _mm_shuffle_epi8(scaled_bottom_left1, y_select);
2040     WriteSmoothDirectionalSum16(dst, top1, top2, weights_y, weights_y,
2041                                 scaled_bottom_left_y, scaled_bottom_left_y,
2042                                 scale);
2043     WriteSmoothDirectionalSum16(dst + 16, top3, top4, weights_y, weights_y,
2044                                 scaled_bottom_left_y, scaled_bottom_left_y,
2045                                 scale);
2046     dst += stride;
2047   }
2048   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2049     const __m128i y_select = _mm_set1_epi32(y_mask);
2050     const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select);
2051     const __m128i scaled_bottom_left_y =
2052         _mm_shuffle_epi8(scaled_bottom_left2, y_select);
2053     WriteSmoothDirectionalSum16(dst, top1, top2, weights_y, weights_y,
2054                                 scaled_bottom_left_y, scaled_bottom_left_y,
2055                                 scale);
2056     WriteSmoothDirectionalSum16(dst + 16, top3, top4, weights_y, weights_y,
2057                                 scaled_bottom_left_y, scaled_bottom_left_y,
2058                                 scale);
2059     dst += stride;
2060   }
2061   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2062     const __m128i y_select = _mm_set1_epi32(y_mask);
2063     const __m128i weights_y = _mm_shuffle_epi8(weights3, y_select);
2064     const __m128i scaled_bottom_left_y =
2065         _mm_shuffle_epi8(scaled_bottom_left3, y_select);
2066     WriteSmoothDirectionalSum16(dst, top1, top2, weights_y, weights_y,
2067                                 scaled_bottom_left_y, scaled_bottom_left_y,
2068                                 scale);
2069     WriteSmoothDirectionalSum16(dst + 16, top3, top4, weights_y, weights_y,
2070                                 scaled_bottom_left_y, scaled_bottom_left_y,
2071                                 scale);
2072     dst += stride;
2073   }
2074   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2075     const __m128i y_select = _mm_set1_epi32(y_mask);
2076     const __m128i weights_y = _mm_shuffle_epi8(weights4, y_select);
2077     const __m128i scaled_bottom_left_y =
2078         _mm_shuffle_epi8(scaled_bottom_left4, y_select);
2079     WriteSmoothDirectionalSum16(dst, top1, top2, weights_y, weights_y,
2080                                 scaled_bottom_left_y, scaled_bottom_left_y,
2081                                 scale);
2082     WriteSmoothDirectionalSum16(dst + 16, top3, top4, weights_y, weights_y,
2083                                 scaled_bottom_left_y, scaled_bottom_left_y,
2084                                 scale);
2085     dst += stride;
2086   }
2087 }
2088 
2089 void SmoothVertical32x64_SSE4_1(void* const dest, const ptrdiff_t stride,
2090                                 const void* const top_row,
2091                                 const void* const left_column) {
2092   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
2093   auto* dst = static_cast<uint8_t*>(dest);
2094   const auto* const top_ptr = static_cast<const uint8_t*>(top_row);
2095   const __m128i zero = _mm_setzero_si128();
2096   const __m128i bottom_left = _mm_set1_epi16(left_ptr[63]);
2097   const __m128i top_lo = LoadUnaligned16(top_ptr);
2098   const __m128i top_hi = LoadUnaligned16(top_ptr + 16);
2099   const __m128i top1 = _mm_cvtepu8_epi16(top_lo);
2100   const __m128i top2 = _mm_unpackhi_epi8(top_lo, zero);
2101   const __m128i top3 = _mm_cvtepu8_epi16(top_hi);
2102   const __m128i top4 = _mm_unpackhi_epi8(top_hi, zero);
2103   const __m128i scale = _mm_set1_epi16(256);
2104   const __m128i round = _mm_set1_epi16(128);
2105   const uint8_t* weights_base_ptr = kSmoothWeights + 60;
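  // Each outer iteration consumes 16 of the 64 weights and generates 16 rows.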
2106   for (int left_offset = 0; left_offset < 64; left_offset += 16) {
2107     const __m128i weights = LoadUnaligned16(weights_base_ptr + left_offset);
2108     const __m128i weights_lo = _mm_cvtepu8_epi16(weights);
2109     const __m128i weights_hi = _mm_unpackhi_epi8(weights, zero);
2110     const __m128i inverted_weights_lo = _mm_sub_epi16(scale, weights_lo);
2111     const __m128i inverted_weights_hi = _mm_sub_epi16(scale, weights_hi);
2112     const __m128i scaled_bottom_left_lo =
2113         _mm_mullo_epi16(inverted_weights_lo, bottom_left);
2114     const __m128i scaled_bottom_left_hi =
2115         _mm_mullo_epi16(inverted_weights_hi, bottom_left);
2116 
2117     for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2118       const __m128i y_select = _mm_set1_epi32(y_mask);
2119       const __m128i weights_y = _mm_shuffle_epi8(weights_lo, y_select);
2120       const __m128i scaled_bottom_left_y =
2121           _mm_shuffle_epi8(scaled_bottom_left_lo, y_select);
2122       WriteSmoothDirectionalSum16(dst, top1, top2, weights_y, weights_y,
2123                                   scaled_bottom_left_y, scaled_bottom_left_y,
2124                                   round);
2125       WriteSmoothDirectionalSum16(dst + 16, top3, top4, weights_y, weights_y,
2126                                   scaled_bottom_left_y, scaled_bottom_left_y,
2127                                   round);
2128       dst += stride;
2129     }
2130     for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2131       const __m128i y_select = _mm_set1_epi32(y_mask);
2132       const __m128i weights_y = _mm_shuffle_epi8(weights_hi, y_select);
2133       const __m128i scaled_bottom_left_y =
2134           _mm_shuffle_epi8(scaled_bottom_left_hi, y_select);
2135       WriteSmoothDirectionalSum16(dst, top1, top2, weights_y, weights_y,
2136                                   scaled_bottom_left_y, scaled_bottom_left_y,
2137                                   round);
2138       WriteSmoothDirectionalSum16(dst + 16, top3, top4, weights_y, weights_y,
2139                                   scaled_bottom_left_y, scaled_bottom_left_y,
2140                                   round);
2141       dst += stride;
2142     }
2143   }
2144 }
2145 
2146 void SmoothVertical64x16_SSE4_1(void* const dest, const ptrdiff_t stride,
2147                                 const void* const top_row,
2148                                 const void* const left_column) {
2149   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
2150   auto* dst = static_cast<uint8_t*>(dest);
2151   const auto* const top_ptr = static_cast<const uint8_t*>(top_row);
2152   const __m128i bottom_left = _mm_set1_epi16(left_ptr[15]);
2153   __m128i scale = _mm_set1_epi16(256);
2154   const __m128i zero = _mm_setzero_si128();
2155   const __m128i top_lolo = LoadUnaligned16(top_ptr);
2156   const __m128i top_lohi = LoadUnaligned16(top_ptr + 16);
2157   const __m128i top1 = _mm_cvtepu8_epi16(top_lolo);
2158   const __m128i top2 = _mm_unpackhi_epi8(top_lolo, zero);
2159   const __m128i top3 = _mm_cvtepu8_epi16(top_lohi);
2160   const __m128i top4 = _mm_unpackhi_epi8(top_lohi, zero);
2161 
2162   const __m128i weights = LoadUnaligned16(kSmoothWeights + 12);
2163   const __m128i weights1 = _mm_cvtepu8_epi16(weights);
2164   const __m128i weights2 = _mm_unpackhi_epi8(weights, zero);
2165   const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2166   const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2167   const __m128i top_hilo = LoadUnaligned16(top_ptr + 32);
2168   const __m128i top_hihi = LoadUnaligned16(top_ptr + 48);
2169   const __m128i top5 = _mm_cvtepu8_epi16(top_hilo);
2170   const __m128i top6 = _mm_unpackhi_epi8(top_hilo, zero);
2171   const __m128i top7 = _mm_cvtepu8_epi16(top_hihi);
2172   const __m128i top8 = _mm_unpackhi_epi8(top_hihi, zero);
2173   const __m128i scaled_bottom_left1 =
2174       _mm_mullo_epi16(inverted_weights1, bottom_left);
2175   const __m128i scaled_bottom_left2 =
2176       _mm_mullo_epi16(inverted_weights2, bottom_left);
2177   scale = _mm_set1_epi16(128);
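  // Each row spans 64 pixels, written as four 16-pixel sums.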
2178   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2179     const __m128i y_select = _mm_set1_epi32(y_mask);
2180     const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select);
2181     const __m128i scaled_bottom_left_y =
2182         _mm_shuffle_epi8(scaled_bottom_left1, y_select);
2183     WriteSmoothDirectionalSum16(dst, top1, top2, weights_y, weights_y,
2184                                 scaled_bottom_left_y, scaled_bottom_left_y,
2185                                 scale);
2186     WriteSmoothDirectionalSum16(dst + 16, top3, top4, weights_y, weights_y,
2187                                 scaled_bottom_left_y, scaled_bottom_left_y,
2188                                 scale);
2189     WriteSmoothDirectionalSum16(dst + 32, top5, top6, weights_y, weights_y,
2190                                 scaled_bottom_left_y, scaled_bottom_left_y,
2191                                 scale);
2192     WriteSmoothDirectionalSum16(dst + 48, top7, top8, weights_y, weights_y,
2193                                 scaled_bottom_left_y, scaled_bottom_left_y,
2194                                 scale);
2195     dst += stride;
2196   }
2197   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2198     const __m128i y_select = _mm_set1_epi32(y_mask);
2199     const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select);
2200     const __m128i scaled_bottom_left_y =
2201         _mm_shuffle_epi8(scaled_bottom_left2, y_select);
2202     WriteSmoothDirectionalSum16(dst, top1, top2, weights_y, weights_y,
2203                                 scaled_bottom_left_y, scaled_bottom_left_y,
2204                                 scale);
2205     WriteSmoothDirectionalSum16(dst + 16, top3, top4, weights_y, weights_y,
2206                                 scaled_bottom_left_y, scaled_bottom_left_y,
2207                                 scale);
2208     WriteSmoothDirectionalSum16(dst + 32, top5, top6, weights_y, weights_y,
2209                                 scaled_bottom_left_y, scaled_bottom_left_y,
2210                                 scale);
2211     WriteSmoothDirectionalSum16(dst + 48, top7, top8, weights_y, weights_y,
2212                                 scaled_bottom_left_y, scaled_bottom_left_y,
2213                                 scale);
2214     dst += stride;
2215   }
2216 }
2217 
2218 void SmoothVertical64x32_SSE4_1(void* const dest, const ptrdiff_t stride,
2219                                 const void* const top_row,
2220                                 const void* const left_column) {
2221   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
2222   auto* dst = static_cast<uint8_t*>(dest);
2223   const auto* const top_ptr = static_cast<const uint8_t*>(top_row);
2224   const __m128i zero = _mm_setzero_si128();
2225   const __m128i bottom_left = _mm_set1_epi16(left_ptr[31]);
2226   const __m128i top_lolo = LoadUnaligned16(top_ptr);
2227   const __m128i top_lohi = LoadUnaligned16(top_ptr + 16);
2228   const __m128i top1 = _mm_cvtepu8_epi16(top_lolo);
2229   const __m128i top2 = _mm_unpackhi_epi8(top_lolo, zero);
2230   const __m128i top3 = _mm_cvtepu8_epi16(top_lohi);
2231   const __m128i top4 = _mm_unpackhi_epi8(top_lohi, zero);
2232   const __m128i top_hilo = LoadUnaligned16(top_ptr + 32);
2233   const __m128i top_hihi = LoadUnaligned16(top_ptr + 48);
2234   const __m128i top5 = _mm_cvtepu8_epi16(top_hilo);
2235   const __m128i top6 = _mm_unpackhi_epi8(top_hilo, zero);
2236   const __m128i top7 = _mm_cvtepu8_epi16(top_hihi);
2237   const __m128i top8 = _mm_unpackhi_epi8(top_hihi, zero);
2238   const __m128i weights_lo = LoadUnaligned16(kSmoothWeights + 28);
2239   const __m128i weights_hi = LoadUnaligned16(kSmoothWeights + 44);
2240   const __m128i weights1 = _mm_cvtepu8_epi16(weights_lo);
2241   const __m128i weights2 = _mm_unpackhi_epi8(weights_lo, zero);
2242   const __m128i weights3 = _mm_cvtepu8_epi16(weights_hi);
2243   const __m128i weights4 = _mm_unpackhi_epi8(weights_hi, zero);
2244   __m128i scale = _mm_set1_epi16(256);
2245   const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2246   const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2247   const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
2248   const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
2249   const __m128i scaled_bottom_left1 =
2250       _mm_mullo_epi16(inverted_weights1, bottom_left);
2251   const __m128i scaled_bottom_left2 =
2252       _mm_mullo_epi16(inverted_weights2, bottom_left);
2253   const __m128i scaled_bottom_left3 =
2254       _mm_mullo_epi16(inverted_weights3, bottom_left);
2255   const __m128i scaled_bottom_left4 =
2256       _mm_mullo_epi16(inverted_weights4, bottom_left);
2257   scale = _mm_set1_epi16(128);
2258 
2259   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2260     const __m128i y_select = _mm_set1_epi32(y_mask);
2261     const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select);
2262     const __m128i scaled_bottom_left_y =
2263         _mm_shuffle_epi8(scaled_bottom_left1, y_select);
2264     WriteSmoothDirectionalSum16(dst, top1, top2, weights_y, weights_y,
2265                                 scaled_bottom_left_y, scaled_bottom_left_y,
2266                                 scale);
2267     WriteSmoothDirectionalSum16(dst + 16, top3, top4, weights_y, weights_y,
2268                                 scaled_bottom_left_y, scaled_bottom_left_y,
2269                                 scale);
2270     WriteSmoothDirectionalSum16(dst + 32, top5, top6, weights_y, weights_y,
2271                                 scaled_bottom_left_y, scaled_bottom_left_y,
2272                                 scale);
2273     WriteSmoothDirectionalSum16(dst + 48, top7, top8, weights_y, weights_y,
2274                                 scaled_bottom_left_y, scaled_bottom_left_y,
2275                                 scale);
2276     dst += stride;
2277   }
2278   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2279     const __m128i y_select = _mm_set1_epi32(y_mask);
2280     const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select);
2281     const __m128i scaled_bottom_left_y =
2282         _mm_shuffle_epi8(scaled_bottom_left2, y_select);
2283     WriteSmoothDirectionalSum16(dst, top1, top2, weights_y, weights_y,
2284                                 scaled_bottom_left_y, scaled_bottom_left_y,
2285                                 scale);
2286     WriteSmoothDirectionalSum16(dst + 16, top3, top4, weights_y, weights_y,
2287                                 scaled_bottom_left_y, scaled_bottom_left_y,
2288                                 scale);
2289     WriteSmoothDirectionalSum16(dst + 32, top5, top6, weights_y, weights_y,
2290                                 scaled_bottom_left_y, scaled_bottom_left_y,
2291                                 scale);
2292     WriteSmoothDirectionalSum16(dst + 48, top7, top8, weights_y, weights_y,
2293                                 scaled_bottom_left_y, scaled_bottom_left_y,
2294                                 scale);
2295     dst += stride;
2296   }
2297   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2298     const __m128i y_select = _mm_set1_epi32(y_mask);
2299     const __m128i weights_y = _mm_shuffle_epi8(weights3, y_select);
2300     const __m128i scaled_bottom_left_y =
2301         _mm_shuffle_epi8(scaled_bottom_left3, y_select);
2302     WriteSmoothDirectionalSum16(dst, top1, top2, weights_y, weights_y,
2303                                 scaled_bottom_left_y, scaled_bottom_left_y,
2304                                 scale);
2305     WriteSmoothDirectionalSum16(dst + 16, top3, top4, weights_y, weights_y,
2306                                 scaled_bottom_left_y, scaled_bottom_left_y,
2307                                 scale);
2308     WriteSmoothDirectionalSum16(dst + 32, top5, top6, weights_y, weights_y,
2309                                 scaled_bottom_left_y, scaled_bottom_left_y,
2310                                 scale);
2311     WriteSmoothDirectionalSum16(dst + 48, top7, top8, weights_y, weights_y,
2312                                 scaled_bottom_left_y, scaled_bottom_left_y,
2313                                 scale);
2314     dst += stride;
2315   }
2316   for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2317     const __m128i y_select = _mm_set1_epi32(y_mask);
2318     const __m128i weights_y = _mm_shuffle_epi8(weights4, y_select);
2319     const __m128i scaled_bottom_left_y =
2320         _mm_shuffle_epi8(scaled_bottom_left4, y_select);
2321     WriteSmoothDirectionalSum16(dst, top1, top2, weights_y, weights_y,
2322                                 scaled_bottom_left_y, scaled_bottom_left_y,
2323                                 scale);
2324     WriteSmoothDirectionalSum16(dst + 16, top3, top4, weights_y, weights_y,
2325                                 scaled_bottom_left_y, scaled_bottom_left_y,
2326                                 scale);
2327     WriteSmoothDirectionalSum16(dst + 32, top5, top6, weights_y, weights_y,
2328                                 scaled_bottom_left_y, scaled_bottom_left_y,
2329                                 scale);
2330     WriteSmoothDirectionalSum16(dst + 48, top7, top8, weights_y, weights_y,
2331                                 scaled_bottom_left_y, scaled_bottom_left_y,
2332                                 scale);
2333     dst += stride;
2334   }
2335 }
2336 
2337 void SmoothVertical64x64_SSE4_1(void* const dest, const ptrdiff_t stride,
2338                                 const void* const top_row,
2339                                 const void* const left_column) {
2340   const auto* const left_ptr = static_cast<const uint8_t*>(left_column);
2341   auto* dst = static_cast<uint8_t*>(dest);
2342   const auto* const top_ptr = static_cast<const uint8_t*>(top_row);
2343   const __m128i zero = _mm_setzero_si128();
2344   const __m128i bottom_left = _mm_set1_epi16(left_ptr[63]);
2345   const __m128i top_lolo = LoadUnaligned16(top_ptr);
2346   const __m128i top_lohi = LoadUnaligned16(top_ptr + 16);
2347   const __m128i top1 = _mm_cvtepu8_epi16(top_lolo);
2348   const __m128i top2 = _mm_unpackhi_epi8(top_lolo, zero);
2349   const __m128i top3 = _mm_cvtepu8_epi16(top_lohi);
2350   const __m128i top4 = _mm_unpackhi_epi8(top_lohi, zero);
2351   const __m128i top_hilo = LoadUnaligned16(top_ptr + 32);
2352   const __m128i top_hihi = LoadUnaligned16(top_ptr + 48);
2353   const __m128i top5 = _mm_cvtepu8_epi16(top_hilo);
2354   const __m128i top6 = _mm_unpackhi_epi8(top_hilo, zero);
2355   const __m128i top7 = _mm_cvtepu8_epi16(top_hihi);
2356   const __m128i top8 = _mm_unpackhi_epi8(top_hihi, zero);
2357   const __m128i scale = _mm_set1_epi16(256);
2358   const __m128i round = _mm_set1_epi16(128);
2359   const uint8_t* weights_base_ptr = kSmoothWeights + 60;
2360   for (int left_offset = 0; left_offset < 64; left_offset += 16) {
2361     const __m128i weights = LoadUnaligned16(weights_base_ptr + left_offset);
2362     const __m128i weights_lo = _mm_cvtepu8_epi16(weights);
2363     const __m128i weights_hi = _mm_unpackhi_epi8(weights, zero);
2364     const __m128i inverted_weights_lo = _mm_sub_epi16(scale, weights_lo);
2365     const __m128i inverted_weights_hi = _mm_sub_epi16(scale, weights_hi);
2366     const __m128i scaled_bottom_left_lo =
2367         _mm_mullo_epi16(inverted_weights_lo, bottom_left);
2368     const __m128i scaled_bottom_left_hi =
2369         _mm_mullo_epi16(inverted_weights_hi, bottom_left);
2370     for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2371       const __m128i y_select = _mm_set1_epi32(y_mask);
2372       const __m128i weights_y = _mm_shuffle_epi8(weights_lo, y_select);
2373       const __m128i scaled_bottom_left_y =
2374           _mm_shuffle_epi8(scaled_bottom_left_lo, y_select);
2375       WriteSmoothDirectionalSum16(dst, top1, top2, weights_y, weights_y,
2376                                   scaled_bottom_left_y, scaled_bottom_left_y,
2377                                   round);
2378       WriteSmoothDirectionalSum16(dst + 16, top3, top4, weights_y, weights_y,
2379                                   scaled_bottom_left_y, scaled_bottom_left_y,
2380                                   round);
2381       WriteSmoothDirectionalSum16(dst + 32, top5, top6, weights_y, weights_y,
2382                                   scaled_bottom_left_y, scaled_bottom_left_y,
2383                                   round);
2384       WriteSmoothDirectionalSum16(dst + 48, top7, top8, weights_y, weights_y,
2385                                   scaled_bottom_left_y, scaled_bottom_left_y,
2386                                   round);
2387       dst += stride;
2388     }
2389     for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2390       const __m128i y_select = _mm_set1_epi32(y_mask);
2391       const __m128i weights_y = _mm_shuffle_epi8(weights_hi, y_select);
2392       const __m128i scaled_bottom_left_y =
2393           _mm_shuffle_epi8(scaled_bottom_left_hi, y_select);
2394       WriteSmoothDirectionalSum16(dst, top1, top2, weights_y, weights_y,
2395                                   scaled_bottom_left_y, scaled_bottom_left_y,
2396                                   round);
2397       WriteSmoothDirectionalSum16(dst + 16, top3, top4, weights_y, weights_y,
2398                                   scaled_bottom_left_y, scaled_bottom_left_y,
2399                                   round);
2400       WriteSmoothDirectionalSum16(dst + 32, top5, top6, weights_y, weights_y,
2401                                   scaled_bottom_left_y, scaled_bottom_left_y,
2402                                   round);
2403       WriteSmoothDirectionalSum16(dst + 48, top7, top8, weights_y, weights_y,
2404                                   scaled_bottom_left_y, scaled_bottom_left_y,
2405                                   round);
2406       dst += stride;
2407     }
2408   }
2409 }
2410 
2411 void Init8bpp() {
2412   Dsp* const dsp = dsp_internal::GetWritableDspTable(kBitdepth8);
2413   assert(dsp != nullptr);
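  // Register the SSE4.1 smooth, smooth-vertical and smooth-horizontal
  // predictors for every transform size enabled by the build configuration.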
2414 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize4x4_IntraPredictorSmooth)
2415   dsp->intra_predictors[kTransformSize4x4][kIntraPredictorSmooth] =
2416       Smooth4x4_SSE4_1;
2417 #endif
2418 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize4x8_IntraPredictorSmooth)
2419   dsp->intra_predictors[kTransformSize4x8][kIntraPredictorSmooth] =
2420       Smooth4x8_SSE4_1;
2421 #endif
2422 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize4x16_IntraPredictorSmooth)
2423   dsp->intra_predictors[kTransformSize4x16][kIntraPredictorSmooth] =
2424       Smooth4x16_SSE4_1;
2425 #endif
2426 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize8x4_IntraPredictorSmooth)
2427   dsp->intra_predictors[kTransformSize8x4][kIntraPredictorSmooth] =
2428       Smooth8x4_SSE4_1;
2429 #endif
2430 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize8x8_IntraPredictorSmooth)
2431   dsp->intra_predictors[kTransformSize8x8][kIntraPredictorSmooth] =
2432       Smooth8x8_SSE4_1;
2433 #endif
2434 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize8x16_IntraPredictorSmooth)
2435   dsp->intra_predictors[kTransformSize8x16][kIntraPredictorSmooth] =
2436       Smooth8x16_SSE4_1;
2437 #endif
2438 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize8x32_IntraPredictorSmooth)
2439   dsp->intra_predictors[kTransformSize8x32][kIntraPredictorSmooth] =
2440       Smooth8x32_SSE4_1;
2441 #endif
2442 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize16x4_IntraPredictorSmooth)
2443   dsp->intra_predictors[kTransformSize16x4][kIntraPredictorSmooth] =
2444       SmoothWxH<16, 4>;
2445 #endif
2446 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize16x8_IntraPredictorSmooth)
2447   dsp->intra_predictors[kTransformSize16x8][kIntraPredictorSmooth] =
2448       SmoothWxH<16, 8>;
2449 #endif
2450 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize16x16_IntraPredictorSmooth)
2451   dsp->intra_predictors[kTransformSize16x16][kIntraPredictorSmooth] =
2452       SmoothWxH<16, 16>;
2453 #endif
2454 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize16x32_IntraPredictorSmooth)
2455   dsp->intra_predictors[kTransformSize16x32][kIntraPredictorSmooth] =
2456       SmoothWxH<16, 32>;
2457 #endif
2458 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize16x64_IntraPredictorSmooth)
2459   dsp->intra_predictors[kTransformSize16x64][kIntraPredictorSmooth] =
2460       SmoothWxH<16, 64>;
2461 #endif
2462 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize32x8_IntraPredictorSmooth)
2463   dsp->intra_predictors[kTransformSize32x8][kIntraPredictorSmooth] =
2464       SmoothWxH<32, 8>;
2465 #endif
2466 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize32x16_IntraPredictorSmooth)
2467   dsp->intra_predictors[kTransformSize32x16][kIntraPredictorSmooth] =
2468       SmoothWxH<32, 16>;
2469 #endif
2470 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize32x32_IntraPredictorSmooth)
2471   dsp->intra_predictors[kTransformSize32x32][kIntraPredictorSmooth] =
2472       SmoothWxH<32, 32>;
2473 #endif
2474 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize32x64_IntraPredictorSmooth)
2475   dsp->intra_predictors[kTransformSize32x64][kIntraPredictorSmooth] =
2476       SmoothWxH<32, 64>;
2477 #endif
2478 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize64x16_IntraPredictorSmooth)
2479   dsp->intra_predictors[kTransformSize64x16][kIntraPredictorSmooth] =
2480       SmoothWxH<64, 16>;
2481 #endif
2482 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize64x32_IntraPredictorSmooth)
2483   dsp->intra_predictors[kTransformSize64x32][kIntraPredictorSmooth] =
2484       SmoothWxH<64, 32>;
2485 #endif
2486 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize64x64_IntraPredictorSmooth)
2487   dsp->intra_predictors[kTransformSize64x64][kIntraPredictorSmooth] =
2488       SmoothWxH<64, 64>;
2489 #endif
2490 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize4x4_IntraPredictorSmoothVertical)
2491   dsp->intra_predictors[kTransformSize4x4][kIntraPredictorSmoothVertical] =
2492       SmoothVertical4x4_SSE4_1;
2493 #endif
2494 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize4x8_IntraPredictorSmoothVertical)
2495   dsp->intra_predictors[kTransformSize4x8][kIntraPredictorSmoothVertical] =
2496       SmoothVertical4x8_SSE4_1;
2497 #endif
2498 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize4x16_IntraPredictorSmoothVertical)
2499   dsp->intra_predictors[kTransformSize4x16][kIntraPredictorSmoothVertical] =
2500       SmoothVertical4x16_SSE4_1;
2501 #endif
2502 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize8x4_IntraPredictorSmoothVertical)
2503   dsp->intra_predictors[kTransformSize8x4][kIntraPredictorSmoothVertical] =
2504       SmoothVertical8x4_SSE4_1;
2505 #endif
2506 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize8x8_IntraPredictorSmoothVertical)
2507   dsp->intra_predictors[kTransformSize8x8][kIntraPredictorSmoothVertical] =
2508       SmoothVertical8x8_SSE4_1;
2509 #endif
2510 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize8x16_IntraPredictorSmoothVertical)
2511   dsp->intra_predictors[kTransformSize8x16][kIntraPredictorSmoothVertical] =
2512       SmoothVertical8x16_SSE4_1;
2513 #endif
2514 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize8x32_IntraPredictorSmoothVertical)
2515   dsp->intra_predictors[kTransformSize8x32][kIntraPredictorSmoothVertical] =
2516       SmoothVertical8x32_SSE4_1;
2517 #endif
2518 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize16x4_IntraPredictorSmoothVertical)
2519   dsp->intra_predictors[kTransformSize16x4][kIntraPredictorSmoothVertical] =
2520       SmoothVertical16x4_SSE4_1;
2521 #endif
2522 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize16x8_IntraPredictorSmoothVertical)
2523   dsp->intra_predictors[kTransformSize16x8][kIntraPredictorSmoothVertical] =
2524       SmoothVertical16x8_SSE4_1;
2525 #endif
2526 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize16x16_IntraPredictorSmoothVertical)
2527   dsp->intra_predictors[kTransformSize16x16][kIntraPredictorSmoothVertical] =
2528       SmoothVertical16x16_SSE4_1;
2529 #endif
2530 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize16x32_IntraPredictorSmoothVertical)
2531   dsp->intra_predictors[kTransformSize16x32][kIntraPredictorSmoothVertical] =
2532       SmoothVertical16x32_SSE4_1;
2533 #endif
2534 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize16x64_IntraPredictorSmoothVertical)
2535   dsp->intra_predictors[kTransformSize16x64][kIntraPredictorSmoothVertical] =
2536       SmoothVertical16x64_SSE4_1;
2537 #endif
2538 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize32x8_IntraPredictorSmoothVertical)
2539   dsp->intra_predictors[kTransformSize32x8][kIntraPredictorSmoothVertical] =
2540       SmoothVertical32x8_SSE4_1;
2541 #endif
2542 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize32x16_IntraPredictorSmoothVertical)
2543   dsp->intra_predictors[kTransformSize32x16][kIntraPredictorSmoothVertical] =
2544       SmoothVertical32x16_SSE4_1;
2545 #endif
2546 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize32x32_IntraPredictorSmoothVertical)
2547   dsp->intra_predictors[kTransformSize32x32][kIntraPredictorSmoothVertical] =
2548       SmoothVertical32x32_SSE4_1;
2549 #endif
2550 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize32x64_IntraPredictorSmoothVertical)
2551   dsp->intra_predictors[kTransformSize32x64][kIntraPredictorSmoothVertical] =
2552       SmoothVertical32x64_SSE4_1;
2553 #endif
2554 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize64x16_IntraPredictorSmoothVertical)
2555   dsp->intra_predictors[kTransformSize64x16][kIntraPredictorSmoothVertical] =
2556       SmoothVertical64x16_SSE4_1;
2557 #endif
2558 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize64x32_IntraPredictorSmoothVertical)
2559   dsp->intra_predictors[kTransformSize64x32][kIntraPredictorSmoothVertical] =
2560       SmoothVertical64x32_SSE4_1;
2561 #endif
2562 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize64x64_IntraPredictorSmoothVertical)
2563   dsp->intra_predictors[kTransformSize64x64][kIntraPredictorSmoothVertical] =
2564       SmoothVertical64x64_SSE4_1;
2565 #endif
2566 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize4x4_IntraPredictorSmoothHorizontal)
2567   dsp->intra_predictors[kTransformSize4x4][kIntraPredictorSmoothHorizontal] =
2568       SmoothHorizontal4x4_SSE4_1;
2569 #endif
2570 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize4x8_IntraPredictorSmoothHorizontal)
2571   dsp->intra_predictors[kTransformSize4x8][kIntraPredictorSmoothHorizontal] =
2572       SmoothHorizontal4x8_SSE4_1;
2573 #endif
2574 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize4x16_IntraPredictorSmoothHorizontal)
2575   dsp->intra_predictors[kTransformSize4x16][kIntraPredictorSmoothHorizontal] =
2576       SmoothHorizontal4x16_SSE4_1;
2577 #endif
2578 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize8x4_IntraPredictorSmoothHorizontal)
2579   dsp->intra_predictors[kTransformSize8x4][kIntraPredictorSmoothHorizontal] =
2580       SmoothHorizontal8x4_SSE4_1;
2581 #endif
2582 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize8x8_IntraPredictorSmoothHorizontal)
2583   dsp->intra_predictors[kTransformSize8x8][kIntraPredictorSmoothHorizontal] =
2584       SmoothHorizontal8x8_SSE4_1;
2585 #endif
2586 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize8x16_IntraPredictorSmoothHorizontal)
2587   dsp->intra_predictors[kTransformSize8x16][kIntraPredictorSmoothHorizontal] =
2588       SmoothHorizontal8x16_SSE4_1;
2589 #endif
2590 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize8x32_IntraPredictorSmoothHorizontal)
2591   dsp->intra_predictors[kTransformSize8x32][kIntraPredictorSmoothHorizontal] =
2592       SmoothHorizontal8x32_SSE4_1;
2593 #endif
2594 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize16x4_IntraPredictorSmoothHorizontal)
2595   dsp->intra_predictors[kTransformSize16x4][kIntraPredictorSmoothHorizontal] =
2596       SmoothHorizontal16x4_SSE4_1;
2597 #endif
2598 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize16x8_IntraPredictorSmoothHorizontal)
2599   dsp->intra_predictors[kTransformSize16x8][kIntraPredictorSmoothHorizontal] =
2600       SmoothHorizontal16x8_SSE4_1;
2601 #endif
2602 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize16x16_IntraPredictorSmoothHorizontal)
2603   dsp->intra_predictors[kTransformSize16x16][kIntraPredictorSmoothHorizontal] =
2604       SmoothHorizontal16x16_SSE4_1;
2605 #endif
2606 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize16x32_IntraPredictorSmoothHorizontal)
2607   dsp->intra_predictors[kTransformSize16x32][kIntraPredictorSmoothHorizontal] =
2608       SmoothHorizontal16x32_SSE4_1;
2609 #endif
2610 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize16x64_IntraPredictorSmoothHorizontal)
2611   dsp->intra_predictors[kTransformSize16x64][kIntraPredictorSmoothHorizontal] =
2612       SmoothHorizontal16x64_SSE4_1;
2613 #endif
2614 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize32x8_IntraPredictorSmoothHorizontal)
2615   dsp->intra_predictors[kTransformSize32x8][kIntraPredictorSmoothHorizontal] =
2616       SmoothHorizontal32x8_SSE4_1;
2617 #endif
2618 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize32x16_IntraPredictorSmoothHorizontal)
2619   dsp->intra_predictors[kTransformSize32x16][kIntraPredictorSmoothHorizontal] =
2620       SmoothHorizontal32x16_SSE4_1;
2621 #endif
2622 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize32x32_IntraPredictorSmoothHorizontal)
2623   dsp->intra_predictors[kTransformSize32x32][kIntraPredictorSmoothHorizontal] =
2624       SmoothHorizontal32x32_SSE4_1;
2625 #endif
2626 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize32x64_IntraPredictorSmoothHorizontal)
2627   dsp->intra_predictors[kTransformSize32x64][kIntraPredictorSmoothHorizontal] =
2628       SmoothHorizontal32x64_SSE4_1;
2629 #endif
2630 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize64x16_IntraPredictorSmoothHorizontal)
2631   dsp->intra_predictors[kTransformSize64x16][kIntraPredictorSmoothHorizontal] =
2632       SmoothHorizontal64x16_SSE4_1;
2633 #endif
2634 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize64x32_IntraPredictorSmoothHorizontal)
2635   dsp->intra_predictors[kTransformSize64x32][kIntraPredictorSmoothHorizontal] =
2636       SmoothHorizontal64x32_SSE4_1;
2637 #endif
2638 #if DSP_ENABLED_8BPP_SSE4_1(TransformSize64x64_IntraPredictorSmoothHorizontal)
2639   dsp->intra_predictors[kTransformSize64x64][kIntraPredictorSmoothHorizontal] =
2640       SmoothHorizontal64x64_SSE4_1;
2641 #endif
2642 }
2643 
2644 }  // namespace
2645 }  // namespace low_bitdepth
2646 
2647 void IntraPredSmoothInit_SSE4_1() { low_bitdepth::Init8bpp(); }
2648 
2649 }  // namespace dsp
2650 }  // namespace libgav1
2651 
2652 #else  // !LIBGAV1_ENABLE_SSE4_1
2653 
2654 namespace libgav1 {
2655 namespace dsp {
2656 
2657 void IntraPredSmoothInit_SSE4_1() {}
2658 
2659 }  // namespace dsp
2660 }  // namespace libgav1
2661 
2662 #endif  // LIBGAV1_ENABLE_SSE4_1
2663