/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#ifndef AOM_AOM_DSP_SIMD_V256_INTRINSICS_C_H_
#define AOM_AOM_DSP_SIMD_V256_INTRINSICS_C_H_

#include <stdio.h>
#include <stdlib.h>

#include "config/aom_config.h"

#include "aom_dsp/simd/v128_intrinsics_c.h"

typedef union {
  uint8_t u8[32];
  uint16_t u16[16];
  uint32_t u32[8];
  uint64_t u64[4];
  int8_t s8[32];
  int16_t s16[16];
  int32_t s32[8];
  int64_t s64[4];
  c_v64 v64[4];
  c_v128 v128[2];
} c_v256;

SIMD_INLINE uint32_t c_v256_low_u32(c_v256 a) { return a.u32[0]; }

SIMD_INLINE c_v64 c_v256_low_v64(c_v256 a) { return a.v64[0]; }

SIMD_INLINE uint64_t c_v256_low_u64(c_v256 a) { return a.u64[0]; }

SIMD_INLINE c_v128 c_v256_low_v128(c_v256 a) { return a.v128[0]; }

SIMD_INLINE c_v128 c_v256_high_v128(c_v256 a) { return a.v128[1]; }

SIMD_INLINE c_v256 c_v256_from_v128(c_v128 hi, c_v128 lo) {
  c_v256 t;
  t.v128[1] = hi;
  t.v128[0] = lo;
  return t;
}

SIMD_INLINE c_v256 c_v256_from_64(uint64_t a, uint64_t b, uint64_t c,
                                  uint64_t d) {
  c_v256 t;
  t.u64[3] = a;
  t.u64[2] = b;
  t.u64[1] = c;
  t.u64[0] = d;
  return t;
}

SIMD_INLINE c_v256 c_v256_from_v64(c_v64 a, c_v64 b, c_v64 c, c_v64 d) {
  c_v256 t;
  t.u64[3] = a.u64;
  t.u64[2] = b.u64;
  t.u64[1] = c.u64;
  t.u64[0] = d.u64;
  return t;
}
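/* Note: in c_v256_from_64() and c_v256_from_v64() the first argument fills
   the most significant 64-bit lane (u64[3]) and the last argument the least
   significant one (u64[0]), e.g. c_v256_from_64(3, 2, 1, 0) yields
   u64[3] == 3 and u64[0] == 0. */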

SIMD_INLINE c_v256 c_v256_load_unaligned(const void *p) {
  c_v256 t;
  uint8_t *pp = (uint8_t *)p;
  uint8_t *q = (uint8_t *)&t;
  int c;
  for (c = 0; c < 32; c++) q[c] = pp[c];
  return t;
}

SIMD_INLINE c_v256 c_v256_load_aligned(const void *p) {
  if (SIMD_CHECK && (uintptr_t)p & 31) {
    fprintf(stderr, "Error: unaligned v256 load at %p\n", p);
    abort();
  }
  return c_v256_load_unaligned(p);
}

SIMD_INLINE void c_v256_store_unaligned(void *p, c_v256 a) {
  uint8_t *pp = (uint8_t *)p;
  uint8_t *q = (uint8_t *)&a;
  int c;
  for (c = 0; c < 32; c++) pp[c] = q[c];
}

SIMD_INLINE void c_v256_store_aligned(void *p, c_v256 a) {
  if (SIMD_CHECK && (uintptr_t)p & 31) {
    fprintf(stderr, "Error: unaligned v256 store at %p\n", p);
    abort();
  }
  c_v256_store_unaligned(p, a);
}

SIMD_INLINE c_v256 c_v256_zero() {
  c_v256 t;
  t.u64[3] = t.u64[2] = t.u64[1] = t.u64[0] = 0;
  return t;
}

SIMD_INLINE c_v256 c_v256_dup_8(uint8_t x) {
  c_v256 t;
  t.v64[3] = t.v64[2] = t.v64[1] = t.v64[0] = c_v64_dup_8(x);
  return t;
}

SIMD_INLINE c_v256 c_v256_dup_16(uint16_t x) {
  c_v256 t;
  t.v64[3] = t.v64[2] = t.v64[1] = t.v64[0] = c_v64_dup_16(x);
  return t;
}

SIMD_INLINE c_v256 c_v256_dup_32(uint32_t x) {
  c_v256 t;
  t.v64[3] = t.v64[2] = t.v64[1] = t.v64[0] = c_v64_dup_32(x);
  return t;
}

SIMD_INLINE c_v256 c_v256_dup_64(uint64_t x) {
  c_v256 t;
  t.u64[3] = t.u64[2] = t.u64[1] = t.u64[0] = x;
  return t;
}

SIMD_INLINE int64_t c_v256_dotp_su8(c_v256 a, c_v256 b) {
  return c_v128_dotp_su8(a.v128[1], b.v128[1]) +
         c_v128_dotp_su8(a.v128[0], b.v128[0]);
}

SIMD_INLINE int64_t c_v256_dotp_s16(c_v256 a, c_v256 b) {
  return c_v128_dotp_s16(a.v128[1], b.v128[1]) +
         c_v128_dotp_s16(a.v128[0], b.v128[0]);
}

SIMD_INLINE int64_t c_v256_dotp_s32(c_v256 a, c_v256 b) {
  return c_v128_dotp_s32(a.v128[1], b.v128[1]) +
         c_v128_dotp_s32(a.v128[0], b.v128[0]);
}

SIMD_INLINE uint64_t c_v256_hadd_u8(c_v256 a) {
  return c_v128_hadd_u8(a.v128[1]) + c_v128_hadd_u8(a.v128[0]);
}

typedef struct {
  uint32_t val;
  int count;
} c_sad256_internal;

SIMD_INLINE c_sad256_internal c_v256_sad_u8_init(void) {
  c_sad256_internal t;
  t.val = t.count = 0;
  return t;
}

/* Implementation dependent return value.  Result must be finalised with
   v256_sad_u8_sum().
   The result for more than 16 v256_sad_u8() calls is undefined. */
SIMD_INLINE c_sad256_internal c_v256_sad_u8(c_sad256_internal s, c_v256 a,
                                            c_v256 b) {
  int c;
  for (c = 0; c < 32; c++)
    s.val += a.u8[c] > b.u8[c] ? a.u8[c] - b.u8[c] : b.u8[c] - a.u8[c];
  s.count++;
  if (SIMD_CHECK && s.count > 32) {
    fprintf(stderr,
            "Error: sad called 32 times returning an undefined result\n");
    abort();
  }
  return s;
}

SIMD_INLINE uint32_t c_v256_sad_u8_sum(c_sad256_internal s) { return s.val; }
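/* Illustrative usage of the accumulator pattern above (not part of the
   original header); the ssd_u8, sad_u16 and ssd_s16 accumulators further
   below follow the same init/accumulate/sum sequence:

     c_sad256_internal acc = c_v256_sad_u8_init();
     acc = c_v256_sad_u8(acc, a, b);    (repeat for further blocks)
     uint32_t sad = c_v256_sad_u8_sum(acc);
*/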

typedef uint32_t c_ssd256_internal;

SIMD_INLINE c_ssd256_internal c_v256_ssd_u8_init() { return 0; }

/* Implementation dependent return value.  Result must be finalised with
 * v256_ssd_u8_sum(). */
SIMD_INLINE c_ssd256_internal c_v256_ssd_u8(c_ssd256_internal s, c_v256 a,
                                            c_v256 b) {
  int c;
  for (c = 0; c < 32; c++) s += (a.u8[c] - b.u8[c]) * (a.u8[c] - b.u8[c]);
  return s;
}

SIMD_INLINE uint32_t c_v256_ssd_u8_sum(c_ssd256_internal s) { return s; }

SIMD_INLINE c_v256 c_v256_or(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_or(a.v128[1], b.v128[1]),
                          c_v128_or(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_xor(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_xor(a.v128[1], b.v128[1]),
                          c_v128_xor(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_and(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_and(a.v128[1], b.v128[1]),
                          c_v128_and(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_andn(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_andn(a.v128[1], b.v128[1]),
                          c_v128_andn(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_add_8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_add_8(a.v128[1], b.v128[1]),
                          c_v128_add_8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_add_16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_add_16(a.v128[1], b.v128[1]),
                          c_v128_add_16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_sadd_s8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_sadd_s8(a.v128[1], b.v128[1]),
                          c_v128_sadd_s8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_sadd_u8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_sadd_u8(a.v128[1], b.v128[1]),
                          c_v128_sadd_u8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_sadd_s16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_sadd_s16(a.v128[1], b.v128[1]),
                          c_v128_sadd_s16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_add_32(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_add_32(a.v128[1], b.v128[1]),
                          c_v128_add_32(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_add_64(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_add_64(a.v128[1], b.v128[1]),
                          c_v128_add_64(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_sub_64(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_sub_64(a.v128[1], b.v128[1]),
                          c_v128_sub_64(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_padd_u8(c_v256 a) {
  c_v256 t;
  for (int i = 0; i < 16; i++)
    t.u16[i] = (uint16_t)a.u8[i * 2] + (uint16_t)a.u8[i * 2 + 1];
  return t;
}

SIMD_INLINE c_v256 c_v256_padd_s16(c_v256 a) {
  c_v256 t;
  t.s32[0] = (int32_t)a.s16[0] + (int32_t)a.s16[1];
  t.s32[1] = (int32_t)a.s16[2] + (int32_t)a.s16[3];
  t.s32[2] = (int32_t)a.s16[4] + (int32_t)a.s16[5];
  t.s32[3] = (int32_t)a.s16[6] + (int32_t)a.s16[7];
  t.s32[4] = (int32_t)a.s16[8] + (int32_t)a.s16[9];
  t.s32[5] = (int32_t)a.s16[10] + (int32_t)a.s16[11];
  t.s32[6] = (int32_t)a.s16[12] + (int32_t)a.s16[13];
  t.s32[7] = (int32_t)a.s16[14] + (int32_t)a.s16[15];
  return t;
}

SIMD_INLINE c_v256 c_v256_sub_8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_sub_8(a.v128[1], b.v128[1]),
                          c_v128_sub_8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_ssub_u8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_ssub_u8(a.v128[1], b.v128[1]),
                          c_v128_ssub_u8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_ssub_s8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_ssub_s8(a.v128[1], b.v128[1]),
                          c_v128_ssub_s8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_sub_16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_sub_16(a.v128[1], b.v128[1]),
                          c_v128_sub_16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_ssub_s16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_ssub_s16(a.v128[1], b.v128[1]),
                          c_v128_ssub_s16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_ssub_u16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_ssub_u16(a.v128[1], b.v128[1]),
                          c_v128_ssub_u16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_sub_32(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_sub_32(a.v128[1], b.v128[1]),
                          c_v128_sub_32(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_abs_s16(c_v256 a) {
  return c_v256_from_v128(c_v128_abs_s16(a.v128[1]), c_v128_abs_s16(a.v128[0]));
}

SIMD_INLINE c_v256 c_v256_abs_s8(c_v256 a) {
  return c_v256_from_v128(c_v128_abs_s8(a.v128[1]), c_v128_abs_s8(a.v128[0]));
}

SIMD_INLINE c_v256 c_v256_mul_s16(c_v128 a, c_v128 b) {
  c_v128 lo_bits = c_v128_mullo_s16(a, b);
  c_v128 hi_bits = c_v128_mulhi_s16(a, b);
  return c_v256_from_v128(c_v128_ziphi_16(hi_bits, lo_bits),
                          c_v128_ziplo_16(hi_bits, lo_bits));
}

SIMD_INLINE c_v256 c_v256_mullo_s16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_mullo_s16(a.v128[1], b.v128[1]),
                          c_v128_mullo_s16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_mulhi_s16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_mulhi_s16(a.v128[1], b.v128[1]),
                          c_v128_mulhi_s16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_mullo_s32(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_mullo_s32(a.v128[1], b.v128[1]),
                          c_v128_mullo_s32(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_madd_s16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_madd_s16(a.v128[1], b.v128[1]),
                          c_v128_madd_s16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_madd_us8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_madd_us8(a.v128[1], b.v128[1]),
                          c_v128_madd_us8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_avg_u8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_avg_u8(a.v128[1], b.v128[1]),
                          c_v128_avg_u8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_rdavg_u8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_rdavg_u8(a.v128[1], b.v128[1]),
                          c_v128_rdavg_u8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_rdavg_u16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_rdavg_u16(a.v128[1], b.v128[1]),
                          c_v128_rdavg_u16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_avg_u16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_avg_u16(a.v128[1], b.v128[1]),
                          c_v128_avg_u16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_min_u8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_min_u8(a.v128[1], b.v128[1]),
                          c_v128_min_u8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_max_u8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_max_u8(a.v128[1], b.v128[1]),
                          c_v128_max_u8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_min_s8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_min_s8(a.v128[1], b.v128[1]),
                          c_v128_min_s8(a.v128[0], b.v128[0]));
}

SIMD_INLINE uint32_t c_v256_movemask_8(c_v256 a) {
  return ((a.s8[31] < 0) << 31) | ((a.s8[30] < 0) << 30) |
         ((a.s8[29] < 0) << 29) | ((a.s8[28] < 0) << 28) |
         ((a.s8[27] < 0) << 27) | ((a.s8[26] < 0) << 26) |
         ((a.s8[25] < 0) << 25) | ((a.s8[24] < 0) << 24) |
         ((a.s8[23] < 0) << 23) | ((a.s8[22] < 0) << 22) |
         ((a.s8[21] < 0) << 21) | ((a.s8[20] < 0) << 20) |
         ((a.s8[19] < 0) << 19) | ((a.s8[18] < 0) << 18) |
         ((a.s8[17] < 0) << 17) | ((a.s8[16] < 0) << 16) |
         ((a.s8[15] < 0) << 15) | ((a.s8[14] < 0) << 14) |
         ((a.s8[13] < 0) << 13) | ((a.s8[12] < 0) << 12) |
         ((a.s8[11] < 0) << 11) | ((a.s8[10] < 0) << 10) |
         ((a.s8[9] < 0) << 9) | ((a.s8[8] < 0) << 8) | ((a.s8[7] < 0) << 7) |
         ((a.s8[6] < 0) << 6) | ((a.s8[5] < 0) << 5) | ((a.s8[4] < 0) << 4) |
         ((a.s8[3] < 0) << 3) | ((a.s8[2] < 0) << 2) | ((a.s8[1] < 0) << 1) |
         ((a.s8[0] < 0) << 0);
}

SIMD_INLINE c_v256 c_v256_blend_8(c_v256 a, c_v256 b, c_v256 c) {
  c_v256 t;
  for (int i = 0; i < 32; i++) t.u8[i] = c.s8[i] < 0 ? b.u8[i] : a.u8[i];
  return t;
}

SIMD_INLINE c_v256 c_v256_max_s8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_max_s8(a.v128[1], b.v128[1]),
                          c_v128_max_s8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_min_s16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_min_s16(a.v128[1], b.v128[1]),
                          c_v128_min_s16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_max_s16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_max_s16(a.v128[1], b.v128[1]),
                          c_v128_max_s16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_min_s32(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_min_s32(a.v128[1], b.v128[1]),
                          c_v128_min_s32(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_max_s32(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_max_s32(a.v128[1], b.v128[1]),
                          c_v128_max_s32(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_ziplo_8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_ziphi_8(a.v128[0], b.v128[0]),
                          c_v128_ziplo_8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_ziphi_8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_ziphi_8(a.v128[1], b.v128[1]),
                          c_v128_ziplo_8(a.v128[1], b.v128[1]));
}

SIMD_INLINE c_v256 c_v256_ziplo_16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_ziphi_16(a.v128[0], b.v128[0]),
                          c_v128_ziplo_16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_ziphi_16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_ziphi_16(a.v128[1], b.v128[1]),
                          c_v128_ziplo_16(a.v128[1], b.v128[1]));
}

SIMD_INLINE c_v256 c_v256_ziplo_32(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_ziphi_32(a.v128[0], b.v128[0]),
                          c_v128_ziplo_32(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_ziphi_32(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_ziphi_32(a.v128[1], b.v128[1]),
                          c_v128_ziplo_32(a.v128[1], b.v128[1]));
}

SIMD_INLINE c_v256 c_v256_ziplo_64(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_ziphi_64(a.v128[0], b.v128[0]),
                          c_v128_ziplo_64(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_ziphi_64(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_ziphi_64(a.v128[1], b.v128[1]),
                          c_v128_ziplo_64(a.v128[1], b.v128[1]));
}

SIMD_INLINE c_v256 c_v256_ziplo_128(c_v256 a, c_v256 b) {
  return c_v256_from_v128(a.v128[0], b.v128[0]);
}

SIMD_INLINE c_v256 c_v256_ziphi_128(c_v256 a, c_v256 b) {
  return c_v256_from_v128(a.v128[1], b.v128[1]);
}

SIMD_INLINE c_v256 c_v256_zip_8(c_v128 a, c_v128 b) {
  return c_v256_from_v128(c_v128_ziphi_8(a, b), c_v128_ziplo_8(a, b));
}

SIMD_INLINE c_v256 c_v256_zip_16(c_v128 a, c_v128 b) {
  return c_v256_from_v128(c_v128_ziphi_16(a, b), c_v128_ziplo_16(a, b));
}

SIMD_INLINE c_v256 c_v256_zip_32(c_v128 a, c_v128 b) {
  return c_v256_from_v128(c_v128_ziphi_32(a, b), c_v128_ziplo_32(a, b));
}

SIMD_INLINE c_v256 _c_v256_unzip_8(c_v256 a, c_v256 b, int mode) {
  c_v256 t;
  int i;
  if (mode) {
    for (i = 0; i < 16; i++) {
      t.u8[i] = a.u8[i * 2 + 1];
      t.u8[i + 16] = b.u8[i * 2 + 1];
    }
  } else {
    for (i = 0; i < 16; i++) {
      t.u8[i] = b.u8[i * 2];
      t.u8[i + 16] = a.u8[i * 2];
    }
  }
  return t;
}

SIMD_INLINE c_v256 c_v256_unziplo_8(c_v256 a, c_v256 b) {
  return CONFIG_BIG_ENDIAN ? _c_v256_unzip_8(a, b, 1)
                           : _c_v256_unzip_8(a, b, 0);
}

SIMD_INLINE c_v256 c_v256_unziphi_8(c_v256 a, c_v256 b) {
  return CONFIG_BIG_ENDIAN ? _c_v256_unzip_8(b, a, 0)
                           : _c_v256_unzip_8(b, a, 1);
}

SIMD_INLINE c_v256 _c_v256_unzip_16(c_v256 a, c_v256 b, int mode) {
  c_v256 t;
  int i;
  if (mode) {
    for (i = 0; i < 8; i++) {
      t.u16[i] = a.u16[i * 2 + 1];
      t.u16[i + 8] = b.u16[i * 2 + 1];
    }
  } else {
    for (i = 0; i < 8; i++) {
      t.u16[i] = b.u16[i * 2];
      t.u16[i + 8] = a.u16[i * 2];
    }
  }
  return t;
}

SIMD_INLINE c_v256 c_v256_unziplo_16(c_v256 a, c_v256 b) {
  return CONFIG_BIG_ENDIAN ? _c_v256_unzip_16(a, b, 1)
                           : _c_v256_unzip_16(a, b, 0);
}

SIMD_INLINE c_v256 c_v256_unziphi_16(c_v256 a, c_v256 b) {
  return CONFIG_BIG_ENDIAN ? _c_v256_unzip_16(b, a, 0)
                           : _c_v256_unzip_16(b, a, 1);
}

SIMD_INLINE c_v256 _c_v256_unzip_32(c_v256 a, c_v256 b, int mode) {
  c_v256 t;
  if (mode) {
    t.u32[7] = b.u32[7];
    t.u32[6] = b.u32[5];
    t.u32[5] = b.u32[3];
    t.u32[4] = b.u32[1];
    t.u32[3] = a.u32[7];
    t.u32[2] = a.u32[5];
    t.u32[1] = a.u32[3];
    t.u32[0] = a.u32[1];
  } else {
    t.u32[7] = a.u32[6];
    t.u32[6] = a.u32[4];
    t.u32[5] = a.u32[2];
    t.u32[4] = a.u32[0];
    t.u32[3] = b.u32[6];
    t.u32[2] = b.u32[4];
    t.u32[1] = b.u32[2];
    t.u32[0] = b.u32[0];
  }
  return t;
}

SIMD_INLINE c_v256 c_v256_unziplo_32(c_v256 a, c_v256 b) {
  return CONFIG_BIG_ENDIAN ? _c_v256_unzip_32(a, b, 1)
                           : _c_v256_unzip_32(a, b, 0);
}

SIMD_INLINE c_v256 c_v256_unziphi_32(c_v256 a, c_v256 b) {
  return CONFIG_BIG_ENDIAN ? _c_v256_unzip_32(b, a, 0)
                           : _c_v256_unzip_32(b, a, 1);
}

SIMD_INLINE c_v256 _c_v256_unzip_64(c_v256 a, c_v256 b, int mode) {
  c_v256 t;
  if (mode) {
    t.u64[3] = b.u64[3];
    t.u64[2] = b.u64[1];
    t.u64[1] = a.u64[3];
    t.u64[0] = a.u64[1];
  } else {
    t.u64[3] = a.u64[2];
    t.u64[2] = a.u64[0];
    t.u64[1] = b.u64[2];
    t.u64[0] = b.u64[0];
  }
  return t;
}

SIMD_INLINE c_v256 c_v256_unziplo_64(c_v256 a, c_v256 b) {
  return CONFIG_BIG_ENDIAN ? _c_v256_unzip_64(a, b, 1)
                           : _c_v256_unzip_64(a, b, 0);
}

SIMD_INLINE c_v256 c_v256_unziphi_64(c_v256 a, c_v256 b) {
  return CONFIG_BIG_ENDIAN ? _c_v256_unzip_64(b, a, 0)
                           : _c_v256_unzip_64(b, a, 1);
}

SIMD_INLINE c_v256 c_v256_unpack_u8_s16(c_v128 a) {
  return c_v256_from_v128(c_v128_unpackhi_u8_s16(a), c_v128_unpacklo_u8_s16(a));
}

SIMD_INLINE c_v256 c_v256_unpacklo_u8_s16(c_v256 a) {
  return c_v256_from_v128(c_v128_unpackhi_u8_s16(a.v128[0]),
                          c_v128_unpacklo_u8_s16(a.v128[0]));
}

SIMD_INLINE c_v256 c_v256_unpackhi_u8_s16(c_v256 a) {
  return c_v256_from_v128(c_v128_unpackhi_u8_s16(a.v128[1]),
                          c_v128_unpacklo_u8_s16(a.v128[1]));
}

SIMD_INLINE c_v256 c_v256_unpack_s8_s16(c_v128 a) {
  return c_v256_from_v128(c_v128_unpackhi_s8_s16(a), c_v128_unpacklo_s8_s16(a));
}

SIMD_INLINE c_v256 c_v256_unpacklo_s8_s16(c_v256 a) {
  return c_v256_from_v128(c_v128_unpackhi_s8_s16(a.v128[0]),
                          c_v128_unpacklo_s8_s16(a.v128[0]));
}

SIMD_INLINE c_v256 c_v256_unpackhi_s8_s16(c_v256 a) {
  return c_v256_from_v128(c_v128_unpackhi_s8_s16(a.v128[1]),
                          c_v128_unpacklo_s8_s16(a.v128[1]));
}

SIMD_INLINE c_v256 c_v256_pack_s32_s16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_pack_s32_s16(a.v128[1], a.v128[0]),
                          c_v128_pack_s32_s16(b.v128[1], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_pack_s32_u16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_pack_s32_u16(a.v128[1], a.v128[0]),
                          c_v128_pack_s32_u16(b.v128[1], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_pack_s16_u8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_pack_s16_u8(a.v128[1], a.v128[0]),
                          c_v128_pack_s16_u8(b.v128[1], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_pack_s16_s8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_pack_s16_s8(a.v128[1], a.v128[0]),
                          c_v128_pack_s16_s8(b.v128[1], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_unpack_u16_s32(c_v128 a) {
  return c_v256_from_v128(c_v128_unpackhi_u16_s32(a),
                          c_v128_unpacklo_u16_s32(a));
}

SIMD_INLINE c_v256 c_v256_unpack_s16_s32(c_v128 a) {
  return c_v256_from_v128(c_v128_unpackhi_s16_s32(a),
                          c_v128_unpacklo_s16_s32(a));
}

SIMD_INLINE c_v256 c_v256_unpacklo_u16_s32(c_v256 a) {
  return c_v256_from_v128(c_v128_unpackhi_u16_s32(a.v128[0]),
                          c_v128_unpacklo_u16_s32(a.v128[0]));
}

SIMD_INLINE c_v256 c_v256_unpacklo_s16_s32(c_v256 a) {
  return c_v256_from_v128(c_v128_unpackhi_s16_s32(a.v128[0]),
                          c_v128_unpacklo_s16_s32(a.v128[0]));
}

SIMD_INLINE c_v256 c_v256_unpackhi_u16_s32(c_v256 a) {
  return c_v256_from_v128(c_v128_unpackhi_u16_s32(a.v128[1]),
                          c_v128_unpacklo_u16_s32(a.v128[1]));
}

SIMD_INLINE c_v256 c_v256_unpackhi_s16_s32(c_v256 a) {
  return c_v256_from_v128(c_v128_unpackhi_s16_s32(a.v128[1]),
                          c_v128_unpacklo_s16_s32(a.v128[1]));
}

SIMD_INLINE c_v256 c_v256_shuffle_8(c_v256 a, c_v256 pattern) {
  c_v256 t;
  int c;
  for (c = 0; c < 32; c++)
    t.u8[c] = a.u8[CONFIG_BIG_ENDIAN ? 31 - (pattern.u8[c] & 31)
                                     : pattern.u8[c] & 31];

  return t;
}

SIMD_INLINE c_v256 c_v256_wideshuffle_8(c_v256 a, c_v256 b, c_v256 pattern) {
  c_v256 t;
  int c;
  for (c = 0; c < 32; c++)
    t.u8[c] = (pattern.u8[c] < 32
                   ? b.u8
                   : a.u8)[CONFIG_BIG_ENDIAN ? 31 - (pattern.u8[c] & 31)
                                             : pattern.u8[c] & 31];
  return t;
}

// Pairwise / dual-lane shuffle: shuffle two 128-bit lanes.
SIMD_INLINE c_v256 c_v256_pshuffle_8(c_v256 a, c_v256 pattern) {
  return c_v256_from_v128(
      c_v128_shuffle_8(c_v256_high_v128(a), c_v256_high_v128(pattern)),
      c_v128_shuffle_8(c_v256_low_v128(a), c_v256_low_v128(pattern)));
}
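/* Note: unlike c_v256_shuffle_8(), which indexes across all 32 bytes of a,
   c_v256_pshuffle_8() applies an independent 16-byte c_v128_shuffle_8() to
   each 128-bit half, so each pattern byte selects only from within its own
   half. */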

SIMD_INLINE c_v256 c_v256_cmpgt_s8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_cmpgt_s8(a.v128[1], b.v128[1]),
                          c_v128_cmpgt_s8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_cmplt_s8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_cmplt_s8(a.v128[1], b.v128[1]),
                          c_v128_cmplt_s8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_cmpeq_8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_cmpeq_8(a.v128[1], b.v128[1]),
                          c_v128_cmpeq_8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_cmpgt_s16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_cmpgt_s16(a.v128[1], b.v128[1]),
                          c_v128_cmpgt_s16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_cmplt_s16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_cmplt_s16(a.v128[1], b.v128[1]),
                          c_v128_cmplt_s16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_cmpeq_16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_cmpeq_16(a.v128[1], b.v128[1]),
                          c_v128_cmpeq_16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_cmpgt_s32(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_cmpgt_s32(a.v128[1], b.v128[1]),
                          c_v128_cmpgt_s32(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_cmplt_s32(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_cmplt_s32(a.v128[1], b.v128[1]),
                          c_v128_cmplt_s32(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_cmpeq_32(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_cmpeq_32(a.v128[1], b.v128[1]),
                          c_v128_cmpeq_32(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_shl_n_byte(c_v256 a, unsigned int n) {
  if (n == 0) return a;
  if (n < 16)
    return c_v256_from_v128(c_v128_or(c_v128_shl_n_byte(a.v128[1], n),
                                      c_v128_shr_n_byte(a.v128[0], 16 - n)),
                            c_v128_shl_n_byte(a.v128[0], n));
  else if (n > 16)
    return c_v256_from_v128(c_v128_shl_n_byte(a.v128[0], n - 16),
                            c_v128_zero());
  else
    return c_v256_from_v128(c_v256_low_v128(a), c_v128_zero());
}

SIMD_INLINE c_v256 c_v256_shr_n_byte(c_v256 a, unsigned int n) {
  if (n == 0) return a;
  if (n < 16)
    return c_v256_from_v128(c_v128_shr_n_byte(a.v128[1], n),
                            c_v128_or(c_v128_shr_n_byte(a.v128[0], n),
                                      c_v128_shl_n_byte(a.v128[1], 16 - n)));
  else if (n > 16)
    return c_v256_from_v128(c_v128_zero(),
                            c_v128_shr_n_byte(a.v128[1], n - 16));
  else
    return c_v256_from_v128(c_v128_zero(), c_v256_high_v128(a));
}

SIMD_INLINE c_v256 c_v256_align(c_v256 a, c_v256 b, unsigned int c) {
  if (SIMD_CHECK && c > 31) {
    fprintf(stderr, "Error: undefined alignment %d\n", c);
    abort();
  }
  return c ? c_v256_or(c_v256_shr_n_byte(b, c), c_v256_shl_n_byte(a, 32 - c))
           : b;
}
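/* Illustrative note: c_v256_align(a, b, c) behaves like an extract from the
   64-byte concatenation of a (high) and b (low), returning the 32 bytes that
   start c bytes into b; e.g. c_v256_align(a, b, 1) yields bytes b[1..31]
   followed by a[0]. */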

SIMD_INLINE c_v256 c_v256_shl_8(c_v256 a, unsigned int c) {
  return c_v256_from_v128(c_v128_shl_8(a.v128[1], c),
                          c_v128_shl_8(a.v128[0], c));
}

SIMD_INLINE c_v256 c_v256_shr_u8(c_v256 a, unsigned int c) {
  return c_v256_from_v128(c_v128_shr_u8(a.v128[1], c),
                          c_v128_shr_u8(a.v128[0], c));
}

SIMD_INLINE c_v256 c_v256_shr_s8(c_v256 a, unsigned int c) {
  return c_v256_from_v128(c_v128_shr_s8(a.v128[1], c),
                          c_v128_shr_s8(a.v128[0], c));
}

SIMD_INLINE c_v256 c_v256_shl_16(c_v256 a, unsigned int c) {
  return c_v256_from_v128(c_v128_shl_16(a.v128[1], c),
                          c_v128_shl_16(a.v128[0], c));
}

SIMD_INLINE c_v256 c_v256_shr_u16(c_v256 a, unsigned int c) {
  return c_v256_from_v128(c_v128_shr_u16(a.v128[1], c),
                          c_v128_shr_u16(a.v128[0], c));
}

SIMD_INLINE c_v256 c_v256_shr_s16(c_v256 a, unsigned int c) {
  return c_v256_from_v128(c_v128_shr_s16(a.v128[1], c),
                          c_v128_shr_s16(a.v128[0], c));
}

SIMD_INLINE c_v256 c_v256_shl_32(c_v256 a, unsigned int c) {
  return c_v256_from_v128(c_v128_shl_32(a.v128[1], c),
                          c_v128_shl_32(a.v128[0], c));
}

SIMD_INLINE c_v256 c_v256_shr_u32(c_v256 a, unsigned int c) {
  return c_v256_from_v128(c_v128_shr_u32(a.v128[1], c),
                          c_v128_shr_u32(a.v128[0], c));
}

SIMD_INLINE c_v256 c_v256_shr_s32(c_v256 a, unsigned int c) {
  return c_v256_from_v128(c_v128_shr_s32(a.v128[1], c),
                          c_v128_shr_s32(a.v128[0], c));
}

SIMD_INLINE c_v256 c_v256_shr_s64(c_v256 a, unsigned int n) {
  c_v256 t;
  if (SIMD_CHECK && n > 63) {
    fprintf(stderr, "Error: undefined s64 shift right %d\n", n);
    abort();
  }
  t.s64[3] = a.s64[3] >> n;
  t.s64[2] = a.s64[2] >> n;
  t.s64[1] = a.s64[1] >> n;
  t.s64[0] = a.s64[0] >> n;
  return t;
}

SIMD_INLINE c_v256 c_v256_shr_u64(c_v256 a, unsigned int n) {
  c_v256 t;
  if (SIMD_CHECK && n > 63) {
    fprintf(stderr, "Error: undefined u64 shift right %d\n", n);
    abort();
  }
  t.u64[3] = a.u64[3] >> n;
  t.u64[2] = a.u64[2] >> n;
  t.u64[1] = a.u64[1] >> n;
  t.u64[0] = a.u64[0] >> n;
  return t;
}

SIMD_INLINE c_v256 c_v256_shl_64(c_v256 a, unsigned int n) {
  c_v256 t;
  if (SIMD_CHECK && n > 63) {
    fprintf(stderr, "Error: undefined u64 shift left %d\n", n);
    abort();
  }
  t.u64[3] = a.u64[3] << n;
  t.u64[2] = a.u64[2] << n;
  t.u64[1] = a.u64[1] << n;
  t.u64[0] = a.u64[0] << n;
  return t;
}

SIMD_INLINE c_v256 c_v256_shl_n_8(c_v256 a, unsigned int n) {
  return c_v256_shl_8(a, n);
}

SIMD_INLINE c_v256 c_v256_shl_n_16(c_v256 a, unsigned int n) {
  return c_v256_shl_16(a, n);
}

SIMD_INLINE c_v256 c_v256_shl_n_32(c_v256 a, unsigned int n) {
  return c_v256_shl_32(a, n);
}

SIMD_INLINE c_v256 c_v256_shl_n_64(c_v256 a, unsigned int n) {
  return c_v256_shl_64(a, n);
}

SIMD_INLINE c_v256 c_v256_shr_n_u8(c_v256 a, unsigned int n) {
  return c_v256_shr_u8(a, n);
}

SIMD_INLINE c_v256 c_v256_shr_n_u16(c_v256 a, unsigned int n) {
  return c_v256_shr_u16(a, n);
}

SIMD_INLINE c_v256 c_v256_shr_n_u32(c_v256 a, unsigned int n) {
  return c_v256_shr_u32(a, n);
}

SIMD_INLINE c_v256 c_v256_shr_n_u64(c_v256 a, unsigned int n) {
  return c_v256_shr_u64(a, n);
}

SIMD_INLINE c_v256 c_v256_shr_n_s8(c_v256 a, unsigned int n) {
  return c_v256_shr_s8(a, n);
}

SIMD_INLINE c_v256 c_v256_shr_n_s16(c_v256 a, unsigned int n) {
  return c_v256_shr_s16(a, n);
}

SIMD_INLINE c_v256 c_v256_shr_n_s32(c_v256 a, unsigned int n) {
  return c_v256_shr_s32(a, n);
}

SIMD_INLINE c_v256 c_v256_shr_n_s64(c_v256 a, unsigned int n) {
  return c_v256_shr_s64(a, n);
}

SIMD_INLINE c_v256 c_v256_shr_n_word(c_v256 a, const unsigned int n) {
  return c_v256_shr_n_byte(a, 2 * n);
}
SIMD_INLINE c_v256 c_v256_shl_n_word(c_v256 a, const unsigned int n) {
  return c_v256_shl_n_byte(a, 2 * n);
}

typedef uint32_t c_sad256_internal_u16;

SIMD_INLINE c_sad256_internal_u16 c_v256_sad_u16_init() { return 0; }

/* Implementation dependent return value.  Result must be finalised with
   v256_sad_u16_sum(). */
SIMD_INLINE c_sad256_internal_u16 c_v256_sad_u16(c_sad256_internal_u16 s,
                                                 c_v256 a, c_v256 b) {
  int c;
  for (c = 0; c < 16; c++)
    s += a.u16[c] > b.u16[c] ? a.u16[c] - b.u16[c] : b.u16[c] - a.u16[c];
  return s;
}

SIMD_INLINE uint32_t c_v256_sad_u16_sum(c_sad256_internal_u16 s) { return s; }

typedef uint64_t c_ssd256_internal_s16;

SIMD_INLINE c_ssd256_internal_s16 c_v256_ssd_s16_init() { return 0; }

/* Implementation dependent return value.  Result must be finalised with
 * v256_ssd_s16_sum(). */
SIMD_INLINE c_ssd256_internal_s16 c_v256_ssd_s16(c_ssd256_internal_s16 s,
                                                 c_v256 a, c_v256 b) {
  int c;
  for (c = 0; c < 16; c++)
    s += (int32_t)(int16_t)(a.s16[c] - b.s16[c]) *
         (int32_t)(int16_t)(a.s16[c] - b.s16[c]);
  return s;
}

SIMD_INLINE uint64_t c_v256_ssd_s16_sum(c_ssd256_internal_s16 s) { return s; }

#endif  // AOM_AOM_DSP_SIMD_V256_INTRINSICS_C_H_