/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#ifndef AOM_AOM_DSP_SIMD_V256_INTRINSICS_C_H_
#define AOM_AOM_DSP_SIMD_V256_INTRINSICS_C_H_

#include <stdio.h>
#include <stdlib.h>

#include "config/aom_config.h"

#include "aom_dsp/simd/v128_intrinsics_c.h"

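/* A 256-bit vector viewed as arrays of 8/16/32/64-bit lanes or as a pair of
   128-bit (four 64-bit) halves. Element 0 of each array holds the least
   significant part of the vector, so u64[0] is the low 64 bits and v128[1]
   the high 128 bits. */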
typedef union {
  uint8_t u8[32];
  uint16_t u16[16];
  uint32_t u32[8];
  uint64_t u64[4];
  int8_t s8[32];
  int16_t s16[16];
  int32_t s32[8];
  int64_t s64[4];
  c_v64 v64[4];
  c_v128 v128[2];
} c_v256;

SIMD_INLINE uint32_t c_v256_low_u32(c_v256 a) { return a.u32[0]; }

SIMD_INLINE c_v64 c_v256_low_v64(c_v256 a) { return a.v64[0]; }

SIMD_INLINE uint64_t c_v256_low_u64(c_v256 a) { return a.u64[0]; }

SIMD_INLINE c_v128 c_v256_low_v128(c_v256 a) { return a.v128[0]; }

SIMD_INLINE c_v128 c_v256_high_v128(c_v256 a) { return a.v128[1]; }

SIMD_INLINE c_v256 c_v256_from_v128(c_v128 hi, c_v128 lo) {
  c_v256 t;
  t.v128[1] = hi;
  t.v128[0] = lo;
  return t;
}

SIMD_INLINE c_v256 c_v256_from_64(uint64_t a, uint64_t b, uint64_t c,
                                  uint64_t d) {
  c_v256 t;
  t.u64[3] = a;
  t.u64[2] = b;
  t.u64[1] = c;
  t.u64[0] = d;
  return t;
}

SIMD_INLINE c_v256 c_v256_from_v64(c_v64 a, c_v64 b, c_v64 c, c_v64 d) {
  c_v256 t;
  t.u64[3] = a.u64;
  t.u64[2] = b.u64;
  t.u64[1] = c.u64;
  t.u64[0] = d.u64;
  return t;
}

SIMD_INLINE c_v256 c_v256_load_unaligned(const void *p) {
  c_v256 t;
  uint8_t *pp = (uint8_t *)p;
  uint8_t *q = (uint8_t *)&t;
  int c;
  for (c = 0; c < 32; c++) q[c] = pp[c];
  return t;
}

SIMD_INLINE c_v256 c_v256_load_aligned(const void *p) {
  if (SIMD_CHECK && (uintptr_t)p & 31) {
    fprintf(stderr, "Error: unaligned v256 load at %p\n", p);
    abort();
  }
  return c_v256_load_unaligned(p);
}

SIMD_INLINE void c_v256_store_unaligned(void *p, c_v256 a) {
  uint8_t *pp = (uint8_t *)p;
  uint8_t *q = (uint8_t *)&a;
  int c;
  for (c = 0; c < 32; c++) pp[c] = q[c];
}

SIMD_INLINE void c_v256_store_aligned(void *p, c_v256 a) {
  if (SIMD_CHECK && (uintptr_t)p & 31) {
    fprintf(stderr, "Error: unaligned v256 store at %p\n", p);
    abort();
  }
  c_v256_store_unaligned(p, a);
}

SIMD_INLINE c_v256 c_v256_zero() {
  c_v256 t;
  t.u64[3] = t.u64[2] = t.u64[1] = t.u64[0] = 0;
  return t;
}

SIMD_INLINE c_v256 c_v256_dup_8(uint8_t x) {
  c_v256 t;
  t.v64[3] = t.v64[2] = t.v64[1] = t.v64[0] = c_v64_dup_8(x);
  return t;
}

SIMD_INLINE c_v256 c_v256_dup_16(uint16_t x) {
  c_v256 t;
  t.v64[3] = t.v64[2] = t.v64[1] = t.v64[0] = c_v64_dup_16(x);
  return t;
}

SIMD_INLINE c_v256 c_v256_dup_32(uint32_t x) {
  c_v256 t;
  t.v64[3] = t.v64[2] = t.v64[1] = t.v64[0] = c_v64_dup_32(x);
  return t;
}

SIMD_INLINE c_v256 c_v256_dup_64(uint64_t x) {
  c_v256 t;
  t.u64[3] = t.u64[2] = t.u64[1] = t.u64[0] = x;
  return t;
}

SIMD_INLINE int64_t c_v256_dotp_su8(c_v256 a, c_v256 b) {
  return c_v128_dotp_su8(a.v128[1], b.v128[1]) +
         c_v128_dotp_su8(a.v128[0], b.v128[0]);
}

SIMD_INLINE int64_t c_v256_dotp_s16(c_v256 a, c_v256 b) {
  return c_v128_dotp_s16(a.v128[1], b.v128[1]) +
         c_v128_dotp_s16(a.v128[0], b.v128[0]);
}

SIMD_INLINE int64_t c_v256_dotp_s32(c_v256 a, c_v256 b) {
  return c_v128_dotp_s32(a.v128[1], b.v128[1]) +
         c_v128_dotp_s32(a.v128[0], b.v128[0]);
}

SIMD_INLINE uint64_t c_v256_hadd_u8(c_v256 a) {
  return c_v128_hadd_u8(a.v128[1]) + c_v128_hadd_u8(a.v128[0]);
}

typedef uint32_t c_sad256_internal;

SIMD_INLINE c_sad256_internal c_v256_sad_u8_init() { return 0; }

/* Implementation dependent return value.  Result must be finalised with
   v256_sad_u8_sum().
   The result for more than 16 v256_sad_u8() calls is undefined. */
SIMD_INLINE c_sad256_internal c_v256_sad_u8(c_sad256_internal s, c_v256 a,
                                            c_v256 b) {
  int c;
  for (c = 0; c < 32; c++)
    s += a.u8[c] > b.u8[c] ? a.u8[c] - b.u8[c] : b.u8[c] - a.u8[c];
  return s;
}

SIMD_INLINE uint32_t c_v256_sad_u8_sum(c_sad256_internal s) { return s; }
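
/* Illustrative usage sketch of the SAD accumulator; src and ref here are
   assumed to be 32-byte aligned 8-bit pixel buffers:

     c_sad256_internal acc = c_v256_sad_u8_init();
     acc = c_v256_sad_u8(acc, c_v256_load_aligned(src),
                         c_v256_load_aligned(ref));
     uint32_t sad = c_v256_sad_u8_sum(acc);
*/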

typedef uint32_t c_ssd256_internal;

SIMD_INLINE c_ssd256_internal c_v256_ssd_u8_init() { return 0; }

/* Implementation dependent return value.  Result must be finalised with
 * v256_ssd_u8_sum(). */
SIMD_INLINE c_ssd256_internal c_v256_ssd_u8(c_ssd256_internal s, c_v256 a,
                                            c_v256 b) {
  int c;
  for (c = 0; c < 32; c++) s += (a.u8[c] - b.u8[c]) * (a.u8[c] - b.u8[c]);
  return s;
}

SIMD_INLINE uint32_t c_v256_ssd_u8_sum(c_ssd256_internal s) { return s; }
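
/* The squared-difference accumulator above follows the same
   init/accumulate/sum pattern as c_v256_sad_u8(). */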

SIMD_INLINE c_v256 c_v256_or(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_or(a.v128[1], b.v128[1]),
                          c_v128_or(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_xor(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_xor(a.v128[1], b.v128[1]),
                          c_v128_xor(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_and(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_and(a.v128[1], b.v128[1]),
                          c_v128_and(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_andn(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_andn(a.v128[1], b.v128[1]),
                          c_v128_andn(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_add_8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_add_8(a.v128[1], b.v128[1]),
                          c_v128_add_8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_add_16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_add_16(a.v128[1], b.v128[1]),
                          c_v128_add_16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_sadd_s8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_sadd_s8(a.v128[1], b.v128[1]),
                          c_v128_sadd_s8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_sadd_u8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_sadd_u8(a.v128[1], b.v128[1]),
                          c_v128_sadd_u8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_sadd_s16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_sadd_s16(a.v128[1], b.v128[1]),
                          c_v128_sadd_s16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_add_32(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_add_32(a.v128[1], b.v128[1]),
                          c_v128_add_32(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_add_64(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_add_64(a.v128[1], b.v128[1]),
                          c_v128_add_64(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_sub_64(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_sub_64(a.v128[1], b.v128[1]),
                          c_v128_sub_64(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_padd_u8(c_v256 a) {
  c_v256 t;
  for (int i = 0; i < 16; i++)
    t.u16[i] = (uint16_t)a.u8[i * 2] + (uint16_t)a.u8[i * 2 + 1];
  return t;
}

SIMD_INLINE c_v256 c_v256_padd_s16(c_v256 a) {
  c_v256 t;
  t.s32[0] = (int32_t)a.s16[0] + (int32_t)a.s16[1];
  t.s32[1] = (int32_t)a.s16[2] + (int32_t)a.s16[3];
  t.s32[2] = (int32_t)a.s16[4] + (int32_t)a.s16[5];
  t.s32[3] = (int32_t)a.s16[6] + (int32_t)a.s16[7];
  t.s32[4] = (int32_t)a.s16[8] + (int32_t)a.s16[9];
  t.s32[5] = (int32_t)a.s16[10] + (int32_t)a.s16[11];
  t.s32[6] = (int32_t)a.s16[12] + (int32_t)a.s16[13];
  t.s32[7] = (int32_t)a.s16[14] + (int32_t)a.s16[15];
  return t;
}

SIMD_INLINE c_v256 c_v256_sub_8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_sub_8(a.v128[1], b.v128[1]),
                          c_v128_sub_8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_ssub_u8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_ssub_u8(a.v128[1], b.v128[1]),
                          c_v128_ssub_u8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_ssub_s8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_ssub_s8(a.v128[1], b.v128[1]),
                          c_v128_ssub_s8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_sub_16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_sub_16(a.v128[1], b.v128[1]),
                          c_v128_sub_16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_ssub_s16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_ssub_s16(a.v128[1], b.v128[1]),
                          c_v128_ssub_s16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_ssub_u16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_ssub_u16(a.v128[1], b.v128[1]),
                          c_v128_ssub_u16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_sub_32(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_sub_32(a.v128[1], b.v128[1]),
                          c_v128_sub_32(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_abs_s16(c_v256 a) {
  return c_v256_from_v128(c_v128_abs_s16(a.v128[1]), c_v128_abs_s16(a.v128[0]));
}

SIMD_INLINE c_v256 c_v256_abs_s8(c_v256 a) {
  return c_v256_from_v128(c_v128_abs_s8(a.v128[1]), c_v128_abs_s8(a.v128[0]));
}

SIMD_INLINE c_v256 c_v256_mul_s16(c_v128 a, c_v128 b) {
  c_v128 lo_bits = c_v128_mullo_s16(a, b);
  c_v128 hi_bits = c_v128_mulhi_s16(a, b);
  return c_v256_from_v128(c_v128_ziphi_16(hi_bits, lo_bits),
                          c_v128_ziplo_16(hi_bits, lo_bits));
}

SIMD_INLINE c_v256 c_v256_mullo_s16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_mullo_s16(a.v128[1], b.v128[1]),
                          c_v128_mullo_s16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_mulhi_s16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_mulhi_s16(a.v128[1], b.v128[1]),
                          c_v128_mulhi_s16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_mullo_s32(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_mullo_s32(a.v128[1], b.v128[1]),
                          c_v128_mullo_s32(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_madd_s16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_madd_s16(a.v128[1], b.v128[1]),
                          c_v128_madd_s16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_madd_us8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_madd_us8(a.v128[1], b.v128[1]),
                          c_v128_madd_us8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_avg_u8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_avg_u8(a.v128[1], b.v128[1]),
                          c_v128_avg_u8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_rdavg_u8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_rdavg_u8(a.v128[1], b.v128[1]),
                          c_v128_rdavg_u8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_rdavg_u16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_rdavg_u16(a.v128[1], b.v128[1]),
                          c_v128_rdavg_u16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_avg_u16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_avg_u16(a.v128[1], b.v128[1]),
                          c_v128_avg_u16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_min_u8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_min_u8(a.v128[1], b.v128[1]),
                          c_v128_min_u8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_max_u8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_max_u8(a.v128[1], b.v128[1]),
                          c_v128_max_u8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_min_s8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_min_s8(a.v128[1], b.v128[1]),
                          c_v128_min_s8(a.v128[0], b.v128[0]));
}

SIMD_INLINE uint32_t c_v256_movemask_8(c_v256 a) {
  return ((a.s8[31] < 0) << 31) | ((a.s8[30] < 0) << 30) |
         ((a.s8[29] < 0) << 29) | ((a.s8[28] < 0) << 28) |
         ((a.s8[27] < 0) << 27) | ((a.s8[26] < 0) << 26) |
         ((a.s8[25] < 0) << 25) | ((a.s8[24] < 0) << 24) |
         ((a.s8[23] < 0) << 23) | ((a.s8[22] < 0) << 22) |
         ((a.s8[21] < 0) << 21) | ((a.s8[20] < 0) << 20) |
         ((a.s8[19] < 0) << 19) | ((a.s8[18] < 0) << 18) |
         ((a.s8[17] < 0) << 17) | ((a.s8[16] < 0) << 16) |
         ((a.s8[15] < 0) << 15) | ((a.s8[14] < 0) << 14) |
         ((a.s8[13] < 0) << 13) | ((a.s8[12] < 0) << 12) |
         ((a.s8[11] < 0) << 11) | ((a.s8[10] < 0) << 10) |
         ((a.s8[9] < 0) << 9) | ((a.s8[8] < 0) << 8) | ((a.s8[7] < 0) << 7) |
         ((a.s8[6] < 0) << 6) | ((a.s8[5] < 0) << 5) | ((a.s8[4] < 0) << 4) |
         ((a.s8[3] < 0) << 3) | ((a.s8[2] < 0) << 2) | ((a.s8[1] < 0) << 1) |
         ((a.s8[0] < 0) << 0);
}

SIMD_INLINE c_v256 c_v256_blend_8(c_v256 a, c_v256 b, c_v256 c) {
  c_v256 t;
  for (int i = 0; i < 32; i++) t.u8[i] = c.s8[i] < 0 ? b.u8[i] : a.u8[i];
  return t;
}

SIMD_INLINE c_v256 c_v256_max_s8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_max_s8(a.v128[1], b.v128[1]),
                          c_v128_max_s8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_min_s16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_min_s16(a.v128[1], b.v128[1]),
                          c_v128_min_s16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_max_s16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_max_s16(a.v128[1], b.v128[1]),
                          c_v128_max_s16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_min_s32(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_min_s32(a.v128[1], b.v128[1]),
                          c_v128_min_s32(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_max_s32(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_max_s32(a.v128[1], b.v128[1]),
                          c_v128_max_s32(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_ziplo_8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_ziphi_8(a.v128[0], b.v128[0]),
                          c_v128_ziplo_8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_ziphi_8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_ziphi_8(a.v128[1], b.v128[1]),
                          c_v128_ziplo_8(a.v128[1], b.v128[1]));
}

SIMD_INLINE c_v256 c_v256_ziplo_16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_ziphi_16(a.v128[0], b.v128[0]),
                          c_v128_ziplo_16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_ziphi_16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_ziphi_16(a.v128[1], b.v128[1]),
                          c_v128_ziplo_16(a.v128[1], b.v128[1]));
}

SIMD_INLINE c_v256 c_v256_ziplo_32(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_ziphi_32(a.v128[0], b.v128[0]),
                          c_v128_ziplo_32(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_ziphi_32(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_ziphi_32(a.v128[1], b.v128[1]),
                          c_v128_ziplo_32(a.v128[1], b.v128[1]));
}

SIMD_INLINE c_v256 c_v256_ziplo_64(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_ziphi_64(a.v128[0], b.v128[0]),
                          c_v128_ziplo_64(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_ziphi_64(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_ziphi_64(a.v128[1], b.v128[1]),
                          c_v128_ziplo_64(a.v128[1], b.v128[1]));
}

SIMD_INLINE c_v256 c_v256_ziplo_128(c_v256 a, c_v256 b) {
  return c_v256_from_v128(a.v128[0], b.v128[0]);
}

SIMD_INLINE c_v256 c_v256_ziphi_128(c_v256 a, c_v256 b) {
  return c_v256_from_v128(a.v128[1], b.v128[1]);
}

SIMD_INLINE c_v256 c_v256_zip_8(c_v128 a, c_v128 b) {
  return c_v256_from_v128(c_v128_ziphi_8(a, b), c_v128_ziplo_8(a, b));
}

SIMD_INLINE c_v256 c_v256_zip_16(c_v128 a, c_v128 b) {
  return c_v256_from_v128(c_v128_ziphi_16(a, b), c_v128_ziplo_16(a, b));
}

SIMD_INLINE c_v256 c_v256_zip_32(c_v128 a, c_v128 b) {
  return c_v256_from_v128(c_v128_ziphi_32(a, b), c_v128_ziplo_32(a, b));
}

SIMD_INLINE c_v256 _c_v256_unzip_8(c_v256 a, c_v256 b, int mode) {
  c_v256 t;
  int i;
  if (mode) {
    for (i = 0; i < 16; i++) {
      t.u8[i] = a.u8[i * 2 + 1];
      t.u8[i + 16] = b.u8[i * 2 + 1];
    }
  } else {
    for (i = 0; i < 16; i++) {
      t.u8[i] = b.u8[i * 2];
      t.u8[i + 16] = a.u8[i * 2];
    }
  }
  return t;
}

SIMD_INLINE c_v256 c_v256_unziplo_8(c_v256 a, c_v256 b) {
  return CONFIG_BIG_ENDIAN ? _c_v256_unzip_8(a, b, 1)
                           : _c_v256_unzip_8(a, b, 0);
}

SIMD_INLINE c_v256 c_v256_unziphi_8(c_v256 a, c_v256 b) {
  return CONFIG_BIG_ENDIAN ? _c_v256_unzip_8(b, a, 0)
                           : _c_v256_unzip_8(b, a, 1);
}

SIMD_INLINE c_v256 _c_v256_unzip_16(c_v256 a, c_v256 b, int mode) {
  c_v256 t;
  int i;
  if (mode) {
    for (i = 0; i < 8; i++) {
      t.u16[i] = a.u16[i * 2 + 1];
      t.u16[i + 8] = b.u16[i * 2 + 1];
    }
  } else {
    for (i = 0; i < 8; i++) {
      t.u16[i] = b.u16[i * 2];
      t.u16[i + 8] = a.u16[i * 2];
    }
  }
  return t;
}

SIMD_INLINE c_v256 c_v256_unziplo_16(c_v256 a, c_v256 b) {
  return CONFIG_BIG_ENDIAN ? _c_v256_unzip_16(a, b, 1)
                           : _c_v256_unzip_16(a, b, 0);
}

SIMD_INLINE c_v256 c_v256_unziphi_16(c_v256 a, c_v256 b) {
  return CONFIG_BIG_ENDIAN ? _c_v256_unzip_16(b, a, 0)
                           : _c_v256_unzip_16(b, a, 1);
}

SIMD_INLINE c_v256 _c_v256_unzip_32(c_v256 a, c_v256 b, int mode) {
  c_v256 t;
  if (mode) {
    t.u32[7] = b.u32[7];
    t.u32[6] = b.u32[5];
    t.u32[5] = b.u32[3];
    t.u32[4] = b.u32[1];
    t.u32[3] = a.u32[7];
    t.u32[2] = a.u32[5];
    t.u32[1] = a.u32[3];
    t.u32[0] = a.u32[1];
  } else {
    t.u32[7] = a.u32[6];
    t.u32[6] = a.u32[4];
    t.u32[5] = a.u32[2];
    t.u32[4] = a.u32[0];
    t.u32[3] = b.u32[6];
    t.u32[2] = b.u32[4];
    t.u32[1] = b.u32[2];
    t.u32[0] = b.u32[0];
  }
  return t;
}

SIMD_INLINE c_v256 c_v256_unziplo_32(c_v256 a, c_v256 b) {
  return CONFIG_BIG_ENDIAN ? _c_v256_unzip_32(a, b, 1)
                           : _c_v256_unzip_32(a, b, 0);
}

SIMD_INLINE c_v256 c_v256_unziphi_32(c_v256 a, c_v256 b) {
  return CONFIG_BIG_ENDIAN ? _c_v256_unzip_32(b, a, 0)
                           : _c_v256_unzip_32(b, a, 1);
}

SIMD_INLINE c_v256 _c_v256_unzip_64(c_v256 a, c_v256 b, int mode) {
  c_v256 t;
  if (mode) {
    t.u64[3] = b.u64[3];
    t.u64[2] = b.u64[1];
    t.u64[1] = a.u64[3];
    t.u64[0] = a.u64[1];
  } else {
    t.u64[3] = a.u64[2];
    t.u64[2] = a.u64[0];
    t.u64[1] = b.u64[2];
    t.u64[0] = b.u64[0];
  }
  return t;
}

SIMD_INLINE c_v256 c_v256_unziplo_64(c_v256 a, c_v256 b) {
  return CONFIG_BIG_ENDIAN ? _c_v256_unzip_64(a, b, 1)
                           : _c_v256_unzip_64(a, b, 0);
}

SIMD_INLINE c_v256 c_v256_unziphi_64(c_v256 a, c_v256 b) {
  return CONFIG_BIG_ENDIAN ? _c_v256_unzip_64(b, a, 0)
                           : _c_v256_unzip_64(b, a, 1);
}

SIMD_INLINE c_v256 c_v256_unpack_u8_s16(c_v128 a) {
  return c_v256_from_v128(c_v128_unpackhi_u8_s16(a), c_v128_unpacklo_u8_s16(a));
}

SIMD_INLINE c_v256 c_v256_unpacklo_u8_s16(c_v256 a) {
  return c_v256_from_v128(c_v128_unpackhi_u8_s16(a.v128[0]),
                          c_v128_unpacklo_u8_s16(a.v128[0]));
}

SIMD_INLINE c_v256 c_v256_unpackhi_u8_s16(c_v256 a) {
  return c_v256_from_v128(c_v128_unpackhi_u8_s16(a.v128[1]),
                          c_v128_unpacklo_u8_s16(a.v128[1]));
}

SIMD_INLINE c_v256 c_v256_unpack_s8_s16(c_v128 a) {
  return c_v256_from_v128(c_v128_unpackhi_s8_s16(a), c_v128_unpacklo_s8_s16(a));
}

SIMD_INLINE c_v256 c_v256_unpacklo_s8_s16(c_v256 a) {
  return c_v256_from_v128(c_v128_unpackhi_s8_s16(a.v128[0]),
                          c_v128_unpacklo_s8_s16(a.v128[0]));
}

SIMD_INLINE c_v256 c_v256_unpackhi_s8_s16(c_v256 a) {
  return c_v256_from_v128(c_v128_unpackhi_s8_s16(a.v128[1]),
                          c_v128_unpacklo_s8_s16(a.v128[1]));
}

SIMD_INLINE c_v256 c_v256_pack_s32_s16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_pack_s32_s16(a.v128[1], a.v128[0]),
                          c_v128_pack_s32_s16(b.v128[1], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_pack_s32_u16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_pack_s32_u16(a.v128[1], a.v128[0]),
                          c_v128_pack_s32_u16(b.v128[1], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_pack_s16_u8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_pack_s16_u8(a.v128[1], a.v128[0]),
                          c_v128_pack_s16_u8(b.v128[1], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_pack_s16_s8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_pack_s16_s8(a.v128[1], a.v128[0]),
                          c_v128_pack_s16_s8(b.v128[1], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_unpack_u16_s32(c_v128 a) {
  return c_v256_from_v128(c_v128_unpackhi_u16_s32(a),
                          c_v128_unpacklo_u16_s32(a));
}

SIMD_INLINE c_v256 c_v256_unpack_s16_s32(c_v128 a) {
  return c_v256_from_v128(c_v128_unpackhi_s16_s32(a),
                          c_v128_unpacklo_s16_s32(a));
}

SIMD_INLINE c_v256 c_v256_unpacklo_u16_s32(c_v256 a) {
  return c_v256_from_v128(c_v128_unpackhi_u16_s32(a.v128[0]),
                          c_v128_unpacklo_u16_s32(a.v128[0]));
}

SIMD_INLINE c_v256 c_v256_unpacklo_s16_s32(c_v256 a) {
  return c_v256_from_v128(c_v128_unpackhi_s16_s32(a.v128[0]),
                          c_v128_unpacklo_s16_s32(a.v128[0]));
}

SIMD_INLINE c_v256 c_v256_unpackhi_u16_s32(c_v256 a) {
  return c_v256_from_v128(c_v128_unpackhi_u16_s32(a.v128[1]),
                          c_v128_unpacklo_u16_s32(a.v128[1]));
}

SIMD_INLINE c_v256 c_v256_unpackhi_s16_s32(c_v256 a) {
  return c_v256_from_v128(c_v128_unpackhi_s16_s32(a.v128[1]),
                          c_v128_unpacklo_s16_s32(a.v128[1]));
}

SIMD_INLINE c_v256 c_v256_shuffle_8(c_v256 a, c_v256 pattern) {
  c_v256 t;
  int c;
  for (c = 0; c < 32; c++)
    t.u8[c] = a.u8[CONFIG_BIG_ENDIAN ? 31 - (pattern.u8[c] & 31)
                                     : pattern.u8[c] & 31];

  return t;
}

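/* Shuffle bytes from the 64-byte concatenation of a (upper half) and b (lower
   half): pattern values 0..31 select bytes of b and 32..63 select bytes of a
   (the index within each vector is reversed when CONFIG_BIG_ENDIAN is set). */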
SIMD_INLINE c_v256 c_v256_wideshuffle_8(c_v256 a, c_v256 b, c_v256 pattern) {
  c_v256 t;
  int c;
  for (c = 0; c < 32; c++)
    t.u8[c] = (pattern.u8[c] < 32
                   ? b.u8
                   : a.u8)[CONFIG_BIG_ENDIAN ? 31 - (pattern.u8[c] & 31)
                                             : pattern.u8[c] & 31];
  return t;
}

// Pairwise / dual-lane shuffle: shuffle two 128 bit lanes.
SIMD_INLINE c_v256 c_v256_pshuffle_8(c_v256 a, c_v256 pattern) {
  return c_v256_from_v128(
      c_v128_shuffle_8(c_v256_high_v128(a), c_v256_high_v128(pattern)),
      c_v128_shuffle_8(c_v256_low_v128(a), c_v256_low_v128(pattern)));
}

SIMD_INLINE c_v256 c_v256_cmpgt_s8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_cmpgt_s8(a.v128[1], b.v128[1]),
                          c_v128_cmpgt_s8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_cmplt_s8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_cmplt_s8(a.v128[1], b.v128[1]),
                          c_v128_cmplt_s8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_cmpeq_8(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_cmpeq_8(a.v128[1], b.v128[1]),
                          c_v128_cmpeq_8(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_cmpgt_s16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_cmpgt_s16(a.v128[1], b.v128[1]),
                          c_v128_cmpgt_s16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_cmplt_s16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_cmplt_s16(a.v128[1], b.v128[1]),
                          c_v128_cmplt_s16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_cmpeq_16(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_cmpeq_16(a.v128[1], b.v128[1]),
                          c_v128_cmpeq_16(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_cmpgt_s32(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_cmpgt_s32(a.v128[1], b.v128[1]),
                          c_v128_cmpgt_s32(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_cmplt_s32(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_cmplt_s32(a.v128[1], b.v128[1]),
                          c_v128_cmplt_s32(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_cmpeq_32(c_v256 a, c_v256 b) {
  return c_v256_from_v128(c_v128_cmpeq_32(a.v128[1], b.v128[1]),
                          c_v128_cmpeq_32(a.v128[0], b.v128[0]));
}

SIMD_INLINE c_v256 c_v256_shl_n_byte(c_v256 a, unsigned int n) {
  if (n < 16)
    return c_v256_from_v128(c_v128_or(c_v128_shl_n_byte(a.v128[1], n),
                                      c_v128_shr_n_byte(a.v128[0], 16 - n)),
                            c_v128_shl_n_byte(a.v128[0], n));
  else if (n > 16)
    return c_v256_from_v128(c_v128_shl_n_byte(a.v128[0], n - 16),
                            c_v128_zero());
  else
    return c_v256_from_v128(c_v256_low_v128(a), c_v128_zero());
}

SIMD_INLINE c_v256 c_v256_shr_n_byte(c_v256 a, unsigned int n) {
  if (n < 16)
    return c_v256_from_v128(c_v128_shr_n_byte(a.v128[1], n),
                            c_v128_or(c_v128_shr_n_byte(a.v128[0], n),
                                      c_v128_shl_n_byte(a.v128[1], 16 - n)));
  else if (n > 16)
    return c_v256_from_v128(c_v128_zero(),
                            c_v128_shr_n_byte(a.v128[1], n - 16));
  else
    return c_v256_from_v128(c_v128_zero(), c_v256_high_v128(a));
}

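/* Extract 32 bytes from the 64-byte concatenation a:b, starting c bytes up
   from the least significant end of b (c == 0 returns b unchanged). */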
SIMD_INLINE c_v256 c_v256_align(c_v256 a, c_v256 b, unsigned int c) {
  if (SIMD_CHECK && c > 31) {
    fprintf(stderr, "Error: undefined alignment %d\n", c);
    abort();
  }
  return c ? c_v256_or(c_v256_shr_n_byte(b, c), c_v256_shl_n_byte(a, 32 - c))
           : b;
}

SIMD_INLINE c_v256 c_v256_shl_8(c_v256 a, unsigned int c) {
  return c_v256_from_v128(c_v128_shl_8(a.v128[1], c),
                          c_v128_shl_8(a.v128[0], c));
}

SIMD_INLINE c_v256 c_v256_shr_u8(c_v256 a, unsigned int c) {
  return c_v256_from_v128(c_v128_shr_u8(a.v128[1], c),
                          c_v128_shr_u8(a.v128[0], c));
}

SIMD_INLINE c_v256 c_v256_shr_s8(c_v256 a, unsigned int c) {
  return c_v256_from_v128(c_v128_shr_s8(a.v128[1], c),
                          c_v128_shr_s8(a.v128[0], c));
}

SIMD_INLINE c_v256 c_v256_shl_16(c_v256 a, unsigned int c) {
  return c_v256_from_v128(c_v128_shl_16(a.v128[1], c),
                          c_v128_shl_16(a.v128[0], c));
}

SIMD_INLINE c_v256 c_v256_shr_u16(c_v256 a, unsigned int c) {
  return c_v256_from_v128(c_v128_shr_u16(a.v128[1], c),
                          c_v128_shr_u16(a.v128[0], c));
}

SIMD_INLINE c_v256 c_v256_shr_s16(c_v256 a, unsigned int c) {
  return c_v256_from_v128(c_v128_shr_s16(a.v128[1], c),
                          c_v128_shr_s16(a.v128[0], c));
}

SIMD_INLINE c_v256 c_v256_shl_32(c_v256 a, unsigned int c) {
  return c_v256_from_v128(c_v128_shl_32(a.v128[1], c),
                          c_v128_shl_32(a.v128[0], c));
}

SIMD_INLINE c_v256 c_v256_shr_u32(c_v256 a, unsigned int c) {
  return c_v256_from_v128(c_v128_shr_u32(a.v128[1], c),
                          c_v128_shr_u32(a.v128[0], c));
}

SIMD_INLINE c_v256 c_v256_shr_s32(c_v256 a, unsigned int c) {
  return c_v256_from_v128(c_v128_shr_s32(a.v128[1], c),
                          c_v128_shr_s32(a.v128[0], c));
}

SIMD_INLINE c_v256 c_v256_shr_s64(c_v256 a, unsigned int n) {
  c_v256 t;
  if (SIMD_CHECK && n > 63) {
    fprintf(stderr, "Error: undefined s64 shift right %d\n", n);
    abort();
  }
  t.s64[3] = a.s64[3] >> n;
  t.s64[2] = a.s64[2] >> n;
  t.s64[1] = a.s64[1] >> n;
  t.s64[0] = a.s64[0] >> n;
  return t;
}

SIMD_INLINE c_v256 c_v256_shr_u64(c_v256 a, unsigned int n) {
  c_v256 t;
  if (SIMD_CHECK && n > 63) {
    fprintf(stderr, "Error: undefined u64 shift right %d\n", n);
    abort();
  }
  t.u64[3] = a.u64[3] >> n;
  t.u64[2] = a.u64[2] >> n;
  t.u64[1] = a.u64[1] >> n;
  t.u64[0] = a.u64[0] >> n;
  return t;
}

SIMD_INLINE c_v256 c_v256_shl_64(c_v256 a, unsigned int n) {
  c_v256 t;
  if (SIMD_CHECK && n > 63) {
    fprintf(stderr, "Error: undefined u64 shift left %d\n", n);
    abort();
  }
  t.u64[3] = a.u64[3] << n;
  t.u64[2] = a.u64[2] << n;
  t.u64[1] = a.u64[1] << n;
  t.u64[0] = a.u64[0] << n;
  return t;
}

SIMD_INLINE c_v256 c_v256_shl_n_8(c_v256 a, unsigned int n) {
  return c_v256_shl_8(a, n);
}

SIMD_INLINE c_v256 c_v256_shl_n_16(c_v256 a, unsigned int n) {
  return c_v256_shl_16(a, n);
}

SIMD_INLINE c_v256 c_v256_shl_n_32(c_v256 a, unsigned int n) {
  return c_v256_shl_32(a, n);
}

SIMD_INLINE c_v256 c_v256_shl_n_64(c_v256 a, unsigned int n) {
  return c_v256_shl_64(a, n);
}

SIMD_INLINE c_v256 c_v256_shr_n_u8(c_v256 a, unsigned int n) {
  return c_v256_shr_u8(a, n);
}

SIMD_INLINE c_v256 c_v256_shr_n_u16(c_v256 a, unsigned int n) {
  return c_v256_shr_u16(a, n);
}

SIMD_INLINE c_v256 c_v256_shr_n_u32(c_v256 a, unsigned int n) {
  return c_v256_shr_u32(a, n);
}

SIMD_INLINE c_v256 c_v256_shr_n_u64(c_v256 a, unsigned int n) {
  return c_v256_shr_u64(a, n);
}

SIMD_INLINE c_v256 c_v256_shr_n_s8(c_v256 a, unsigned int n) {
  return c_v256_shr_s8(a, n);
}

SIMD_INLINE c_v256 c_v256_shr_n_s16(c_v256 a, unsigned int n) {
  return c_v256_shr_s16(a, n);
}

SIMD_INLINE c_v256 c_v256_shr_n_s32(c_v256 a, unsigned int n) {
  return c_v256_shr_s32(a, n);
}

SIMD_INLINE c_v256 c_v256_shr_n_s64(c_v256 a, unsigned int n) {
  return c_v256_shr_s64(a, n);
}

SIMD_INLINE c_v256 c_v256_shr_n_word(c_v256 a, const unsigned int n) {
  return c_v256_shr_n_byte(a, 2 * n);
}
SIMD_INLINE c_v256 c_v256_shl_n_word(c_v256 a, const unsigned int n) {
  return c_v256_shl_n_byte(a, 2 * n);
}

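/* 16-bit pixel (high bit depth) variants of the SAD/SSD accumulators below
   follow the same init/accumulate/sum pattern as the 8-bit versions above. */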
typedef uint32_t c_sad256_internal_u16;

SIMD_INLINE c_sad256_internal_u16 c_v256_sad_u16_init() { return 0; }

/* Implementation dependent return value.  Result must be finalised with
   v256_sad_u16_sum(). */
SIMD_INLINE c_sad256_internal_u16 c_v256_sad_u16(c_sad256_internal_u16 s,
                                                 c_v256 a, c_v256 b) {
  int c;
  for (c = 0; c < 16; c++)
    s += a.u16[c] > b.u16[c] ? a.u16[c] - b.u16[c] : b.u16[c] - a.u16[c];
  return s;
}

SIMD_INLINE uint32_t c_v256_sad_u16_sum(c_sad256_internal_u16 s) { return s; }

typedef uint64_t c_ssd256_internal_s16;

SIMD_INLINE c_ssd256_internal_s16 c_v256_ssd_s16_init() { return 0; }

/* Implementation dependent return value.  Result must be finalised with
 * v256_ssd_s16_sum(). */
SIMD_INLINE c_ssd256_internal_s16 c_v256_ssd_s16(c_ssd256_internal_s16 s,
                                                 c_v256 a, c_v256 b) {
  int c;
  for (c = 0; c < 16; c++)
    s += (int32_t)(int16_t)(a.s16[c] - b.s16[c]) *
         (int32_t)(int16_t)(a.s16[c] - b.s16[c]);
  return s;
}

SIMD_INLINE uint64_t c_v256_ssd_s16_sum(c_ssd256_internal_s16 s) { return s; }

#endif  // AOM_AOM_DSP_SIMD_V256_INTRINSICS_C_H_