// Auto-generated file. Do not edit!
//   Template: src/x8-lut/ssse3.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <tmmintrin.h>

#include <xnnpack/lut.h>
#include <xnnpack/common.h>


void xnn_x8_lut_ukernel__ssse3_x16(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const uint8_t t[restrict XNN_MIN_ELEMENTS(256)])
{
  assert(n != 0);
  assert(x != NULL);
  assert(y != NULL);

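  // Load the 256-entry lookup table as sixteen 16-byte rows. The table
  // address must be 16-byte aligned, as required by _mm_load_si128.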
  const __m128i vt0 = _mm_load_si128((const __m128i*) t);
  const __m128i vt1 = _mm_load_si128((const __m128i*) (t + 16));
  const __m128i vt2 = _mm_load_si128((const __m128i*) (t + 32));
  const __m128i vt3 = _mm_load_si128((const __m128i*) (t + 48));
  const __m128i vt4 = _mm_load_si128((const __m128i*) (t + 64));
  const __m128i vt5 = _mm_load_si128((const __m128i*) (t + 80));
  const __m128i vt6 = _mm_load_si128((const __m128i*) (t + 96));
  const __m128i vt7 = _mm_load_si128((const __m128i*) (t + 112));
  const __m128i vt8 = _mm_load_si128((const __m128i*) (t + 128));
  const __m128i vt9 = _mm_load_si128((const __m128i*) (t + 144));
  const __m128i vtA = _mm_load_si128((const __m128i*) (t + 160));
  const __m128i vtB = _mm_load_si128((const __m128i*) (t + 176));
  const __m128i vtC = _mm_load_si128((const __m128i*) (t + 192));
  const __m128i vtD = _mm_load_si128((const __m128i*) (t + 208));
  const __m128i vtE = _mm_load_si128((const __m128i*) (t + 224));
  const __m128i vtF = _mm_load_si128((const __m128i*) (t + 240));

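  // Precompute XOR-difference tables: vtable0 is row 0 as-is, vtable1..7 hold
  // the XOR of adjacent rows, and vtable8..F additionally fold in vtable0..7.
  // XOR-accumulating PSHUFB results over these tables in the loop below
  // telescopes back to the original table entry for each input index; the
  // extra vtable0..7 terms cancel the contributions picked up when high
  // input bytes (128..255) wrap around under the index subtraction.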
  const __m128i vtable0 = vt0;
  const __m128i vtable1 = _mm_xor_si128(vt0, vt1);
  const __m128i vtable2 = _mm_xor_si128(vt1, vt2);
  const __m128i vtable3 = _mm_xor_si128(vt2, vt3);
  const __m128i vtable4 = _mm_xor_si128(vt3, vt4);
  const __m128i vtable5 = _mm_xor_si128(vt4, vt5);
  const __m128i vtable6 = _mm_xor_si128(vt5, vt6);
  const __m128i vtable7 = _mm_xor_si128(vt6, vt7);
  const __m128i vtable8 = _mm_xor_si128(_mm_xor_si128(vt7, vt8), vtable0);
  const __m128i vtable9 = _mm_xor_si128(_mm_xor_si128(vt8, vt9), vtable1);
  const __m128i vtableA = _mm_xor_si128(_mm_xor_si128(vt9, vtA), vtable2);
  const __m128i vtableB = _mm_xor_si128(_mm_xor_si128(vtA, vtB), vtable3);
  const __m128i vtableC = _mm_xor_si128(_mm_xor_si128(vtB, vtC), vtable4);
  const __m128i vtableD = _mm_xor_si128(_mm_xor_si128(vtC, vtD), vtable5);
  const __m128i vtableE = _mm_xor_si128(_mm_xor_si128(vtD, vtE), vtable6);
  const __m128i vtableF = _mm_xor_si128(_mm_xor_si128(vtE, vtF), vtable7);

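  // PSHUFB (_mm_shuffle_epi8) selects table[index & 15] when the index byte's
  // high bit is clear, and produces 0 when it is set. Repeatedly subtracting
  // 16 from the indices walks each input byte into range for the 16-byte
  // sub-tables that contribute to its lookup; out-of-range bytes contribute 0.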
  const __m128i voffset = _mm_set1_epi8(16);
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
    __m128i vx = _mm_loadu_si128((const __m128i*) x);
    x += 16;

    __m128i vy = _mm_shuffle_epi8(vtable0, vx);

    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable1, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable2, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable3, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable4, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable5, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable6, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable7, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable8, vx));

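    // From here on, subtract with signed saturation: after eight plain
    // subtractions the indices for small inputs reach as low as -128, and a
    // further wrapping subtraction would flip them back to positive,
    // re-enabling unwanted lookups. Saturation pins them at -128 instead.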
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable9, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableA, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableB, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableC, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableD, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableE, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableF, vx));

    _mm_storeu_si128((__m128i*) y, vy);
    y += 16;
  }
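  // Handle the final 1-15 bytes: load a full 16-byte vector (this reads up to
  // 15 bytes beyond the remaining input), translate it exactly as above, then
  // store only the n valid output bytes.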
  if XNN_UNLIKELY(n != 0) {
    __m128i vx = _mm_loadu_si128((const __m128i*) x);

    __m128i vy = _mm_shuffle_epi8(vtable0, vx);

    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable1, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable2, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable3, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable4, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable5, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable6, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable7, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable8, vx));

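    // As in the main loop, switch to saturating subtraction for the upper
    // eight sub-tables.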
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable9, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableA, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableB, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableC, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableD, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableE, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableF, vx));

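    // Store the translated bytes one power-of-two chunk at a time, keyed off
    // the bits of n: 8, then 4, then 2, then 1 byte(s).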
    if (n & (8 * sizeof(uint8_t))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(uint8_t))) {
      *((uint32_t*) y) = (uint32_t) _mm_cvtsi128_si32(vy);
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
    uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
    if (n & (2 * sizeof(uint8_t))) {
      *((uint16_t*) y) = (uint16_t) vy_lo;
      vy_lo >>= 16;
      y += 2;
    }
    if (n & (1 * sizeof(uint8_t))) {
      *y = (uint8_t) vy_lo;
    }
  }
}
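
// A minimal usage sketch, assuming the calling contract implied by the
// signature above: n is the byte count, x/y are the input/output buffers,
// and t is a 256-entry table whose address satisfies the 16-byte alignment
// required by the _mm_load_si128 loads (here via the XNN_ALIGN macro from
// <xnnpack/common.h>). The table and buffer names below are illustrative.
//
//   XNN_ALIGN(16) static const uint8_t table[256] = { /* 256 entries */ };
//   uint8_t input[64];
//   uint8_t output[64];
//   xnn_x8_lut_ukernel__ssse3_x16(sizeof(input), input, output, table);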