// Auto-generated file. Do not edit!
//   Template: src/x8-lut/neon-tbx128x4.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>

// Applies a 256-entry byte lookup table to n input bytes (y[i] = t[x[i]]),
// processing 48 bytes per main-loop iteration with AArch64 TBL/TBX.
//
// The 256-byte table is split into four 64-byte sub-tables. The first TBL
// covers indices 0-63; each subsequent TBX step subtracts 64 from the indices
// so that bytes now in range [0, 63] select from the next sub-table, while
// out-of-range index bytes (>= 64 after the subtraction) leave the previously
// gathered result untouched — that is the defining property of TBX.
//
// n - number of bytes to process; must be non-zero.
// x - input bytes (indices into t).
// y - output bytes; may not alias x in a way the caller doesn't expect.
// t - 256-entry lookup table.
void xnn_x8_lut_ukernel__neon_tbx128x4_x48(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const uint8_t t[restrict XNN_MIN_ELEMENTS(256)])
{
  assert(n != 0);
  assert(x != NULL);
  assert(y != NULL);

  // Load the full 256-byte table into registers: 4 groups of 4 q-registers.
  const uint8x16x4_t vtable0123 = vld1q_u8_x4(t);
  const uint8x16x4_t vtable4567 = vld1q_u8_x4(t + 64);
  const uint8x16x4_t vtable89AB = vld1q_u8_x4(t + 128);
  const uint8x16x4_t vtableCDEF = vld1q_u8_x4(t + 192);
  // Per-step index rebase: each TBL/TBX consumes 64 table entries.
  const uint8x16_t voffset = vmovq_n_u8(64);

  // Main loop: 48 bytes (3 vectors) per iteration; the three lookups are
  // interleaved to hide TBL/TBX latency.
  for (; n >= 48 * sizeof(uint8_t); n -= 48 * sizeof(uint8_t)) {
    uint8x16_t vx0 = vld1q_u8(x); x += 16;
    uint8x16_t vx1 = vld1q_u8(x); x += 16;
    uint8x16_t vx2 = vld1q_u8(x); x += 16;

    // Indices 0-63: plain TBL (out-of-range lanes produce 0, later fixed up).
    uint8x16_t vy0 = vqtbl4q_u8(vtable0123, vx0);
    vx0 = vsubq_u8(vx0, voffset);
    uint8x16_t vy1 = vqtbl4q_u8(vtable0123, vx1);
    vx1 = vsubq_u8(vx1, voffset);
    uint8x16_t vy2 = vqtbl4q_u8(vtable0123, vx2);
    vx2 = vsubq_u8(vx2, voffset);

    // Indices 64-127.
    vy0 = vqtbx4q_u8(vy0, vtable4567, vx0);
    vx0 = vsubq_u8(vx0, voffset);
    vy1 = vqtbx4q_u8(vy1, vtable4567, vx1);
    vx1 = vsubq_u8(vx1, voffset);
    vy2 = vqtbx4q_u8(vy2, vtable4567, vx2);
    vx2 = vsubq_u8(vx2, voffset);

    // Indices 128-191.
    vy0 = vqtbx4q_u8(vy0, vtable89AB, vx0);
    vx0 = vsubq_u8(vx0, voffset);
    vy1 = vqtbx4q_u8(vy1, vtable89AB, vx1);
    vx1 = vsubq_u8(vx1, voffset);
    vy2 = vqtbx4q_u8(vy2, vtable89AB, vx2);
    vx2 = vsubq_u8(vx2, voffset);

    // Indices 192-255.
    vy0 = vqtbx4q_u8(vy0, vtableCDEF, vx0);
    vy1 = vqtbx4q_u8(vy1, vtableCDEF, vx1);
    vy2 = vqtbx4q_u8(vy2, vtableCDEF, vx2);

    vst1q_u8(y, vy0); y += 16;
    vst1q_u8(y, vy1); y += 16;
    vst1q_u8(y, vy2); y += 16;
  }
  // Single-vector loop for the remaining full 16-byte groups.
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
    uint8x16_t vx = vld1q_u8(x); x += 16;

    uint8x16_t vy = vqtbl4q_u8(vtable0123, vx);

    vx = vsubq_u8(vx, voffset);
    vy = vqtbx4q_u8(vy, vtable4567, vx);

    vx = vsubq_u8(vx, voffset);
    vy = vqtbx4q_u8(vy, vtable89AB, vx);

    vx = vsubq_u8(vx, voffset);
    vy = vqtbx4q_u8(vy, vtableCDEF, vx);

    vst1q_u8(y, vy); y += 16;
  }
  // Tail of 1-15 bytes: a full 16-byte vector is loaded, but only n bytes are
  // stored. NOTE(review): the over-read past x + n assumes the input buffer
  // is readable up to a 16-byte boundary — standard for these generated
  // kernels, but confirm against the caller's allocation contract.
  if XNN_UNLIKELY(n != 0) {
    uint8x16_t vx = vld1q_u8(x);

    uint8x16_t vy = vqtbl4q_u8(vtable0123, vx);

    vx = vsubq_u8(vx, voffset);
    vy = vqtbx4q_u8(vy, vtable4567, vx);

    vx = vsubq_u8(vx, voffset);
    vy = vqtbx4q_u8(vy, vtable89AB, vx);

    vx = vsubq_u8(vx, voffset);
    vy = vqtbx4q_u8(vy, vtableCDEF, vx);

    // Store 8 / 4 / 2 / 1 bytes according to the bits of n, shifting the
    // remaining result bytes down after each partial store.
    uint8x8_t vy_lo = vget_low_u8(vy);
    if (n & (8 * sizeof(uint8_t))) {
      vst1_u8(y, vy_lo); y += 8;
      vy_lo = vget_high_u8(vy);
    }
    if (n & (4 * sizeof(uint8_t))) {
      vst1_lane_u32((void*) y, vreinterpret_u32_u8(vy_lo), 0); y += 4;
      vy_lo = vext_u8(vy_lo, vy_lo, 4);
    }
    if (n & (2 * sizeof(uint8_t))) {
      vst1_lane_u16((void*) y, vreinterpret_u16_u8(vy_lo), 0); y += 2;
      vy_lo = vext_u8(vy_lo, vy_lo, 2);
    }
    if (n & (1 * sizeof(uint8_t))) {
      vst1_lane_u8(y, vy_lo, 0);
    }
  }
}