// Auto-generated file. Do not edit!
//   Template: src/qs8-vmul/neon.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vmul.h>


void xnn_qu8_vmul_minmax_fp32_ukernel__neonv8_ld128_x16(
    size_t n,
    const uint8_t* input_a,
    const uint8_t* input_b,
    uint8_t* output,
    const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
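  // Broadcast the quantization parameters once, outside the loops. On AArch64
  // the zero points fill full 128-bit registers so vsubl_high_u8 can consume
  // the upper input halves directly; on ARMv7 64-bit broadcasts suffice.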
  #if XNN_ARCH_ARM64
    const uint8x16_t va_zero_point = vld1q_dup_u8(params->fp32_neonv8.a_zero_point);
    const uint8x16_t vb_zero_point = vld1q_dup_u8(params->fp32_neonv8.b_zero_point);
  #else
    const uint8x8_t va_zero_point = vld1_dup_u8(params->fp32_neonv8.a_zero_point);
    const uint8x8_t vb_zero_point = vld1_dup_u8(params->fp32_neonv8.b_zero_point);
  #endif
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->fp32_neonv8.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->fp32_neonv8.output_max);

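  // Main loop: multiply 16 quantized elements per iteration.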
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
    const uint8x16_t va0123456789ABCDEF = vld1q_u8(input_a); input_a += 16;
    const uint8x16_t vb0123456789ABCDEF = vld1q_u8(input_b); input_b += 16;

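    // Subtract the zero points with widening unsigned subtracts; the
    // differences fit in signed 16 bits.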
    #if XNN_ARCH_ARM64
      const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va0123456789ABCDEF), vget_low_u8(va_zero_point)));
      const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_high_u8(va0123456789ABCDEF, va_zero_point));
      const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(vb0123456789ABCDEF), vget_low_u8(vb_zero_point)));
      const int16x8_t vxb89ABCDEF = vreinterpretq_s16_u16(vsubl_high_u8(vb0123456789ABCDEF, vb_zero_point));
    #else  // !XNN_ARCH_ARM64
      const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va0123456789ABCDEF), va_zero_point));
      const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(va0123456789ABCDEF), va_zero_point));
      const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(vb0123456789ABCDEF), vb_zero_point));
      const int16x8_t vxb89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(vb0123456789ABCDEF), vb_zero_point));
    #endif  // XNN_ARCH_ARM64

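    // Compute exact 32-bit products of the zero-point-adjusted inputs.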
    int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
    int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
    int32x4_t vacc89AB = vmull_s16(vget_low_s16(vxa89ABCDEF), vget_low_s16(vxb89ABCDEF));
    int32x4_t vaccCDEF = vmull_s16(vget_high_s16(vxa89ABCDEF), vget_high_s16(vxb89ABCDEF));

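    // Requantize through float: first convert the 32-bit products to float32...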
    float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
    float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
    float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
    float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);

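    // ...apply the combined requantization scale (the product of the input
    // scales divided by the output scale)...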
    vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
    vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
    vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
    vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);

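    // ...and convert back to int32 with round-to-nearest-even. vcvtnq_s32_f32
    // requires ARMv8, hence the "neonv8" in this kernel's name.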
    vacc0123 = vcvtnq_s32_f32(vfpacc0123);
    vacc4567 = vcvtnq_s32_f32(vfpacc4567);
    vacc89AB = vcvtnq_s32_f32(vfpacc89AB);
    vaccCDEF = vcvtnq_s32_f32(vfpaccCDEF);

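    // Saturating-narrow the accumulators to 16 bits; AArch64 uses
    // vqmovn_high_s32 to avoid a separate vcombine.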
    #if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
    #else
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
    #endif

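    // Add the output zero point with saturating 16-bit addition.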
    vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
    vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);

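    // Narrow to uint8 with unsigned saturation.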
    #if XNN_ARCH_ARM64
      uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
    #else
      uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
    #endif

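    // Clamp to the caller-specified [output_min, output_max] range.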
    vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);

    vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);

    vst1q_u8(output, vout0123456789ABCDEF); output += 16;
  }
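  // Remainder: run the same pipeline on 8 elements at a time for the final
  // 1-15 elements. The 8-byte loads may read past the end of the inputs,
  // which the XNN_OOB_READS annotation on this function declares as safe.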
  if XNN_UNLIKELY(n != 0) {
    do {
      const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
      const uint8x8_t vb01234567 = vld1_u8(input_b); input_b += 8;

      #if XNN_ARCH_ARM64
        const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, vget_low_u8(va_zero_point)));
        const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vget_low_u8(vb_zero_point)));
      #else  // !XNN_ARCH_ARM64
        const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
        const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
      #endif  // XNN_ARCH_ARM64

      int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
      int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));

      float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
      float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

      vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
      vfpacc4567 = vmulq_f32(vfpacc4567, vscale);

      vacc0123 = vcvtnq_s32_f32(vfpacc0123);
      vacc4567 = vcvtnq_s32_f32(vfpacc4567);

      #if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      #else
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      #endif

      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

      uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);

      vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
      vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
      if XNN_LIKELY(n >= (8 * sizeof(uint8_t))) {
        vst1_u8(output, vout01234567); output += 8;
        n -= 8 * sizeof(uint8_t);
      } else {
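        // Partial store: write 4, 2, then 1 lanes according to the bits of
        // the remaining length, rotating consumed lanes out with vext_u8.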
        if (n & (4 * sizeof(uint8_t))) {
          vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
          vout01234567 = vext_u8(vout01234567, vout01234567, 4);
        }
        if (n & (2 * sizeof(uint8_t))) {
          vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
          vout01234567 = vext_u8(vout01234567, vout01234567, 2);
        }
        if (n & (1 * sizeof(uint8_t))) {
          vst1_lane_u8(output, vout01234567, 0);
        }
        n = 0;
      }
    } while (n != 0);
  }
}
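
// --- Usage sketch (editor's addition, not part of the generated kernel) ---
// A minimal illustration of how this ukernel might be driven directly; in
// practice XNNPACK reaches it through the operator API (e.g.
// xnn_create_multiply_nd_qu8). The initializer name and argument order below
// follow XNNPACK's params-init conventions but are assumptions, not a
// verified signature.
//
//   union xnn_qu8_mul_minmax_params params;
//   // Hypothetical helper; scale = a_scale * b_scale / output_scale.
//   xnn_init_qu8_mul_minmax_fp32_neonv8_params(
//       &params, /*a_zero_point=*/128, /*b_zero_point=*/128,
//       /*scale=*/2.5e-4f, /*output_zero_point=*/128,
//       /*output_min=*/0, /*output_max=*/255);
//   xnn_qu8_vmul_minmax_fp32_ukernel__neonv8_ld128_x16(
//       batch_size * sizeof(uint8_t), input_a, input_b, output, &params);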