// Auto-generated file. Do not edit!
//   Template: src/f16-vhswish/neonfp16arith.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>

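// f16 hswish vector microkernel using NEON half-precision (FP16) arithmetic,
// unrolled to 16 elements per main-loop iteration (the "_x16" suffix).
// n is the size of the input in bytes and must be a whole number of elements.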
void xnn_f16_vhswish_ukernel__neonfp16arith_x16(
    size_t n,
    const void* restrict x_ptr,
    void* restrict y_ptr,
    const union xnn_f16_hswish_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(__fp16) == 0);

  const __fp16* x = (const __fp16*) x_ptr;
  __fp16* y = (__fp16*) y_ptr;

  const float16x8_t vsixth = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neon.sixth));
  const float16x8_t vthree = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neon.three));
  const int16x8_t vsix = vreinterpretq_s16_u16(vld1q_dup_u16(&params->neon.six));
  const int16x8_t vzero = vdupq_n_s16(0);

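  // The kernel evaluates hswish(x) = x * min(max(x + 3, 0), 6) / 6, computed
  // as (x * 1/6) * clamp(x + 3, 0, 6). The clamp uses integer vmin/vmax on the
  // raw f16 bit patterns (hence vsix and vzero are int16x8_t): a negative
  // accumulator has its sign bit set and compares below zero as a signed
  // 16-bit integer, so vmaxq_s16 replaces it with +0.0; for the remaining
  // non-negative half-precision values, signed-integer ordering of the bit
  // patterns matches floating-point ordering, so vminq_s16 against the bits
  // of 6.0 performs the upper clamp.
  // Main loop: process 16 elements (two 8-lane vectors) per iteration.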
  for (; n >= 16 * sizeof(__fp16); n -= 16 * sizeof(__fp16)) {
    float16x8_t vx01234567 = vld1q_f16(x); x += 8;
    float16x8_t vx89ABCDEF = vld1q_f16(x); x += 8;

    float16x8_t vacc01234567 = vaddq_f16(vx01234567, vthree);
    vx01234567 = vmulq_f16(vx01234567, vsixth);
    float16x8_t vacc89ABCDEF = vaddq_f16(vx89ABCDEF, vthree);
    vx89ABCDEF = vmulq_f16(vx89ABCDEF, vsixth);

    vacc01234567 = vreinterpretq_f16_s16(vmaxq_s16(vreinterpretq_s16_f16(vacc01234567), vzero));
    vacc89ABCDEF = vreinterpretq_f16_s16(vmaxq_s16(vreinterpretq_s16_f16(vacc89ABCDEF), vzero));

    vacc01234567 = vreinterpretq_f16_s16(vminq_s16(vreinterpretq_s16_f16(vacc01234567), vsix));
    vacc89ABCDEF = vreinterpretq_f16_s16(vminq_s16(vreinterpretq_s16_f16(vacc89ABCDEF), vsix));

    vacc01234567 = vmulq_f16(vacc01234567, vx01234567);
    vacc89ABCDEF = vmulq_f16(vacc89ABCDEF, vx89ABCDEF);

    vst1q_f16(y, vacc01234567); y += 8;
    vst1q_f16(y, vacc89ABCDEF); y += 8;
  }
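  // Process any remaining full 8-element vectors one at a time.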
  for (; n >= 8 * sizeof(__fp16); n -= 8 * sizeof(__fp16)) {
    float16x8_t vx = vld1q_f16(x); x += 8;
    float16x8_t vacc = vaddq_f16(vx, vthree);
    vx = vmulq_f16(vx, vsixth);
    vacc = vreinterpretq_f16_s16(vmaxq_s16(vreinterpretq_s16_f16(vacc), vzero));
    vacc = vreinterpretq_f16_s16(vminq_s16(vreinterpretq_s16_f16(vacc), vsix));
    vacc = vmulq_f16(vacc, vx);
    vst1q_f16(y, vacc); y += 8;
  }
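  // Fewer than 8 elements remain. XNN_OOB_READS on the signature signals that
  // the full-vector load below may read past the end of the input buffer;
  // only the valid 4-, 2-, and 1-element pieces of the result are stored.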
  if XNN_UNLIKELY(n != 0) {
    float16x8_t vx = vld1q_f16(x);
    float16x8_t vacc = vaddq_f16(vx, vthree);
    vx = vmulq_f16(vx, vsixth);
    vacc = vreinterpretq_f16_s16(vmaxq_s16(vreinterpretq_s16_f16(vacc), vzero));
    vacc = vreinterpretq_f16_s16(vminq_s16(vreinterpretq_s16_f16(vacc), vsix));
    vacc = vmulq_f16(vacc, vx);

    float16x4_t vacc_lo = vget_low_f16(vacc);
    if (n & (4 * sizeof(__fp16))) {
      vst1_f16(y, vacc_lo); y += 4;
      vacc_lo = vget_high_f16(vacc);
    }
    if (n & (2 * sizeof(__fp16))) {
      vst1_lane_u32((void*) y, vreinterpret_u32_f16(vacc_lo), 0); y += 2;
      vacc_lo = vext_f16(vacc_lo, vacc_lo, 2);
    }
    if (n & (1 * sizeof(__fp16))) {
      vst1_lane_f16(y, vacc_lo, 0);
    }
  }
}
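
// For reference, a minimal scalar sketch of the same computation. This helper
// is not part of XNNPACK; it is written here purely for illustration, and its
// float intermediates may round slightly differently than the all-f16
// arithmetic in the vector kernel above.
static void f16_vhswish_scalar_reference(size_t n, const __fp16* x, __fp16* y) {
  assert(n != 0);
  assert(n % sizeof(__fp16) == 0);
  for (size_t i = 0; i < n / sizeof(__fp16); i++) {
    const float vx = (float) x[i];
    float vacc = vx + 3.0f;
    vacc = vacc < 0.0f ? 0.0f : vacc;  // max(x + 3, 0)
    vacc = vacc > 6.0f ? 6.0f : vacc;  // min(..., 6)
    y[i] = (__fp16) (vx * (1.0f / 6.0f) * vacc);
  }
}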