• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
5
6$assert CHANNEL_TILE % 4 == 0
7$assert CHANNEL_TILE >= 4
8$assert ROW_TILE >= 1
9$ABC = "0123456789ABCDEFGHIJKLMN"
10#include <assert.h>
11
12#include <arm_neon.h>
13
14#include <xnnpack/math.h>
15#include <xnnpack/prelu.h>
16
17
// f32 PReLU (Parametric ReLU) microkernel using NEON intrinsics.
//
// This is an xngen template: lines beginning with `$` are generation-time
// directives and `${...}` expressions are substituted before compilation.
// Each generated specialization processes up to ${ROW_TILE} rows and
// ${CHANNEL_TILE} channels per main-loop iteration.
//
// Per element: out = x < 0 ? x * w : x, then clamped to [params->scalar.min,
// params->scalar.max].  `weights` holds one per-channel slope w.
//
// NOTE: `channels` is a byte count, not an element count (see asserts below).
void xnn_f32_prelu_ukernel__neon_${ROW_TILE}x${CHANNEL_TILE}(
    size_t rows,
    size_t channels,
    const float*restrict input,
    size_t input_stride,  // byte stride between consecutive input rows
    const float*restrict weights,
    float*restrict output,
    size_t output_stride,  // byte stride between consecutive output rows
    const union xnn_f32_output_params params[restrict static 1])
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);  // channels is expressed in bytes

  // Set up ${ROW_TILE} row pointers.  When fewer than ${ROW_TILE} rows
  // remain, excess pointers are aliased to the previous row, so the extra
  // "rows" just recompute an earlier row's data in place — loads and stores
  // stay inside valid memory.  (`rows <= ${M}` and `rows < ${M+1}` are
  // equivalent integer tests; the template alternates the spelling — NOTE
  // (review): presumably cosmetic/codegen symmetry, no behavioral difference.)
  const float* i0 = input;
  float* o0 = output;
  $for M in range(1, ROW_TILE):
    const float* i${M} = (const float*) ((uintptr_t) i${M-1} + input_stride);
    float* o${M} = (float*) ((uintptr_t) o${M-1} + output_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(rows <= ${M}) {
        i${M} = i${M-1};
        o${M} = o${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(rows < ${M+1}) {
        i${M} = i${M-1};
        o${M} = o${M-1};
      }

  // After a full pass over `channels` bytes of one row, adding these
  // increments moves each pointer from the end of its current row to the
  // start of the row ${ROW_TILE} rows further down.
  const size_t input_increment = input_stride * ${ROW_TILE} - channels;
  const size_t output_increment = output_stride * ${ROW_TILE} - channels;

  // Output clamping bounds, broadcast across all four lanes.
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  do {
    const float* w = weights;  // per-channel slopes restart every row pass
    size_t c = channels;
    // Main loop: ${CHANNEL_TILE} channels across all ${ROW_TILE} rows per
    // iteration, in 4-float vector groups.
    for (; c >= ${CHANNEL_TILE} * sizeof(float); c -= ${CHANNEL_TILE} * sizeof(float)) {
      $for C in range(0, CHANNEL_TILE, 4):
        const float32x4_t vw${ABC[C:C+4]} = vld1q_f32(w); w += 4;

      $for M in range(ROW_TILE):
        $for C in range(0, CHANNEL_TILE, 4):
          const float32x4_t vi${M}x${ABC[C:C+4]} = vld1q_f32(i${M}); i${M} += 4;

      // Negative-input mask via integer compare: a float whose sign bit is
      // set reinterprets as a negative int32, so vcltq_s32(bits, 0) flags
      // exactly the sign-bit-set lanes (this also routes -0.0 through the
      // scaled path).
      $for M in range(ROW_TILE):
        $for C in range(0, CHANNEL_TILE, 4):
          float32x4_t vacc${M}x${ABC[C:C+4]} = vmulq_f32(vi${M}x${ABC[C:C+4]}, vw${ABC[C:C+4]});
          const uint32x4_t vm${M}x${ABC[C:C+4]} = vcltq_s32(vreinterpretq_s32_f32(vi${M}x${ABC[C:C+4]}), vmovq_n_s32(0));

      // Per-lane select: masked (negative) lanes take x*w, others keep x.
      $for M in range(ROW_TILE):
        $for C in range(0, CHANNEL_TILE, 4):
          vacc${M}x${ABC[C:C+4]} = vbslq_f32(vm${M}x${ABC[C:C+4]}, vacc${M}x${ABC[C:C+4]}, vi${M}x${ABC[C:C+4]});

      // Clamp to [vmin, vmax].
      $for M in range(ROW_TILE):
        $for C in range(0, CHANNEL_TILE, 4):
          vacc${M}x${ABC[C:C+4]} = vmaxq_f32(vacc${M}x${ABC[C:C+4]}, vmin);

      $for M in range(ROW_TILE):
        $for C in range(0, CHANNEL_TILE, 4):
          vacc${M}x${ABC[C:C+4]} = vminq_f32(vacc${M}x${ABC[C:C+4]}, vmax);

      $for M in range(ROW_TILE):
        $for C in range(0, CHANNEL_TILE, 4):
          vst1q_f32(o${M}, vacc${M}x${ABC[C:C+4]}); o${M} += 4;
    }
    // Generated only when CHANNEL_TILE != 4: drain remaining full groups of
    // 4 channels one vector at a time (same computation as the main loop).
    $if CHANNEL_TILE != 4:
      for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
        const float32x4_t vw0123 = vld1q_f32(w); w += 4;

        $for M in range(ROW_TILE):
          const float32x4_t vi${M}x0123 = vld1q_f32(i${M});
          i${M} += 4;

        $for M in range(ROW_TILE):
          float32x4_t vacc${M}x0123 = vmulq_f32(vi${M}x0123, vw0123);
          const uint32x4_t vm${M}x0123 = vcltq_s32(vreinterpretq_s32_f32(vi${M}x0123), vmovq_n_s32(0));

        $for M in range(ROW_TILE):
          vacc${M}x0123 = vbslq_f32(vm${M}x0123, vacc${M}x0123, vi${M}x0123);

        $for M in range(ROW_TILE):
          vacc${M}x0123 = vmaxq_f32(vacc${M}x0123, vmin);

        $for M in range(ROW_TILE):
          vacc${M}x0123 = vminq_f32(vacc${M}x0123, vmax);

        $for M in range(ROW_TILE):
          vst1q_f32(o${M}, vacc${M}x0123); o${M} += 4;
      }
    // Tail of 1-3 channels.  A full 4-float vector is still loaded from `w`
    // and each i${M}, i.e. up to 3 floats past the logical end — NOTE
    // (review): assumes those bytes are readable (XNNPACK buffer padding
    // convention) — confirm against callers.  Only `c` bytes are stored.
    if XNN_UNLIKELY(c != 0) {
      const float32x4_t vw0123 = vld1q_f32(w); w += 4;

      $for M in range(ROW_TILE):
        const float32x4_t vi${M}x0123 = vld1q_f32(i${M});
        i${M} = (const float*) ((uintptr_t) i${M} + c);

      $for M in range(ROW_TILE):
        float32x4_t vacc${M}x0123 = vmulq_f32(vi${M}x0123, vw0123);
        const uint32x4_t vm${M}x0123 = vcltq_s32(vreinterpretq_s32_f32(vi${M}x0123), vmovq_n_s32(0));

      $for M in range(ROW_TILE):
        vacc${M}x0123 = vbslq_f32(vm${M}x0123, vacc${M}x0123, vi${M}x0123);

      $for M in range(ROW_TILE):
        vacc${M}x0123 = vmaxq_f32(vacc${M}x0123, vmin);

      $for M in range(ROW_TILE):
        vacc${M}x0123 = vminq_f32(vacc${M}x0123, vmax);

      // Store the low pair if c has the 2-float bit, then shift the high
      // pair down; store one more float if c has the 1-float bit.
      $for M in range(ROW_TILE):
        float32x2_t vacc${M}x01 = vget_low_f32(vacc${M}x0123);
      if (c & (2 * sizeof(float))) {
        $for M in range(ROW_TILE):
          vst1_f32(o${M}, vacc${M}x01); o${M} += 2;

        $for M in range(ROW_TILE):
          vacc${M}x01 = vget_high_f32(vacc${M}x0123);
      }
      if (c & (1 * sizeof(float))) {
        $for M in range(ROW_TILE):
          vst1_lane_f32(o${M}, vacc${M}x01, 0); o${M} += 1;
      }
    }
    // Advance to the next block of ${ROW_TILE} rows, re-aliasing pointers for
    // a short final block exactly as in the prologue.
    $for M in range(ROW_TILE):
      i${M} = (const float*) ((uintptr_t) i${M} + input_increment);
      o${M} = (float*) ((uintptr_t) o${M} + output_increment);
      $if M % 2 == 1:
        if XNN_UNPREDICTABLE(rows < ${ROW_TILE+M+1}) {
          i${M} = i${M-1};
          o${M} = o${M-1};
        }
      $elif M != 0:
        if XNN_UNPREDICTABLE(rows <= ${ROW_TILE+M}) {
          i${M} = i${M-1};
          o${M} = o${M-1};
        }
    rows = doz(rows, ${ROW_TILE});  // doz = difference-or-zero (saturating subtract)
  } while (rows != 0);
}
159