// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>

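// Datatype-specific 8-bit types and NEON intrinsics: QS8 selects the signed int8 variants, QU8 the unsigned ones.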
$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
$XINT8X8_T = {"QS8": "int8x8_t", "QU8": "uint8x8_t"}[DATATYPE]
$XINT8X16_T = {"QS8": "int8x16_t", "QU8": "uint8x16_t"}[DATATYPE]
$VLD1Q_DUP_X8 = {"QS8": "vld1q_dup_s8", "QU8": "vld1q_dup_u8"}[DATATYPE]
$VLD1_DUP_X8 = {"QS8": "vld1_dup_s8", "QU8": "vld1_dup_u8"}[DATATYPE]
$VST1Q_X8 = {"QS8": "vst1q_s8", "QU8": "vst1q_u8"}[DATATYPE]
$VST1_X8 = {"QS8": "vst1_s8", "QU8": "vst1_u8"}[DATATYPE]
$VST1_LANE_X8 = {"QS8": "vst1_lane_s8", "QU8": "vst1_lane_u8"}[DATATYPE]
$VQMOVXN_S16 = {"QS8": "vqmovn_s16", "QU8": "vqmovun_s16"}[DATATYPE]
$VEXT_X8 = {"QS8": "vext_s8", "QU8": "vext_u8"}[DATATYPE]
$VCOMBINE_X8 = {"QS8": "vcombine_s8", "QU8": "vcombine_u8"}[DATATYPE]
$VGET_LOW_X8 = {"QS8": "vget_low_s8", "QU8": "vget_low_u8"}[DATATYPE]
$VREINTERPRET_U16_X8 = {"QS8": "vreinterpret_u16_s8", "QU8": "vreinterpret_u16_u8"}[DATATYPE]
$VREINTERPRET_U32_X8 = {"QS8": "vreinterpret_u32_s8", "QU8": "vreinterpret_u32_u8"}[DATATYPE]
$VMAXQ_X8 = {"QS8": "vmaxq_s8", "QU8": "vmaxq_u8"}[DATATYPE]
$VMAX_X8 = {"QS8": "vmax_s8", "QU8": "vmax_u8"}[DATATYPE]
$VMINQ_X8 = {"QS8": "vminq_s8", "QU8": "vminq_u8"}[DATATYPE]
$VMIN_X8 = {"QS8": "vmin_s8", "QU8": "vmin_u8"}[DATATYPE]
void xnn_f32_${DATATYPE.lower()}_vcvt_ukernel__neonv8_x${BATCH_TILE}(
    size_t n,
    const float* x,
    ${XINT8_T}* y,
    const union xnn_f32_${DATATYPE.lower()}_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(x != NULL);
  assert(y != NULL);

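  // Load the conversion parameters: the scale factor, the 16-bit output zero point, and the 8-bit output clamping bounds.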
  const float32x4_t vscale = vld1q_dup_f32(&params->neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neonv8.output_zero_point);
  $if BATCH_TILE > 8:
    const ${XINT8X16_T} voutput_min = ${VLD1Q_DUP_X8}(&params->neonv8.output_min);
    const ${XINT8X16_T} voutput_max = ${VLD1Q_DUP_X8}(&params->neonv8.output_max);
  $else:
    const ${XINT8X8_T} voutput_min = ${VLD1_DUP_X8}(&params->neonv8.output_min);
    const ${XINT8X8_T} voutput_max = ${VLD1_DUP_X8}(&params->neonv8.output_max);
  $if BATCH_TILE > 8:
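    // Main loop: convert ${BATCH_TILE} floats per iteration.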
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      $for N in range(0, BATCH_TILE, 4):
        float32x4_t vx${ABC[N:N+4]} = vld1q_f32(x); x += 4;

      $for N in range(0, BATCH_TILE, 4):
        vx${ABC[N:N+4]} = vmulq_f32(vx${ABC[N:N+4]}, vscale);

      $for N in range(0, BATCH_TILE, 4):
        const int32x4_t vacc${ABC[N:N+4]} = vcvtnq_s32_f32(vx${ABC[N:N+4]});

      $for N in range(0, BATCH_TILE, 8):
        int16x8_t vacc${ABC[N:N+8]} = vcombine_s16(vqmovn_s32(vacc${ABC[N:N+4]}), vqmovn_s32(vacc${ABC[N+4:N+8]}));

      $for N in range(0, BATCH_TILE, 8):
        vacc${ABC[N:N+8]} = vqaddq_s16(vacc${ABC[N:N+8]}, voutput_zero_point);

      $for N in range(0, BATCH_TILE, 16):
        $if N + 8 < BATCH_TILE:
          ${XINT8X16_T} vy${ABC[N:N+16]} = ${VCOMBINE_X8}(${VQMOVXN_S16}(vacc${ABC[N:N+8]}), ${VQMOVXN_S16}(vacc${ABC[N+8:N+16]}));
        $else:
          ${XINT8X8_T} vy${ABC[N:N+8]} = ${VQMOVXN_S16}(vacc${ABC[N:N+8]});

      $for N in range(0, BATCH_TILE, 16):
        $if N + 8 < BATCH_TILE:
          vy${ABC[N:N+16]} = ${VMAXQ_X8}(vy${ABC[N:N+16]}, voutput_min);
        $else:
          vy${ABC[N:N+8]} = ${VMAX_X8}(vy${ABC[N:N+8]}, ${VGET_LOW_X8}(voutput_min));

      $for N in range(0, BATCH_TILE, 16):
        $if N + 8 < BATCH_TILE:
          vy${ABC[N:N+16]} = ${VMINQ_X8}(vy${ABC[N:N+16]}, voutput_max);
        $else:
          vy${ABC[N:N+8]} = ${VMIN_X8}(vy${ABC[N:N+8]}, ${VGET_LOW_X8}(voutput_max));

      $for N in range(0, BATCH_TILE, 16):
        $if N + 8 < BATCH_TILE:
          ${VST1Q_X8}(y, vy${ABC[N:N+16]}); y += 16;
        $else:
          ${VST1_X8}(y, vy${ABC[N:N+8]}); y += 8;
    }
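  // Convert 8 floats per iteration: multiply by the scale, round to nearest with vcvtnq_s32_f32, narrow to
  // 16 bits, add the output zero point with saturation, narrow to 8 bits with saturation, and clamp.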
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    float32x4_t vx_lo = vld1q_f32(x); x += 4;
    float32x4_t vx_hi = vld1q_f32(x); x += 4;

    vx_lo = vmulq_f32(vx_lo, vscale);
    vx_hi = vmulq_f32(vx_hi, vscale);

    const int32x4_t vacc_lo = vcvtnq_s32_f32(vx_lo);
    const int32x4_t vacc_hi = vcvtnq_s32_f32(vx_hi);

    int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
    vacc = vqaddq_s16(vacc, voutput_zero_point);

    ${XINT8X8_T} vy = ${VQMOVXN_S16}(vacc);
    $if BATCH_TILE > 8:
      vy = ${VMAX_X8}(vy, ${VGET_LOW_X8}(voutput_min));
      vy = ${VMIN_X8}(vy, ${VGET_LOW_X8}(voutput_max));
    $else:
      vy = ${VMAX_X8}(vy, voutput_min);
      vy = ${VMIN_X8}(vy, voutput_max);
    ${VST1_X8}(y, vy); y += 8;
  }
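  // Tail: 1 to 7 remaining elements. The full-width vector loads may read past the end of x, which is
  // why the kernel is annotated with XNN_OOB_READS.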
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 7 * sizeof(float));
    float32x4_t vx_lo = vld1q_f32(x);
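    // If at least 4 elements remain, point x_hi at the second group of 4; otherwise reload the first group
    // (its duplicate results are never stored by the partial stores below).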
    const float* x_hi = (const float*) ((uintptr_t) x + (n & (4 * sizeof(float))));
    float32x4_t vx_hi = vld1q_f32(x_hi);

    vx_lo = vmulq_f32(vx_lo, vscale);
    vx_hi = vmulq_f32(vx_hi, vscale);

    const int32x4_t vacc_lo = vcvtnq_s32_f32(vx_lo);
    const int32x4_t vacc_hi = vcvtnq_s32_f32(vx_hi);

    int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
    vacc = vqaddq_s16(vacc, voutput_zero_point);

    ${XINT8X8_T} vy = ${VQMOVXN_S16}(vacc);
    $if BATCH_TILE > 8:
      vy = ${VMAX_X8}(vy, ${VGET_LOW_X8}(voutput_min));
      vy = ${VMIN_X8}(vy, ${VGET_LOW_X8}(voutput_max));
    $else:
      vy = ${VMAX_X8}(vy, voutput_min);
      vy = ${VMIN_X8}(vy, voutput_max);

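    // Store the remaining 1-7 output bytes: 4, then 2, then 1 at a time, rotating vy after each partial store.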
    if (n & (4 * sizeof(float))) {
      vst1_lane_u32((void*) y, ${VREINTERPRET_U32_X8}(vy), 0); y += 4;
      vy = ${VEXT_X8}(vy, vy, 4);
    }
    if (n & (2 * sizeof(float))) {
      vst1_lane_u16((void*) y, ${VREINTERPRET_U16_X8}(vy), 0); y += 2;
      vy = ${VEXT_X8}(vy, vy, 2);
    }
    if (n & (1 * sizeof(float))) {
      ${VST1_LANE_X8}(y, vy, 0);
    }
  }
}