// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert DATATYPE in ["QS8", "QU8"]
$assert REQUANTIZATION == "FP32"
$assert BATCH_TILE >= 1
#include <assert.h>

#include <fp16.h>

#include <xnnpack/math.h>
#include <xnnpack/vmul.h>


$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
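// Scalar (non-SIMD) microkernel for quantized vector-by-constant multiplication:
// input_a holds the per-element operand, *input_b holds the single broadcast
// operand, and n counts bytes rather than elements. Products are requantized
// with the FP32 (magic-bias) scheme and clamped to the output min/max range.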
void xnn_${DATATYPE.lower()}_vmulc_minmax_${REQUANTIZATION.lower()}_ukernel__scalar_x${BATCH_TILE}(
    size_t n,
    const ${XINT8_T}* input_a,
    const ${XINT8_T}* input_b,
    ${XINT8_T}* output,
    const union xnn_${DATATYPE.lower()}_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
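  // The requantization constants arrive precomputed in the params structure;
  // the *_less_zero_point values already have the output zero point folded in.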
  const int32_t va_zero_point = params->fp32_scalar.a_zero_point;
  const float vscale = params->fp32_scalar.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar.magic_bias_less_output_zero_point;

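  // input_b points to a single element; its zero-point-adjusted value vb is
  // computed once and reused for every output element.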
  const int32_t vb = (int32_t) *input_b - params->fp32_scalar.b_zero_point;
  $if BATCH_TILE == 1:
    do {
      const int32_t va = (int32_t) *input_a++ - va_zero_point;
      const int32_t vacc = va * vb;

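      // Requantize: scale the integer product to float, clamp against the
      // zero-point-adjusted output bounds, then add the magic bias so that the
      // rounded result lands in the low mantissa bits; reinterpreting the float
      // as bits and subtracting (magic bias bits - output zero point) yields
      // the final quantized value.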
      float vfpacc = (float) vacc * vscale;
      vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
      vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
      vfpacc += vmagic_bias;
      const int32_t vout = (int32_t) fp32_to_bits(vfpacc) - vmagic_bias_less_output_zero_point;
      *output++ = (${XINT8_T}) vout;

      n -= sizeof(${XINT8_T});
    } while (n != 0);
  $else:
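    // Main loop: consume one batch tile per iteration, with each step unrolled
    // across the tile.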
    for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
      $for N in range(BATCH_TILE):
        const int32_t va${N} = input_a[${N}] - va_zero_point;
      input_a += ${BATCH_TILE};

      $for N in range(BATCH_TILE):
        const int32_t vacc${N} = va${N} * vb;

      $for N in range(BATCH_TILE):
        float vfpacc${N} = (float) vacc${N} * vscale;

      $for N in range(BATCH_TILE):
        vfpacc${N} = math_max_f32(vfpacc${N}, voutput_min_less_zero_point);

      $for N in range(BATCH_TILE):
        vfpacc${N} = math_min_f32(vfpacc${N}, voutput_max_less_zero_point);

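      // Add the magic bias and reinterpret the float bits to return to the
      // quantized integer domain.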
      $for N in range(BATCH_TILE):
        vfpacc${N} += vmagic_bias;

      $for N in range(BATCH_TILE):
        const int32_t vout${N} = (int32_t) fp32_to_bits(vfpacc${N}) - vmagic_bias_less_output_zero_point;

      $for N in range(BATCH_TILE):
        output[${N}] = (${XINT8_T}) vout${N};
      output += ${BATCH_TILE};
    }
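    // Remainder: fewer elements than one batch tile are left. With a tile of 2
    // that is exactly one element, so no loop is needed; larger tiles fall back
    // to an element-by-element loop.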
    if XNN_UNLIKELY(n != 0) {
      $if BATCH_TILE == 2:
        const int32_t va = (int32_t) *input_a - va_zero_point;
        const int32_t vacc = va * vb;

        float vfpacc = (float) vacc * vscale;
        vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
        vfpacc += vmagic_bias;
        const int32_t vout = (int32_t) fp32_to_bits(vfpacc) - vmagic_bias_less_output_zero_point;
        *output = (${XINT8_T}) vout;
      $else:
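        // More than one element may remain; loop until the byte count is exhausted.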
        do {
          const int32_t va = (int32_t) *input_a++ - va_zero_point;
          const int32_t vacc = va * vb;

          float vfpacc = (float) vacc * vscale;
          vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
          vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
          vfpacc += vmagic_bias;
          const int32_t vout = (int32_t) fp32_to_bits(vfpacc) - vmagic_bias_less_output_zero_point;
          *output++ = (${XINT8_T}) vout;

          n -= sizeof(${XINT8_T});
        } while (n != 0);
    }
}