// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <stdint.h>

#include <arm_neon.h>

#include <xnnpack/requantization-stubs.h>


void xnn_requantize_fp32__neon(
    size_t n,
    const int32_t* input,
    float scale,
    uint8_t zero_point,
    uint8_t qmin,
    uint8_t qmax,
    uint8_t* output)
{
  assert(n % 16 == 0);
  assert(scale < 1.0f);
  assert(scale >= 0x1.0p-32f);
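  // Example of a scale that satisfies both asserts (illustrative value, not from the original code):
  // scale = 0x1.0p-10f (~0.000977) lies in [0x1.0p-32f, 1.0f).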

  const float32x4_t vscale = vdupq_n_f32(scale);
#ifdef __aarch64__
  const int16x8_t vzero_point = vdupq_n_s16((int16_t)(uint16_t) zero_point);
  const uint8x16_t vqmin = vdupq_n_u8(qmin);
  const uint8x16_t vqmax = vdupq_n_u8(qmax);
#else
  const float32x4_t vfmin = vdupq_n_f32((float) ((int32_t)(uint32_t) qmin - (int32_t)(uint32_t) zero_point));
  const float32x4_t vfmax = vdupq_n_f32((float) ((int32_t)(uint32_t) qmax - (int32_t)(uint32_t) zero_point));
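  // Note (added commentary): 12582912.0f is 1.5 * 2**23, whose bit pattern is 0x4B400000. Adding it to a
  // float x with |x| < 2**22 produces a value whose integer bit pattern is 0x4B400000 + round(x), so
  // subtracting vimagic = 0x4B400000 - zero_point recovers round(x) with the zero point already added.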
  const float32x4_t vfmagic = vdupq_n_f32(12582912.0f);
  const int32x4_t vimagic = vdupq_n_s32(INT32_C(0x4B400000) - (int32_t)(uint32_t) zero_point);
#endif
  for (; n != 0; n -= 16) {
    const int32x4_t x = vld1q_s32(input);
    const int32x4_t y = vld1q_s32(input + 4);
    const int32x4_t z = vld1q_s32(input + 8);
    const int32x4_t w = vld1q_s32(input + 12);
    input += 16;

    // Convert int32_t input to FP32 and multiply by the FP32 scale.
    // Both operations involve statistically unbiased rounding:
    // - Large int32_t values can't be exactly represented as FP32. The NEON conversion instruction rounds them
    //   to the nearest FP32 value with ties to even.
    // - The product of two FP32 values is generally not exactly representable as an FP32 value, and is rounded
    //   to the nearest FP32 value with ties to even.
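    // (Added commentary: for example, 2**24 + 1 = 16777217 has no exact FP32 representation and converts
    // to 16777216.0f under ties-to-even.)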
    const float32x4_t x_scaled = vmulq_f32(vcvtq_f32_s32(x), vscale);
    const float32x4_t y_scaled = vmulq_f32(vcvtq_f32_s32(y), vscale);
    const float32x4_t z_scaled = vmulq_f32(vcvtq_f32_s32(z), vscale);
    const float32x4_t w_scaled = vmulq_f32(vcvtq_f32_s32(w), vscale);

#ifdef __aarch64__
    // Leverage the "Floating-point Convert to Signed integer, rounding to nearest with ties to even" instruction.
    // This is an ARMv8 instruction (always available in AArch64) that saturates the result on overflow.
    // We don't need to handle saturated results specially, as they are clamped at the last stage.
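    // (Added note: ties-to-even means, e.g., 2.5f converts to 2 and 3.5f converts to 4.)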
    const int32x4_t x_rounded = vcvtnq_s32_f32(x_scaled);
    const int32x4_t y_rounded = vcvtnq_s32_f32(y_scaled);
    const int32x4_t z_rounded = vcvtnq_s32_f32(z_scaled);
    const int32x4_t w_rounded = vcvtnq_s32_f32(w_scaled);

    // Standard final sequence on ARM NEON:
    // - Pack to int16_t and saturate
    // - Add zero point
    // - Pack to uint8_t and saturate
    // - Clamp between qmin and qmax
    const int16x8_t xy_packed = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(x_rounded), y_rounded), vzero_point);
    const int16x8_t zw_packed = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(z_rounded), w_rounded), vzero_point);
    const uint8x16_t xyzw_packed = vqmovun_high_s16(vqmovun_s16(xy_packed), zw_packed);
    const uint8x16_t xyzw_clamped = vmaxq_u8(vminq_u8(xyzw_packed, vqmax), vqmin);

    vst1q_u8(output, xyzw_clamped);
    output += 16;
#else
    // ARMv7 NEON offers only a floating-point to integer conversion instruction with rounding towards zero.
    // In lieu of a conversion instruction with rounding-to-nearest-even, we use a magic trick: add a large
    // number (1.5 * 2**23) to the scaled value to force rounding to an integer, and then subtract this magic
    // number as an integer. This trick works only in a limited range (the absolute value of the input must be
    // less than 2**22), so generally we have to clamp the input to this range before applying it. However,
    // clamping to any smaller range works just as well, so we clamp to the [qmin - zero point, qmax - zero point]
    // range; after we add the zero point to the result, it lands in the target [qmin, qmax] range.
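    // Worked example (added commentary; values are illustrative): with zero_point = 128 and a scaled value of
    // -2.3f, adding 12582912.0f gives 12582909.7f, which rounds to 12582910.0f with bit pattern
    // 0x4B3FFFFE = 0x4B400000 - 2; subtracting vimagic = 0x4B400000 - 128 as an integer yields
    // -2 + 128 = 126, i.e. round(-2.3) + zero_point.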
    const float32x4_t x_clamped = vminq_f32(vmaxq_f32(x_scaled, vfmin), vfmax);
    const float32x4_t y_clamped = vminq_f32(vmaxq_f32(y_scaled, vfmin), vfmax);
    const float32x4_t z_clamped = vminq_f32(vmaxq_f32(z_scaled, vfmin), vfmax);
    const float32x4_t w_clamped = vminq_f32(vmaxq_f32(w_scaled, vfmin), vfmax);

    // Conversion to integer using the "magic trick". Rounding is performed by the floating-point addition,
    // and the result is rounded to the nearest integer with ties to even.
    const int32x4_t x_biased = vsubq_s32(vreinterpretq_s32_f32(vaddq_f32(x_clamped, vfmagic)), vimagic);
    const int32x4_t y_biased = vsubq_s32(vreinterpretq_s32_f32(vaddq_f32(y_clamped, vfmagic)), vimagic);
    const int32x4_t z_biased = vsubq_s32(vreinterpretq_s32_f32(vaddq_f32(z_clamped, vfmagic)), vimagic);
    const int32x4_t w_biased = vsubq_s32(vreinterpretq_s32_f32(vaddq_f32(w_clamped, vfmagic)), vimagic);

    // Select the low 8 bits of each 32-bit integer in the vectors for the output.
    // Since the result is already clamped to the [qmin, qmax] subrange of [0, 255], saturation is not needed.
    const int16x8_t xy_packed = vcombine_s16(vmovn_s32(x_biased), vmovn_s32(y_biased));
    const int16x8_t zw_packed = vcombine_s16(vmovn_s32(z_biased), vmovn_s32(w_biased));
    const uint8x16_t xyzw_packed = vreinterpretq_u8_s8(vcombine_s8(vmovn_s16(xy_packed), vmovn_s16(zw_packed)));

    // AArch32 version:
    //   4x VCVT.F32.S32 Qd, Qm
    //   4x VMUL.F32 Qd, Qm, Qn
    //   4x VMIN.F32 Qd, Qm, Qn
    //   4x VMAX.F32 Qd, Qm, Qn
    //   4x VADD.F32 Qd, Qm, Qn
    //   4x VSUB.S32 Qd, Qm, Qn
    //   4x VMOVN.I32 Dd, Qm
    //   2x VMOVN.I16 Dd, Qm
    // ---------------------
    // 30 instructions total
    //
    // AArch64 version:
    //   4x SCVTF Vd.4S, Vn.4S
    //   4x FMUL Vd.4S, Vn.4S, Vm.4S
    //   4x FCVTNS Vd.4S, Vn.4S
    //   2x SQXTN Vd.4H, Vn.4S
    //   2x SQXTN2 Vd.8H, Vn.4S
    //   2x SQADD Vd.8H, Vn.8H, Vm.8H
    //   1x SQXTUN Vd.8B, Vn.8H
    //   1x SQXTUN2 Vd.16B, Vn.8H
    //   1x UMIN Vd.16B, Vn.16B, Vm.16B
    //   1x UMAX Vd.16B, Vn.16B, Vm.16B
    // ---------------------
    // 22 instructions total

    vst1q_u8(output, xyzw_packed);
    output += 16;
#endif
  }
}
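
// Usage sketch (added commentary, not part of the original file; all parameter values below are
// arbitrary examples that satisfy the asserts above):
//
//   int32_t acc[16];   // accumulators from a quantized op; n must be a multiple of 16
//   uint8_t out[16];
//   xnn_requantize_fp32__neon(
//       16, acc,
//       0x1.0p-10f,  // scale in [0x1.0p-32f, 1.0f)
//       128,         // zero_point
//       0, 255,      // qmin, qmax
//       out);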