// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

#include <arm_neon.h>

#include <fp16/bitcasts.h>

#include <xnnpack/requantization-stubs.h>

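// Requantizes 32-bit integer values to uint8 as
//
//   output = clamp(round(input * scale) + zero_point, qmin, qmax)
//
// evaluated exactly in fixed-point arithmetic, with ties rounded away from
// zero. A minimal scalar sketch of the per-element computation (illustrative
// names, not part of this kernel; assumes arithmetic right shift of negative
// values):
//
//   const int64_t product = (int64_t) x * (int64_t) multiplier;
//   const int64_t adjusted = product - (int64_t) (x < 0);
//   const int32_t scaled = (int32_t) ((adjusted + (INT64_C(1) << (shift - 1))) >> shift);
//   const int32_t biased = scaled + (int32_t) zero_point;
//   const uint8_t out = (uint8_t) (biased < qmin ? qmin : biased > qmax ? qmax : biased);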
void xnn_qu8_requantize_precise__neon(
    size_t n,
    const int32_t* input,
    float scale,
    uint8_t zero_point,
    uint8_t qmin,
    uint8_t qmax,
    uint8_t* output)
{
  assert(n % 16 == 0);
  assert(scale < 1.0f);
  assert(scale >= 0x1.0p-32f);

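  // Split the positive binary32 scale into a fixed-point multiplier and a
  // right shift: the 24-bit mantissa (with the implicit leading bit restored)
  // becomes the multiplier, and the exponent determines the shift, so that
  // scale == multiplier * 2**(-shift) exactly.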
  const uint32_t scale_bits = fp32_to_bits(scale);
  const int32_t multiplier = ((int32_t) scale_bits & INT32_C(0x007FFFFF)) | INT32_C(0x00800000);
  const int32_t shift = 127 + 23 - (scale_bits >> 23);
  assert(shift >= 24);
  assert(shift < 56);

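  // Broadcast the requantization parameters. The shift count is negated
  // because NEON has no variable right shift: VRSHL/SRSHL with a negative
  // count performs a rounding shift right.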
#if defined(__aarch64__)
  const int32x4_t vmultiplier = vdupq_n_s32(multiplier);
#else
  const int32x2_t vmultiplier = vdup_n_s32(multiplier);
#endif
  const int16x8_t vzero_point = vdupq_n_s16((int16_t)(uint16_t) zero_point);
  const int64x2_t vshift = vdupq_n_s64(-shift);
  const uint8x16_t vqmin = vdupq_n_u8(qmin);
  const uint8x16_t vqmax = vdupq_n_u8(qmax);
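  // Process 16 values per iteration; the n % 16 == 0 precondition guarantees
  // there is no remainder to handle.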
  for (; n != 0; n -= 16) {
    const int32x4_t x = vld1q_s32(input);
    const int32x4_t y = vld1q_s32(input + 4);
    const int32x4_t z = vld1q_s32(input + 8);
    const int32x4_t w = vld1q_s32(input + 12);
    input += 16;

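    // All-ones lanes mark negative inputs. Adding such a mask (i.e. -1) to
    // the 64-bit product before the rounding shift converts the shift's
    // round-half-up behavior into round-half-away-from-zero.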
    const uint32x4_t x_neg_mask = vcltq_s32(x, vmovq_n_s32(0));
    const uint32x4_t y_neg_mask = vcltq_s32(y, vmovq_n_s32(0));
    const uint32x4_t z_neg_mask = vcltq_s32(z, vmovq_n_s32(0));
    const uint32x4_t w_neg_mask = vcltq_s32(w, vmovq_n_s32(0));

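    // Widen each int32 to a full 64-bit product with the 24-bit multiplier.
    // AArch64 multiplies the upper halves directly with SMULL2; AArch32 has
    // to extract them with vget_high_s32 first.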
#if defined(__aarch64__)
    const int64x2_t x01_product = vmull_s32(vget_low_s32(x), vget_low_s32(vmultiplier));
    const int64x2_t x23_product = vmull_high_s32(x, vmultiplier);
    const int64x2_t y01_product = vmull_s32(vget_low_s32(y), vget_low_s32(vmultiplier));
    const int64x2_t y23_product = vmull_high_s32(y, vmultiplier);
    const int64x2_t z01_product = vmull_s32(vget_low_s32(z), vget_low_s32(vmultiplier));
    const int64x2_t z23_product = vmull_high_s32(z, vmultiplier);
    const int64x2_t w01_product = vmull_s32(vget_low_s32(w), vget_low_s32(vmultiplier));
    const int64x2_t w23_product = vmull_high_s32(w, vmultiplier);
#else
    const int64x2_t x01_product = vmull_s32(vget_low_s32(x), vmultiplier);
    const int64x2_t x23_product = vmull_s32(vget_high_s32(x), vmultiplier);
    const int64x2_t y01_product = vmull_s32(vget_low_s32(y), vmultiplier);
    const int64x2_t y23_product = vmull_s32(vget_high_s32(y), vmultiplier);
    const int64x2_t z01_product = vmull_s32(vget_low_s32(z), vmultiplier);
    const int64x2_t z23_product = vmull_s32(vget_high_s32(z), vmultiplier);
    const int64x2_t w01_product = vmull_s32(vget_low_s32(w), vmultiplier);
    const int64x2_t w23_product = vmull_s32(vget_high_s32(w), vmultiplier);
#endif

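    // Widen the negative-input masks and add them to the products, applying
    // the -1 rounding adjustment described above.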
#if defined(__aarch64__)
    const int64x2_t x01_adjusted_product = vaddw_s32(x01_product, vreinterpret_s32_u32(vget_low_u32(x_neg_mask)));
    const int64x2_t x23_adjusted_product = vaddw_high_s32(x23_product, vreinterpretq_s32_u32(x_neg_mask));
    const int64x2_t y01_adjusted_product = vaddw_s32(y01_product, vreinterpret_s32_u32(vget_low_u32(y_neg_mask)));
    const int64x2_t y23_adjusted_product = vaddw_high_s32(y23_product, vreinterpretq_s32_u32(y_neg_mask));
    const int64x2_t z01_adjusted_product = vaddw_s32(z01_product, vreinterpret_s32_u32(vget_low_u32(z_neg_mask)));
    const int64x2_t z23_adjusted_product = vaddw_high_s32(z23_product, vreinterpretq_s32_u32(z_neg_mask));
    const int64x2_t w01_adjusted_product = vaddw_s32(w01_product, vreinterpret_s32_u32(vget_low_u32(w_neg_mask)));
    const int64x2_t w23_adjusted_product = vaddw_high_s32(w23_product, vreinterpretq_s32_u32(w_neg_mask));
#else
    const int64x2_t x01_adjusted_product = vaddw_s32(x01_product, vreinterpret_s32_u32(vget_low_u32(x_neg_mask)));
    const int64x2_t x23_adjusted_product = vaddw_s32(x23_product, vreinterpret_s32_u32(vget_high_u32(x_neg_mask)));
    const int64x2_t y01_adjusted_product = vaddw_s32(y01_product, vreinterpret_s32_u32(vget_low_u32(y_neg_mask)));
    const int64x2_t y23_adjusted_product = vaddw_s32(y23_product, vreinterpret_s32_u32(vget_high_u32(y_neg_mask)));
    const int64x2_t z01_adjusted_product = vaddw_s32(z01_product, vreinterpret_s32_u32(vget_low_u32(z_neg_mask)));
    const int64x2_t z23_adjusted_product = vaddw_s32(z23_product, vreinterpret_s32_u32(vget_high_u32(z_neg_mask)));
    const int64x2_t w01_adjusted_product = vaddw_s32(w01_product, vreinterpret_s32_u32(vget_low_u32(w_neg_mask)));
    const int64x2_t w23_adjusted_product = vaddw_s32(w23_product, vreinterpret_s32_u32(vget_high_u32(w_neg_mask)));
#endif

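    // Rounding arithmetic shift right by the scale-derived shift amount
    // (vshift holds -shift, so the left shift acts as a right shift).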
    const int64x2_t x01_scaled = vrshlq_s64(x01_adjusted_product, vshift);
    const int64x2_t x23_scaled = vrshlq_s64(x23_adjusted_product, vshift);
    const int64x2_t y01_scaled = vrshlq_s64(y01_adjusted_product, vshift);
    const int64x2_t y23_scaled = vrshlq_s64(y23_adjusted_product, vshift);
    const int64x2_t z01_scaled = vrshlq_s64(z01_adjusted_product, vshift);
    const int64x2_t z23_scaled = vrshlq_s64(z23_adjusted_product, vshift);
    const int64x2_t w01_scaled = vrshlq_s64(w01_adjusted_product, vshift);
    const int64x2_t w23_scaled = vrshlq_s64(w23_adjusted_product, vshift);

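    // Narrow 64 -> 32 (exact, since shift >= 24 keeps the scaled values
    // within int32 range), pack 32 -> 16 with signed saturation, add the
    // zero point with saturation, then pack 16 -> 8 with unsigned
    // saturation. On AArch64, UZP1 takes the low 32 bits of each 64-bit
    // lane in a single instruction.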
#if defined(__aarch64__)
    const int32x4_t x_scaled = vuzp1q_s32(vreinterpretq_s32_s64(x01_scaled), vreinterpretq_s32_s64(x23_scaled));
    const int32x4_t y_scaled = vuzp1q_s32(vreinterpretq_s32_s64(y01_scaled), vreinterpretq_s32_s64(y23_scaled));
    const int32x4_t z_scaled = vuzp1q_s32(vreinterpretq_s32_s64(z01_scaled), vreinterpretq_s32_s64(z23_scaled));
    const int32x4_t w_scaled = vuzp1q_s32(vreinterpretq_s32_s64(w01_scaled), vreinterpretq_s32_s64(w23_scaled));

    const int16x8_t xy_packed = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(x_scaled), y_scaled), vzero_point);
    const int16x8_t zw_packed = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(z_scaled), w_scaled), vzero_point);
    const uint8x16_t xyzw_packed = vqmovun_high_s16(vqmovun_s16(xy_packed), zw_packed);
#else
    const int32x4_t x_scaled = vcombine_s32(vmovn_s64(x01_scaled), vmovn_s64(x23_scaled));
    const int32x4_t y_scaled = vcombine_s32(vmovn_s64(y01_scaled), vmovn_s64(y23_scaled));
    const int32x4_t z_scaled = vcombine_s32(vmovn_s64(z01_scaled), vmovn_s64(z23_scaled));
    const int32x4_t w_scaled = vcombine_s32(vmovn_s64(w01_scaled), vmovn_s64(w23_scaled));

    const int16x8_t xy_packed = vqaddq_s16(vcombine_s16(vqmovn_s32(x_scaled), vqmovn_s32(y_scaled)), vzero_point);
    const int16x8_t zw_packed = vqaddq_s16(vcombine_s16(vqmovn_s32(z_scaled), vqmovn_s32(w_scaled)), vzero_point);
    const uint8x16_t xyzw_packed = vcombine_u8(vqmovun_s16(xy_packed), vqmovun_s16(zw_packed));
#endif

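    // Clamp to the caller-provided [qmin, qmax] output range.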
    const uint8x16_t xyzw_clamped = vmaxq_u8(vminq_u8(xyzw_packed, vqmax), vqmin);

    // AArch32 version:
    //   4x VCLT.S32 Qd, Qm, #0
    //   8x VMULL.S32 Qd, Dm, Dn
    //   8x VADDW.S32 Qd, Qm, Dn
    //   8x VRSHL.S64 Qd, Qm, Qn
    //   8x VMOVN.S64 Dd, Qm
    //   4x VQMOVN.S32 Dd, Qm
    //   2x VQADD.S16 Qd, Qm, Qn
    //   2x VQMOVUN.S16 Dd, Qm
    //   1x VMAX.U8 Qd, Qm, Qn
    //   1x VMIN.U8 Qd, Qm, Qn
    // ---------------------
    // 46 instructions total
    //
    // AArch64 version:
    //   4x CMLT Vd.4S, Vn.4S, #0
    //   4x SMULL Vd.2D, Vn.2S, Vm.2S
    //   4x SMULL2 Vd.2D, Vn.4S, Vm.4S
    //   4x SADDW Vd.2D, Vn.2D, Vm.2S
    //   4x SADDW2 Vd.2D, Vn.2D, Vm.4S
    //   8x SRSHL Vd.2D, Vn.2D, Vm.2D
    //   4x UZP1 Vd.4S, Vn.4S, Vm.4S
    //   2x SQXTN Vd.4H, Vn.4S
    //   2x SQXTN2 Vd.8H, Vn.4S
    //   2x SQADD Vd.8H, Vn.8H, Vm.8H
    //   1x SQXTUN Vd.8B, Vn.8H
    //   1x SQXTUN2 Vd.16B, Vn.8H
    //   1x UMIN Vd.16B, Vn.16B, Vm.16B
    //   1x UMAX Vd.16B, Vn.16B, Vm.16B
    // ---------------------
    // 42 instructions total

    vst1q_u8(output, xyzw_clamped);
    output += 16;
  }
}