// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

#include <arm_neon.h>

#include <fp16/bitcasts.h>

#include <xnnpack/requantization-stubs.h>


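// Requantizes a buffer of 32-bit accumulators to signed 8-bit integers using
// the RNDNU scheme: fixed-point multiplication by the 24-bit significand of
// the scale, a rounding right shift (round to nearest, ties up), zero-point
// addition, and clamping to [qmin, qmax].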
void xnn_qs8_requantize_rndnu__neon_mull(
    size_t n,
    const int32_t* input,
    float scale,
    int8_t zero_point,
    int8_t qmin,
    int8_t qmax,
    int8_t* output)
{
  assert(n % 16 == 0);
  assert(scale < 1.0f);
  assert(scale >= 0x1.0p-32f);

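  // Split the positive floating-point scale into a 24-bit integer multiplier
  // (the significand with its implicit leading bit restored) and a right
  // shift, so that scale == multiplier * 2**(-shift). Since scale is in
  // [2**-32, 1.0), the shift is always in [24, 56).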
  const uint32_t scale_bits = fp32_to_bits(scale);
  const int32_t multiplier = ((int32_t) scale_bits & INT32_C(0x007FFFFF)) | INT32_C(0x00800000);
  const int32_t shift = 127 + 23 - (scale_bits >> 23);
  assert(shift >= 24);
  assert(shift < 56);

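  // On AArch64, vmull_high_s32 (SMULL2) reads the full 128-bit multiplier
  // register, so all four 32-bit lanes are populated; on AArch32, VMULL.S32
  // reads 64-bit D registers, so two lanes suffice.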
#if defined(__aarch64__)
  const int32x4_t vmultiplier = vdupq_n_s32(multiplier);
#else
  const int32x2_t vmultiplier = vdup_n_s32(multiplier);
#endif
  const int16x8_t vzero_point = vdupq_n_s16((int16_t) zero_point);
  const int64x2_t vshift = vdupq_n_s64(-shift);
  const int8x16_t vqmin = vdupq_n_s8(qmin);
  const int8x16_t vqmax = vdupq_n_s8(qmax);
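  // Each iteration consumes 16 accumulators (four int32x4_t vectors) and
  // produces one int8x16_t vector of requantized values.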
  for (; n != 0; n -= 16) {
    const int32x4_t x = vld1q_s32(input);
    const int32x4_t y = vld1q_s32(input + 4);
    const int32x4_t z = vld1q_s32(input + 8);
    const int32x4_t w = vld1q_s32(input + 12);
    input += 16;

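    // Widen each 32-bit input into a 64-bit product with the 24-bit
    // multiplier: SMULL/SMULL2 on AArch64, VMULL.S32 on D-register halves on
    // AArch32.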
#if defined(__aarch64__)
    const int64x2_t x01_product = vmull_s32(vget_low_s32(x), vget_low_s32(vmultiplier));
    const int64x2_t x23_product = vmull_high_s32(x, vmultiplier);
    const int64x2_t y01_product = vmull_s32(vget_low_s32(y), vget_low_s32(vmultiplier));
    const int64x2_t y23_product = vmull_high_s32(y, vmultiplier);
    const int64x2_t z01_product = vmull_s32(vget_low_s32(z), vget_low_s32(vmultiplier));
    const int64x2_t z23_product = vmull_high_s32(z, vmultiplier);
    const int64x2_t w01_product = vmull_s32(vget_low_s32(w), vget_low_s32(vmultiplier));
    const int64x2_t w23_product = vmull_high_s32(w, vmultiplier);
#else
    const int64x2_t x01_product = vmull_s32(vget_low_s32(x), vmultiplier);
    const int64x2_t x23_product = vmull_s32(vget_high_s32(x), vmultiplier);
    const int64x2_t y01_product = vmull_s32(vget_low_s32(y), vmultiplier);
    const int64x2_t y23_product = vmull_s32(vget_high_s32(y), vmultiplier);
    const int64x2_t z01_product = vmull_s32(vget_low_s32(z), vmultiplier);
    const int64x2_t z23_product = vmull_s32(vget_high_s32(z), vmultiplier);
    const int64x2_t w01_product = vmull_s32(vget_low_s32(w), vmultiplier);
    const int64x2_t w23_product = vmull_s32(vget_high_s32(w), vmultiplier);
#endif

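    // Scale the products with a rounding arithmetic shift right (VRSHL/SRSHL
    // with a negative shift count), which rounds to nearest with ties toward
    // positive infinity: the round-to-nearest-up ("rndnu") behavior that
    // gives this kernel its name.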
    const int64x2_t x01_scaled = vrshlq_s64(x01_product, vshift);
    const int64x2_t x23_scaled = vrshlq_s64(x23_product, vshift);
    const int64x2_t y01_scaled = vrshlq_s64(y01_product, vshift);
    const int64x2_t y23_scaled = vrshlq_s64(y23_product, vshift);
    const int64x2_t z01_scaled = vrshlq_s64(z01_product, vshift);
    const int64x2_t z23_scaled = vrshlq_s64(z23_product, vshift);
    const int64x2_t w01_scaled = vrshlq_s64(w01_product, vshift);
    const int64x2_t w23_scaled = vrshlq_s64(w23_product, vshift);

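    // Because scale < 1.0, every scaled value fits into 32 bits, so the
    // 64-to-32-bit narrowing is a plain truncation: UZP1 over the even lanes
    // on AArch64, VMOVN on AArch32. The later narrowings saturate: 32-to-16
    // bits, then a saturating add of the zero point, then 16-to-8 bits.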
#if defined(__aarch64__)
    const int32x4_t x_scaled = vuzp1q_s32(vreinterpretq_s32_s64(x01_scaled), vreinterpretq_s32_s64(x23_scaled));
    const int32x4_t y_scaled = vuzp1q_s32(vreinterpretq_s32_s64(y01_scaled), vreinterpretq_s32_s64(y23_scaled));
    const int32x4_t z_scaled = vuzp1q_s32(vreinterpretq_s32_s64(z01_scaled), vreinterpretq_s32_s64(z23_scaled));
    const int32x4_t w_scaled = vuzp1q_s32(vreinterpretq_s32_s64(w01_scaled), vreinterpretq_s32_s64(w23_scaled));

    const int16x8_t xy_packed = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(x_scaled), y_scaled), vzero_point);
    const int16x8_t zw_packed = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(z_scaled), w_scaled), vzero_point);
    const int8x16_t xyzw_packed = vqmovn_high_s16(vqmovn_s16(xy_packed), zw_packed);
#else
    const int32x4_t x_scaled = vcombine_s32(vmovn_s64(x01_scaled), vmovn_s64(x23_scaled));
    const int32x4_t y_scaled = vcombine_s32(vmovn_s64(y01_scaled), vmovn_s64(y23_scaled));
    const int32x4_t z_scaled = vcombine_s32(vmovn_s64(z01_scaled), vmovn_s64(z23_scaled));
    const int32x4_t w_scaled = vcombine_s32(vmovn_s64(w01_scaled), vmovn_s64(w23_scaled));

    const int16x8_t xy_packed = vqaddq_s16(vcombine_s16(vqmovn_s32(x_scaled), vqmovn_s32(y_scaled)), vzero_point);
    const int16x8_t zw_packed = vqaddq_s16(vcombine_s16(vqmovn_s32(z_scaled), vqmovn_s32(w_scaled)), vzero_point);
    const int8x16_t xyzw_packed = vcombine_s8(vqmovn_s16(xy_packed), vqmovn_s16(zw_packed));
#endif

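    // Clamp to the [qmin, qmax] output range, which may be narrower than the
    // [-128, 127] range already enforced by the saturating narrowings, e.g.
    // when activation bounds are folded into the requantization.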
    const int8x16_t xyzw_clamped = vmaxq_s8(vminq_s8(xyzw_packed, vqmax), vqmin);

    // AArch32 version:
    //   8x VMULL.S32 Qd, Dm, Dn
    //   8x VRSHL.S64 Qd, Qm, Qn
    //   8x VMOVN.S64 Dd, Qm
    //   4x VQMOVN.S32 Dd, Qm
    //   2x VQADD.S16 Qd, Qm, Qn
    //   2x VQMOVN.S16 Dd, Qm
    //   1x VMAX.S8 Qd, Qm, Qn
    //   1x VMIN.S8 Qd, Qm, Qn
    // ---------------------
    // 34 instructions total
    //
    // AArch64 version:
    //   4x SMULL Vd.2D, Vn.2S, Vm.2S
    //   4x SMULL2 Vd.2D, Vn.4S, Vm.4S
    //   8x SRSHL Vd.2D, Vn.2D, Vm.2D
    //   4x UZP1 Vd.4S, Vn.4S, Vm.4S
    //   2x SQXTN Vd.4H, Vn.4S
    //   2x SQXTN2 Vd.8H, Vn.4S
    //   2x SQADD Vd.8H, Vn.8H, Vm.8H
    //   1x SQXTN Vd.8B, Vn.8H
    //   1x SQXTN2 Vd.16B, Vn.8H
    //   1x SMIN Vd.16B, Vn.16B, Vm.16B
    //   1x SMAX Vd.16B, Vn.16B, Vm.16B
    // ---------------------
    // 30 instructions total

    vst1q_s8(output, xyzw_clamped);
    output += 16;
  }
}