// Auto-generated file. Do not edit!
//   Template: src/f32-qs8-vcvt/wasmsimd-magic.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <string.h>  // memcpy: alignment- and aliasing-safe partial stores

#include <wasm_simd128.h>  // v128_t and wasm_* SIMD intrinsics used below
// NOTE(review): <arm_neon.h> looks out of place in a wasmsimd kernel — confirm
// against the generator template before removing.
#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>

18
xnn_f32_qu8_vcvt_ukernel__wasmsimd_magic_x16(size_t n,const float * x,uint8_t * y,const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS (1)])19 void xnn_f32_qu8_vcvt_ukernel__wasmsimd_magic_x16(
20 size_t n,
21 const float* x,
22 uint8_t* y,
23 const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
24 {
25 assert(n != 0);
26 assert(n % sizeof(float) == 0);
27 assert(x != NULL);
28 assert(y != NULL);
29
30 const v128_t vscale = wasm_v128_load64_splat(params->wasmsimd_magic.scale);
31 const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_magic.magic_bias);
32 const v128_t vmagic_min = wasm_v128_load64_splat(params->wasmsimd_magic.magic_min);
33 const v128_t vmagic_bias_less_zero_point = wasm_v128_load64_splat(params->wasmsimd_magic.magic_bias_less_zero_point);
34 const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd_magic.output_max);
35 for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
36 v128_t vx0123 = wasm_v128_load(x);
37 v128_t vx4567 = wasm_v128_load(x + 4);
38 v128_t vx89AB = wasm_v128_load(x + 8);
39 v128_t vxCDEF = wasm_v128_load(x + 12);
40 x += 16;
41
42 vx0123 = wasm_f32x4_mul(vx0123, vscale);
43 vx4567 = wasm_f32x4_mul(vx4567, vscale);
44 vx89AB = wasm_f32x4_mul(vx89AB, vscale);
45 vxCDEF = wasm_f32x4_mul(vxCDEF, vscale);
46
47 vx0123 = wasm_f32x4_add(vx0123, vmagic_bias);
48 vx4567 = wasm_f32x4_add(vx4567, vmagic_bias);
49 vx89AB = wasm_f32x4_add(vx89AB, vmagic_bias);
50 vxCDEF = wasm_f32x4_add(vxCDEF, vmagic_bias);
51
52 v128_t vacc0123 = wasm_i32x4_max(vx0123, vmagic_min);
53 v128_t vacc4567 = wasm_i32x4_max(vx4567, vmagic_min);
54 v128_t vacc89AB = wasm_i32x4_max(vx89AB, vmagic_min);
55 v128_t vaccCDEF = wasm_i32x4_max(vxCDEF, vmagic_min);
56
57 vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_zero_point);
58 vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_zero_point);
59 vacc89AB = wasm_i32x4_sub(vacc89AB, vmagic_bias_less_zero_point);
60 vaccCDEF = wasm_i32x4_sub(vaccCDEF, vmagic_bias_less_zero_point);
61
62 const v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
63 const v128_t vacc89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
64
65 v128_t vy0123456789ABCDEF = wasm_u8x16_narrow_i16x8(vacc01234567, vacc89ABCDEF);
66
67 vy0123456789ABCDEF = wasm_u8x16_min(vy0123456789ABCDEF, voutput_max);
68
69 wasm_v128_store(y, vy0123456789ABCDEF);
70 y += 16;
71 }
72 for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
73 v128_t vx_lo = wasm_v128_load(x);
74 v128_t vx_hi = wasm_v128_load(x + 4);
75 x += 8;
76
77 vx_lo = wasm_f32x4_mul(vx_lo, vscale);
78 vx_hi = wasm_f32x4_mul(vx_hi, vscale);
79
80 vx_lo = wasm_f32x4_add(vx_lo, vmagic_bias);
81 vx_hi = wasm_f32x4_add(vx_hi, vmagic_bias);
82
83 v128_t vacc_lo = wasm_i32x4_max(vx_lo, vmagic_min);
84 v128_t vacc_hi = wasm_i32x4_max(vx_hi, vmagic_min);
85
86 vacc_lo = wasm_i32x4_sub(vacc_lo, vmagic_bias_less_zero_point);
87 vacc_hi = wasm_i32x4_sub(vacc_hi, vmagic_bias_less_zero_point);
88
89 const v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
90
91 v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
92 vy = wasm_u8x16_min(vy, voutput_max);
93 *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
94 y += 8;
95 }
96 if XNN_UNLIKELY(n != 0) {
97 assert(n >= 1 * sizeof(float));
98 assert(n <= 7 * sizeof(float));
99 v128_t vx_lo = wasm_v128_load(x);
100 const float* x_hi = (const float*) ((uintptr_t) x + (n & (4 * sizeof(float))));
101 v128_t vx_hi = wasm_v128_load(x_hi);
102
103 vx_lo = wasm_f32x4_mul(vx_lo, vscale);
104 vx_hi = wasm_f32x4_mul(vx_hi, vscale);
105
106 vx_lo = wasm_f32x4_add(vx_lo, vmagic_bias);
107 vx_hi = wasm_f32x4_add(vx_hi, vmagic_bias);
108
109 v128_t vacc_lo = wasm_i32x4_max(vx_lo, vmagic_min);
110 v128_t vacc_hi = wasm_i32x4_max(vx_hi, vmagic_min);
111
112 vacc_lo = wasm_i32x4_sub(vacc_lo, vmagic_bias_less_zero_point);
113 vacc_hi = wasm_i32x4_sub(vacc_hi, vmagic_bias_less_zero_point);
114
115 const v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);
116
117 v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
118 vy = wasm_u8x16_min(vy, voutput_max);
119
120 if (n & (4 * sizeof(float))) {
121 *((float*) y) = wasm_f32x4_extract_lane(vy, 0);
122 y += 4;
123 vy = wasm_u64x2_shr(vy, 32);
124 }
125 uint32_t vy_lo = (uint32_t) wasm_i32x4_extract_lane(vy, 0);
126 if (n & (2 * sizeof(float))) {
127 *((uint16_t*) y) = (uint16_t) vy_lo;
128 y += 2;
129 vy_lo >>= 16;
130 }
131 if (n & (1 * sizeof(float))) {
132 *y = (uint8_t) vy_lo;
133 }
134 }
135 }