// Auto-generated file. Do not edit!
// Template: src/f16-f32-vcvt/wasmsimd-int32.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>


void xnn_f16_f32_vcvt_ukernel__wasmsimd_int32_x32(
    size_t n,
    const void* input,
    float* output,
    const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

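  // Load the conversion constants: the fp16 sign mask, the exponent offset and
  // scale used to rebias normalized values, the magic bias for the denormal
  // path, and the cutoff separating normalized from denormalized fp16 inputs.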
  const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_int32.sign_mask);
  const v128_t vexp_offset = wasm_v128_load64_splat(params->wasmsimd_int32.exp_offset);
  const v128_t vexp_scale = wasm_v128_load64_splat(params->wasmsimd_int32.exp_scale);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_int32.magic_bias);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_int32.denorm_cutoff);

  const uint16_t* i = (const uint16_t*) input;
  for (; n >= 32 * sizeof(uint16_t); n -= 32 * sizeof(uint16_t)) {
    const v128_t vh0 = wasm_v128_load(i);
    const v128_t vh1 = wasm_v128_load(i + 8);
    const v128_t vh2 = wasm_v128_load(i + 16);
    const v128_t vh3 = wasm_v128_load(i + 24);
    i += 32;

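    // Widen each fp16 value into the upper 16 bits of a 32-bit lane by
    // interleaving with zeros, so the fp16 sign bit lands at bit 31.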
    const v128_t vzero = wasm_i16x8_const_splat(0);
    const v128_t vw0 = wasm_v16x8_shuffle(vzero, vh0, 0, 8, 1, 9, 2, 10, 3, 11);
    const v128_t vw1 = wasm_v16x8_shuffle(vzero, vh0, 4, 12, 5, 13, 6, 14, 7, 15);
    const v128_t vw2 = wasm_v16x8_shuffle(vzero, vh1, 0, 8, 1, 9, 2, 10, 3, 11);
    const v128_t vw3 = wasm_v16x8_shuffle(vzero, vh1, 4, 12, 5, 13, 6, 14, 7, 15);
    const v128_t vw4 = wasm_v16x8_shuffle(vzero, vh2, 0, 8, 1, 9, 2, 10, 3, 11);
    const v128_t vw5 = wasm_v16x8_shuffle(vzero, vh2, 4, 12, 5, 13, 6, 14, 7, 15);
    const v128_t vw6 = wasm_v16x8_shuffle(vzero, vh3, 0, 8, 1, 9, 2, 10, 3, 11);
    const v128_t vw7 = wasm_v16x8_shuffle(vzero, vh3, 4, 12, 5, 13, 6, 14, 7, 15);

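    // Isolate the sign bit of each element.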
    const v128_t vsign0 = wasm_v128_and(vw0, vsign_mask);
    const v128_t vsign1 = wasm_v128_and(vw1, vsign_mask);
    const v128_t vsign2 = wasm_v128_and(vw2, vsign_mask);
    const v128_t vsign3 = wasm_v128_and(vw3, vsign_mask);
    const v128_t vsign4 = wasm_v128_and(vw4, vsign_mask);
    const v128_t vsign5 = wasm_v128_and(vw5, vsign_mask);
    const v128_t vsign6 = wasm_v128_and(vw6, vsign_mask);
    const v128_t vsign7 = wasm_v128_and(vw7, vsign_mask);

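    // Clear the sign bit to obtain the non-negative fp16 payload.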
    const v128_t vnonsign0 = wasm_v128_xor(vw0, vsign0);
    const v128_t vnonsign1 = wasm_v128_xor(vw1, vsign1);
    const v128_t vnonsign2 = wasm_v128_xor(vw2, vsign2);
    const v128_t vnonsign3 = wasm_v128_xor(vw3, vsign3);
    const v128_t vnonsign4 = wasm_v128_xor(vw4, vsign4);
    const v128_t vnonsign5 = wasm_v128_xor(vw5, vsign5);
    const v128_t vnonsign6 = wasm_v128_xor(vw6, vsign6);
    const v128_t vnonsign7 = wasm_v128_xor(vw7, vsign7);

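    // Normalized path: shift right by 3 to align the fp16 exponent and
    // mantissa with their fp32 bit positions, then rebias the exponent by
    // adding vexp_offset in the integer domain and scaling by vexp_scale in
    // the floating-point domain. Splitting the rebias into an add and a
    // multiply lets fp16 infinities and NaNs map to fp32 infinities and NaNs.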
    const v128_t vnorm0 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign0, 3), vexp_offset), vexp_scale);
    const v128_t vnorm1 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign1, 3), vexp_offset), vexp_scale);
    const v128_t vnorm2 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign2, 3), vexp_offset), vexp_scale);
    const v128_t vnorm3 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign3, 3), vexp_offset), vexp_scale);
    const v128_t vnorm4 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign4, 3), vexp_offset), vexp_scale);
    const v128_t vnorm5 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign5, 3), vexp_offset), vexp_scale);
    const v128_t vnorm6 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign6, 3), vexp_offset), vexp_scale);
    const v128_t vnorm7 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign7, 3), vexp_offset), vexp_scale);

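    // Denormalized path: move the raw fp16 bits to the low 16 bits of the
    // lane, OR in the magic bias so they become the mantissa of a known
    // float, then subtract the bias to recover the denormalized value.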
    const v128_t vdenorm0 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign0, 16), vmagic_bias), vmagic_bias);
    const v128_t vdenorm1 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign1, 16), vmagic_bias), vmagic_bias);
    const v128_t vdenorm2 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign2, 16), vmagic_bias), vmagic_bias);
    const v128_t vdenorm3 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign3, 16), vmagic_bias), vmagic_bias);
    const v128_t vdenorm4 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign4, 16), vmagic_bias), vmagic_bias);
    const v128_t vdenorm5 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign5, 16), vmagic_bias), vmagic_bias);
    const v128_t vdenorm6 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign6, 16), vmagic_bias), vmagic_bias);
    const v128_t vdenorm7 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign7, 16), vmagic_bias), vmagic_bias);

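    // Lanes whose payload exceeds the cutoff hold normalized fp16 numbers.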
    const v128_t vxmask0 = wasm_i32x4_gt(vnonsign0, vdenorm_cutoff);
    const v128_t vxmask1 = wasm_i32x4_gt(vnonsign1, vdenorm_cutoff);
    const v128_t vxmask2 = wasm_i32x4_gt(vnonsign2, vdenorm_cutoff);
    const v128_t vxmask3 = wasm_i32x4_gt(vnonsign3, vdenorm_cutoff);
    const v128_t vxmask4 = wasm_i32x4_gt(vnonsign4, vdenorm_cutoff);
    const v128_t vxmask5 = wasm_i32x4_gt(vnonsign5, vdenorm_cutoff);
    const v128_t vxmask6 = wasm_i32x4_gt(vnonsign6, vdenorm_cutoff);
    const v128_t vxmask7 = wasm_i32x4_gt(vnonsign7, vdenorm_cutoff);

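    // Select the normalized or denormalized result per lane, then restore the sign.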
    const v128_t vf0 = wasm_v128_or(vsign0, wasm_v128_bitselect(vnorm0, vdenorm0, vxmask0));
    const v128_t vf1 = wasm_v128_or(vsign1, wasm_v128_bitselect(vnorm1, vdenorm1, vxmask1));
    const v128_t vf2 = wasm_v128_or(vsign2, wasm_v128_bitselect(vnorm2, vdenorm2, vxmask2));
    const v128_t vf3 = wasm_v128_or(vsign3, wasm_v128_bitselect(vnorm3, vdenorm3, vxmask3));
    const v128_t vf4 = wasm_v128_or(vsign4, wasm_v128_bitselect(vnorm4, vdenorm4, vxmask4));
    const v128_t vf5 = wasm_v128_or(vsign5, wasm_v128_bitselect(vnorm5, vdenorm5, vxmask5));
    const v128_t vf6 = wasm_v128_or(vsign6, wasm_v128_bitselect(vnorm6, vdenorm6, vxmask6));
    const v128_t vf7 = wasm_v128_or(vsign7, wasm_v128_bitselect(vnorm7, vdenorm7, vxmask7));

    wasm_v128_store(output, vf0);
    wasm_v128_store(output + 4, vf1);
    wasm_v128_store(output + 8, vf2);
    wasm_v128_store(output + 12, vf3);
    wasm_v128_store(output + 16, vf4);
    wasm_v128_store(output + 20, vf5);
    wasm_v128_store(output + 24, vf6);
    wasm_v128_store(output + 28, vf7);
    output += 32;
  }
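  // Process remaining full vectors of 8 elements with the same conversion steps.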
  for (; n >= 8 * sizeof(uint16_t); n -= 8 * sizeof(uint16_t)) {
    const v128_t vh = wasm_v128_load(i);
    i += 8;

    const v128_t vzero = wasm_i16x8_const_splat(0);
    const v128_t vw_lo = wasm_v16x8_shuffle(vzero, vh, 0, 8, 1, 9, 2, 10, 3, 11);
    const v128_t vw_hi = wasm_v16x8_shuffle(vzero, vh, 4, 12, 5, 13, 6, 14, 7, 15);

    const v128_t vsign_lo = wasm_v128_and(vw_lo, vsign_mask);
    const v128_t vsign_hi = wasm_v128_and(vw_hi, vsign_mask);

    const v128_t vnonsign_lo = wasm_v128_xor(vw_lo, vsign_lo);
    const v128_t vnonsign_hi = wasm_v128_xor(vw_hi, vsign_hi);

    const v128_t vnorm_lo = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_lo, 3), vexp_offset), vexp_scale);
    const v128_t vnorm_hi = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_hi, 3), vexp_offset), vexp_scale);

    const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_lo, 16), vmagic_bias), vmagic_bias);
    const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_hi, 16), vmagic_bias), vmagic_bias);

    const v128_t vxmask_lo = wasm_i32x4_gt(vnonsign_lo, vdenorm_cutoff);
    const v128_t vxmask_hi = wasm_i32x4_gt(vnonsign_hi, vdenorm_cutoff);

    const v128_t vf_lo = wasm_v128_or(vsign_lo, wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo));
    const v128_t vf_hi = wasm_v128_or(vsign_hi, wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi));

    wasm_v128_store(output, vf_lo);
    wasm_v128_store(output + 4, vf_hi);
    output += 8;
  }
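  // Handle a tail of 1-7 elements. The full 16-byte load may read past the
  // end of the input buffer; this is sanctioned by the XNN_OOB_READS
  // annotation on the function declaration.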
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(uint16_t));
    assert(n <= 7 * sizeof(uint16_t));
    const v128_t vh = wasm_v128_load(i);

    const v128_t vzero = wasm_i16x8_const_splat(0);
    const v128_t vw_lo = wasm_v16x8_shuffle(vzero, vh, 0, 8, 1, 9, 2, 10, 3, 11);
    const v128_t vw_hi = wasm_v16x8_shuffle(vzero, vh, 4, 12, 5, 13, 6, 14, 7, 15);

    const v128_t vsign_lo = wasm_v128_and(vw_lo, vsign_mask);
    const v128_t vsign_hi = wasm_v128_and(vw_hi, vsign_mask);

    const v128_t vnonsign_lo = wasm_v128_xor(vw_lo, vsign_lo);
    const v128_t vnonsign_hi = wasm_v128_xor(vw_hi, vsign_hi);

    const v128_t vnorm_lo = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_lo, 3), vexp_offset), vexp_scale);
    const v128_t vnorm_hi = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_hi, 3), vexp_offset), vexp_scale);

    const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_lo, 16), vmagic_bias), vmagic_bias);
    const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_hi, 16), vmagic_bias), vmagic_bias);

    const v128_t vxmask_lo = wasm_i32x4_gt(vnonsign_lo, vdenorm_cutoff);
    v128_t vf = wasm_v128_or(vsign_lo, wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo));

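    // Store 4, then 2, then 1 element(s) according to the bits of the tail
    // length, advancing vf to the next unwritten values after each store.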
    if (n & (4 * sizeof(uint16_t))) {
      wasm_v128_store(output, vf);
      output += 4;

      const v128_t vxmask_hi = wasm_i32x4_gt(vnonsign_hi, vdenorm_cutoff);
      vf = wasm_v128_or(vsign_hi, wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi));
    }
    if (n & (2 * sizeof(uint16_t))) {
      *((double*) output) = wasm_f64x2_extract_lane(vf, 0);
      output += 2;

      vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
    }
    if (n & (1 * sizeof(uint16_t))) {
      *((float*) output) = wasm_f32x4_extract_lane(vf, 0);
    }
  }
}