// Auto-generated file. Do not edit!
// Template: src/f16-f32-vcvt/sse-int16.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>


void xnn_f16_f32_vcvt_ukernel__sse2_int16_x16(
    size_t n,
    const void* input,
    float* output,
    const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

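  // Load the conversion constants. With the standard sse_int16 params layout
  // these are expected to be sign_mask = 0x8000, exp_offset = 0x7000,
  // exp_scale = 0x1.0p-112f, magic_mask = 0x3F00, magic_bias = 0.5f, and
  // denorm_cutoff = 0x0400 (assumed values, not verified here).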
  const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse_int16.sign_mask);
  const __m128i vexp_offset = _mm_load_si128((const __m128i*) params->sse_int16.exp_offset);
  const __m128 vexp_scale = _mm_load_ps(params->sse_int16.exp_scale);
  const __m128i vmagic_mask = _mm_load_si128((const __m128i*) params->sse_int16.magic_mask);
  const __m128 vmagic_bias = _mm_load_ps(params->sse_int16.magic_bias);
  const __m128i vdenorm_cutoff = _mm_load_si128((const __m128i*) params->sse_int16.denorm_cutoff);

  const uint16_t* i = (const uint16_t*) input;
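  // Main loop: convert 16 half-precision elements per iteration.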
  for (; n >= 16 * sizeof(uint16_t); n -= 16 * sizeof(uint16_t)) {
    const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
    const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8));
    i += 16;

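    // Isolate the sign bit (bit 15) of each half-precision element.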
    const __m128i vsign0 = _mm_and_si128(vh0, vsign_mask);
    const __m128i vsign1 = _mm_and_si128(vh1, vsign_mask);

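    // XOR the sign back out to leave only the exponent and mantissa bits.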
    const __m128i vnonsign0 = _mm_xor_si128(vh0, vsign0);
    const __m128i vnonsign1 = _mm_xor_si128(vh1, vsign1);

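    // Normalized inputs, step 1: build the two 16-bit halves of each FP32
    // word. nonsign << 13 yields the low half (mantissa bits), and
    // (nonsign >> 3) + exp_offset yields the high half (exponent shifted into
    // its FP32 position).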
    const __m128i vprenorm0 = _mm_slli_epi16(vnonsign0, 13);
    const __m128i vprenorm1 = _mm_add_epi16(_mm_srli_epi16(vnonsign0, 3), vexp_offset);
    const __m128i vprenorm2 = _mm_slli_epi16(vnonsign1, 13);
    const __m128i vprenorm3 = _mm_add_epi16(_mm_srli_epi16(vnonsign1, 3), vexp_offset);

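    // Normalized inputs, step 2: interleave the halves into 32-bit words and
    // multiply by exp_scale, which (together with exp_offset above) rebases
    // the exponent bias from 15 (FP16) to 127 (FP32) under the assumed
    // constants. Infinities and NaNs pass through the multiplication intact.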
    const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm0, vprenorm1)), vexp_scale));
    const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm0, vprenorm1)), vexp_scale));
    const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm2, vprenorm3)), vexp_scale));
    const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm2, vprenorm3)), vexp_scale));

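    // Denormalized inputs: pairing each nonsign half with magic_mask builds a
    // float equal to magic_bias plus the subnormal value; subtracting
    // magic_bias then recovers that value exactly (the magic-number trick).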
    const __m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign1, vmagic_mask)), vmagic_bias));

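    // Per-element selection mask: elements above denorm_cutoff carry a
    // normalized FP16 encoding. The signed 16-bit compare is safe here
    // because the sign bit has already been cleared.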
    const __m128i vmask0 = _mm_cmpgt_epi16(vnonsign0, vdenorm_cutoff);
    const __m128i vmask1 = _mm_cmpgt_epi16(vnonsign1, vdenorm_cutoff);

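    // Widen each 16-bit mask to 32 bits, select the normalized or
    // denormalized result per element, and OR the sign back in: unpacking
    // with zero places the sign half in the upper 16 bits, so the sign bit
    // lands at bit 31 of the FP32 word.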
    const __m128i vxmask0 = _mm_unpacklo_epi16(vmask0, vmask0);
    const __m128i vf0 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign0),
      _mm_or_si128(_mm_and_si128(vxmask0, vnorm0), _mm_andnot_si128(vxmask0, vdenorm0)));
    const __m128i vxmask1 = _mm_unpackhi_epi16(vmask0, vmask0);
    const __m128i vf1 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign0),
      _mm_or_si128(_mm_and_si128(vxmask1, vnorm1), _mm_andnot_si128(vxmask1, vdenorm1)));
    const __m128i vxmask2 = _mm_unpacklo_epi16(vmask1, vmask1);
    const __m128i vf2 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign1),
      _mm_or_si128(_mm_and_si128(vxmask2, vnorm2), _mm_andnot_si128(vxmask2, vdenorm2)));
    const __m128i vxmask3 = _mm_unpackhi_epi16(vmask1, vmask1);
    const __m128i vf3 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign1),
      _mm_or_si128(_mm_and_si128(vxmask3, vnorm3), _mm_andnot_si128(vxmask3, vdenorm3)));

    _mm_storeu_ps(output, _mm_castsi128_ps(vf0));
    _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1));
    _mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2));
    _mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3));
    output += 16;
  }
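  // Tail loop: convert 8 elements at a time with the same algorithm as above.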
  for (; n >= 8 * sizeof(uint16_t); n -= 8 * sizeof(uint16_t)) {
    const __m128i vh = _mm_loadu_si128((const __m128i*) i);
    i += 8;

    const __m128i vsign = _mm_and_si128(vh, vsign_mask);

    const __m128i vnonsign = _mm_xor_si128(vh, vsign);

    const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
    const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);

    const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
    const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));

    const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));

    const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);

    const __m128i vxmask_lo = _mm_unpacklo_epi16(vmask, vmask);
    const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
      _mm_or_si128(_mm_and_si128(vxmask_lo, vnorm_lo), _mm_andnot_si128(vxmask_lo, vdenorm_lo)));

    const __m128i vxmask_hi = _mm_unpackhi_epi16(vmask, vmask);
    const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
      _mm_or_si128(_mm_and_si128(vxmask_hi, vnorm_hi), _mm_andnot_si128(vxmask_hi, vdenorm_hi)));

    _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
    _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
    output += 8;
  }
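  // Final 1-7 elements: load a full 8-element vector (the XNN_OOB_READS
  // annotation on this function marks the read past the end of the input as
  // intentional) and convert, storing only the remaining elements.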
  if XNN_UNPREDICTABLE(n != 0) {
    const __m128i vh = _mm_loadu_si128((const __m128i*) i);

    const __m128i vsign = _mm_and_si128(vh, vsign_mask);

    const __m128i vnonsign = _mm_xor_si128(vh, vsign);

    const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
    const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);

    const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
    const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));

    const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));

    const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);

    const __m128i vxmask_lo = _mm_unpacklo_epi16(vmask, vmask);
    __m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
      _mm_or_si128(_mm_and_si128(vxmask_lo, vnorm_lo), _mm_andnot_si128(vxmask_lo, vdenorm_lo)));

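    // Store 4, 2, and/or 1 elements according to the low bits of n, moving
    // the next converted values into the low lanes after each partial store.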
    if (n & (4 * sizeof(uint16_t))) {
      _mm_storeu_ps(output, _mm_castsi128_ps(vf));
      output += 4;

      const __m128i vxmask_hi = _mm_unpackhi_epi16(vmask, vmask);
      vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
        _mm_or_si128(_mm_and_si128(vxmask_hi, vnorm_hi), _mm_andnot_si128(vxmask_hi, vdenorm_hi)));
    }
    if (n & (2 * sizeof(uint16_t))) {
      _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
      output += 2;

      vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
    }
    if (n & (1 * sizeof(uint16_t))) {
      _mm_store_ss(output, _mm_castsi128_ps(vf));
    }
  }
}