// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <gtest/gtest.h>

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <random>
#include <vector>

#include <fp16.h>

#include <xnnpack.h>
#include <xnnpack/AlignedAllocator.h>
#include <xnnpack/params-init.h>
#include <xnnpack/params.h>

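// Returns true if the half-precision bit pattern x encodes zero (either +0.0 or -0.0):
// doubling the bit pattern discards the sign bit, so the sum is zero only for 0x0000 and 0x8000.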
static inline bool is_fp16_zero(uint16_t x) {
  const uint16_t two_x = x + x;
  return two_x == 0;
}

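// Tester for sparse-matrix-times-dense-matrix (SpMM) micro-kernels: it builds a random sparse
// weight matrix, packs it into the bias/weight (w), input-pointer delta (dmap), and per-channel
// non-zero count (nmap) arrays the micro-kernel expects, runs the kernel, and compares the result
// against a dense reference computation.
//
// A minimal usage sketch (the kernel symbol below is illustrative; pass any
// xnn_f32_spmm_minmax_ukernel_* function whose tile sizes match mr()/nr()):
//
//   SpMMMicrokernelTester()
//     .mr(8)
//     .nr(1)
//     .m(8)
//     .n(4)
//     .k(16)
//     .sparsity(0.75f)
//     .Test(xnn_f32_spmm_minmax_ukernel_8x1__scalar);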
class SpMMMicrokernelTester {
 public:
  enum class Variant {
    Native,
    Scalar,
  };

  inline SpMMMicrokernelTester& mr(size_t mr) {
    this->mr_ = mr;
    return *this;
  }

  inline size_t mr() const {
    return this->mr_;
  }

  inline SpMMMicrokernelTester& nr(size_t nr) {
    this->nr_ = nr;
    return *this;
  }

  inline size_t nr() const {
    return this->nr_;
  }

  inline SpMMMicrokernelTester& m(size_t m) {
    this->m_ = m;
    return *this;
  }

  inline size_t m() const {
    return this->m_;
  }

  inline SpMMMicrokernelTester& n(size_t n) {
    this->n_ = n;
    return *this;
  }

  inline size_t n() const {
    return this->n_;
  }

  inline SpMMMicrokernelTester& k(size_t k) {
    this->k_ = k;
    return *this;
  }

  inline size_t k() const {
    return this->k_;
  }

  inline SpMMMicrokernelTester& output_stride(size_t output_stride) {
    assert(output_stride != 0);
    this->output_stride_ = output_stride;
    return *this;
  }

  inline size_t output_stride() const {
    if (this->output_stride_ == 0) {
      return m();
    } else {
      assert(this->output_stride_ >= m());
      return this->output_stride_;
    }
  }

  inline SpMMMicrokernelTester& sparsity(float sparsity) {
    this->sparsity_ = sparsity;
    return *this;
  }

  inline float sparsity() const {
    return this->sparsity_;
  }

  inline SpMMMicrokernelTester& qmin(uint8_t qmin) {
    this->qmin_ = qmin;
    return *this;
  }

  inline uint8_t qmin() const {
    return this->qmin_;
  }

  inline SpMMMicrokernelTester& qmax(uint8_t qmax) {
    this->qmax_ = qmax;
    return *this;
  }

  inline uint8_t qmax() const {
    return this->qmax_;
  }

  inline SpMMMicrokernelTester& iterations(size_t iterations) {
    this->iterations_ = iterations;
    return *this;
  }

  inline size_t iterations() const {
    return this->iterations_;
  }

  void Test(xnn_f32_spmm_minmax_ukernel_function spmm, Variant variant = Variant::Native) const {
    ASSERT_GE(m(), 1);
    ASSERT_GE(n(), 1);
    ASSERT_GE(k(), 1);

    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    auto f32rng = std::bind(std::uniform_real_distribution<float>(), rng);
    auto prng = std::bind(std::uniform_real_distribution<float>(), rng);

    std::vector<float, AlignedAllocator<float, 64>> input(k() * m());
    // Think of b as (n/nr + n % nr) x k, expansion happens later.
    const size_t ncols = n() / nr() + n() % nr();
    std::vector<float> b(ncols * k());
    std::vector<float> bias(n());
    // Number of non-zero weights per N (output channel).
    std::vector<uint32_t> nmap(n());
    // Mapping from index of non-zero weight to increment of K (input channel) following this index.
    std::vector<int32_t> dmap(n() * k());
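    // Packed weights: for each block of nr output channels (leftover channels are packed one at a
    // time), nr bias values followed by nr weight values for every non-zero column of b in that block.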
    std::vector<float> w(n() * k() + n());
    std::vector<float> output((n() - 1) * output_stride() + m());
    std::vector<float> output_ref(n() * m());

    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), std::ref(f32rng));
      std::generate(b.begin(), b.end(), std::ref(f32rng));
      std::generate(bias.begin(), bias.end(), std::ref(f32rng));
      std::fill(output.begin(), output.end(), nanf(""));
      std::fill(output_ref.begin(), output_ref.end(), 0.0f);
      std::fill(nmap.begin(), nmap.end(), 0);
      std::fill(dmap.begin(), dmap.end(), 0);
      std::fill(w.begin(), w.end(), 0.0f);

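      // Randomly zero out entries of b so that, in expectation, a fraction sparsity() of the
      // weights is zero.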
      for (float& b_value : b) {
        if (prng() <= sparsity()) {
          b_value = 0.0f;
        }
      }

      uint32_t nnz = 0;
      uint32_t wcnt = 0;
      size_t last_kk = 0;
      bool first_nzz = true;
      size_t first_kk = 0;
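      // Pack the blocked part of the matrix: for each group of nr output channels, emit nr bias
      // values, then nr weight values for every non-zero column of b. dmap records the byte offset
      // by which the input pointer advances between consecutive non-zeros, and nmap counts the
      // non-zeros per output-channel block.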
      for (size_t nn = 0; nn < n() / nr(); nn++) {
        for (size_t i = 0; i < nr(); ++i)
          w[wcnt++] = bias[nr() * nn + i];
        for (size_t kk = 0; kk < k(); kk++) {
          if (b[nn * k() + kk] != 0.0f) {
            // Every non-zero actually corresponds to nr adjacent non-zeros.
            for (size_t i = 0; i < nr(); ++i)
              w[wcnt++] = b[nn * k() + kk] + static_cast<float>(i);
            // Skip the very first non-zero weight as we record only the difference.
            if (first_nzz) {
              first_kk = kk;
            } else {
              const int32_t increment = int32_t(kk - last_kk) * int32_t(m() * sizeof(float));
              dmap[nnz++] = increment;
            }
            last_kk = kk;
            first_nzz = false;
            nmap[nn] += 1;
          }
        }
      }

      // Now that the blocked part of the matrix is constructed, switch to the leftover output
      // channels, which are always handled with nr = 1.
      for (size_t nn = n() / nr(); nn < ncols; nn++) {
        w[wcnt++] = bias[(n() / nr()) * nr() + (nn - n() / nr())];
        for (size_t kk = 0; kk < k(); kk++) {
          if (b[nn * k() + kk] != 0.0f) {
            // Every non-zero actually corresponds to nr adjacent non-zeros.
            w[wcnt++] = b[nn * k() + kk];
            // Skip the very first non-zero weight as we record only the difference.
            if (first_nzz) {
              first_kk = kk;
            } else {
              const int32_t increment = int32_t(kk - last_kk) * int32_t(m() * sizeof(float));
              dmap[nnz++] = increment;
            }
            last_kk = kk;
            first_nzz = false;
            nmap[nn] += 1;
          }
        }
      }
      // In the end, we must return the input pointer to its initial position.
      const int64_t increment = int32_t(first_kk - last_kk) * int32_t(m() * sizeof(float));
      dmap[nnz++] = increment;

      // Generate the expanded b that is used in the reference calculation: wherever the original
      // has a non-zero, copy it and add nr - 1 adjacent non-zeros with incremented weight values.
      std::vector<float> b_full(n() * k());
      if (nr() == 1) {
        b_full = b;
      }
      else {
        for (size_t nn = 0; nn < n() / nr(); nn++) {
          for (size_t kk = 0; kk < k(); kk++) {
            if (b[nn * k() + kk] != 0.0f) {
              for (size_t i = 0; i < nr(); ++i)
                b_full[nr() * nn * k() + i * k() + kk] = b[nn * k() + kk] + static_cast<float>(i);
            }
          }
        }
        for (size_t nn = n() / nr(); nn < ncols; nn++) {
          for (size_t kk = 0; kk < k(); kk++) {
            if (b[nn * k() + kk] != 0.0f) {
              b_full[nr() * (n() / nr()) * k() + (nn - n() / nr()) * k() + kk] = b[nn * k() + kk];
            }
          }
        }
      }

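      // Dense reference: output_ref[oc][px] = bias[oc] + sum over ic of input[ic][px] * b_full[oc][ic].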
      for (size_t oc = 0; oc < n(); oc++) {
        for (size_t pxb = 0; pxb < m(); pxb++) {
          output_ref[oc * m() + pxb] = bias[oc];
          for (size_t ic = 0; ic < k(); ic++) {
            output_ref[oc * m() + pxb] += input[ic * m() + pxb] * b_full[oc * k() + ic];
          }
        }
      }

      // Micro-kernel can access one element beyond w and dmap for software pipelining.
      w.resize(wcnt + 1);
      dmap.resize(nnz + 1);

      // Compute clamping parameters.
      const float accumulated_min = *std::min_element(output_ref.cbegin(), output_ref.cend());
      const float accumulated_max = *std::max_element(output_ref.cbegin(), output_ref.cend());
      const float output_min = accumulated_min + (accumulated_max - accumulated_min) / 255.0f * float(qmin());
      const float output_max = accumulated_max - (accumulated_max - accumulated_min) / 255.0f * float(255 - qmax());
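      // qmin() = 0 and qmax() = 255 leave the observed output range unclamped; other values tighten
      // the bounds in proportion to qmin/255 and (255 - qmax)/255 of that range.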

      // Clamp reference results.
      for (float& output_value : output_ref) {
        output_value = std::min(std::max(output_value, output_min), output_max);
      }

      // Prepare parameters.
      xnn_f32_minmax_params params = { };
      switch (variant) {
        case Variant::Native:
          params = xnn_init_f32_minmax_params(output_min, output_max);
          break;
        case Variant::Scalar:
          params = xnn_init_scalar_f32_minmax_params(output_min, output_max);
          break;
      }

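      // Invoke the micro-kernel: M dimension in bytes, number of output channels, input advanced
      // to the first non-zero column (first_kk), packed weights, input-pointer deltas (dmap),
      // per-channel non-zero counts (nmap), output with its stride in bytes, and clamping parameters.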
      spmm(m() * sizeof(float), n(),
        input.data() + first_kk * m(),
        w.data(), dmap.data(), nmap.data(),
        output.data(), output_stride() * sizeof(float),
        &params);

      // Validate micro-kernel outputs.
      for (size_t i = 0; i < m(); i++) {
        for (size_t j = 0; j < n(); j++) {
          ASSERT_NEAR(
            output[j * output_stride() + i],
            output_ref[j * m() + i],
            std::abs(output_ref[j * m() + i]) * 1.0e-6f)
            << "at M index " << i << " / " << m() << " (tile " << mr() << ")"
            << ", N index " << j << " / " << n() << " (tile " << nr() << ")"
            << ", K = " << k();
        }
      }
    }
  }

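  // Same flow as the f32 test above, but inputs, weights, and outputs are raw IEEE fp16 bit
  // patterns; the reference result is accumulated in fp32.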
  void Test(xnn_f16_spmm_minmax_ukernel_function spmm) const {
    ASSERT_GE(m(), 1);
    ASSERT_GE(n(), 1);
    ASSERT_GE(k(), 1);

    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    auto f32rng = std::bind(std::uniform_real_distribution<float>(), rng);
    auto f16rng = std::bind(fp16_ieee_from_fp32_value, f32rng);
    auto prng = std::bind(std::uniform_real_distribution<float>(), rng);

    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> input(k() * m());
    // Think of b as (n/nr + n % nr) x k, expansion happens later.
    const size_t ncols = n() / nr() + n() % nr();
    std::vector<uint16_t> b(ncols * k());
    std::vector<uint16_t> bias(n());
    // Number of non-zero weights per N (output channel).
    std::vector<uint32_t> nmap(n());
    // Mapping from index of non-zero weight to increment of K (input channel) following this index.
    std::vector<int32_t> dmap(n() * k());
    std::vector<uint16_t> w(n() * k() + n());
    std::vector<uint16_t> output((n() - 1) * output_stride() + m());
    std::vector<float> output_ref(n() * m());

    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), std::ref(f16rng));
      std::generate(b.begin(), b.end(), std::ref(f16rng));
      std::generate(bias.begin(), bias.end(), std::ref(f16rng));
      std::fill(output.begin(), output.end(), 0xC000);
      std::fill(output_ref.begin(), output_ref.end(), 0.0f);
      std::fill(nmap.begin(), nmap.end(), 0);
      std::fill(dmap.begin(), dmap.end(), 0);
      std::fill(w.begin(), w.end(), 0);

      for (uint16_t& b_value : b) {
        if (prng() <= sparsity()) {
          b_value = 0;
        }
      }

      uint32_t nnz = 0;
      uint32_t wcnt = 0;
      size_t last_kk = 0;
      bool first_nzz = true;
      size_t first_kk = 0;
      for (size_t nn = 0; nn < n() / nr(); nn++) {
        for (size_t i = 0; i < nr(); ++i)
          w[wcnt++] = bias[nr() * nn + i];
        for (size_t kk = 0; kk < k(); kk++) {
          if (!is_fp16_zero(b[nn * k() + kk])) {
            // Every non-zero actually corresponds to nr adjacent non-zeros.
            for (size_t i = 0; i < nr(); ++i)
              w[wcnt++] = fp16_ieee_from_fp32_value(fp16_ieee_to_fp32_value(b[nn * k() + kk]) + static_cast<float>(i));
            // Skip the very first non-zero weight as we record only the difference.
            if (first_nzz) {
              first_kk = kk;
            } else {
              const int32_t increment = int32_t(kk - last_kk) * int32_t(m() * sizeof(uint16_t));
              dmap[nnz++] = increment;
            }
            last_kk = kk;
            first_nzz = false;
            nmap[nn] += 1;
          }
        }
      }

      // Now that the blocked part of the matrix is constructed, switch to the leftover output
      // channels, which are always handled with nr = 1.
      for (size_t nn = n() / nr(); nn < ncols; nn++) {
        w[wcnt++] = bias[(n() / nr()) * nr() + (nn - n() / nr())];
        for (size_t kk = 0; kk < k(); kk++) {
          if (!is_fp16_zero(b[nn * k() + kk])) {
            // Every non-zero actually corresponds to nr adjacent non-zeros.
            w[wcnt++] = b[nn * k() + kk];
            // Skip the very first non-zero weight as we record only the difference.
            if (first_nzz) {
              first_kk = kk;
            } else {
              const int32_t increment = int32_t(kk - last_kk) * int32_t(m() * sizeof(uint16_t));
              dmap[nnz++] = increment;
            }
            last_kk = kk;
            first_nzz = false;
            nmap[nn] += 1;
          }
        }
      }
      // In the end, we must return the input pointer to its initial position.
      const int64_t increment = int32_t(first_kk - last_kk) * int32_t(m() * sizeof(uint16_t));
      dmap[nnz++] = increment;

      // Generate the expanded b that is used in the reference calculation: wherever the original
      // has a non-zero, copy it and add nr - 1 adjacent non-zeros with incremented weight values.
      std::vector<uint16_t> b_full(n() * k());
      if (nr() == 1) {
        b_full = b;
      }
      else {
        for (size_t nn = 0; nn < n() / nr(); nn++) {
          for (size_t kk = 0; kk < k(); kk++) {
            if (!is_fp16_zero(b[nn * k() + kk])) {
              for (size_t i = 0; i < nr(); ++i)
                b_full[nr() * nn * k() + i * k() + kk] = fp16_ieee_from_fp32_value(
                  fp16_ieee_to_fp32_value(b[nn * k() + kk]) + static_cast<float>(i));
            }
          }
        }
        for (size_t nn = n() / nr(); nn < ncols; nn++) {
          for (size_t kk = 0; kk < k(); kk++) {
            if (!is_fp16_zero(b[nn * k() + kk])) {
              b_full[nr() * (n() / nr()) * k() + (nn - n() / nr()) * k() + kk] = b[nn * k() + kk];
            }
          }
        }
      }

      for (size_t oc = 0; oc < n(); oc++) {
        for (size_t pxb = 0; pxb < m(); pxb++) {
          output_ref[oc * m() + pxb] = fp16_ieee_to_fp32_value(bias[oc]);
          for (size_t ic = 0; ic < k(); ic++) {
            output_ref[oc * m() + pxb] += fp16_ieee_to_fp32_value(input[ic * m() + pxb]) * fp16_ieee_to_fp32_value(b_full[oc * k() + ic]);
          }
        }
      }

      // Micro-kernel can access one element beyond w and dmap for software pipelining.
      w.resize(wcnt + 1);
      dmap.resize(nnz + 1);

      // Compute clamping parameters.
      const float accumulated_min = *std::min_element(output_ref.cbegin(), output_ref.cend());
      const float accumulated_max = *std::max_element(output_ref.cbegin(), output_ref.cend());
      const float output_min = accumulated_min + (accumulated_max - accumulated_min) / 255.0f * float(qmin());
      const float output_max = accumulated_max - (accumulated_max - accumulated_min) / 255.0f * float(255 - qmax());

      // Clamp reference results.
      for (float& output_value : output_ref) {
        output_value = std::min(std::max(output_value, output_min), output_max);
      }

      // Prepare parameters.
      xnn_f16_scaleminmax_params params;
      params.scale = UINT16_C(0x3C00) /* 1.0 */;
      params.max = fp16_ieee_from_fp32_value(output_max);
      params.min = fp16_ieee_from_fp32_value(output_min);

      spmm(m() * sizeof(uint16_t), n(),
        input.data() + first_kk * m(),
        w.data(), dmap.data(), nmap.data(),
        output.data(), output_stride() * sizeof(uint16_t),
        &params);

      // Validate micro-kernel outputs.
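      // fp16 arithmetic is far less precise than fp32, so allow roughly 1% relative error with a
      // 1.0e-4 absolute floor.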
      for (size_t i = 0; i < m(); i++) {
        for (size_t j = 0; j < n(); j++) {
          ASSERT_NEAR(
            fp16_ieee_to_fp32_value(output[j * output_stride() + i]),
            output_ref[j * m() + i],
            std::max(1.0e-4f, std::abs(output_ref[j * m() + i]) * 1.0e-2f))
            << "at M index " << i << " / " << m() << " (tile " << mr() << ")"
            << ", N index " << j << " / " << n() << " (tile " << nr() << ")"
            << ", K = " << k();
        }
      }
    }
  }

 private:
  size_t mr_{1};
  size_t nr_{1};
  size_t m_{1};
  size_t n_{1};
  size_t k_{1};
  size_t output_stride_{0};
  float sparsity_{0.5f};
  uint8_t qmin_{0};
  uint8_t qmax_{255};
  size_t iterations_{1};
};
