// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <gtest/gtest.h>

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <limits>
#include <memory>
#include <random>
#include <vector>

#include <xnnpack.h>


class AveragePoolingOperatorTester {
 public:
  inline AveragePoolingOperatorTester& padding_tf_same(bool padding_same) {
    if (padding_same) {
      assert(padding_top() == 0);
      assert(padding_left() == 0);
      assert(padding_bottom() == 0);
      assert(padding_right() == 0);
    }
    this->padding_tf_same_ = padding_same;
    return *this;
  }

  inline bool padding_tf_same() const {
    return this->padding_tf_same_;
  }

  inline AveragePoolingOperatorTester& padding(uint32_t padding) {
    assert(!padding_tf_same());
    this->padding_top_ = padding;
    this->padding_right_ = padding;
    this->padding_bottom_ = padding;
    this->padding_left_ = padding;
    return *this;
  }

  inline AveragePoolingOperatorTester& padding(uint32_t padding_height, uint32_t padding_width) {
    assert(!padding_tf_same());
    this->padding_top_ = padding_height;
    this->padding_right_ = padding_width;
    this->padding_bottom_ = padding_height;
    this->padding_left_ = padding_width;
    return *this;
  }

  inline AveragePoolingOperatorTester& padding_height(uint32_t padding_height) {
    assert(!padding_tf_same());
    this->padding_top_ = padding_height;
    this->padding_bottom_ = padding_height;
    return *this;
  }

  inline AveragePoolingOperatorTester& padding_width(uint32_t padding_width) {
    assert(!padding_tf_same());
    this->padding_right_ = padding_width;
    this->padding_left_ = padding_width;
    return *this;
  }

  inline AveragePoolingOperatorTester& padding_top(uint32_t padding_top) {
    assert(!padding_tf_same());
    this->padding_top_ = padding_top;
    return *this;
  }

  inline uint32_t padding_top() const {
    if (padding_tf_same()) {
      const uint32_t total_padding_height =
        (output_height() - 1) * stride_height() + pooling_height() - input_height();
      return total_padding_height / 2;
    } else {
      return this->padding_top_;
    }
  }

  inline AveragePoolingOperatorTester& padding_left(uint32_t padding_left) {
    assert(!padding_tf_same());
    this->padding_left_ = padding_left;
    return *this;
  }

  inline uint32_t padding_left() const {
    if (padding_tf_same()) {
      const uint32_t total_padding_width =
        (output_width() - 1) * stride_width() + pooling_width() - input_width();
      return total_padding_width / 2;
    } else {
      return this->padding_left_;
    }
  }
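
  // Note on TF SAME padding: the two getters above and the two below derive
  // the implicit padding from the output size, splitting the total padding
  // between the leading (top/left) and trailing (bottom/right) edges, with
  // any odd pixel going to the trailing edge. A worked example:
  // input_width = 5, pooling_width = 3, stride_width = 2 gives
  // output_width = ceil(5 / 2) = 3 and
  // total_padding_width = (3 - 1) * 2 + 3 - 5 = 2, so padding_left = 1 and
  // padding_right = 2 - 1 = 1. With pooling_width = 2 instead, the total is
  // odd (1), and the extra pixel lands on the right: padding_left = 0,
  // padding_right = 1.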

  inline AveragePoolingOperatorTester& padding_bottom(uint32_t padding_bottom) {
    assert(!padding_tf_same());
    this->padding_bottom_ = padding_bottom;
    return *this;
  }

  inline uint32_t padding_bottom() const {
    if (padding_tf_same()) {
      const uint32_t total_padding_height =
        (output_height() - 1) * stride_height() + pooling_height() - input_height();
      return total_padding_height - total_padding_height / 2;
    } else {
      return this->padding_bottom_;
    }
  }

  inline AveragePoolingOperatorTester& padding_right(uint32_t padding_right) {
    assert(!padding_tf_same());
    this->padding_right_ = padding_right;
    return *this;
  }

  inline uint32_t padding_right() const {
    if (padding_tf_same()) {
      const uint32_t total_padding_width =
        (output_width() - 1) * stride_width() + pooling_width() - input_width();
      return total_padding_width - total_padding_width / 2;
    } else {
      return this->padding_right_;
    }
  }

  inline AveragePoolingOperatorTester& input_size(size_t input_height, size_t input_width) {
    assert(input_height >= 1);
    assert(input_width >= 1);
    this->input_height_ = input_height;
    this->input_width_ = input_width;
    return *this;
  }

  inline AveragePoolingOperatorTester& input_height(size_t input_height) {
    assert(input_height >= 1);
    this->input_height_ = input_height;
    return *this;
  }

  inline size_t input_height() const {
    return this->input_height_;
  }

  inline AveragePoolingOperatorTester& input_width(size_t input_width) {
    assert(input_width >= 1);
    this->input_width_ = input_width;
    return *this;
  }

  inline size_t input_width() const {
    return this->input_width_;
  }

  inline AveragePoolingOperatorTester& channels(size_t channels) {
    assert(channels != 0);
    this->channels_ = channels;
    return *this;
  }

  inline size_t channels() const {
    return this->channels_;
  }

  inline AveragePoolingOperatorTester& batch_size(size_t batch_size) {
    assert(batch_size != 0);
    this->batch_size_ = batch_size;
    return *this;
  }

  inline size_t batch_size() const {
    return this->batch_size_;
  }

  inline AveragePoolingOperatorTester& pooling_size(uint32_t pooling_size) {
    assert(pooling_size >= 1);
    this->pooling_height_ = pooling_size;
    this->pooling_width_ = pooling_size;
    return *this;
  }

  inline AveragePoolingOperatorTester& pooling_size(uint32_t pooling_height, uint32_t pooling_width) {
    assert(pooling_height >= 1);
    assert(pooling_width >= 1);
    this->pooling_height_ = pooling_height;
    this->pooling_width_ = pooling_width;
    return *this;
  }

  inline AveragePoolingOperatorTester& pooling_height(uint32_t pooling_height) {
    assert(pooling_height >= 1);
    this->pooling_height_ = pooling_height;
    return *this;
  }

  inline uint32_t pooling_height() const {
    return this->pooling_height_;
  }

  inline AveragePoolingOperatorTester& pooling_width(uint32_t pooling_width) {
    assert(pooling_width >= 1);
    this->pooling_width_ = pooling_width;
    return *this;
  }

  inline uint32_t pooling_width() const {
    return this->pooling_width_;
  }

  inline AveragePoolingOperatorTester& stride(uint32_t stride) {
    assert(stride >= 1);
    this->stride_height_ = stride;
    this->stride_width_ = stride;
    return *this;
  }

  inline AveragePoolingOperatorTester& stride(uint32_t stride_height, uint32_t stride_width) {
    assert(stride_height >= 1);
    assert(stride_width >= 1);
    this->stride_height_ = stride_height;
    this->stride_width_ = stride_width;
    return *this;
  }

  inline AveragePoolingOperatorTester& stride_height(uint32_t stride_height) {
    assert(stride_height >= 1);
    this->stride_height_ = stride_height;
    return *this;
  }

  inline uint32_t stride_height() const {
    return this->stride_height_;
  }

  inline AveragePoolingOperatorTester& stride_width(uint32_t stride_width) {
    assert(stride_width >= 1);
    this->stride_width_ = stride_width;
    return *this;
  }

  inline uint32_t stride_width() const {
    return this->stride_width_;
  }

  inline size_t output_height() const {
    if (padding_tf_same()) {
      return (input_height() + stride_height() - 1) / stride_height();
    } else {
      const size_t padded_input_height = padding_top() + input_height() + padding_bottom();
      if (padded_input_height <= pooling_height()) {
        return 1;
      } else {
        return (padded_input_height - pooling_height()) / stride_height() + 1;
      }
    }
  }

  inline size_t output_width() const {
    if (padding_tf_same()) {
      return (input_width() + stride_width() - 1) / stride_width();
    } else {
      const size_t padded_input_width = padding_left() + input_width() + padding_right();
      if (padded_input_width <= pooling_width()) {
        return 1;
      } else {
        return (padded_input_width - pooling_width()) / stride_width() + 1;
      }
    }
  }
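
  // Worked example for the explicit-padding output size above: with
  // input_height = 7, pooling_height = 3, stride_height = 2, and no padding,
  // output_height = (7 - 3) / 2 + 1 = 3. The pre-check against
  // pooling_height() keeps the unsigned subtraction from wrapping around when
  // the padded input is smaller than the pooling window.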

  inline AveragePoolingOperatorTester& input_pixel_stride(size_t input_pixel_stride) {
    assert(input_pixel_stride != 0);
    this->input_pixel_stride_ = input_pixel_stride;
    return *this;
  }

  inline size_t input_pixel_stride() const {
    if (this->input_pixel_stride_ == 0) {
      return channels();
    } else {
      assert(this->input_pixel_stride_ >= channels());
      return this->input_pixel_stride_;
    }
  }

  inline AveragePoolingOperatorTester& output_pixel_stride(size_t output_pixel_stride) {
    assert(output_pixel_stride != 0);
    this->output_pixel_stride_ = output_pixel_stride;
    return *this;
  }

  inline size_t output_pixel_stride() const {
    if (this->output_pixel_stride_ == 0) {
      return channels();
    } else {
      assert(this->output_pixel_stride_ >= channels());
      return this->output_pixel_stride_;
    }
  }

  inline AveragePoolingOperatorTester& next_input_size(uint32_t next_input_height, uint32_t next_input_width) {
    assert(next_input_height >= 1);
    assert(next_input_width >= 1);
    this->next_input_height_ = next_input_height;
    this->next_input_width_ = next_input_width;
    return *this;
  }

  inline AveragePoolingOperatorTester& next_input_height(uint32_t next_input_height) {
    assert(next_input_height >= 1);
    this->next_input_height_ = next_input_height;
    return *this;
  }

  inline uint32_t next_input_height() const {
    if (this->next_input_height_ == 0) {
      return input_height();
    } else {
      return this->next_input_height_;
    }
  }

  inline AveragePoolingOperatorTester& next_input_width(uint32_t next_input_width) {
    assert(next_input_width >= 1);
    this->next_input_width_ = next_input_width;
    return *this;
  }

  inline uint32_t next_input_width() const {
    if (this->next_input_width_ == 0) {
      return input_width();
    } else {
      return this->next_input_width_;
    }
  }

  inline size_t next_output_height() const {
    const size_t padded_next_input_height = padding_top() + next_input_height() + padding_bottom();
    if (padded_next_input_height <= pooling_height()) {
      return 1;
    } else {
      return (padded_next_input_height - pooling_height()) / stride_height() + 1;
    }
  }

  inline size_t next_output_width() const {
    const size_t padded_next_input_width = padding_left() + next_input_width() + padding_right();
    if (padded_next_input_width <= pooling_width()) {
      return 1;
    } else {
      return (padded_next_input_width - pooling_width()) / stride_width() + 1;
    }
  }

  inline AveragePoolingOperatorTester& next_batch_size(size_t next_batch_size) {
    assert(next_batch_size >= 1);
    this->next_batch_size_ = next_batch_size;
    return *this;
  }

  inline size_t next_batch_size() const {
    if (this->next_batch_size_ == 0) {
      return batch_size();
    } else {
      return this->next_batch_size_;
    }
  }

  inline AveragePoolingOperatorTester& input_scale(float input_scale) {
    assert(input_scale > 0.0f);
    assert(std::isnormal(input_scale));
    this->input_scale_ = input_scale;
    return *this;
  }

  inline float input_scale() const {
    return this->input_scale_;
  }

  inline AveragePoolingOperatorTester& input_zero_point(uint8_t input_zero_point) {
    this->input_zero_point_ = input_zero_point;
    return *this;
  }

  inline uint8_t input_zero_point() const {
    return this->input_zero_point_;
  }

  inline AveragePoolingOperatorTester& output_scale(float output_scale) {
    assert(output_scale > 0.0f);
    assert(std::isnormal(output_scale));
    this->output_scale_ = output_scale;
    return *this;
  }

  inline float output_scale() const {
    return this->output_scale_;
  }

  inline AveragePoolingOperatorTester& output_zero_point(uint8_t output_zero_point) {
    this->output_zero_point_ = output_zero_point;
    return *this;
  }

  inline uint8_t output_zero_point() const {
    return this->output_zero_point_;
  }

  inline AveragePoolingOperatorTester& qmin(uint8_t qmin) {
    this->qmin_ = qmin;
    return *this;
  }

  inline uint8_t qmin() const {
    return this->qmin_;
  }

  inline AveragePoolingOperatorTester& qmax(uint8_t qmax) {
    this->qmax_ = qmax;
    return *this;
  }

  inline uint8_t qmax() const {
    return this->qmax_;
  }

  inline AveragePoolingOperatorTester& iterations(size_t iterations) {
    this->iterations_ = iterations;
    return *this;
  }

  inline size_t iterations() const {
    return this->iterations_;
  }

  void TestQU8() const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    auto u8rng = std::bind(std::uniform_int_distribution<uint32_t>(0, std::numeric_limits<uint8_t>::max()), rng);

    std::vector<uint8_t> input((batch_size() * input_height() * input_width() - 1) * input_pixel_stride() + channels() + XNN_EXTRA_BYTES / sizeof(uint8_t));
    std::vector<uint8_t> output((batch_size() * output_height() * output_width() - 1) * output_pixel_stride() + channels());
    std::vector<float> output_ref(batch_size() * output_height() * output_width() * channels());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), std::ref(u8rng));
      std::fill(output.begin(), output.end(), 0xA5);

      // Compute reference results.
      const double scale = double(input_scale()) / (double(output_scale()) * double(pooling_height() * pooling_width()));
      for (size_t i = 0; i < batch_size(); i++) {
        for (size_t oy = 0; oy < output_height(); oy++) {
          for (size_t ox = 0; ox < output_width(); ox++) {
            for (size_t c = 0; c < channels(); c++) {
              double acc = 0.0;
              for (size_t py = 0; py < pooling_height(); py++) {
                const size_t iy = oy * stride_height() + py - padding_top();
                for (size_t px = 0; px < pooling_width(); px++) {
                  const size_t ix = ox * stride_width() + px - padding_left();
                  if (ix < input_width() && iy < input_height()) {
                    acc += double(int32_t(input[((i * input_height() + iy) * input_width() + ix) * input_pixel_stride() + c]) - int32_t(input_zero_point()));
                  }
                }
              }
              output_ref[((i * output_height() + oy) * output_width() + ox) * channels() + c] = float(acc * scale + double(output_zero_point()));
              output_ref[((i * output_height() + oy) * output_width() + ox) * channels() + c] =
                std::min<float>(output_ref[((i * output_height() + oy) * output_width() + ox) * channels() + c], float(qmax()));
              output_ref[((i * output_height() + oy) * output_width() + ox) * channels() + c] =
                std::max<float>(output_ref[((i * output_height() + oy) * output_width() + ox) * channels() + c], float(qmin()));
            }
          }
        }
      }

      // Create, setup, run, and destroy Average Pooling operator.
      ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */));
      xnn_operator_t average_pooling_op = nullptr;

      ASSERT_EQ(xnn_status_success,
        xnn_create_average_pooling2d_nhwc_qu8(
          padding_top(), padding_right(), padding_bottom(), padding_left(),
          pooling_height(), pooling_width(),
          stride_height(), stride_width(),
          channels(), input_pixel_stride(), output_pixel_stride(),
          input_zero_point(), input_scale(),
          output_zero_point(), output_scale(),
          qmin(), qmax(),
          0, &average_pooling_op));
      ASSERT_NE(nullptr, average_pooling_op);

      // Smart pointer to automatically delete average_pooling_op.
      std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_average_pooling_op(average_pooling_op, xnn_delete_operator);

      ASSERT_EQ(xnn_status_success,
        xnn_setup_average_pooling2d_nhwc_qu8(
          average_pooling_op,
          batch_size(), input_height(), input_width(),
          input.data(), output.data(),
          nullptr /* thread pool */));

      ASSERT_EQ(xnn_status_success,
        xnn_run_operator(average_pooling_op, nullptr /* thread pool */));

      // Verify results.
      for (size_t i = 0; i < batch_size(); i++) {
        for (size_t y = 0; y < output_height(); y++) {
          for (size_t x = 0; x < output_width(); x++) {
            for (size_t c = 0; c < channels(); c++) {
              ASSERT_LE(uint32_t(output[((i * output_height() + y) * output_width() + x) * output_pixel_stride() + c]), uint32_t(qmax()));
              ASSERT_GE(uint32_t(output[((i * output_height() + y) * output_width() + x) * output_pixel_stride() + c]), uint32_t(qmin()));
              ASSERT_NEAR(float(int32_t(output[((i * output_height() + y) * output_width() + x) * output_pixel_stride() + c])),
                output_ref[((i * output_height() + y) * output_width() + x) * channels() + c], 0.80f) <<
                "in batch index " << i << ", pixel (" << y << ", " << x << "), channel " << c;
            }
          }
        }
      }
    }
  }

  void TestF32() const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    auto f32rng = std::bind(std::uniform_real_distribution<float>(), rng);

    std::vector<float> input((batch_size() * input_height() * input_width() - 1) * input_pixel_stride() + channels() + XNN_EXTRA_BYTES / sizeof(float));
    std::vector<float> output((batch_size() * output_height() * output_width() - 1) * output_pixel_stride() + channels());
    std::vector<float> output_ref(batch_size() * output_height() * output_width() * channels());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), std::ref(f32rng));
      std::fill(output.begin(), output.end(), std::nanf(""));

      // Compute reference results, without clamping.
      for (size_t i = 0; i < batch_size(); i++) {
        for (size_t oy = 0; oy < output_height(); oy++) {
          for (size_t ox = 0; ox < output_width(); ox++) {
            for (size_t c = 0; c < channels(); c++) {
              float acc = 0.0f;
              int32_t n = 0;
              for (size_t py = 0; py < pooling_height(); py++) {
                const size_t iy = oy * stride_height() + py - padding_top();
                for (size_t px = 0; px < pooling_width(); px++) {
                  const size_t ix = ox * stride_width() + px - padding_left();
                  if (ix < input_width() && iy < input_height()) {
                    acc += input[((i * input_height() + iy) * input_width() + ix) * input_pixel_stride() + c];
                    n += 1;
                  }
                }
              }
              output_ref[((i * output_height() + oy) * output_width() + ox) * channels() + c] = acc / float(n);
            }
          }
        }
      }
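
      // The clamping parameters below reinterpret qmin/qmax on a 0-255 grid
      // spanning the accumulated range. For example, accumulated_min = 0.0,
      // accumulated_max = 1.0, qmin = 51, qmax = 204 yield output_min = 0.2
      // and output_max = 0.8: roughly qmin/255 of the range is clipped at the
      // bottom and (255 - qmax)/255 at the top. A zero range disables
      // clamping entirely via +/-infinity.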
      // Compute clamping parameters.
      const float accumulated_min = *std::min_element(output_ref.cbegin(), output_ref.cend());
      const float accumulated_max = *std::max_element(output_ref.cbegin(), output_ref.cend());
      const float accumulated_range = accumulated_max - accumulated_min;
      const float output_min = accumulated_range == 0.0f ?
        -std::numeric_limits<float>::infinity() :
        accumulated_min + accumulated_range / 255.0f * float(qmin());
      const float output_max = accumulated_range == 0.0f ?
        +std::numeric_limits<float>::infinity() :
        accumulated_max - accumulated_range / 255.0f * float(255 - qmax());

      // Clamp reference results.
      for (float& value : output_ref) {
        value = std::max(std::min(value, output_max), output_min);
      }

      // Create, setup, run, and destroy Average Pooling operator.
      ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */));
      xnn_operator_t average_pooling_op = nullptr;

      ASSERT_EQ(xnn_status_success,
        xnn_create_average_pooling2d_nhwc_f32(
          padding_top(), padding_right(), padding_bottom(), padding_left(),
          pooling_height(), pooling_width(),
          stride_height(), stride_width(),
          channels(), input_pixel_stride(), output_pixel_stride(),
          output_min, output_max,
          0, &average_pooling_op));
      ASSERT_NE(nullptr, average_pooling_op);

      // Smart pointer to automatically delete average_pooling_op.
      std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_average_pooling_op(average_pooling_op, xnn_delete_operator);

      ASSERT_EQ(xnn_status_success,
        xnn_setup_average_pooling2d_nhwc_f32(
          average_pooling_op,
          batch_size(), input_height(), input_width(),
          input.data(), output.data(),
          nullptr /* thread pool */));

      ASSERT_EQ(xnn_status_success,
        xnn_run_operator(average_pooling_op, nullptr /* thread pool */));

      // Verify results.
      for (size_t i = 0; i < batch_size(); i++) {
        for (size_t y = 0; y < output_height(); y++) {
          for (size_t x = 0; x < output_width(); x++) {
            for (size_t c = 0; c < channels(); c++) {
              ASSERT_LE(output[((i * output_height() + y) * output_width() + x) * output_pixel_stride() + c], output_max);
              ASSERT_GE(output[((i * output_height() + y) * output_width() + x) * output_pixel_stride() + c], output_min);
              ASSERT_NEAR(output[((i * output_height() + y) * output_width() + x) * output_pixel_stride() + c],
                output_ref[((i * output_height() + y) * output_width() + x) * channels() + c],
                std::abs(output_ref[((i * output_height() + y) * output_width() + x) * channels() + c]) * 1.0e-6f) <<
                "in batch index " << i << ", pixel (" << y << ", " << x << "), channel " << c;
            }
          }
        }
      }
    }
  }

  void TestSetupQU8() const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    auto u8rng = std::bind(std::uniform_int_distribution<uint32_t>(0, std::numeric_limits<uint8_t>::max()), rng);

    std::vector<uint8_t> input(XNN_EXTRA_BYTES / sizeof(uint8_t) + std::max(
      (batch_size() * input_height() * input_width() - 1) * input_pixel_stride() + channels(),
      (next_batch_size() * next_input_height() * next_input_width() - 1) * input_pixel_stride() + channels()));
    std::vector<uint8_t> output(std::max(
      (batch_size() * output_height() * output_width() - 1) * output_pixel_stride() + channels(),
      (next_batch_size() * next_output_height() * next_output_width() - 1) * output_pixel_stride() + channels()));
    std::vector<float> output_ref(batch_size() * output_height() * output_width() * channels());
    std::vector<float> next_output_ref(next_batch_size() * next_output_height() * next_output_width() * channels());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), std::ref(u8rng));
      std::fill(output.begin(), output.end(), 0xA5);

      // Compute reference results.
      const double scale = double(input_scale()) / (double(output_scale()) * double(pooling_height() * pooling_width()));
      for (size_t i = 0; i < batch_size(); i++) {
        for (size_t oy = 0; oy < output_height(); oy++) {
          for (size_t ox = 0; ox < output_width(); ox++) {
            for (size_t c = 0; c < channels(); c++) {
              double acc = 0.0;
              for (size_t py = 0; py < pooling_height(); py++) {
                const size_t iy = oy * stride_height() + py - padding_top();
                for (size_t px = 0; px < pooling_width(); px++) {
                  const size_t ix = ox * stride_width() + px - padding_left();
                  if (ix < input_width() && iy < input_height()) {
                    acc += double(int32_t(input[((i * input_height() + iy) * input_width() + ix) * input_pixel_stride() + c]) - int32_t(input_zero_point()));
                  }
                }
              }
              output_ref[((i * output_height() + oy) * output_width() + ox) * channels() + c] = float(acc * scale + double(output_zero_point()));
              output_ref[((i * output_height() + oy) * output_width() + ox) * channels() + c] =
                std::min<float>(output_ref[((i * output_height() + oy) * output_width() + ox) * channels() + c], float(qmax()));
              output_ref[((i * output_height() + oy) * output_width() + ox) * channels() + c] =
                std::max<float>(output_ref[((i * output_height() + oy) * output_width() + ox) * channels() + c], float(qmin()));
            }
          }
        }
      }

      // Create, setup, and run Average Pooling operator once.
      ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */));
      xnn_operator_t average_pooling_op = nullptr;

      ASSERT_EQ(xnn_status_success,
        xnn_create_average_pooling2d_nhwc_qu8(
          padding_top(), padding_right(), padding_bottom(), padding_left(),
          pooling_height(), pooling_width(),
          stride_height(), stride_width(),
          channels(), input_pixel_stride(), output_pixel_stride(),
          input_zero_point(), input_scale(),
          output_zero_point(), output_scale(),
          qmin(), qmax(),
          0, &average_pooling_op));
      ASSERT_NE(nullptr, average_pooling_op);

      ASSERT_EQ(xnn_status_success,
        xnn_setup_average_pooling2d_nhwc_qu8(
          average_pooling_op,
          batch_size(), input_height(), input_width(),
          input.data(), output.data(),
          nullptr /* thread pool */));

      ASSERT_EQ(xnn_status_success,
        xnn_run_operator(average_pooling_op, nullptr /* thread pool */));

      // Verify results of the first run.
      for (size_t i = 0; i < batch_size(); i++) {
        for (size_t y = 0; y < output_height(); y++) {
          for (size_t x = 0; x < output_width(); x++) {
            for (size_t c = 0; c < channels(); c++) {
              ASSERT_LE(uint32_t(output[((i * output_height() + y) * output_width() + x) * output_pixel_stride() + c]), uint32_t(qmax()));
              ASSERT_GE(uint32_t(output[((i * output_height() + y) * output_width() + x) * output_pixel_stride() + c]), uint32_t(qmin()));
              ASSERT_NEAR(float(int32_t(output[((i * output_height() + y) * output_width() + x) * output_pixel_stride() + c])),
                output_ref[((i * output_height() + y) * output_width() + x) * channels() + c], 0.80f) <<
                "in batch index " << i << ", pixel (" << y << ", " << x << "), channel " << c;
            }
          }
        }
      }

      // Re-generate data for the second run.
      std::generate(input.begin(), input.end(), std::ref(u8rng));
      std::fill(output.begin(), output.end(), 0xA5);

      // Compute reference results for the second run.
      for (size_t i = 0; i < next_batch_size(); i++) {
        for (size_t oy = 0; oy < next_output_height(); oy++) {
          for (size_t ox = 0; ox < next_output_width(); ox++) {
            for (size_t c = 0; c < channels(); c++) {
              double acc = 0.0;
              for (size_t py = 0; py < pooling_height(); py++) {
                const size_t iy = oy * stride_height() + py - padding_top();
                for (size_t px = 0; px < pooling_width(); px++) {
                  const size_t ix = ox * stride_width() + px - padding_left();
                  if (ix < next_input_width() && iy < next_input_height()) {
                    acc += double(int32_t(input[((i * next_input_height() + iy) * next_input_width() + ix) * input_pixel_stride() + c]) - int32_t(input_zero_point()));
                  }
                }
              }
              next_output_ref[((i * next_output_height() + oy) * next_output_width() + ox) * channels() + c] = float(acc * scale + double(output_zero_point()));
              next_output_ref[((i * next_output_height() + oy) * next_output_width() + ox) * channels() + c] =
                std::min<float>(next_output_ref[((i * next_output_height() + oy) * next_output_width() + ox) * channels() + c], float(qmax()));
              next_output_ref[((i * next_output_height() + oy) * next_output_width() + ox) * channels() + c] =
                std::max<float>(next_output_ref[((i * next_output_height() + oy) * next_output_width() + ox) * channels() + c], float(qmin()));
            }
          }
        }
      }

      // Setup and run Average Pooling operator the second time, and destroy the operator.
      ASSERT_EQ(xnn_status_success,
        xnn_setup_average_pooling2d_nhwc_qu8(
          average_pooling_op,
          next_batch_size(), next_input_height(), next_input_width(),
          input.data(), output.data(),
          nullptr /* thread pool */));

      ASSERT_EQ(xnn_status_success,
        xnn_run_operator(average_pooling_op, nullptr /* thread pool */));

      ASSERT_EQ(xnn_status_success,
        xnn_delete_operator(average_pooling_op));
      average_pooling_op = nullptr;

      // Verify results of the second run.
      for (size_t i = 0; i < next_batch_size(); i++) {
        for (size_t y = 0; y < next_output_height(); y++) {
          for (size_t x = 0; x < next_output_width(); x++) {
            for (size_t c = 0; c < channels(); c++) {
              ASSERT_LE(uint32_t(output[((i * next_output_height() + y) * next_output_width() + x) * output_pixel_stride() + c]), uint32_t(qmax()));
              ASSERT_GE(uint32_t(output[((i * next_output_height() + y) * next_output_width() + x) * output_pixel_stride() + c]), uint32_t(qmin()));
              ASSERT_NEAR(float(int32_t(output[((i * next_output_height() + y) * next_output_width() + x) * output_pixel_stride() + c])),
                next_output_ref[((i * next_output_height() + y) * next_output_width() + x) * channels() + c], 0.80f) <<
                "in batch index " << i << ", pixel (" << y << ", " << x << "), channel " << c;
            }
          }
        }
      }
    }
  }

  void TestSetupF32() const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    auto f32rng = std::bind(std::uniform_real_distribution<float>(), rng);

    std::vector<float> input(XNN_EXTRA_BYTES / sizeof(float) + std::max(
      (batch_size() * input_height() * input_width() - 1) * input_pixel_stride() + channels(),
      (next_batch_size() * next_input_height() * next_input_width() - 1) * input_pixel_stride() + channels()));
    std::vector<float> output(std::max(
      (batch_size() * output_height() * output_width() - 1) * output_pixel_stride() + channels(),
      (next_batch_size() * next_output_height() * next_output_width() - 1) * output_pixel_stride() + channels()));
    std::vector<float> output_ref(batch_size() * output_height() * output_width() * channels());
    std::vector<float> next_output_ref(next_batch_size() * next_output_height() * next_output_width() * channels());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), std::ref(f32rng));
      std::fill(output.begin(), output.end(), std::nanf(""));

      // Compute reference results, without clamping.
      for (size_t i = 0; i < batch_size(); i++) {
        for (size_t oy = 0; oy < output_height(); oy++) {
          for (size_t ox = 0; ox < output_width(); ox++) {
            for (size_t c = 0; c < channels(); c++) {
              float acc = 0.0f;
              size_t n = 0;
              for (size_t py = 0; py < pooling_height(); py++) {
                const size_t iy = oy * stride_height() + py - padding_top();
                for (size_t px = 0; px < pooling_width(); px++) {
                  const size_t ix = ox * stride_width() + px - padding_left();
                  if (ix < input_width() && iy < input_height()) {
                    acc += input[((i * input_height() + iy) * input_width() + ix) * input_pixel_stride() + c];
                    n += 1;
                  }
                }
              }
              output_ref[((i * output_height() + oy) * output_width() + ox) * channels() + c] = acc / float(n);
            }
          }
        }
      }

      // Compute clamping parameters.
      const float accumulated_min = *std::min_element(output_ref.cbegin(), output_ref.cend());
      const float accumulated_max = *std::max_element(output_ref.cbegin(), output_ref.cend());
      const float accumulated_range = accumulated_max - accumulated_min;
      const float output_min = accumulated_range == 0.0f ?
        -std::numeric_limits<float>::infinity() :
        accumulated_min + accumulated_range / 255.0f * float(qmin());
      const float output_max = accumulated_range == 0.0f ?
        +std::numeric_limits<float>::infinity() :
        accumulated_max - accumulated_range / 255.0f * float(255 - qmax());

      // Clamp reference results.
      for (float& value : output_ref) {
        value = std::max(std::min(value, output_max), output_min);
      }

      // Create, setup, and run Average Pooling operator once.
      ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */));
      xnn_operator_t average_pooling_op = nullptr;

      ASSERT_EQ(xnn_status_success,
        xnn_create_average_pooling2d_nhwc_f32(
          padding_top(), padding_right(), padding_bottom(), padding_left(),
          pooling_height(), pooling_width(),
          stride_height(), stride_width(),
          channels(), input_pixel_stride(), output_pixel_stride(),
          output_min, output_max,
          0, &average_pooling_op));
      ASSERT_NE(nullptr, average_pooling_op);

      ASSERT_EQ(xnn_status_success,
        xnn_setup_average_pooling2d_nhwc_f32(
          average_pooling_op,
          batch_size(), input_height(), input_width(),
          input.data(), output.data(),
          nullptr /* thread pool */));

      ASSERT_EQ(xnn_status_success,
        xnn_run_operator(average_pooling_op, nullptr /* thread pool */));

      // Verify results of the first run.
      for (size_t i = 0; i < batch_size(); i++) {
        for (size_t y = 0; y < output_height(); y++) {
          for (size_t x = 0; x < output_width(); x++) {
            for (size_t c = 0; c < channels(); c++) {
              ASSERT_LE(output[((i * output_height() + y) * output_width() + x) * output_pixel_stride() + c], output_max);
              ASSERT_GE(output[((i * output_height() + y) * output_width() + x) * output_pixel_stride() + c], output_min);
              ASSERT_NEAR(output[((i * output_height() + y) * output_width() + x) * output_pixel_stride() + c],
                output_ref[((i * output_height() + y) * output_width() + x) * channels() + c],
                std::abs(output_ref[((i * output_height() + y) * output_width() + x) * channels() + c]) * 1.0e-6f) <<
                "in batch index " << i << ", pixel (" << y << ", " << x << "), channel " << c;
            }
          }
        }
      }

      // Re-generate data for the second run.
      std::generate(input.begin(), input.end(), std::ref(f32rng));
      std::fill(output.begin(), output.end(), std::nanf(""));

      // Compute reference results for the second run.
      for (size_t i = 0; i < next_batch_size(); i++) {
        for (size_t oy = 0; oy < next_output_height(); oy++) {
          for (size_t ox = 0; ox < next_output_width(); ox++) {
            for (size_t c = 0; c < channels(); c++) {
              float acc = 0.0f;
              int32_t n = 0;
              for (size_t py = 0; py < pooling_height(); py++) {
                const size_t iy = oy * stride_height() + py - padding_top();
                for (size_t px = 0; px < pooling_width(); px++) {
                  const size_t ix = ox * stride_width() + px - padding_left();
                  if (ix < next_input_width() && iy < next_input_height()) {
                    acc += input[((i * next_input_height() + iy) * next_input_width() + ix) * input_pixel_stride() + c];
                    n += 1;
                  }
                }
              }
              next_output_ref[((i * next_output_height() + oy) * next_output_width() + ox) * channels() + c] =
                std::max(std::min(acc / float(n), output_max), output_min);
            }
          }
        }
      }

      // Setup and run Average Pooling operator the second time, and destroy the operator.
      ASSERT_EQ(xnn_status_success,
        xnn_setup_average_pooling2d_nhwc_f32(
          average_pooling_op,
          next_batch_size(), next_input_height(), next_input_width(),
          input.data(), output.data(),
          nullptr /* thread pool */));

      ASSERT_EQ(xnn_status_success,
        xnn_run_operator(average_pooling_op, nullptr /* thread pool */));

      ASSERT_EQ(xnn_status_success,
        xnn_delete_operator(average_pooling_op));
      average_pooling_op = nullptr;

      // Verify results of the second run.
      for (size_t i = 0; i < next_batch_size(); i++) {
        for (size_t y = 0; y < next_output_height(); y++) {
          for (size_t x = 0; x < next_output_width(); x++) {
            for (size_t c = 0; c < channels(); c++) {
              ASSERT_LE(output[((i * next_output_height() + y) * next_output_width() + x) * output_pixel_stride() + c], output_max);
              ASSERT_GE(output[((i * next_output_height() + y) * next_output_width() + x) * output_pixel_stride() + c], output_min);
              ASSERT_NEAR(output[((i * next_output_height() + y) * next_output_width() + x) * output_pixel_stride() + c],
                next_output_ref[((i * next_output_height() + y) * next_output_width() + x) * channels() + c],
                std::abs(next_output_ref[((i * next_output_height() + y) * next_output_width() + x) * channels() + c]) * 1.0e-6f) <<
                "in batch index " << i << ", pixel (" << y << ", " << x << "), channel " << c;
            }
          }
        }
      }
    }
  }

 private:
  uint32_t padding_top_{0};
  uint32_t padding_right_{0};
  uint32_t padding_bottom_{0};
  uint32_t padding_left_{0};
  bool padding_tf_same_{false};
  size_t input_height_{1};
  size_t input_width_{1};
  size_t channels_{1};
  size_t batch_size_{1};
  size_t input_pixel_stride_{0};
  size_t output_pixel_stride_{0};
  uint32_t pooling_height_{1};
  uint32_t pooling_width_{1};
  uint32_t stride_height_{1};
  uint32_t stride_width_{1};
  size_t next_input_height_{0};
  size_t next_input_width_{0};
  size_t next_batch_size_{0};
  float input_scale_{1.0f};
  float output_scale_{1.0f};
  uint8_t input_zero_point_{121};
  uint8_t output_zero_point_{133};
  uint8_t qmin_{0};
  uint8_t qmax_{255};
  size_t iterations_{1};
};
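
// A minimal usage sketch (a hypothetical test case, not part of this header):
// the tester is configured through its chainable setters, and each Test*()
// method generates random data, computes a clamped reference result, and
// checks the operator's output against it. Something along these lines:
//
//   TEST(AVERAGE_POOLING_OP_F32, small_pool_with_stride) {
//     AveragePoolingOperatorTester()
//         .batch_size(2)
//         .input_size(13, 14)
//         .pooling_size(3, 3)
//         .stride(2)
//         .channels(19)
//         .TestF32();
//   }
//
// TestSetupQU8()/TestSetupF32() additionally re-setup the same operator with
// the next_*() dimensions to exercise calling xnn_setup_* twice on one
// operator.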