/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */
8
#include <gtest/gtest.h>

#include <limits>
#include <tuple>
#include <vector>

#include <executorch/backends/xnnpack/runtime/utils/utils.h>
#include <executorch/extension/aten_util/aten_bridge.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
#include <executorch/runtime/platform/runtime.h>
17
18 using namespace ::testing;
19
20 using executorch::aten::ScalarType;
21 using executorch::aten::Tensor;
22 using executorch::runtime::Error;
23 using executorch::runtime::testing::TensorFactory;
24 namespace utils = executorch::backends::xnnpack::utils;
25
TEST(TestUtils, choose_quantization_params) {
  // A symmetric float range of [-1280, 1270] mapped onto the quint8 domain
  // [0, 255] should yield a scale of 10 with the zero point at 128.
  utils::QuantizationParams qparams;
  const float range_min = -128.0 * 10.0;
  const float range_max = +127.0 * 10.0;
  const Error status = utils::ChooseQuantizationParams(
      range_min, range_max, 0, 255, qparams, false, false, false);
  ASSERT_EQ(status, Error::Ok);
  ASSERT_EQ(qparams.zero_point, 128);
  ASSERT_EQ(qparams.scale, 10.0);
}
37
TEST(TestUtils, choose_quantization_params_fails) {
  executorch::runtime::runtime_init();
  // An inverted range (max passed where min belongs) must be rejected
  // with Error::Internal rather than silently producing parameters.
  utils::QuantizationParams qparams;
  const float range_min = -128.0 * 10.0;
  const float range_max = +127.0 * 10.0;
  const Error status = utils::ChooseQuantizationParams(
      range_max, range_min, 0, 255, qparams, false, false, false);
  ASSERT_EQ(status, Error::Internal);
}
48
TEST(TestUtils, quantize_per_tensor) {
  // Quantize a constant float tensor and compare the result against ATen's
  // quantize_per_tensor as the reference implementation.
  TensorFactory<ScalarType::Float> tf;
  TensorFactory<ScalarType::QUInt8> tfo;
  const Tensor input = tf.full({3, 5}, 4);
  const double scale = 0.5;
  const int zero_point = 127;
  Tensor output = tfo.zeros({3, 5});

  // Expected quantized value per element: 4 / 0.5 + 127.
  auto at_input = at::full({3, 5}, 4.f);
  auto at_quantized = at::quantize_per_tensor(
      at_input, scale, zero_point, at::ScalarType::QUInt8);
  at_quantized = at_quantized.contiguous();
  Tensor expected = tfo.zeros_like(output);
  executorch::extension::alias_etensor_to_attensor(at_quantized, expected);

  const Error status =
      utils::QuantizePerTensor(input, output, scale, zero_point);
  ASSERT_EQ(status, Error::Ok);
  EXPECT_TENSOR_EQ(output, expected);
}
67
// NOTE(review): "requantizeation" in the test name looks like a typo for
// "requantization"; kept as-is since gtest filters may reference it.
TEST(TestUtils, generate_requantizeation_scale) {
  // Each requantization scale is weight_scale * input_scale / output_scale.
  TensorFactory<ScalarType::Float> tf;
  const Tensor weight_scales = tf.full({3, 5}, 4.0);
  const float input_scale = 2.0;
  const float output_scale = 3.0;
  std::vector<float> requant_scales(15, 0);
  const Error status = utils::GenerateRequantizationScale(
      weight_scales, input_scale, output_scale, requant_scales);
  ASSERT_EQ(status, Error::Ok);
  for (const float scale : requant_scales) {
    EXPECT_FLOAT_EQ(scale, 4.0 * 2.0 / 3.0);
  }
}
81
TEST(TestUtils, get_min_max) {
  // Verifies utils::GetMinMax returns the (min, max) element pair of a float
  // tensor for constant, extreme-valued, and mixed-sign inputs.
  // (Requires <limits> for std::numeric_limits and <tuple> for std::tie —
  // previously only available transitively.)
  TensorFactory<ScalarType::Float> tf;
  float min, max;

  // Constant tensor: min and max are both the fill value.
  float val = 4.12345;
  const Tensor ft = tf.full({3, 5}, val);
  std::tie(min, max) = utils::GetMinMax(ft);
  EXPECT_FLOAT_EQ(min, val);
  EXPECT_FLOAT_EQ(max, val);

  // numeric_limits<float>::min() is the smallest *positive* normal value,
  // so it is still the minimum of this two-element tensor.
  const Tensor ft_min = tf.make(
      {2, 1},
      {std::numeric_limits<float>::min(), std::numeric_limits<float>::max()});
  std::tie(min, max) = utils::GetMinMax(ft_min);
  EXPECT_FLOAT_EQ(min, std::numeric_limits<float>::min());
  EXPECT_FLOAT_EQ(max, std::numeric_limits<float>::max());

  // lowest() is the most negative finite float; exercises the full range.
  const Tensor ft_lowest = tf.make(
      {2, 1},
      {std::numeric_limits<float>::lowest(),
       std::numeric_limits<float>::max()});
  std::tie(min, max) = utils::GetMinMax(ft_lowest);
  EXPECT_FLOAT_EQ(min, std::numeric_limits<float>::lowest());
  EXPECT_FLOAT_EQ(max, std::numeric_limits<float>::max());

  // Mixed-sign values in arbitrary order.
  const Tensor ft_random = tf.make({5, 1}, {-2.2, -1.1, 0, 1.1, 2.2});
  std::tie(min, max) = utils::GetMinMax(ft_random);
  EXPECT_FLOAT_EQ(min, -2.2);
  EXPECT_FLOAT_EQ(max, 2.2);
}
112