/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"

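// Unit tests for the quantized tensor construction helpers declared in
// tensorflow/lite/micro/test_helpers.h.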
TF_LITE_MICRO_TESTS_BEGIN

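// CreateQuantizedBiasTensor quantizes the float bias values with a single
// scale equal to input_scale * weight_scale (0.5 * 0.5 = 0.25 here), so
// e.g. -10 maps to -10 / 0.25 = -40.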
TF_LITE_MICRO_TEST(CreateQuantizedBiasTensor) {
  float input_scale = 0.5;
  float weight_scale = 0.5;
  constexpr int tensor_size = 12;
  int dims_arr[] = {4, 2, 3, 2, 1};
  int32_t quantized[tensor_size];
  float pre_quantized[] = {-10, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 10};
  int32_t expected_quantized_values[] = {-40, -20, -16, -12, -8, -4,
                                         0,   4,   8,   12,  16, 40};
  TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(dims_arr);

  TfLiteTensor result = tflite::testing::CreateQuantizedBiasTensor(
      pre_quantized, quantized, dims, input_scale, weight_scale);

  TF_LITE_MICRO_EXPECT_EQ(result.bytes, tensor_size * sizeof(int32_t));
  TF_LITE_MICRO_EXPECT(result.dims == dims);
  TF_LITE_MICRO_EXPECT_EQ(result.params.scale, input_scale * weight_scale);
  for (int i = 0; i < tensor_size; i++) {
    TF_LITE_MICRO_EXPECT_EQ(expected_quantized_values[i], result.data.i32[i]);
  }
}

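// CreatePerChannelQuantizedBiasTensor computes one scale per output channel
// (input_scale * weight_scales[i]) and quantizes each channel of the bias
// with its own scale. The shape {4, 3, 1, 1} gives four channels of three
// values each, quantized along dimension 0.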
TF_LITE_MICRO_TEST(CreatePerChannelQuantizedBiasTensor) {
  float input_scale = 0.5;
  float weight_scales[] = {0.5, 1, 2, 4};
  constexpr int tensor_size = 12;
  const int channels = 4;
  int dims_arr[] = {4, 4, 3, 1, 1};
  int32_t quantized[tensor_size];
  float scales[channels + 1];
  int zero_points[] = {4, 0, 0, 0, 0};
  float pre_quantized[] = {-10, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 10};
  int32_t expected_quantized_values[] = {-40, -20, -16, -6, -4, -2,
                                         0,   1,   2,   2,  2,  5};
  TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(dims_arr);

  TfLiteAffineQuantization quant;
  TfLiteTensor result = tflite::testing::CreatePerChannelQuantizedBiasTensor(
      pre_quantized, quantized, dims, input_scale, weight_scales, scales,
      zero_points, &quant, 0);

  // Values in the scales array start at index 1, since index 0 stores the
  // length of the array (i.e. the number of channels).
  for (int i = 0; i < channels; i++) {
    TF_LITE_MICRO_EXPECT_EQ(scales[i + 1], input_scale * weight_scales[i]);
  }

  TF_LITE_MICRO_EXPECT_EQ(result.bytes, tensor_size * sizeof(int32_t));
  TF_LITE_MICRO_EXPECT(result.dims == dims);
  for (int i = 0; i < tensor_size; i++) {
    TF_LITE_MICRO_EXPECT_EQ(expected_quantized_values[i], result.data.i32[i]);
  }
}

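// CreateSymmetricPerChannelQuantizedTensor derives each channel's scale from
// that channel's maximum absolute value (max_abs / 127) and quantizes the
// channel symmetrically: channel 0 here has max abs 127 -> scale 1.0, and
// channel 1 has max abs 63.5 -> scale 0.5.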
TF_LITE_MICRO_TEST(CreateSymmetricPerChannelQuantizedTensor) {
  const int tensor_size = 12;
  constexpr int channels = 2;
  const int dims_arr[] = {4, channels, 3, 2, 1};
  int8_t quantized[12];
  const float pre_quantized[] = {-127, -55, -4, -3, -2, -1,
                                 0,    1,   2,  3,  4,  63.5};
  const int8_t expected_quantized_values[] = {-127, -55, -4, -3, -2, -1,
                                              0,    2,   4,  6,  8,  127};
  float expected_scales[] = {1.0, 0.5};
  TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(dims_arr);

  int zero_points[channels + 1];
  float scales[channels + 1];
  TfLiteAffineQuantization quant;
  TfLiteTensor result =
      tflite::testing::CreateSymmetricPerChannelQuantizedTensor(
          pre_quantized, quantized, dims, scales, zero_points, &quant, 0);

  TF_LITE_MICRO_EXPECT_EQ(result.bytes, tensor_size * sizeof(int8_t));
  TF_LITE_MICRO_EXPECT(result.dims == dims);
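  // The per-channel scales are exposed through the tensor's affine
  // quantization parameters.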
  TfLiteFloatArray* result_scales =
      static_cast<TfLiteAffineQuantization*>(result.quantization.params)->scale;
  for (int i = 0; i < channels; i++) {
    TF_LITE_MICRO_EXPECT_EQ(result_scales->data[i], expected_scales[i]);
  }
  for (int i = 0; i < tensor_size; i++) {
    TF_LITE_MICRO_EXPECT_EQ(expected_quantized_values[i], result.data.int8[i]);
  }
}

TF_LITE_MICRO_TESTS_END