/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include <algorithm>
#include <cmath>
#include <limits>
#include <random>

#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"

namespace tflite {
namespace testing {
namespace {

void GenerateUniformRandomVector(int size, float min, float max,
                                 std::minstd_rand* random_engine,
                                 float* result) {
  // Never use std::uniform_*_distribution in tests: its behavior is
  // implementation-defined. Likewise, don't use std::default_random_engine,
  // which is also implementation-defined. Implementation-defined behavior is
  // bad because any toolchain update or new platform may run into test
  // failures. std::minstd_rand is a standard instantiation of
  // std::linear_congruential_engine, the cheapest generator in the C++11
  // stdlib, and it's good enough here.
  for (int i = 0; i < size; i++) {
    // We don't care whether the `max` value may ever be produced exactly.
    // It may actually be, thanks to rounding, since std::minstd_rand::modulus
    // (2^31 - 1) is greater than the inverse float epsilon.
    float random_value_scaled_0_1 =
        (*random_engine)() *
        (1.0f / static_cast<float>(std::minstd_rand::modulus));
    result[i] = min + (max - min) * random_value_scaled_0_1;
  }
}

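// Float reference used as ground truth for all tests below:
// hard_swish(x) = x * relu6(x + 3) / 6.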
void EvalTestReferenceHardSwish(int size, float* input, float* result) {
  for (int i = 0; i < size; i++) {
    const float in = input[i];
    result[i] = in * std::min(6.0f, std::max(0.0f, in + 3)) * (1.0f / 6.0f);
  }
}

template <typename T>
void TestHardSwishQuantized(int size, const T* output_data,
                            T* input_data_quantized, float* dequantized_output,
                            float input_min, float input_max, float output_min,
                            float output_max, std::minstd_rand* random_engine,
                            float* float_input_values,
                            float* float_ref_output_values) {
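  // Shape arrays for the micro test helpers are length-prefixed: the first
  // element is the number of dimensions, followed by the dimension sizes, so
  // {2, 1, size} describes a 2-D tensor of shape {1, size}.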
  const int input_dims_data[] = {2, 1, size};
  const int output_dims_data[] = {2, 1, size};
  const float input_scale = ScaleFromMinMax<T>(input_min, input_max);
  const int input_zero_point = ZeroPointFromMinMax<T>(input_min, input_max);
  const float output_scale = ScaleFromMinMax<T>(output_min, output_max);
  const int output_zero_point = ZeroPointFromMinMax<T>(output_min, output_max);

  // The numerical error for any 8-bit quantized function is at least half a
  // quantization step: 0.5 * (max - min) / 256. To that we add one more
  // quantization step, (max - min) / 256, to allow for an off-by-one rounding
  // error, giving a total tolerance of 1.5 steps over the larger of the input
  // and output ranges.
  const float kTolerance =
      std::max(input_max - input_min, output_max - output_min) * (1.5f / 256.f);

  TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data);
  TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
  const int output_elements_count = ElementCount(*output_dims);

  TF_LITE_MICRO_EXPECT_EQ(output_elements_count, size);

  GenerateUniformRandomVector(size, input_min, input_max, random_engine,
                              float_input_values);
  EvalTestReferenceHardSwish(size, float_input_values, float_ref_output_values);
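  // The quantized kernel can only produce values in [output_min, output_max],
  // so clamp the float reference to that range before comparing.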
  for (int i = 0; i < size; i++) {
    float val = float_ref_output_values[i];
    float_ref_output_values[i] =
        std::min(output_max, std::max(output_min, val));
  }

  constexpr int inputs_size = 1;
  constexpr int outputs_size = 1;
  constexpr int tensors_size = inputs_size + outputs_size;
  TfLiteTensor tensors[tensors_size] = {
      CreateQuantizedTensor(float_input_values, input_data_quantized,
                            input_dims, input_scale, input_zero_point),
      CreateQuantizedTensor(output_data, output_dims, output_scale,
                            output_zero_point),
  };

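  // Tensor index lists are also length-prefixed: one input (tensors[0]) and
  // one output (tensors[1]).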
  int inputs_array_data[] = {1, 0};
  TfLiteIntArray* inputs_array = IntArrayFromInts(inputs_array_data);
  int outputs_array_data[] = {1, 1};
  TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data);

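  // KernelRunner drives the HARD_SWISH kernel in isolation: it prepares and
  // invokes the registered op against just these two tensors.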
  const TfLiteRegistration registration =
      tflite::ops::micro::Register_HARD_SWISH();
  micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
                             outputs_array, /*builtin_data=*/nullptr);

  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

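  // Dequantize the kernel's output back to float so it can be compared
  // against the clamped float reference within kTolerance.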
  Dequantize<T>(output_data, output_elements_count, output_scale,
                output_zero_point, dequantized_output);

  for (int i = 0; i < output_elements_count; ++i) {
    TF_LITE_MICRO_EXPECT_NEAR(float_ref_output_values[i], dequantized_output[i],
                              kTolerance);
  }
}

template <typename T>
void TestHardSwishQuantizedBias(const int size, const T* output_data,
                                T* input_data_quantized,
                                float* dequantized_output, float input_min,
                                float input_max, float output_min,
                                float output_max, float tolerated_bias,
                                float* float_input_values,
                                float* float_ref_output_values) {
  const float input_scale = ScaleFromMinMax<T>(input_min, input_max);
  const float output_scale = ScaleFromMinMax<T>(output_min, output_max);

  const int input_zero_point = ZeroPointFromMinMax<T>(input_min, input_max);
  const int output_zero_point = ZeroPointFromMinMax<T>(output_min, output_max);

  const float max_scale = std::max(output_scale, input_scale);

  // In this bias-focused test case, no need for randomly generated input
  // values.
  TF_LITE_MICRO_EXPECT_LE(input_min, -3.0f);
  TF_LITE_MICRO_EXPECT_GE(input_max, 3.0f);
  const int quantized_input_negative_three = std::round(
      std::numeric_limits<T>::min() + (-3.0f - input_min) / input_scale);
  const int quantized_input_positive_three = std::round(
      std::numeric_limits<T>::min() + (3.0f - input_min) / input_scale);

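  // Instead, sweep the exactly-representable quantized input values that fall
  // in [-3, 3], the interval where hard-swish is nonlinear and rounding bias
  // can accumulate.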
  for (int i = quantized_input_negative_three;
       i < size && i <= quantized_input_positive_three; i++) {
    float_input_values[i] =
        input_min + (i - std::numeric_limits<T>::min()) * input_scale;
  }

  EvalTestReferenceHardSwish(size, float_input_values, float_ref_output_values);
  for (int i = 0; i < size; i++) {
    float val = float_ref_output_values[i];
    float_ref_output_values[i] =
        std::min(output_max, std::max(output_min, val));
  }

  const int input_dims_data[] = {2, 1, size};
  const int output_dims_data[] = {2, 1, size};

  TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data);
  TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
  const int output_elements_count = ElementCount(*output_dims);

  TF_LITE_MICRO_EXPECT_EQ(output_elements_count, size);

  constexpr int inputs_size = 1;
  constexpr int outputs_size = 1;
  constexpr int tensors_size = inputs_size + outputs_size;
  TfLiteTensor tensors[tensors_size] = {
      CreateQuantizedTensor(float_input_values, input_data_quantized,
                            input_dims, input_scale, input_zero_point),
      CreateQuantizedTensor(output_data, output_dims, output_scale,
                            output_zero_point),
  };

  int inputs_array_data[] = {1, 0};
  TfLiteIntArray* inputs_array = IntArrayFromInts(inputs_array_data);
  int outputs_array_data[] = {1, 1};
  TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data);

  const TfLiteRegistration registration =
      tflite::ops::micro::Register_HARD_SWISH();
  micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
                             outputs_array, /*builtin_data=*/nullptr);

  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

  Dequantize<T>(output_data, output_elements_count, output_scale,
                output_zero_point, dequantized_output);

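  // Average the signed error over all elements and express it in units of the
  // larger quantization step; this measures systematic bias rather than
  // per-element accuracy.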
  float sum_diff = 0;
  for (int i = 0; i < size; i++) {
    sum_diff += dequantized_output[i] - float_ref_output_values[i];
  }
  const float bias = sum_diff / (size * max_scale);
  TF_LITE_MICRO_EXPECT_LE(std::abs(bias), tolerated_bias);
}

void TestHardSwishFloat(const int size, float* output_data,
                        std::minstd_rand* random_engine,
                        float* float_input_values,
                        float* float_ref_output_values) {
  const float kMin = -10.0f;
  const float kMax = 10.0f;
  GenerateUniformRandomVector(size, kMin, kMax, random_engine,
                              float_input_values);

  EvalTestReferenceHardSwish(size, float_input_values, float_ref_output_values);

  const int input_dims_data[] = {1, size};
  const int output_dims_data[] = {1, size};

  TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data);
  TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
  const int output_elements_count = ElementCount(*output_dims);

  TF_LITE_MICRO_EXPECT_EQ(output_elements_count, size);

  constexpr int inputs_size = 1;
  constexpr int outputs_size = 1;
  constexpr int tensors_size = inputs_size + outputs_size;
  TfLiteTensor tensors[tensors_size] = {
      CreateTensor(float_input_values, input_dims),
      CreateTensor(output_data, output_dims),
  };

  int inputs_array_data[] = {1, 0};
  TfLiteIntArray* inputs_array = IntArrayFromInts(inputs_array_data);
  int outputs_array_data[] = {1, 1};
  TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data);

  const TfLiteRegistration registration =
      tflite::ops::micro::Register_HARD_SWISH();
  micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
                             outputs_array, /*builtin_data=*/nullptr);

  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());

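  // The float kernel should match the float reference up to small rounding
  // error.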
  for (int i = 0; i < output_elements_count; ++i) {
    TF_LITE_MICRO_EXPECT_NEAR(float_ref_output_values[i], output_data[i],
                              1e-5f);
  }
}

}  // namespace
}  // namespace testing
}  // namespace tflite

TF_LITE_MICRO_TESTS_BEGIN

TF_LITE_MICRO_TEST(SimpleHardSwishTestFloat) {
  std::minstd_rand random_engine;
  constexpr int size = 100;
  float output_data[size] = {0.f};
  float input_values[size] = {0.f};
  float output_values[size] = {0.f};

  tflite::testing::TestHardSwishFloat(size, output_data, &random_engine,
                                      input_values, output_values);
}

TF_LITE_MICRO_TEST(SimpleHardSwishTestInt8) {
  std::minstd_rand random_engine;
  constexpr int pairs = 4, one_pair = 2;
  constexpr int size = 101;
  constexpr float minmax_pairs[pairs][one_pair] = {
      {0.f, 1.f}, {-2.f, 1.f}, {-5.f, 10.f}, {-40.f, 60.f}};
  int8_t output_data[size] = {0};
  int8_t input_data_quantized[size] = {0};
  float dequantized_output[size] = {0.f};
  float input_values[size] = {0.f};
  float output_values[size] = {0.f};

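  // Exercise every combination of input range and output range.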
  for (int x = 0; x < pairs; x++) {
    for (int y = 0; y < pairs; y++) {
      float input_min = minmax_pairs[x][0];
      float input_max = minmax_pairs[x][1];
      float output_min = minmax_pairs[y][0];
      float output_max = minmax_pairs[y][1];

      tflite::testing::TestHardSwishQuantized<int8_t>(
          size, output_data, input_data_quantized, dequantized_output,
          input_min, input_max, output_min, output_max, &random_engine,
          input_values, output_values);
    }
  }
}

TF_LITE_MICRO_TEST(SimpleHardSwishTestUint8) {
  std::minstd_rand random_engine;
  constexpr int size = 99;
  constexpr int pairs = 4, one_pair = 2;
  constexpr float minmax_pairs[pairs][one_pair] = {
      {0.f, 1.f}, {-2.f, 1.f}, {-5.f, 10.f}, {-40.f, 60.f}};
  uint8_t output_data[size] = {0};
  uint8_t input_data_quantized[size] = {0};
  float dequantized_output[size] = {0.f};
  float input_values[size] = {0.f};
  float output_values[size] = {0.f};

  for (int x = 0; x < pairs; x++) {
    for (int y = 0; y < pairs; y++) {
      float input_min = minmax_pairs[x][0];
      float input_max = minmax_pairs[x][1];
      float output_min = minmax_pairs[y][0];
      float output_max = minmax_pairs[y][1];

      tflite::testing::TestHardSwishQuantized<uint8_t>(
          size, output_data, input_data_quantized, dequantized_output,
          input_min, input_max, output_min, output_max, &random_engine,
          input_values, output_values);
    }
  }
}

// See the comment in the reference implementation of quantized HardSwish:
// a numerical issue that significantly affects ImageNet classification
// accuracy with MobileNet v3 is only observable at the scale of HardSwish
// unit tests if we specifically monitor bias. This test case is extracted
// from one of the HardSwish nodes in that MobileNet v3 model that exhibited
// the issue.
TF_LITE_MICRO_TEST(SimpleHardSwishTestQuantizedBias) {
  constexpr int size = 43;
  uint8_t output_data[size] = {0};
  uint8_t input_data_quantized[size] = {0};
  float dequantized_output[size] = {0.f};
  float input_values[size] = {0.f};
  float output_values[size] = {0.f};

  tflite::testing::TestHardSwishQuantizedBias<uint8_t>(
      size, output_data, input_data_quantized, dequantized_output, -11.654928f,
      25.036512f, -0.3905796f, 24.50887f, 0.035, input_values, output_values);
}

TF_LITE_MICRO_TESTS_END