/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <stdint.h>

#include <limits>
#include <vector>

#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"

namespace tflite {
namespace {

using ::testing::ElementsAreArray;

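// Base model for the SUB operator: wires up two inputs, one output, and the
// fused activation supplied through SubOptions.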
class BaseSubOpModel : public SingleOpModel {
 public:
  BaseSubOpModel(const TensorData& input1, const TensorData& input2,
                 const TensorData& output,
                 ActivationFunctionType activation_type) {
    input1_ = AddInput(input1);
    input2_ = AddInput(input2);
    output_ = AddOutput(output);
    SetBuiltinOp(BuiltinOperator_SUB, BuiltinOptions_SubOptions,
                 CreateSubOptions(builder_, activation_type).Union());
    BuildInterpreter({GetShape(input1_), GetShape(input2_)});
  }

  int input1() { return input1_; }
  int input2() { return input2_; }

  std::vector<int> GetOutputShape() { return GetTensorShape(output_); }

 protected:
  int input1_;
  int input2_;
  int output_;
};

class FloatSubOpModel : public BaseSubOpModel {
 public:
  using BaseSubOpModel::BaseSubOpModel;

  std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
};

class IntegerSubOpModel : public BaseSubOpModel {
 public:
  using BaseSubOpModel::BaseSubOpModel;

  std::vector<int32_t> GetOutput() { return ExtractVector<int32_t>(output_); }
};

class Int64SubOpModel : public BaseSubOpModel {
 public:
  using BaseSubOpModel::BaseSubOpModel;

  std::vector<int64_t> GetOutput() { return ExtractVector<int64_t>(output_); }
};

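// Quantized variant. For INT16 tensors the quantization parameters are
// rewritten to a symmetric range with a zero point of zero (see
// SymmetricInt16Scaling below).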
class QuantizedSubOpModel : public BaseSubOpModel {
 public:
  QuantizedSubOpModel(TensorData input1, TensorData input2, TensorData output,
                      ActivationFunctionType activation_type)
      : BaseSubOpModel(SymmetricInt16Scaling(std::move(input1)),
                       SymmetricInt16Scaling(std::move(input2)),
                       SymmetricInt16Scaling(std::move(output)),
                       activation_type) {}

  template <typename integer_dtype>
  std::vector<float> GetDequantizedOutput() {
    return Dequantize<integer_dtype>(ExtractVector<integer_dtype>(output_),
                                     GetScale(output_), GetZeroPoint(output_));
  }

 private:
  TensorData SymmetricInt16Scaling(TensorData tensor) {
    // A symmetric range and a null zero-point are required for INT16 tensors.
    // As SingleOpModel::QuantizationParams calculates the scale on an
    // asymmetric base [int_type::min, int_type::max], manually calculate the
    // scale on a symmetric range [int_type::min+1, int_type::max] to ensure a
    // null zero-point.
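    // For example, min = -1.0f and max = 1.0f yield
    // scale = 1.0f / 32767 (about 3.05e-5) and zero_point = 0.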
    if (tensor.type == TensorType_INT16) {
      CHECK_EQ(std::abs(tensor.min), tensor.max);
      tensor.scale = tensor.max / std::numeric_limits<int16_t>::max();
      tensor.zero_point = 0;
      tensor.min = 0;
      tensor.max = 0;
    }

    return tensor;
  }
};

// For quantized Sub, the error shouldn't exceed one quantization step; the
// tolerance below allows two steps of slack.
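// For example, uint8_t over [-1, 1] has a step of 2 / 255 (about 0.0078),
// giving a tolerance of roughly 0.0157.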
template <typename T>
float GetTolerance(float min, float max) {
  float kQuantizedStep = (max - min) / (std::numeric_limits<T>::max() -
                                        std::numeric_limits<T>::min());
  return 2.0 * kQuantizedStep;
}

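// The next two tests exercise zero-sized inputs; they bail out early when
// NNAPI use is forced, as NNAPI is not expected to handle zero-sized tensors.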
TEST(FloatSubOpModel, FirstInputZero) {
  if (SingleOpModel::GetForceUseNnapi()) {
    return;
  }
  FloatSubOpModel m({TensorType_FLOAT32, {0}}, {TensorType_FLOAT32, {}},
                    {TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
  m.PopulateTensor<float>(m.input2(), {0.1});
  m.Invoke();
  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray<int>({0}));
}

TEST(FloatSubOpModel, SecondInputZero) {
  if (SingleOpModel::GetForceUseNnapi()) {
    return;
  }
  FloatSubOpModel m({TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {0}},
                    {TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
  m.PopulateTensor<float>(m.input1(), {0.1});
  m.Invoke();
  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray<int>({0}));
}

TEST(FloatSubOpModel, NoActivation) {
  FloatSubOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},
                    {TensorType_FLOAT32, {1, 2, 2, 1}},
                    {TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
  m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 1.7, 0.5});
  m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.8});
  m.Invoke();
  EXPECT_THAT(m.GetOutput(),
              ElementsAreArray(ArrayFloatNear({-2.1, 0.0, 1.4, -0.3})));
}

TEST(FloatSubOpModel, ActivationRELU_N1_TO_1) {
  FloatSubOpModel m(
      {TensorType_FLOAT32, {1, 2, 2, 1}}, {TensorType_FLOAT32, {1, 2, 2, 1}},
      {TensorType_FLOAT32, {}}, ActivationFunctionType_RELU_N1_TO_1);
  m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 1.7, 0.5});
  m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.8});
  m.Invoke();
  EXPECT_THAT(m.GetOutput(),
              ElementsAreArray(ArrayFloatNear({-1.0, 0.0, 1.0, -0.3})));
}

TEST(FloatSubOpModel, VariousInputShapes) {
  std::vector<std::vector<int>> test_shapes = {
      {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
  for (int i = 0; i < test_shapes.size(); ++i) {
    FloatSubOpModel m({TensorType_FLOAT32, test_shapes[i]},
                      {TensorType_FLOAT32, test_shapes[i]},
                      {TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
    m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 1.7, 0.5, -1.1, 2.0});
    m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.8, -1.1, 0.1});
    m.Invoke();
    EXPECT_THAT(
        m.GetOutput(),
        ElementsAreArray(ArrayFloatNear({-2.1, 0.0, 1.4, -0.3, 0.0, 1.9})))
        << "With shape number " << i;
  }
}

TEST(FloatSubOpModel, WithBroadcast) {
  std::vector<std::vector<int>> test_shapes = {
      {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
  for (int i = 0; i < test_shapes.size(); ++i) {
    FloatSubOpModel m({TensorType_FLOAT32, test_shapes[i]},
                      {TensorType_FLOAT32, {}},  // always a scalar
                      {TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
    m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 1.7, 0.5, -1.1, 2.0});
    m.PopulateTensor<float>(m.input2(), {0.5});
    m.Invoke();
    EXPECT_THAT(
        m.GetOutput(),
        ElementsAreArray(ArrayFloatNear({-2.5, -0.3, 1.2, 0.0, -1.6, 1.5})))
        << "With shape number " << i;
  }
}

TEST(FloatSubOpModel, WithBroadcast5D) {
  std::vector<std::vector<int>> test_shapes = {{1, 3, 1, 2, 1}};
  for (int i = 0; i < test_shapes.size(); ++i) {
    FloatSubOpModel m({TensorType_FLOAT32, test_shapes[i]},
                      {TensorType_FLOAT32, {}},  // always a scalar
                      {TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
    m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 1.7, 0.5, -1.1, 2.0});
    m.PopulateTensor<float>(m.input2(), {0.5});
    m.Invoke();
    EXPECT_THAT(
        m.GetOutput(),
        ElementsAreArray(ArrayFloatNear({-2.5, -0.3, 1.2, 0.0, -1.6, 1.5})))
        << "With shape number " << i;
  }
}

TEST(IntegerSubOpModel, NoActivation) {
  IntegerSubOpModel m({TensorType_INT32, {1, 2, 2, 1}},
                      {TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {}},
                      ActivationFunctionType_NONE);
  m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8});
  m.PopulateTensor<int32_t>(m.input2(), {1, 2, 3, 5});
  m.Invoke();
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({-21, 0, 4, 3}));
}

TEST(IntegerSubOpModel, ActivationRELU_N1_TO_1) {
  IntegerSubOpModel m({TensorType_INT32, {1, 2, 2, 1}},
                      {TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {}},
                      ActivationFunctionType_RELU_N1_TO_1);
  m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8});
  m.PopulateTensor<int32_t>(m.input2(), {1, 2, 3, 5});
  m.Invoke();
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1, 0, 1, 1}));
}

TEST(IntegerSubOpModel, VariousInputShapes) {
  std::vector<std::vector<int>> test_shapes = {
      {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
  for (int i = 0; i < test_shapes.size(); ++i) {
    IntegerSubOpModel m({TensorType_INT32, test_shapes[i]},
                        {TensorType_INT32, test_shapes[i]},
                        {TensorType_INT32, {}}, ActivationFunctionType_NONE);
    m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8, 11, 20});
    m.PopulateTensor<int32_t>(m.input2(), {1, 2, 3, 5, 11, 1});
    m.Invoke();
    EXPECT_THAT(m.GetOutput(), ElementsAreArray({-21, 0, 4, 3, 0, 19}))
        << "With shape number " << i;
  }
}

TEST(IntegerSubOpModel, WithBroadcast) {
  std::vector<std::vector<int>> test_shapes = {
      {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}, {1, 3, 1, 2, 1}};
  for (int i = 0; i < test_shapes.size(); ++i) {
    IntegerSubOpModel m({TensorType_INT32, test_shapes[i]},
                        {TensorType_INT32, {}},  // always a scalar
                        {TensorType_INT32, {}}, ActivationFunctionType_NONE);
    m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8, 11, 20});
    m.PopulateTensor<int32_t>(m.input2(), {1});
    m.Invoke();
    EXPECT_THAT(m.GetOutput(),
                ElementsAreArray(ArrayFloatNear({-21, 1, 6, 7, 10, 19})))
        << "With shape number " << i;
  }
}

TEST(Int64SubOpModel, NoActivation) {
  Int64SubOpModel m({TensorType_INT64, {1, 2, 2, 1}},
                    {TensorType_INT64, {1, 2, 2, 1}}, {TensorType_INT64, {}},
                    ActivationFunctionType_NONE);
  m.PopulateTensor<int64_t>(m.input1(), {-20, 2, 7, 8});
  m.PopulateTensor<int64_t>(m.input2(), {1, 2, 3, 5});
  m.Invoke();
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({-21, 0, 4, 3}));
}

TEST(Int64SubOpModel, ActivationRELU_N1_TO_1) {
  Int64SubOpModel m({TensorType_INT64, {1, 2, 2, 1}},
                    {TensorType_INT64, {1, 2, 2, 1}}, {TensorType_INT64, {}},
                    ActivationFunctionType_RELU_N1_TO_1);
  m.PopulateTensor<int64_t>(m.input1(), {-20, 2, 7, 8});
  m.PopulateTensor<int64_t>(m.input2(), {1, 2, 3, 5});
  m.Invoke();
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1, 0, 1, 1}));
}

TEST(Int64SubOpModel, VariousInputShapes) {
  std::vector<std::vector<int>> test_shapes = {
      {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
  for (int i = 0; i < test_shapes.size(); ++i) {
    Int64SubOpModel m({TensorType_INT64, test_shapes[i]},
                      {TensorType_INT64, test_shapes[i]},
                      {TensorType_INT64, {}}, ActivationFunctionType_NONE);
    m.PopulateTensor<int64_t>(m.input1(), {-20, 2, 7, 8, 11, 20});
    m.PopulateTensor<int64_t>(m.input2(), {1, 2, 3, 5, 11, 1});
    m.Invoke();
    EXPECT_THAT(m.GetOutput(), ElementsAreArray({-21, 0, 4, 3, 0, 19}))
        << "With shape number " << i;
  }
}

TEST(Int64SubOpModel, WithBroadcast) {
  std::vector<std::vector<int>> test_shapes = {
      {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}, {1, 3, 1, 2, 1}};
  for (int i = 0; i < test_shapes.size(); ++i) {
    Int64SubOpModel m({TensorType_INT64, test_shapes[i]},
                      {TensorType_INT64, {}},  // always a scalar
                      {TensorType_INT64, {}}, ActivationFunctionType_NONE);
    m.PopulateTensor<int64_t>(m.input1(), {-20, 2, 7, 8, 11, 20});
    m.PopulateTensor<int64_t>(m.input2(), {1});
    m.Invoke();
    EXPECT_THAT(m.GetOutput(),
                ElementsAreArray(ArrayFloatNear({-21, 1, 6, 7, 10, 19})))
        << "With shape number " << i;
  }
}

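// Runs quantized SUB over several input pairs and checks the dequantized
// output against float references within the quantization tolerance.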
template <TensorType tensor_type, typename integer_dtype>
void QuantizedTestsNoActivation() {
  float kQuantizedTolerance = GetTolerance<integer_dtype>(-1.0, 1.0);
  std::vector<std::vector<float>> inputs1 = {
      {0.1, 0.2, 0.3, 0.4}, {-0.2, 0.2, 0.4, 0.7}, {-0.01, 0.2, 0.7, 0.3}};
  std::vector<std::vector<float>> inputs2 = {
      {0.6, 0.4, 0.3, 0.1}, {0.6, 0.4, 0.5, -0.2}, {0.6, 0.4, -0.18, 0.5}};
  std::vector<std::vector<float>> results = {{-0.5, -0.2, 0.0, 0.3},
                                             {-0.8, -0.2, -0.1, 0.9},
                                             {-0.61, -0.2, 0.88, -0.2}};
  for (int i = 0; i < inputs1.size(); ++i) {
    QuantizedSubOpModel m({tensor_type, {1, 2, 2, 1}, -1.0, 1.0},
                          {tensor_type, {1, 2, 2, 1}, -1.0, 1.0},
                          {tensor_type, {}, -1.0, 1.0},
                          ActivationFunctionType_NONE);
    m.QuantizeAndPopulate<integer_dtype>(m.input1(), inputs1[i]);
    m.QuantizeAndPopulate<integer_dtype>(m.input2(), inputs2[i]);
    m.Invoke();
    EXPECT_THAT(
        m.GetDequantizedOutput<integer_dtype>(),
        ElementsAreArray(ArrayFloatNear(results[i], kQuantizedTolerance)))
        << "With test number " << i;
  }
}

TEST(QuantizedSubOpModel, QuantizedTestsNoActivationUInt8) {
  QuantizedTestsNoActivation<TensorType_UINT8, uint8_t>();
}

TEST(QuantizedSubOpModel, QuantizedTestsNoActivationInt8) {
  QuantizedTestsNoActivation<TensorType_INT8, int8_t>();
}

TEST(QuantizedSubOpModel, QuantizedTestsNoActivationGenericInt16) {
  QuantizedTestsNoActivation<TensorType_INT16, int16_t>();
}

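// Same as above, but with a fused RELU_N1_TO_1 activation that clamps the
// subtraction result to [-1, 1].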
template <TensorType tensor_type, typename integer_dtype>
void QuantizedTestsActivationRELU_N1_TO_1() {
  float kQuantizedTolerance = GetTolerance<integer_dtype>(-1.0, 1.0);
  std::vector<std::vector<float>> inputs1 = {{-0.8, 0.2, 0.9, 0.7},
                                             {-0.8, 0.2, 0.7, 0.5}};
  std::vector<std::vector<float>> inputs2 = {{0.6, 0.4, 0.9, -0.8},
                                             {0.6, 0.4, -0.8, 0.3}};
  std::vector<std::vector<float>> results = {{-1.0, -0.2, 0.0, 1.0},
                                             {-1.0, -0.2, 1.0, 0.2}};
  for (int i = 0; i < inputs1.size(); ++i) {
    QuantizedSubOpModel m({tensor_type, {1, 2, 2, 1}, -1.0, 1.0},
                          {tensor_type, {1, 2, 2, 1}, -1.0, 1.0},
                          {tensor_type, {}, -1.0, 1.0},
                          ActivationFunctionType_RELU_N1_TO_1);
    m.QuantizeAndPopulate<integer_dtype>(m.input1(), inputs1[i]);
    m.QuantizeAndPopulate<integer_dtype>(m.input2(), inputs2[i]);
    m.Invoke();
    EXPECT_THAT(
        m.GetDequantizedOutput<integer_dtype>(),
        ElementsAreArray(ArrayFloatNear(results[i], kQuantizedTolerance)))
        << "With test number " << i;
  }
}

TEST(QuantizedSubOpModel, QuantizedTestsActivationRELUN1TO1UInt8) {
  QuantizedTestsActivationRELU_N1_TO_1<TensorType_UINT8, uint8_t>();
}

TEST(QuantizedSubOpModel, QuantizedTestsActivationRELUN1TO1Int8) {
  QuantizedTestsActivationRELU_N1_TO_1<TensorType_INT8, int8_t>();
}

TEST(QuantizedSubOpModel, QuantizedTestsActivationRELUN1TO1Int16) {
  QuantizedTestsActivationRELU_N1_TO_1<TensorType_INT16, int16_t>();
}

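// Exercises the quantized kernel across 1-D through 4-D input shapes.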
template <TensorType tensor_type, typename integer_dtype>
void QuantizedVariousInputShapes() {
  float kQuantizedTolerance = GetTolerance<integer_dtype>(-3.0, 3.0);
  std::vector<std::vector<int>> test_shapes = {
      {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
  for (int i = 0; i < test_shapes.size(); ++i) {
    QuantizedSubOpModel m({tensor_type, test_shapes[i], -3.0, 3.0},
                          {tensor_type, test_shapes[i], -3.0, 3.0},
                          {tensor_type, {}, -3.0, 3.0},
                          ActivationFunctionType_NONE);
    m.QuantizeAndPopulate<integer_dtype>(m.input1(),
                                         {-2.0, 0.2, 0.7, 0.8, 1.1, 2.0});
    m.QuantizeAndPopulate<integer_dtype>(m.input2(),
                                         {0.1, 0.3, 0.3, 0.5, 1.1, 0.1});
    m.Invoke();
    EXPECT_THAT(m.GetDequantizedOutput<integer_dtype>(),
                ElementsAreArray(ArrayFloatNear(
                    {-2.1, -0.1, 0.4, 0.3, 0.0, 1.9}, kQuantizedTolerance)))
        << "With shape number " << i;
  }
}

TEST(QuantizedSubOpModel, QuantizedVariousInputShapesUInt8) {
  QuantizedVariousInputShapes<TensorType_UINT8, uint8_t>();
}

TEST(QuantizedSubOpModel, QuantizedVariousInputShapesInt8) {
  QuantizedVariousInputShapes<TensorType_INT8, int8_t>();
}

TEST(QuantizedSubOpModel, QuantizedVariousInputShapesInt16) {
  QuantizedVariousInputShapes<TensorType_INT16, int16_t>();
}

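// Broadcasts a scalar second operand, quantized with its own [-1, 1] range,
// across several input shapes.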
template <TensorType tensor_type, typename integer_dtype>
void QuantizedWithBroadcast() {
  float kQuantizedTolerance = GetTolerance<integer_dtype>(-3.0, 3.0);
  std::vector<std::vector<int>> test_shapes = {
      {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
  for (int i = 0; i < test_shapes.size(); ++i) {
    QuantizedSubOpModel m(
        {tensor_type, test_shapes[i], -3.0, 3.0}, {tensor_type, {}, -1.0, 1.0},
        {tensor_type, {}, -3.0, 3.0}, ActivationFunctionType_NONE);
    m.QuantizeAndPopulate<integer_dtype>(m.input1(),
                                         {-2.0, 0.2, 0.7, 0.8, 1.1, 2.0});
    m.QuantizeAndPopulate<integer_dtype>(m.input2(), {0.7});
    m.Invoke();
    EXPECT_THAT(m.GetDequantizedOutput<integer_dtype>(),
                ElementsAreArray(ArrayFloatNear(
                    {-2.7, -0.5, 0.0, 0.1, 0.4, 1.3}, kQuantizedTolerance)))
        << "With shape number " << i;
  }
}

TEST(QuantizedSubOpModel, QuantizedWithBroadcastUInt8) {
  QuantizedWithBroadcast<TensorType_UINT8, uint8_t>();
}

TEST(QuantizedSubOpModel, QuantizedWithBroadcastInt8) {
  QuantizedWithBroadcast<TensorType_INT8, int8_t>();
}

TEST(QuantizedSubOpModel, QuantizedWithBroadcastInt16) {
  QuantizedWithBroadcast<TensorType_INT16, int16_t>();
}

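// The remaining INT16 tests mix quantization ranges: [-2, 2] for the first
// input and the output, [-1, 1] for the second input.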
TEST(QuantizedSubOpModel, QuantizedTestsNoActivationInt16) {
  float kQuantizedTolerance = GetTolerance<int16_t>(-2.0, 2.0);
  std::vector<std::vector<float>> inputs1 = {
      {0.7, 0.6, 0.6, 0.5}, {-0.2, 0.6, 0.9, -0.1}, {-0.2, 0.6, -0.3, 0.8}};
  std::vector<std::vector<float>> inputs2 = {
      {0.6, 0.4, 0.3, 0.1}, {0.6, 0.4, 0.5, -0.8}, {0.6, 0.4, 0.8, 0.5}};
  std::vector<std::vector<float>> results = {
      {0.1, 0.2, 0.3, 0.4}, {-0.8, 0.2, 0.4, 0.7}, {-0.8, 0.2, -1.1, 0.3}};
  for (int i = 0; i < inputs1.size(); ++i) {
    QuantizedSubOpModel m({TensorType_INT16, {1, 2, 2, 1}, -2.0, 2.0},
                          {TensorType_INT16, {1, 2, 2, 1}, -1.0, 1.0},
                          {TensorType_INT16, {}, -2.0, 2.0},
                          ActivationFunctionType_NONE);
    m.QuantizeAndPopulate<int16_t>(m.input1(), inputs1[i]);
    m.QuantizeAndPopulate<int16_t>(m.input2(), inputs2[i]);
    m.Invoke();
    EXPECT_THAT(
        m.GetDequantizedOutput<int16_t>(),
        ElementsAreArray(ArrayFloatNear(results[i], kQuantizedTolerance)))
        << "With test number " << i;
  }
}

TEST(QuantizedSubOpModel, QuantizedTestsReluActivationInt16) {
  float kQuantizedTolerance = GetTolerance<int16_t>(-2.0, 2.0);
  std::vector<std::vector<float>> inputs1 = {{-0.8, 0.2, 0.9, 0.7},
                                             {-0.8, 0.2, 0.7, 0.5}};
  std::vector<std::vector<float>> inputs2 = {{0.6, 0.4, 0.9, -0.8},
                                             {0.6, 0.4, -0.8, 0.3}};
  std::vector<std::vector<float>> results = {{-1.0, -0.2, 0.0, 1.0},
                                             {-1.0, -0.2, 1.0, 0.2}};
  for (int i = 0; i < inputs1.size(); ++i) {
    QuantizedSubOpModel m({TensorType_INT16, {1, 2, 2, 1}, -2.0, 2.0},
                          {TensorType_INT16, {1, 2, 2, 1}, -1.0, 1.0},
                          {TensorType_INT16, {}, -2.0, 2.0},
                          ActivationFunctionType_RELU_N1_TO_1);
    m.QuantizeAndPopulate<int16_t>(m.input1(), inputs1[i]);
    m.QuantizeAndPopulate<int16_t>(m.input2(), inputs2[i]);
    m.Invoke();
    EXPECT_THAT(
        m.GetDequantizedOutput<int16_t>(),
        ElementsAreArray(ArrayFloatNear(results[i], kQuantizedTolerance)))
        << "With test number " << i;
  }
}

TEST(QuantizedSubOpModel, QuantizedTestsNoActivationBroadcastInt16) {
  float kQuantizedTolerance = GetTolerance<int16_t>(-2.0, 2.0);
  std::vector<std::vector<int>> test_shapes = {
      {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}, {1, 3, 1, 2, 1}};
  for (int i = 0; i < test_shapes.size(); ++i) {
    QuantizedSubOpModel m({TensorType_INT16, test_shapes[i], -2.0, 2.0},
                          {TensorType_INT16, {}, -1.0, 1.0},
                          {TensorType_INT16, {}, -2.0, 2.0},
                          ActivationFunctionType_NONE);
    m.QuantizeAndPopulate<int16_t>(m.input1(),
                                   {-0.9, -0.7, -0.3, 0.0, 0.3, 0.5});
    m.QuantizeAndPopulate<int16_t>(m.input2(), {0.2});
    m.Invoke();
    EXPECT_THAT(m.GetDequantizedOutput<int16_t>(),
                ElementsAreArray(ArrayFloatNear(
                    {-1.1, -0.9, -0.5, -0.2, 0.1, 0.3}, kQuantizedTolerance)))
        << "With shape number " << i;
  }
}

TEST(QuantizedSubOpModel, QuantizedTestsReluActivationBroadcastInt16) {
  float kQuantizedTolerance = GetTolerance<int16_t>(-2.0, 2.0);
  std::vector<std::vector<int>> test_shapes = {
      {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}, {1, 3, 1, 2, 1}};
  for (int i = 0; i < test_shapes.size(); ++i) {
    QuantizedSubOpModel m({TensorType_INT16, test_shapes[i], -2.0, 2.0},
                          {TensorType_INT16, {}, -1.0, 1.0},
                          {TensorType_INT16, {}, -2.0, 2.0},
                          ActivationFunctionType_RELU_N1_TO_1);
    m.QuantizeAndPopulate<int16_t>(m.input1(),
                                   {-0.9, -0.7, -0.3, 0.0, 0.3, 0.5});
    m.QuantizeAndPopulate<int16_t>(m.input2(), {0.2});
    m.Invoke();
    EXPECT_THAT(m.GetDequantizedOutput<int16_t>(),
                ElementsAreArray(ArrayFloatNear(
                    {-1.0, -0.9, -0.5, -0.2, 0.1, 0.3}, kQuantizedTolerance)))
        << "With shape number " << i;
  }
}

}  // namespace
}  // namespace tflite