• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6 
7     http://www.apache.org/licenses/LICENSE-2.0
8 
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15 #include <cstdarg>
16 
17 #include <gtest/gtest.h>
18 #include "absl/memory/memory.h"
19 #include "tensorflow/contrib/lite/interpreter.h"
20 #include "tensorflow/contrib/lite/kernels/register.h"
21 #include "tensorflow/contrib/lite/kernels/test_util.h"
22 #include "tensorflow/contrib/lite/model.h"
23 
24 namespace tflite {
25 
namespace ops {
namespace builtin {

// Forward declarations for the Conv2D kernel variants under test. Each
// function returns the TfLiteRegistration of a different implementation
// (reference, generic optimized, multithreaded, cblas); the parameterized
// tests below are run once against each of them via kKernelMap.
TfLiteRegistration* Register_CONVOLUTION_REF();
TfLiteRegistration* Register_CONVOLUTION_GENERIC_OPT();
TfLiteRegistration* Register_CONVOLUTION_MULTITHREADED_OPT();
TfLiteRegistration* Register_CONVOLUTION_CBLAS_OPT();

}  // namespace builtin
}  // namespace ops
36 
37 namespace {
38 
39 using ::testing::ElementsAreArray;
40 
// Common harness for the Conv2D kernel tests. Builds a single-op model with
// input, filter and bias tensors feeding a CONV_2D node, resolved to the
// specific kernel registration supplied by the caller.
class BaseConvolutionOpModel : public SingleOpModel {
 public:
  // TODO(ahentz): Also test different activation types, bias, padding types,
  // stride values.
  BaseConvolutionOpModel(
      TfLiteRegistration* registration, const TensorData& input,
      const TensorData& filter, const TensorData& output, int stride_width = 2,
      int stride_height = 2, enum Padding padding = Padding_VALID,
      enum ActivationFunctionType activation = ActivationFunctionType_NONE) {
    input_ = AddInput(input);
    filter_ = AddInput(filter);

    // One bias element per output channel (dimension 0 of the filter).
    int bias_size = GetShape(filter_)[0];
    if (input.type == TensorType_FLOAT32) {
      bias_ = AddInput({TensorType_FLOAT32, {bias_size}});
    } else {
      // This is a quantized version. The scale of 'bias' depends on the scales
      // of input and filter. Supposedly this is correctly set during quantized
      // training.
      auto bias_scale = GetScale(input_) * GetScale(filter_);
      TensorData bias{TensorType_INT32, {bias_size}, 0, 0, bias_scale};
      bias_ = AddInput(bias);
    }

    output_ = AddOutput(output);
    if (input.type != TensorType_FLOAT32) {
      // The following is required by quantized inference. It is the unittest's
      // responsibility to make sure the output scale falls into the correct
      // range.
      CHECK_LT(GetScale(input_) * GetScale(filter_), GetScale(output_));
    }

    SetBuiltinOp(BuiltinOperator_CONV_2D, BuiltinOptions_Conv2DOptions,
                 CreateConv2DOptions(builder_, padding, stride_width,
                                     stride_height, activation)
                     .Union());

    // Resolve CONV_2D exclusively to the kernel implementation under test.
    resolver_ = absl::make_unique<SingleOpResolver>(BuiltinOperator_CONV_2D,
                                                    registration);
    BuildInterpreter({GetShape(input_), GetShape(filter_), GetShape(bias_)});
  }

 protected:
  int input_;   // tensor index of the convolution input
  int filter_;  // tensor index of the filter weights
  int bias_;    // tensor index of the bias
  int output_;  // tensor index of the output
};
89 
90 class ConvolutionOpModel : public BaseConvolutionOpModel {
91  public:
92   using BaseConvolutionOpModel::BaseConvolutionOpModel;
93 
SetFilter(std::initializer_list<float> f)94   void SetFilter(std::initializer_list<float> f) { PopulateTensor(filter_, f); }
95 
SetBias(std::initializer_list<float> f)96   void SetBias(std::initializer_list<float> f) { PopulateTensor(bias_, f); }
97 
SetInput(std::initializer_list<float> data)98   void SetInput(std::initializer_list<float> data) {
99     PopulateTensor(input_, data);
100   }
GetOutput()101   std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
102 };
103 
// Maps a human-readable tag to the registration of each Conv2D kernel
// implementation. Heap-allocated and intentionally never freed so it outlives
// all test instantiations.
const auto kKernelMap = new std::map<string, TfLiteRegistration*>({
    {"Reference", ops::builtin::Register_CONVOLUTION_REF()},
    {"GenericOptimized", ops::builtin::Register_CONVOLUTION_GENERIC_OPT()},
    {"MultithreadedOptimized",
     ops::builtin::Register_CONVOLUTION_MULTITHREADED_OPT()},
    {"CblasOptimized", ops::builtin::Register_CONVOLUTION_CBLAS_OPT()},
});
111 
// Parameterized fixture: the test parameter (a tag from kKernelMap) selects
// which kernel implementation GetRegistration() hands to the models below.
class ConvolutionOpTest : public SingleOpTest {
 protected:
  const std::map<string, TfLiteRegistration*>& GetKernelMap() override {
    return *kKernelMap;
  }
};
118 
// Basic float convolution: two 2x4x1 batches against three 2x2 filters with
// the default stride of 2 and VALID padding; checks exact output values.
TEST_P(ConvolutionOpTest, SimpleTestFloat32) {
  ConvolutionOpModel m(GetRegistration(), {TensorType_FLOAT32, {2, 2, 4, 1}},
                       {TensorType_FLOAT32, {3, 2, 2, 1}},
                       {TensorType_FLOAT32, {}});

  m.SetInput({
      // First batch
      1, 1, 1, 1,  // row = 1
      2, 2, 2, 2,  // row = 2
      // Second batch
      1, 2, 3, 4,  // row = 1
      1, 2, 3, 4,  // row = 2
  });
  m.SetFilter({
      1, 2, 3, 4,    // first 2x2 filter
      -1, 1, -1, 1,  // second 2x2 filter
      -1, -1, 1, 1,  // third 2x2 filter
  });
  m.SetBias({1, 2, 3});

  m.Invoke();

  EXPECT_THAT(m.GetOutput(), ElementsAreArray({
                                 18, 2, 5,  // first batch, left
                                 18, 2, 5,  // first batch, right
                                 17, 4, 3,  // second batch, left
                                 37, 4, 3,  // second batch, right
                             }));
}
148 
// Verifies that unequal horizontal/vertical strides (width 3, height 1) are
// applied correctly in the float path.
TEST_P(ConvolutionOpTest, SimpleTestFloat32WithAnisotropicStrides) {
  ConvolutionOpModel m(GetRegistration(), {TensorType_FLOAT32, {1, 3, 6, 1}},
                       {TensorType_FLOAT32, {1, 2, 2, 1}},
                       {TensorType_FLOAT32, {}},
                       /*stride_width=*/3, /*stride_height=*/1);
  m.SetInput({
      3, 2, 1, -1, -2, -3,  //
      4, 3, 2, -2, -3, -4,  //
      5, 4, 3, -3, -4, -5,  //
  });
  m.SetFilter({
      1, 2,  //
      3, 4,  //
  });
  m.SetBias({-1});
  m.Invoke();
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({
                                 30, -24,  //
                                 40, -34,  //
                             }));
}
170 
// Hand-verified SAME-padding convolution (3x3 filter on a 3x4 image, stride
// 1, no bias); every output value is derived in the comments below.
TEST_P(ConvolutionOpTest, HandCalculatedFloat32) {
  const int depth = 1;
  const int image_width = 4;
  const int image_height = 3;
  const int image_batch_count = 1;
  const int filter_size = 3;
  const int filter_count = 1;
  const int stride_width = 1;
  const int stride_height = 1;
  const Padding padding = Padding_SAME;
  ConvolutionOpModel m(
      GetRegistration(),
      {TensorType_FLOAT32,
       {image_batch_count, image_height, image_width, depth}},
      {TensorType_FLOAT32, {depth, filter_size, filter_size, filter_count}},
      {TensorType_FLOAT32, {}}, stride_width, stride_height, padding);

  // The image matrix is:
  // |  1 |  2 |  3 |  4 |
  // |  5 |  6 |  7 |  8 |
  // |  9 | 10 | 11 | 12 |
  m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
  // The filter matrix is:
  // | 1 | 4 | 7 |
  // | 2 | 5 | 8 |
  // | 3 | 6 | 9 |
  m.SetFilter({1, 4, 7, 2, 5, 8, 3, 6, 9});
  // No bias for this test.
  m.SetBias({0});

  m.Invoke();
  // We're sliding the 3x3 filter across the 3x4 image, with accesses outside
  // the input set to zero because we're using the 'SAME' padding mode.
  // The calculations behind the expected output are:
  // (1*0)+(4*0)+(7*0)+(2*0)+(5*1)+(8*2)+(3*0)+(6*5)+(9*6)=105
  // (1*0)+(4*0)+(7*0)+(2*1)+(5*2)+(8*3)+(3*5)+(6*6)+(9*7)=150
  // (1*0)+(4*0)+(7*0)+(2*2)+(5*3)+(8*4)+(3*6)+(6*7)+(9*8)=183
  // (1*0)+(4*0)+(7*0)+(2*3)+(5*4)+(8*0)+(3*7)+(6*8)+(9*0)=95
  // (1*0)+(4*1)+(7*2)+(2*0)+(5*5)+(8*6)+(3*0)+(6*9)+(9*10)=235
  // (1*1)+(4*2)+(7*3)+(2*5)+(5*6)+(8*7)+(3*9)+(6*10)+(9*11)=312
  // (1*2)+(4*3)+(7*4)+(2*6)+(5*7)+(8*8)+(3*10)+(6*11)+(9*12)=357
  // (1*3)+(4*4)+(7*0)+(2*7)+(5*8)+(8*0)+(3*11)+(6*12)+(9*0)=178
  // (1*0)+(4*5)+(7*6)+(2*0)+(5*9)+(8*10)+(3*0)+(6*0)+(9*0)=187
  // (1*5)+(4*6)+(7*7)+(2*9)+(5*10)+(8*11)+(3*0)+(6*0)+(9*0)=234
  // (1*6)+(4*7)+(7*8)+(2*10)+(5*11)+(8*12)+(3*0)+(6*0)+(9*0)=261
  // (1*7)+(4*8)+(7*0)+(2*11)+(5*12)+(8*0)+(3*0)+(6*0)+(9*0)=121
  // This means we should end up with this matrix:
  // |  105  |  150  |  183  |   95  |
  // |  235  |  312  |  357  |  178  |
  // |  187  |  234  |  261  |  121  |
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({105, 150, 183, 95, 235, 312, 357,
                                               178, 187, 234, 261, 121}));
}
224 
// Same hand-verified SAME-padding convolution as above, but with a bias of 10
// added to every output element.
TEST_P(ConvolutionOpTest, HandCalculatedWithBiasFloat32) {
  const int depth = 1;
  const int image_width = 4;
  const int image_height = 3;
  const int image_batch_count = 1;
  const int filter_size = 3;
  const int filter_count = 1;
  const int stride_width = 1;
  const int stride_height = 1;
  const Padding padding = Padding_SAME;
  ConvolutionOpModel m(
      GetRegistration(),
      {TensorType_FLOAT32,
       {image_batch_count, image_height, image_width, depth}},
      {TensorType_FLOAT32, {depth, filter_size, filter_size, filter_count}},
      {TensorType_FLOAT32, {}}, stride_width, stride_height, padding);

  // The image matrix is:
  // |  1 |  2 |  3 |  4 |
  // |  5 |  6 |  7 |  8 |
  // |  9 | 10 | 11 | 12 |
  m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
  // The filter matrix is:
  // | 1 | 4 | 7 |
  // | 2 | 5 | 8 |
  // | 3 | 6 | 9 |
  m.SetFilter({1, 4, 7, 2, 5, 8, 3, 6, 9});
  // Bias is | 10 |.
  m.SetBias({10});

  m.Invoke();
  // We're sliding the 3x3 filter across the 3x4 image, with accesses outside
  // the input set to zero because we're using the 'SAME' padding mode.
  // The calculations behind the expected output are:
  // (1*0)+(4*0)+(7*0)+(2*0)+(5*1)+(8*2)+(3*0)+(6*5)+(9*6)+10=115
  // (1*0)+(4*0)+(7*0)+(2*1)+(5*2)+(8*3)+(3*5)+(6*6)+(9*7)+10=160
  // (1*0)+(4*0)+(7*0)+(2*2)+(5*3)+(8*4)+(3*6)+(6*7)+(9*8)+10=193
  // (1*0)+(4*0)+(7*0)+(2*3)+(5*4)+(8*0)+(3*7)+(6*8)+(9*0)+10=105
  // (1*0)+(4*1)+(7*2)+(2*0)+(5*5)+(8*6)+(3*0)+(6*9)+(9*10)+10=245
  // (1*1)+(4*2)+(7*3)+(2*5)+(5*6)+(8*7)+(3*9)+(6*10)+(9*11)+10=322
  // (1*2)+(4*3)+(7*4)+(2*6)+(5*7)+(8*8)+(3*10)+(6*11)+(9*12)+10=367
  // (1*3)+(4*4)+(7*0)+(2*7)+(5*8)+(8*0)+(3*11)+(6*12)+(9*0)+10=188
  // (1*0)+(4*5)+(7*6)+(2*0)+(5*9)+(8*10)+(3*0)+(6*0)+(9*0)+10=197
  // (1*5)+(4*6)+(7*7)+(2*9)+(5*10)+(8*11)+(3*0)+(6*0)+(9*0)+10=244
  // (1*6)+(4*7)+(7*8)+(2*10)+(5*11)+(8*12)+(3*0)+(6*0)+(9*0)+10=271
  // (1*7)+(4*8)+(7*0)+(2*11)+(5*12)+(8*0)+(3*0)+(6*0)+(9*0)+10=131
  // This means we should end up with this matrix:
  // |  115  |  160  |  193  |  105  |
  // |  245  |  322  |  367  |  188  |
  // |  197  |  244  |  271  |  131  |
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({115, 160, 193, 105, 245, 322,
                                               367, 188, 197, 244, 271, 131}));
}
278 
// Same hand-verified SAME-padding convolution, with a large negative bias and
// a RELU activation so that negative accumulations are clamped to zero.
TEST_P(ConvolutionOpTest, HandCalculatedWithReluFloat32) {
  const int depth = 1;
  const int image_width = 4;
  const int image_height = 3;
  const int image_batch_count = 1;
  const int filter_size = 3;
  const int filter_count = 1;
  const int stride_width = 1;
  const int stride_height = 1;
  const Padding padding = Padding_SAME;
  ConvolutionOpModel m(
      GetRegistration(),
      {TensorType_FLOAT32,
       {image_batch_count, image_height, image_width, depth}},
      {TensorType_FLOAT32, {depth, filter_size, filter_size, filter_count}},
      {TensorType_FLOAT32, {}}, stride_width, stride_height, padding,
      ActivationFunctionType_RELU);

  // The image matrix is:
  // |  1 |  2 |  3 |  4 |
  // |  5 |  6 |  7 |  8 |
  // |  9 | 10 | 11 | 12 |
  m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
  // The filter matrix is:
  // | 1 | 4 | 7 |
  // | 2 | 5 | 8 |
  // | 3 | 6 | 9 |
  m.SetFilter({1, 4, 7, 2, 5, 8, 3, 6, 9});
  // Bias is | -200 |.
  m.SetBias({-200});

  m.Invoke();
  // We're sliding the 3x3 filter across the 3x4 image, with accesses outside
  // the input set to zero because we're using the 'SAME' padding mode.
  // The calculations behind the expected output are:
  // (1*0)+(4*0)+(7*0)+(2*0)+(5*1)+(8*2)+(3*0)+(6*5)+(9*6)-200=-95
  // (1*0)+(4*0)+(7*0)+(2*1)+(5*2)+(8*3)+(3*5)+(6*6)+(9*7)-200=-50
  // (1*0)+(4*0)+(7*0)+(2*2)+(5*3)+(8*4)+(3*6)+(6*7)+(9*8)-200=-17
  // (1*0)+(4*0)+(7*0)+(2*3)+(5*4)+(8*0)+(3*7)+(6*8)+(9*0)-200=-105
  // (1*0)+(4*1)+(7*2)+(2*0)+(5*5)+(8*6)+(3*0)+(6*9)+(9*10)-200=35
  // (1*1)+(4*2)+(7*3)+(2*5)+(5*6)+(8*7)+(3*9)+(6*10)+(9*11)-200=112
  // (1*2)+(4*3)+(7*4)+(2*6)+(5*7)+(8*8)+(3*10)+(6*11)+(9*12)-200=157
  // (1*3)+(4*4)+(7*0)+(2*7)+(5*8)+(8*0)+(3*11)+(6*12)+(9*0)-200=-22
  // (1*0)+(4*5)+(7*6)+(2*0)+(5*9)+(8*10)+(3*0)+(6*0)+(9*0)-200=-13
  // (1*5)+(4*6)+(7*7)+(2*9)+(5*10)+(8*11)+(3*0)+(6*0)+(9*0)-200=34
  // (1*6)+(4*7)+(7*8)+(2*10)+(5*11)+(8*12)+(3*0)+(6*0)+(9*0)-200=61
  // (1*7)+(4*8)+(7*0)+(2*11)+(5*12)+(8*0)+(3*0)+(6*0)+(9*0)-200=-79
  // All negative values are gated to zero by the Relu activation function.
  // This means we should end up with this matrix:
  // |   0 |   0 |   0 |   0 |
  // |  35 | 112 | 157 |   0 |
  // |   0 |  34 |  61 |   0 |
  EXPECT_THAT(m.GetOutput(),
              ElementsAreArray({0, 0, 0, 0, 35, 112, 157, 0, 0, 34, 61, 0}));
}
334 
// Hand-verified VALID-padding convolution: the 3x3 filter only fits at two
// positions inside the 3x4 image, so the output has exactly two elements.
TEST_P(ConvolutionOpTest, HandCalculatedValidFloat32) {
  const int depth = 1;
  const int image_width = 4;
  const int image_height = 3;
  const int image_batch_count = 1;
  const int filter_size = 3;
  const int filter_count = 1;
  const int stride_width = 1;
  const int stride_height = 1;
  const Padding padding = Padding_VALID;
  ConvolutionOpModel m(
      GetRegistration(),
      {TensorType_FLOAT32,
       {image_batch_count, image_height, image_width, depth}},
      {TensorType_FLOAT32, {depth, filter_size, filter_size, filter_count}},
      {TensorType_FLOAT32, {}}, stride_width, stride_height, padding);

  // The image matrix is:
  // |  1 |  2 |  3 |  4 |
  // |  5 |  6 |  7 |  8 |
  // |  9 | 10 | 11 | 12 |
  m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
  // The filter matrix is:
  // | 1 | 4 | 7 |
  // | 2 | 5 | 8 |
  // | 3 | 6 | 9 |
  m.SetFilter({1, 4, 7, 2, 5, 8, 3, 6, 9});
  // No bias for this test.
  m.SetBias({0});

  m.Invoke();
  // We're sliding the 3x3 filter across the 3x4 image, with no accesses outside
  // the input because we're using the 'VALID' padding mode, giving a 1x2
  // output.
  // The calculations behind the expected output are:
  // (1*1)+(4*2)+(7*3)+(2*5)+(5*6)+(8*7)+(3*9)+(6*10)+(9*11)=312
  // (1*2)+(4*3)+(7*4)+(2*6)+(5*7)+(8*8)+(3*10)+(6*11)+(9*12)=357
  // This means we should end up with this matrix:
  // |  312  |  357  |
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({312, 357}));
}
376 
377 class QuantizedConvolutionOpModel : public BaseConvolutionOpModel {
378  public:
379   using BaseConvolutionOpModel::BaseConvolutionOpModel;
380 
SetInput(std::initializer_list<float> data)381   void SetInput(std::initializer_list<float> data) {
382     QuantizeAndPopulate<uint8_t>(input_, data);
383   }
384 
SetFilter(std::initializer_list<float> data)385   void SetFilter(std::initializer_list<float> data) {
386     QuantizeAndPopulate<uint8_t>(filter_, data);
387   }
388 
SetBias(std::initializer_list<float> data)389   void SetBias(std::initializer_list<float> data) {
390     QuantizeAndPopulate<int32_t>(bias_, data);
391   }
392 
GetOutput()393   std::vector<uint8_t> GetOutput() { return ExtractVector<uint8_t>(output_); }
GetDequantizedOutput()394   std::vector<float> GetDequantizedOutput() {
395     return Dequantize<uint8_t>(ExtractVector<uint8_t>(output_),
396                                GetScale(output_), GetZeroPoint(output_));
397   }
398 };
399 
// In these tests we set the input and output scales so that the results
// match exactly the 'non-quantized' version.
TEST_P(ConvolutionOpTest, SimpleTestQuantized) {
  QuantizedConvolutionOpModel m(GetRegistration(),
                                {TensorType_UINT8, {2, 2, 4, 1}, -63.5, 64},
                                {TensorType_UINT8, {3, 2, 2, 1}, -63.5, 64},
                                {TensorType_UINT8, {}, -127, 128});
  m.SetInput({
      // First batch
      1, 1, 1, 1,  // row = 1
      2, 2, 2, 2,  // row = 2
      // Second batch
      1, 2, 3, 4,  // row = 1
      1, 2, 3, 4,  // row = 2
  });
  m.SetFilter({
      1, 2, 3, 4,    // first 2x2 filter
      -1, 1, -1, 1,  // second 2x2 filter
      -1, -1, 1, 1,  // third 2x2 filter
  });
  m.SetBias({1, 2, 3});

  m.Invoke();

  EXPECT_THAT(m.GetDequantizedOutput(),
              ElementsAreArray(ArrayFloatNear(
                  {
                      18, 2, 5,  // first batch, left
                      18, 2, 5,  // first batch, right
                      17, 4, 3,  // second batch, left
                      37, 4, 3,  // second batch, right
                  },
                  1e-5)));
  // For good measure, let's also verify the quantized values:
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({
                                 145, 129, 132,  //
                                 145, 129, 132,  //
                                 144, 131, 130,  //
                                 164, 131, 130,  //
                             }));
}
441 
// Quantized counterpart of the anisotropic-stride test (stride width 3,
// height 1); checks both the dequantized and the raw uint8 outputs.
TEST_P(ConvolutionOpTest, SimpleTestQuantizedWithAnisotropicStrides) {
  QuantizedConvolutionOpModel m(GetRegistration(),
                                {TensorType_UINT8, {1, 3, 6, 1}, -63.5, 64},
                                {TensorType_UINT8, {1, 2, 2, 1}, -63.5, 64},
                                {TensorType_UINT8, {}, -127, 128},
                                /*stride_width=*/3, /*stride_height=*/1);
  m.SetInput({
      3, 2, 1, -1, -2, -3,  //
      4, 3, 2, -2, -3, -4,  //
      5, 4, 3, -3, -4, -5,  //
  });
  m.SetFilter({
      1, 2,  //
      3, 4,  //
  });
  m.SetBias({-1});
  m.Invoke();
  EXPECT_THAT(m.GetDequantizedOutput(), ElementsAreArray(ArrayFloatNear({
                                            30, -24,  //
                                            40, -34,  //
                                        })));
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({
                                 157, 103,  //
                                 167, 93,   //
                             }));
}
468 
// Instantiate every TEST_P above once per kernel tag in kKernelMap.
INSTANTIATE_TEST_CASE_P(
    ConvolutionOpTest, ConvolutionOpTest,
    ::testing::ValuesIn(SingleOpTest::GetKernelTags(*kKernelMap)));
472 
473 }  // namespace
474 }  // namespace tflite
475 
// Test binary entry point: route TF Lite logging to stderr before gtest
// initialization, then run all registered tests.
int main(int argc, char** argv) {
  ::tflite::LogToStderr();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
481