/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/micro/kernels/conv_test.h"

namespace tflite {
namespace testing {

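// Builds the conv op's input/output index arrays (inputs: data, filter, and
// bias at indices 0-2; output at index 3), then drives the kernel through
// KernelRunner's init, prepare, and invoke steps, returning the first
// failing status.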
template <typename T>
TfLiteStatus InvokeConv(TfLiteTensor* tensors, int tensors_size,
                        int output_length, TfLiteConvParams* conv_params,
                        TfLiteRegistration registration, T* output_data) {
  int inputs_array_data[] = {3, 0, 1, 2};
  TfLiteIntArray* inputs_array = IntArrayFromInts(inputs_array_data);
  int outputs_array_data[] = {1, 3};
  TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data);

  micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
                             outputs_array, conv_params);

  const char* init_data = reinterpret_cast<const char*>(conv_params);
  TfLiteStatus status = runner.InitAndPrepare(init_data);
  if (status != kTfLiteOk) {
    return status;
  }
  return runner.Invoke();
}

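// Invokes the conv kernel via InvokeConv and then compares each output
// element against the expected golden values to within |tolerance|.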
template <typename T>
TfLiteStatus ValidateConvGoldens(TfLiteTensor* tensors, int tensors_size,
                                 const T* expected_output_data,
                                 int output_length,
                                 TfLiteConvParams* conv_params,
                                 TfLiteRegistration registration,
                                 T* output_data, float tolerance) {
  TfLiteStatus status = InvokeConv(tensors, tensors_size, output_length,
                                   conv_params, registration, output_data);
  if (status != kTfLiteOk) {
    return status;
  }
  for (int i = 0; i < output_length; ++i) {
    TF_LITE_MICRO_EXPECT_NEAR(expected_output_data[i], output_data[i],
                              tolerance);
  }
  return kTfLiteOk;
}

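// The non-templated overloads below simply forward to the templated
// implementations above for the float, int8_t, and uint8_t variants used by
// the conv kernel tests.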
TfLiteStatus InvokeConv(TfLiteTensor* tensors, int tensors_size,
                        int output_length, TfLiteConvParams* conv_params,
                        TfLiteRegistration registration, float* output_data) {
  return InvokeConv<float>(tensors, tensors_size, output_length, conv_params,
                           registration, output_data);
}

TfLiteStatus InvokeConv(TfLiteTensor* tensors, int tensors_size,
                        int output_length, TfLiteConvParams* conv_params,
                        TfLiteRegistration registration, int8_t* output_data) {
  return InvokeConv<int8_t>(tensors, tensors_size, output_length, conv_params,
                            registration, output_data);
}

TfLiteStatus InvokeConv(TfLiteTensor* tensors, int tensors_size,
                        int output_length, TfLiteConvParams* conv_params,
                        TfLiteRegistration registration, uint8_t* output_data) {
  return InvokeConv<uint8_t>(tensors, tensors_size, output_length, conv_params,
                             registration, output_data);
}

TfLiteStatus ValidateConvGoldens(TfLiteTensor* tensors, int tensors_size,
                                 const float* expected_output_data,
                                 int output_length,
                                 TfLiteConvParams* conv_params,
                                 TfLiteRegistration registration,
                                 float* output_data, float tolerance) {
  return ValidateConvGoldens<float>(tensors, tensors_size, expected_output_data,
                                    output_length, conv_params, registration,
                                    output_data, tolerance);
}

TfLiteStatus ValidateConvGoldens(TfLiteTensor* tensors, int tensors_size,
                                 const int8_t* expected_output_data,
                                 int output_length,
                                 TfLiteConvParams* conv_params,
                                 TfLiteRegistration registration,
                                 int8_t* output_data, float tolerance) {
  return ValidateConvGoldens<int8_t>(
      tensors, tensors_size, expected_output_data, output_length, conv_params,
      registration, output_data, tolerance);
}

TfLiteStatus ValidateConvGoldens(TfLiteTensor* tensors, int tensors_size,
                                 const uint8_t* expected_output_data,
                                 int output_length,
                                 TfLiteConvParams* conv_params,
                                 TfLiteRegistration registration,
                                 uint8_t* output_data, float tolerance) {
  return ValidateConvGoldens<uint8_t>(
      tensors, tensors_size, expected_output_data, output_length, conv_params,
      registration, output_data, tolerance);
}

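// Float conv test: wraps the raw float buffers in TfLiteTensors and checks
// the kernel output against the expected float goldens.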
TfLiteStatus TestConvFloat(const int* input_dims_data, const float* input_data,
                           const int* filter_dims_data,
                           const float* filter_data, const int* bias_dims_data,
                           const float* bias_data, const int* output_dims_data,
                           const float* expected_output_data,
                           TfLiteConvParams* conv_params,
                           TfLiteRegistration registration,
                           float* output_data) {
  TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data);
  TfLiteIntArray* filter_dims = IntArrayFromInts(filter_dims_data);
  TfLiteIntArray* bias_dims = IntArrayFromInts(bias_dims_data);
  TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
  const int output_dims_count = ElementCount(*output_dims);
  constexpr int inputs_size = 3;
  constexpr int outputs_size = 1;
  constexpr int tensors_size = inputs_size + outputs_size;
  TfLiteTensor tensors[tensors_size] = {
      CreateTensor(input_data, input_dims),
      CreateTensor(filter_data, filter_dims),
      CreateTensor(bias_data, bias_dims),
      CreateTensor(output_data, output_dims),
  };

  return ValidateConvGoldens(tensors, tensors_size, expected_output_data,
                             output_dims_count, conv_params, registration,
                             output_data);
}

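// uint8 per-layer quantized conv test: quantizes the float input, filter,
// bias, and expected output (asymmetric quantization with zero point 128),
// attaches a single-scale affine quantization to the filter tensor, and
// checks the kernel output against the quantized goldens.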
TfLiteStatus TestConvQuantizedPerLayer(
    const int* input_dims_data, const float* input_data,
    uint8_t* input_quantized, float input_scale, const int* filter_dims_data,
    const float* filter_data, uint8_t* filter_quantized, float filter_scale,
    const int* bias_dims_data, const float* bias_data, int32_t* bias_quantized,
    const int* output_dims_data, const float* expected_output_data,
    uint8_t* expected_output_quantized, float output_scale,
    TfLiteConvParams* conv_params, TfLiteRegistration registration,
    uint8_t* output_data) {
  TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data);
  TfLiteIntArray* filter_dims = IntArrayFromInts(filter_dims_data);
  TfLiteIntArray* bias_dims = IntArrayFromInts(bias_dims_data);
  TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
  const int output_dims_count = ElementCount(*output_dims);

  tflite::Quantize(expected_output_data, expected_output_quantized,
                   output_dims_count, output_scale, 128);

  constexpr int inputs_size = 3;
  constexpr int outputs_size = 1;
  constexpr int tensors_size = inputs_size + outputs_size;
  TfLiteTensor tensors[tensors_size] = {
      CreateQuantizedTensor(input_data, input_quantized, input_dims,
                            input_scale, 128),
      CreateQuantizedTensor(filter_data, filter_quantized, filter_dims,
                            filter_scale, 128),
      CreateQuantizedBiasTensor(bias_data, bias_quantized, bias_dims,
                                input_scale, filter_scale),
      CreateQuantizedTensor(output_data, output_dims, output_scale, 128)};

  float filter_scales[] = {1, filter_scale};
  int filter_zero_points[] = {1, 128};
  TfLiteAffineQuantization filter_quant = {FloatArrayFromFloats(filter_scales),
                                           IntArrayFromInts(filter_zero_points),
                                           0};
  tensors[1].quantization = {kTfLiteAffineQuantization, &filter_quant};

  return ValidateConvGoldens(tensors, tensors_size, expected_output_quantized,
                             output_dims_count, conv_params, registration,
                             output_data);
}

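// int8 per-channel quantized conv test: the filter is symmetrically
// quantized per output channel (quantized dimension 0), the bias uses the
// matching per-channel scales, and the kernel output is compared against the
// quantized goldens with a tolerance of one quantized unit.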
TfLiteStatus TestConvQuantizedPerChannel(
    const int* input_dims_data, const float* input_data,
    int8_t* input_quantized, float input_scale, int input_zero_point,
    const int* filter_dims_data, const float* filter_data,
    int8_t* filter_data_quantized, const int* bias_dims_data,
    const float* bias_data, int32_t* bias_data_quantized, float* bias_scales,
    int* bias_zero_points, const int* output_dims_data,
    const float* expected_output_data, int8_t* expected_output_data_quantized,
    float output_scale, int output_zero_point, TfLiteConvParams* conv_params,
    TfLiteRegistration registration, int8_t* output_data) {
  TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data);
  TfLiteIntArray* filter_dims = IntArrayFromInts(filter_dims_data);
  TfLiteIntArray* bias_dims = IntArrayFromInts(bias_dims_data);
  TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
  const int output_dims_count = ElementCount(*output_dims);

  int filter_zero_points[5];
  float filter_scales[5];
  TfLiteAffineQuantization filter_quant;
  TfLiteAffineQuantization bias_quant;
  TfLiteTensor input_tensor = CreateQuantizedTensor(
      input_data, input_quantized, input_dims, input_scale, input_zero_point);
  TfLiteTensor filter_tensor = CreateSymmetricPerChannelQuantizedTensor(
      filter_data, filter_data_quantized, filter_dims, filter_scales,
      filter_zero_points, &filter_quant, 0 /* quantized dimension */);
  TfLiteTensor bias_tensor = CreatePerChannelQuantizedBiasTensor(
      bias_data, bias_data_quantized, bias_dims, input_scale, &filter_scales[1],
      bias_scales, bias_zero_points, &bias_quant, 0 /* quantized dimension */);
  TfLiteTensor output_tensor = CreateQuantizedTensor(
      output_data, output_dims, output_scale, output_zero_point);

  float input_scales[] = {1, input_scale};
  int input_zero_points[] = {1, input_zero_point};
  TfLiteAffineQuantization input_quant = {FloatArrayFromFloats(input_scales),
                                          IntArrayFromInts(input_zero_points),
                                          0};
  input_tensor.quantization = {kTfLiteAffineQuantization, &input_quant};

  float output_scales[] = {1, output_scale};
  int output_zero_points[] = {1, output_zero_point};
  TfLiteAffineQuantization output_quant = {FloatArrayFromFloats(output_scales),
                                           IntArrayFromInts(output_zero_points),
                                           0};
  output_tensor.quantization = {kTfLiteAffineQuantization, &output_quant};

  constexpr int inputs_size = 3;
  constexpr int outputs_size = 1;
  constexpr int tensors_size = inputs_size + outputs_size;
  TfLiteTensor tensors[tensors_size] = {
      input_tensor,
      filter_tensor,
      bias_tensor,
      output_tensor,
  };

  tflite::Quantize(expected_output_data, expected_output_data_quantized,
                   output_dims_count, output_scale, output_zero_point);
  return ValidateConvGoldens(
      tensors, tensors_size, expected_output_data_quantized, output_dims_count,
      conv_params, registration, output_data, 1.0 /* tolerance */);
}

}  // namespace testing
}  // namespace tflite