/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/micro/test_helpers.h"

#include <cstdarg>
#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <new>
#include <utility>  // for std::swap, used in SimpleStatefulOp::Invoke

#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/micro_utils.h"
#include "tensorflow/lite/schema/schema_generated.h"

// TODO(b/170464050): Use TFLM test-only version of schema_utils.

namespace tflite {
namespace testing {
namespace {

class StackAllocator : public flatbuffers::Allocator {
 public:
  StackAllocator() : data_(data_backing_), data_size_(0) {}

  uint8_t* allocate(size_t size) override {
    TFLITE_DCHECK((data_size_ + size) <= kStackAllocatorSize);
    uint8_t* result = data_;
    data_ += size;
    data_size_ += size;
    return result;
  }

  void deallocate(uint8_t* p, size_t) override {}

  static StackAllocator& instance() {
    // Avoid using true dynamic memory allocation to be portable to bare metal.
    static char inst_memory[sizeof(StackAllocator)];
    static StackAllocator* inst = new (inst_memory) StackAllocator;
    return *inst;
  }

  static constexpr size_t kStackAllocatorSize = 8192;

 private:
  uint8_t data_backing_[kStackAllocatorSize];
  uint8_t* data_;
  size_t data_size_;
};

flatbuffers::FlatBufferBuilder* BuilderInstance() {
  static char inst_memory[sizeof(flatbuffers::FlatBufferBuilder)];
  static flatbuffers::FlatBufferBuilder* inst =
      new (inst_memory) flatbuffers::FlatBufferBuilder(
          StackAllocator::kStackAllocatorSize, &StackAllocator::instance());
  return inst;
}

// A wrapper around the FlatBuffer API to help build models easily.
class ModelBuilder {
 public:
  typedef int32_t Tensor;
  typedef int Operator;
  typedef int Node;

  // `builder` needs to be available until BuildModel is called.
  explicit ModelBuilder(flatbuffers::FlatBufferBuilder* builder)
      : builder_(builder) {}

  // Registers an operator that will be used in the model.
  Operator RegisterOp(BuiltinOperator op, const char* custom_code);

  // Adds a tensor to the model.
  Tensor AddTensor(TensorType type, std::initializer_list<int32_t> shape) {
    return AddTensorImpl(type, /* is_variable */ false, shape);
  }

  // Adds a variable tensor to the model.
  Tensor AddVariableTensor(TensorType type,
                           std::initializer_list<int32_t> shape) {
    return AddTensorImpl(type, /* is_variable */ true, shape);
  }

  // Adds a node to the model with the given input and output Tensors.
  Node AddNode(Operator op, std::initializer_list<Tensor> inputs,
               std::initializer_list<Tensor> outputs);

  void AddMetadata(const char* description_string,
                   const int32_t* metadata_buffer_data, size_t num_elements);

  // Constructs the flatbuffer model using `builder_` and returns a pointer to
  // it. The returned model has the same lifetime as `builder_`.
  // Note: the default value of 0 for num_subgraph_inputs means all tensor
  // inputs are in the subgraph input list.
  const Model* BuildModel(std::initializer_list<Tensor> inputs,
                          std::initializer_list<Tensor> outputs,
                          size_t num_subgraph_inputs = 0);

 private:
  // Adds a tensor to the model.
  Tensor AddTensorImpl(TensorType type, bool is_variable,
                       std::initializer_list<int32_t> shape);

  flatbuffers::FlatBufferBuilder* builder_;

  static constexpr int kMaxOperatorCodes = 10;
  flatbuffers::Offset<tflite::OperatorCode> operator_codes_[kMaxOperatorCodes];
  int next_operator_code_id_ = 0;

  static constexpr int kMaxOperators = 50;
  flatbuffers::Offset<tflite::Operator> operators_[kMaxOperators];
  int next_operator_id_ = 0;

  static constexpr int kMaxTensors = 50;
  flatbuffers::Offset<tflite::Tensor> tensors_[kMaxTensors];

  static constexpr int kMaxMetadataBuffers = 10;

  static constexpr int kMaxMetadatas = 10;
  flatbuffers::Offset<Metadata> metadata_[kMaxMetadatas];

  flatbuffers::Offset<Buffer> metadata_buffers_[kMaxMetadataBuffers];

  int nbr_of_metadata_buffers_ = 0;

  int next_tensor_id_ = 0;
};
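
// For orientation, a minimal usage sketch (modeled on the Build*Model()
// helpers below; the op name and tensor shapes are illustrative only):
//
//   flatbuffers::FlatBufferBuilder* fb_builder = BuilderInstance();
//   ModelBuilder model_builder(fb_builder);
//   const int op = model_builder.RegisterOp(BuiltinOperator_CUSTOM, "my_op");
//   const int in = model_builder.AddTensor(TensorType_FLOAT32, {1, 4});
//   const int out = model_builder.AddTensor(TensorType_FLOAT32, {1, 4});
//   model_builder.AddNode(op, {in}, {out});
//   const Model* model = model_builder.BuildModel({in}, {out});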

ModelBuilder::Operator ModelBuilder::RegisterOp(BuiltinOperator op,
                                                const char* custom_code) {
  TFLITE_DCHECK(next_operator_code_id_ < kMaxOperatorCodes);
  operator_codes_[next_operator_code_id_] = tflite::CreateOperatorCodeDirect(
      *builder_, /*deprecated_builtin_code=*/0, custom_code, /*version=*/0, op);
  next_operator_code_id_++;
  return next_operator_code_id_ - 1;
}
155 
AddNode(ModelBuilder::Operator op,std::initializer_list<ModelBuilder::Tensor> inputs,std::initializer_list<ModelBuilder::Tensor> outputs)156 ModelBuilder::Node ModelBuilder::AddNode(
157     ModelBuilder::Operator op,
158     std::initializer_list<ModelBuilder::Tensor> inputs,
159     std::initializer_list<ModelBuilder::Tensor> outputs) {
160   TFLITE_DCHECK(next_operator_id_ <= kMaxOperators);
161   operators_[next_operator_id_] = tflite::CreateOperator(
162       *builder_, op, builder_->CreateVector(inputs.begin(), inputs.size()),
163       builder_->CreateVector(outputs.begin(), outputs.size()),
164       BuiltinOptions_NONE);
165   next_operator_id_++;
166   return next_operator_id_ - 1;
167 }

void ModelBuilder::AddMetadata(const char* description_string,
                               const int32_t* metadata_buffer_data,
                               size_t num_elements) {
  TFLITE_DCHECK(nbr_of_metadata_buffers_ < kMaxMetadataBuffers);
  // Metadata i refers to buffer i + 1, since the model schema requires an
  // empty buffer at index 0.
  metadata_[nbr_of_metadata_buffers_] =
      CreateMetadata(*builder_, builder_->CreateString(description_string),
                     1 + nbr_of_metadata_buffers_);

  metadata_buffers_[nbr_of_metadata_buffers_] = tflite::CreateBuffer(
      *builder_, builder_->CreateVector(
                     reinterpret_cast<const uint8_t*>(metadata_buffer_data),
                     sizeof(int32_t) * num_elements));

  nbr_of_metadata_buffers_++;
}

const Model* ModelBuilder::BuildModel(
    std::initializer_list<ModelBuilder::Tensor> inputs,
    std::initializer_list<ModelBuilder::Tensor> outputs,
    size_t num_subgraph_inputs) {
  // The model schema requires an empty buffer at index 0, hence the +1.
  size_t buffer_size = 1 + nbr_of_metadata_buffers_;
  flatbuffers::Offset<Buffer> buffers[kMaxMetadataBuffers + 1];
  buffers[0] = tflite::CreateBuffer(*builder_);

  // Place the metadata buffers first in the buffer list, since their indices
  // have already been set in AddMetadata().
  for (int i = 1; i < nbr_of_metadata_buffers_ + 1; ++i) {
    buffers[i] = metadata_buffers_[i - 1];
  }

  // TFLM only supports a single subgraph.
  constexpr size_t subgraphs_size = 1;

  // Find the number of subgraph inputs.
  if (num_subgraph_inputs == 0) {
    // This is the default case.
    num_subgraph_inputs = inputs.size();
  } else {
    // A non-zero value of num_subgraph_inputs means that some of the operator
    // input tensors are not subgraph inputs.
    TFLITE_DCHECK(num_subgraph_inputs <= inputs.size());
  }

  const flatbuffers::Offset<SubGraph> subgraphs[subgraphs_size] = {
      tflite::CreateSubGraph(
          *builder_, builder_->CreateVector(tensors_, next_tensor_id_),
          builder_->CreateVector(inputs.begin(), num_subgraph_inputs),
          builder_->CreateVector(outputs.begin(), outputs.size()),
          builder_->CreateVector(operators_, next_operator_id_),
          builder_->CreateString("test_subgraph"))};

  flatbuffers::Offset<Model> model_offset;
  if (nbr_of_metadata_buffers_ > 0) {
    model_offset = tflite::CreateModel(
        *builder_, 0,
        builder_->CreateVector(operator_codes_, next_operator_code_id_),
        builder_->CreateVector(subgraphs, subgraphs_size),
        builder_->CreateString("test_model"),
        builder_->CreateVector(buffers, buffer_size), 0,
        builder_->CreateVector(metadata_, nbr_of_metadata_buffers_));
  } else {
    model_offset = tflite::CreateModel(
        *builder_, 0,
        builder_->CreateVector(operator_codes_, next_operator_code_id_),
        builder_->CreateVector(subgraphs, subgraphs_size),
        builder_->CreateString("test_model"),
        builder_->CreateVector(buffers, buffer_size));
  }

  tflite::FinishModelBuffer(*builder_, model_offset);
  void* model_pointer = builder_->GetBufferPointer();
  const Model* model = flatbuffers::GetRoot<Model>(model_pointer);
  return model;
}

ModelBuilder::Tensor ModelBuilder::AddTensorImpl(
    TensorType type, bool is_variable, std::initializer_list<int32_t> shape) {
  TFLITE_DCHECK(next_tensor_id_ < kMaxTensors);
  tensors_[next_tensor_id_] = tflite::CreateTensor(
      *builder_, builder_->CreateVector(shape.begin(), shape.size()), type,
      /* buffer */ 0, /* name */ 0, /* quantization */ 0,
      /* is_variable */ is_variable,
      /* sparsity */ 0);
  next_tensor_id_++;
  return next_tensor_id_ - 1;
}

const Model* BuildSimpleStatefulModel() {
  flatbuffers::FlatBufferBuilder* fb_builder = BuilderInstance();

  ModelBuilder model_builder(fb_builder);

  const int op_id =
      model_builder.RegisterOp(BuiltinOperator_CUSTOM, "simple_stateful_op");
  const int input_tensor = model_builder.AddTensor(TensorType_UINT8, {3});
  const int median_tensor = model_builder.AddTensor(TensorType_UINT8, {3});
  const int invoke_count_tensor =
      model_builder.AddTensor(TensorType_INT32, {1});

  model_builder.AddNode(op_id, {input_tensor},
                        {median_tensor, invoke_count_tensor});
  return model_builder.BuildModel({input_tensor},
                                  {median_tensor, invoke_count_tensor});
}

const Model* BuildSimpleModelWithBranch() {
  flatbuffers::FlatBufferBuilder* fb_builder = BuilderInstance();

  ModelBuilder model_builder(fb_builder);
  /* Model structure
           | t0
    +------|
    |      v
    |   +---------+
    |   |   n0    |
    |   |         |
    |   +---------+
    v           +
                |
  +---------+   | t1
  |   n1    |   |
  |         |   |
  +---------+   |
     |          |
 t2  |          v
     |   +---------+
     +-->|    n2   |
         |         |
         +-------|-+
                 |t3
                 v
  */
  const int op_id =
      model_builder.RegisterOp(BuiltinOperator_CUSTOM, "mock_custom");
  const int t0 = model_builder.AddTensor(TensorType_FLOAT32, {2, 2, 3});
  const int t1 = model_builder.AddTensor(TensorType_FLOAT32, {2, 2, 3});
  const int t2 = model_builder.AddTensor(TensorType_FLOAT32, {2, 2, 3});
  const int t3 = model_builder.AddTensor(TensorType_FLOAT32, {2, 2, 3});
  model_builder.AddNode(op_id, {t0}, {t1});      // n0
  model_builder.AddNode(op_id, {t0}, {t2});      // n1
  model_builder.AddNode(op_id, {t1, t2}, {t3});  // n2
  return model_builder.BuildModel({t0}, {t3});
}

const Model* BuildModelWithOfflinePlanning(int number_of_tensors,
                                           const int32_t* metadata_buffer,
                                           NodeConnection* node_conn,
                                           int num_conns,
                                           int num_subgraph_inputs) {
  flatbuffers::FlatBufferBuilder* fb_builder = BuilderInstance();

  ModelBuilder model_builder(fb_builder);

  const int op_id =
      model_builder.RegisterOp(BuiltinOperator_CUSTOM, "mock_custom");

  for (int i = 0; i < number_of_tensors; ++i) {
    model_builder.AddTensor(TensorType_FLOAT32, {2, 2, 3});
  }

  for (int i = 0; i < num_conns; ++i) {
    model_builder.AddNode(op_id, node_conn[i].input, node_conn[i].output);
  }

  model_builder.AddMetadata(
      "OfflineMemoryAllocation", metadata_buffer,
      number_of_tensors + tflite::testing::kOfflinePlannerHeaderSize);

  return model_builder.BuildModel(
      node_conn[0].input, node_conn[num_conns - 1].output, num_subgraph_inputs);
}
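
// A sketch of how a test might drive this helper. NodeConnection comes from
// test_helpers.h; the metadata header contents shown here are an assumption
// (a kOfflinePlannerHeaderSize-entry header followed by one arena offset per
// tensor), and all numbers are illustrative:
//
//   NodeConnection node_conns[] = {{/*input=*/{0}, /*output=*/{1}},
//                                  {/*input=*/{1}, /*output=*/{2}}};
//   const int32_t metadata[] = {/*header*/ 1, 0, 3,
//                               /*per-tensor arena offsets*/ 0, 48, 96};
//   const Model* model = BuildModelWithOfflinePlanning(
//       /*number_of_tensors=*/3, metadata, node_conns, /*num_conns=*/2,
//       /*num_subgraph_inputs=*/1);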

const Model* BuildSimpleMockModel() {
  using flatbuffers::Offset;
  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();

  constexpr size_t buffer_data_size = 1;
  const uint8_t buffer_data[buffer_data_size] = {21};
  constexpr size_t buffers_size = 2;
  const Offset<Buffer> buffers[buffers_size] = {
      CreateBuffer(*builder),
      CreateBuffer(*builder,
                   builder->CreateVector(buffer_data, buffer_data_size))};
  constexpr size_t tensor_shape_size = 1;
  const int32_t tensor_shape[tensor_shape_size] = {1};
  constexpr size_t tensors_size = 4;
  const Offset<Tensor> tensors[tensors_size] = {
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT32, 0,
                   builder->CreateString("test_input_tensor"), 0, false),
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_UINT8, 1,
                   builder->CreateString("test_weight_tensor"), 0, false),
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT32, 0,
                   builder->CreateString("test_output_tensor"), 0, false),
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT32, 0,
                   builder->CreateString("test_output2_tensor"), 0, false),
  };
  constexpr size_t inputs_size = 1;
  const int32_t inputs[inputs_size] = {0};
  constexpr size_t outputs_size = 2;
  const int32_t outputs[outputs_size] = {2, 3};
  constexpr size_t operator_inputs_size = 2;
  const int32_t operator_inputs[operator_inputs_size] = {0, 1};
  constexpr size_t operator_outputs_size = 1;
  const int32_t operator_outputs[operator_outputs_size] = {2};
  const int32_t operator2_outputs[operator_outputs_size] = {3};
  constexpr size_t operators_size = 2;
  const Offset<Operator> operators[operators_size] = {
      CreateOperator(
          *builder, 0,
          builder->CreateVector(operator_inputs, operator_inputs_size),
          builder->CreateVector(operator_outputs, operator_outputs_size),
          BuiltinOptions_NONE),
      CreateOperator(
          *builder, 0,
          builder->CreateVector(operator_inputs, operator_inputs_size),
          builder->CreateVector(operator2_outputs, operator_outputs_size),
          BuiltinOptions_NONE),
  };
  constexpr size_t subgraphs_size = 1;
  const Offset<SubGraph> subgraphs[subgraphs_size] = {
      CreateSubGraph(*builder, builder->CreateVector(tensors, tensors_size),
                     builder->CreateVector(inputs, inputs_size),
                     builder->CreateVector(outputs, outputs_size),
                     builder->CreateVector(operators, operators_size),
                     builder->CreateString("test_subgraph"))};
  constexpr size_t operator_codes_size = 1;
  const Offset<OperatorCode> operator_codes[operator_codes_size] = {
      CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0,
                               "mock_custom",
                               /*version=*/0, BuiltinOperator_CUSTOM)};
  const Offset<Model> model_offset = CreateModel(
      *builder, 0, builder->CreateVector(operator_codes, operator_codes_size),
      builder->CreateVector(subgraphs, subgraphs_size),
      builder->CreateString("test_model"),
      builder->CreateVector(buffers, buffers_size));
  FinishModelBuffer(*builder, model_offset);
  void* model_pointer = builder->GetBufferPointer();
  const Model* model = flatbuffers::GetRoot<Model>(model_pointer);
  return model;
}

const Model* BuildComplexMockModel() {
  using flatbuffers::Offset;
  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();

  constexpr size_t buffer_data_size = 1;
  const uint8_t buffer_data_1[buffer_data_size] = {21};
  const uint8_t buffer_data_2[buffer_data_size] = {21};
  const uint8_t buffer_data_3[buffer_data_size] = {21};
  constexpr size_t buffers_size = 7;
  const Offset<Buffer> buffers[buffers_size] = {
      // Op 1 buffers:
      CreateBuffer(*builder),
      CreateBuffer(*builder),
      CreateBuffer(*builder,
                   builder->CreateVector(buffer_data_1, buffer_data_size)),
      // Op 2 buffers:
      CreateBuffer(*builder),
      CreateBuffer(*builder,
                   builder->CreateVector(buffer_data_2, buffer_data_size)),
      // Op 3 buffers:
      CreateBuffer(*builder),
      CreateBuffer(*builder,
                   builder->CreateVector(buffer_data_3, buffer_data_size)),
  };
  constexpr size_t tensor_shape_size = 1;
  const int32_t tensor_shape[tensor_shape_size] = {1};

  constexpr size_t tensors_size = 10;
  const Offset<Tensor> tensors[tensors_size] = {
      // Op 1 inputs:
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_INT32, 0, builder->CreateString("test_input_tensor_1"), 0,
          false /* is_variable */),
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_INT32, 1, builder->CreateString("test_variable_tensor_1"),
          0, true /* is_variable */),
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_UINT8, 2, builder->CreateString("test_weight_tensor_1"), 0,
          false /* is_variable */),
      // Op 1 output / Op 2 input:
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_INT32, 0, builder->CreateString("test_output_tensor_1"), 0,
          false /* is_variable */),
      // Op 2 inputs:
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_INT32, 1, builder->CreateString("test_variable_tensor_2"),
          0, true /* is_variable */),
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_UINT8, 2, builder->CreateString("test_weight_tensor_2"), 0,
          false /* is_variable */),
      // Op 2 output / Op 3 input:
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_INT32, 0, builder->CreateString("test_output_tensor_2"), 0,
          false /* is_variable */),
      // Op 3 inputs:
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_INT32, 1, builder->CreateString("test_variable_tensor_3"),
          0, true /* is_variable */),
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_UINT8, 2, builder->CreateString("test_weight_tensor_3"), 0,
          false /* is_variable */),
      // Op 3 output:
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_INT32, 0, builder->CreateString("test_output_tensor_3"), 0,
          false /* is_variable */),
  };

  constexpr size_t operators_size = 3;
  Offset<Operator> operators[operators_size];
  {
    // Set Op 1 attributes:
    constexpr size_t operator_inputs_size = 3;
    const int32_t operator_inputs[operator_inputs_size] = {0, 1, 2};
    constexpr size_t operator_outputs_size = 1;
    const int32_t operator_outputs[operator_outputs_size] = {3};

    operators[0] = CreateOperator(
        *builder, 0,
        builder->CreateVector(operator_inputs, operator_inputs_size),
        builder->CreateVector(operator_outputs, operator_outputs_size),
        BuiltinOptions_NONE);
  }

  {
    // Set Op 2 attributes:
    constexpr size_t operator_inputs_size = 3;
    const int32_t operator_inputs[operator_inputs_size] = {3, 4, 5};
    constexpr size_t operator_outputs_size = 1;
    const int32_t operator_outputs[operator_outputs_size] = {6};

    operators[1] = CreateOperator(
        *builder, 0,
        builder->CreateVector(operator_inputs, operator_inputs_size),
        builder->CreateVector(operator_outputs, operator_outputs_size),
        BuiltinOptions_NONE);
  }

  {
    // Set Op 3 attributes:
    constexpr size_t operator_inputs_size = 3;
    const int32_t operator_inputs[operator_inputs_size] = {6, 7, 8};
    constexpr size_t operator_outputs_size = 1;
    const int32_t operator_outputs[operator_outputs_size] = {9};

    operators[2] = CreateOperator(
        *builder, 0,
        builder->CreateVector(operator_inputs, operator_inputs_size),
        builder->CreateVector(operator_outputs, operator_outputs_size),
        BuiltinOptions_NONE);
  }

  constexpr size_t inputs_size = 1;
  const int32_t inputs[inputs_size] = {0};
  constexpr size_t outputs_size = 1;
  const int32_t outputs[outputs_size] = {9};

  constexpr size_t subgraphs_size = 1;
  const Offset<SubGraph> subgraphs[subgraphs_size] = {
      CreateSubGraph(*builder, builder->CreateVector(tensors, tensors_size),
                     builder->CreateVector(inputs, inputs_size),
                     builder->CreateVector(outputs, outputs_size),
                     builder->CreateVector(operators, operators_size),
                     builder->CreateString("test_subgraph"))};

  constexpr size_t operator_codes_size = 1;
  const Offset<OperatorCode> operator_codes[operator_codes_size] = {
      CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0,
                               "mock_custom",
                               /*version=*/0, BuiltinOperator_CUSTOM)};

  const Offset<Model> model_offset = CreateModel(
      *builder, 0, builder->CreateVector(operator_codes, operator_codes_size),
      builder->CreateVector(subgraphs, subgraphs_size),
      builder->CreateString("test_model"),
      builder->CreateVector(buffers, buffers_size));

  FinishModelBuffer(*builder, model_offset);
  void* model_pointer = builder->GetBufferPointer();
  const Model* model = flatbuffers::GetRoot<Model>(model_pointer);
  return model;
}

const Model* BuildSimpleMultipleInputsModel() {
  using flatbuffers::Offset;
  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();

  constexpr size_t buffers_size = 1;
  const Offset<Buffer> buffers[buffers_size] = {
      CreateBuffer(*builder),
  };
  constexpr size_t tensor_shape_size = 1;
  const int32_t tensor_shape[tensor_shape_size] = {1};
  constexpr size_t tensors_size = 4;
  const Offset<Tensor> tensors[tensors_size] = {
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT32, 0,
                   builder->CreateString("test_input_tensor1"), 0, false),
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT8, 0,
                   builder->CreateString("test_input_tensor2"), 0, false),
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT32, 0,
                   builder->CreateString("test_input_tensor3"), 0, false),
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT32, 0,
                   builder->CreateString("test_output_tensor"), 0, false),
  };
  constexpr size_t inputs_size = 3;
  const int32_t inputs[inputs_size] = {0, 1, 2};
  constexpr size_t outputs_size = 1;
  const int32_t outputs[outputs_size] = {3};
  constexpr size_t operator_inputs_size = 3;
  const int32_t operator_inputs[operator_inputs_size] = {0, 1, 2};
  constexpr size_t operator_outputs_size = 1;
  const int32_t operator_outputs[operator_outputs_size] = {3};
  constexpr size_t operators_size = 1;
  const Offset<Operator> operators[operators_size] = {
      CreateOperator(
          *builder, 0,
          builder->CreateVector(operator_inputs, operator_inputs_size),
          builder->CreateVector(operator_outputs, operator_outputs_size),
          BuiltinOptions_NONE),
  };
  constexpr size_t subgraphs_size = 1;
  const Offset<SubGraph> subgraphs[subgraphs_size] = {
      CreateSubGraph(*builder, builder->CreateVector(tensors, tensors_size),
                     builder->CreateVector(inputs, inputs_size),
                     builder->CreateVector(outputs, outputs_size),
                     builder->CreateVector(operators, operators_size),
                     builder->CreateString("test_subgraph"))};
  constexpr size_t operator_codes_size = 1;
  const Offset<OperatorCode> operator_codes[operator_codes_size] = {
      CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0,
                               "multiple_inputs_op",
                               /*version=*/0, BuiltinOperator_CUSTOM)};
  const Offset<Model> model_offset = CreateModel(
      *builder, 0, builder->CreateVector(operator_codes, operator_codes_size),
      builder->CreateVector(subgraphs, subgraphs_size),
      builder->CreateString("test_model"),
      builder->CreateVector(buffers, buffers_size));
  FinishModelBuffer(*builder, model_offset);
  void* model_pointer = builder->GetBufferPointer();
  const Model* model = flatbuffers::GetRoot<Model>(model_pointer);
  return model;
}

}  // namespace

const TfLiteRegistration* SimpleStatefulOp::getRegistration() {
  return GetMutableRegistration();
}

TfLiteRegistration* SimpleStatefulOp::GetMutableRegistration() {
  static TfLiteRegistration r;
  r.init = Init;
  r.prepare = Prepare;
  r.invoke = Invoke;
  return &r;
}

void* SimpleStatefulOp::Init(TfLiteContext* context, const char* buffer,
                             size_t length) {
  TFLITE_DCHECK(context->AllocateBufferForEval == nullptr);
  TFLITE_DCHECK(context->GetScratchBuffer == nullptr);
  TFLITE_DCHECK(context->RequestScratchBufferInArena == nullptr);

  void* raw = context->AllocatePersistentBuffer(context, sizeof(OpData));
  OpData* data = reinterpret_cast<OpData*>(raw);
  *data = {};
  return raw;
}

TfLiteStatus SimpleStatefulOp::Prepare(TfLiteContext* context,
                                       TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);

  // Make sure that the input is uint8_t with at least one data entry.
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
  if (input->type != kTfLiteUInt8) return kTfLiteError;
  if (NumElements(input->dims) == 0) return kTfLiteError;

  // Allocate a temporary buffer of the same size as the input for sorting.
  TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena(
      context, sizeof(uint8_t) * NumElements(input->dims),
      &data->sorting_buffer));
  // We can interleave scratch / persistent buffer allocation.
  data->invoke_count = reinterpret_cast<int*>(
      context->AllocatePersistentBuffer(context, sizeof(int)));
  *data->invoke_count = 0;

  return kTfLiteOk;
}

TfLiteStatus SimpleStatefulOp::Invoke(TfLiteContext* context,
                                      TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  *data->invoke_count += 1;

  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
  const uint8_t* input_data = GetTensorData<uint8_t>(input);
  int size = NumElements(input->dims);

  uint8_t* sorting_buffer = reinterpret_cast<uint8_t*>(
      context->GetScratchBuffer(context, data->sorting_buffer));
  // Copy the input data to the sorting buffer. We don't want to mutate the
  // input tensor, as it might be used by another node.
  for (int i = 0; i < size; i++) {
    sorting_buffer[i] = input_data[i];
  }

  // In-place insertion sort on `sorting_buffer`.
  for (int i = 1; i < size; i++) {
    for (int j = i; j > 0 && sorting_buffer[j] < sorting_buffer[j - 1]; j--) {
      std::swap(sorting_buffer[j], sorting_buffer[j - 1]);
    }
  }

  TfLiteTensor* median;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kMedianTensor, &median));
  uint8_t* median_data = GetTensorData<uint8_t>(median);
  TfLiteTensor* invoke_count;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kInvokeCount, &invoke_count));
  int32_t* invoke_count_data = GetTensorData<int32_t>(invoke_count);

  median_data[0] = sorting_buffer[size / 2];
  invoke_count_data[0] = *data->invoke_count;
  return kTfLiteOk;
}

const TfLiteRegistration* MockCustom::getRegistration() {
  return GetMutableRegistration();
}

TfLiteRegistration* MockCustom::GetMutableRegistration() {
  static TfLiteRegistration r;
  r.init = Init;
  r.prepare = Prepare;
  r.invoke = Invoke;
  r.free = Free;
  return &r;
}

void* MockCustom::Init(TfLiteContext* context, const char* buffer,
                       size_t length) {
  // We don't support delegates in TFL Micro. This is a weak check to verify
  // that the context struct is zero-initialized.
  TFLITE_DCHECK(context->ReplaceNodeSubsetsWithDelegateKernels == nullptr);
  freed_ = false;
  // Do nothing.
  return nullptr;
}

void MockCustom::Free(TfLiteContext* context, void* buffer) { freed_ = true; }

TfLiteStatus MockCustom::Prepare(TfLiteContext* context, TfLiteNode* node) {
  return kTfLiteOk;
}

TfLiteStatus MockCustom::Invoke(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  const int32_t* input_data = input->data.i32;
  const TfLiteTensor* weight;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &weight));
  const uint8_t* weight_data = weight->data.uint8;
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  int32_t* output_data = output->data.i32;
  // Zero the output first to catch an output tensor that shares memory with
  // an input tensor.
  output_data[0] = 0;
  output_data[0] = input_data[0] + weight_data[0];
  return kTfLiteOk;
}

bool MockCustom::freed_ = false;

const TfLiteRegistration* MultipleInputs::getRegistration() {
  return GetMutableRegistration();
}

TfLiteRegistration* MultipleInputs::GetMutableRegistration() {
  static TfLiteRegistration r;
  r.init = Init;
  r.prepare = Prepare;
  r.invoke = Invoke;
  r.free = Free;
  return &r;
}

void* MultipleInputs::Init(TfLiteContext* context, const char* buffer,
                           size_t length) {
  // We don't support delegates in TFL Micro. This is a weak check to verify
  // that the context struct is zero-initialized.
  TFLITE_DCHECK(context->ReplaceNodeSubsetsWithDelegateKernels == nullptr);
  freed_ = false;
  // Do nothing.
  return nullptr;
}

void MultipleInputs::Free(TfLiteContext* context, void* buffer) {
  freed_ = true;
}

TfLiteStatus MultipleInputs::Prepare(TfLiteContext* context, TfLiteNode* node) {
  return kTfLiteOk;
}

TfLiteStatus MultipleInputs::Invoke(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  const int32_t* input_data = input->data.i32;
  const TfLiteTensor* input1;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &input1));
  const int32_t* input_data1 = input1->data.i32;
  const TfLiteTensor* input2;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &input2));
  const int32_t* input_data2 = input2->data.i32;

  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  int32_t* output_data = output->data.i32;
  // Zero the output first to catch an output tensor that shares memory with
  // an input tensor.
  output_data[0] = 0;
  output_data[0] = input_data[0] + input_data1[0] + input_data2[0];
  return kTfLiteOk;
}

bool MultipleInputs::freed_ = false;

AllOpsResolver GetOpResolver() {
  AllOpsResolver op_resolver;
  op_resolver.AddCustom("mock_custom", MockCustom::GetMutableRegistration());
  op_resolver.AddCustom("simple_stateful_op",
                        SimpleStatefulOp::GetMutableRegistration());
  op_resolver.AddCustom("multiple_inputs_op",
                        MultipleInputs::GetMutableRegistration());
  return op_resolver;
}
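
// A sketch of how these helpers are typically combined in a test. The
// MicroInterpreter and MicroErrorReporter types are assumed from the
// tensorflow/lite/micro/ headers, and the arena size is illustrative only:
//
//   const Model* model = GetSimpleMockModel();
//   AllOpsResolver op_resolver = GetOpResolver();
//   MicroErrorReporter error_reporter;
//   uint8_t arena[2048];
//   MicroInterpreter interpreter(model, op_resolver, arena, sizeof(arena),
//                                &error_reporter);
//   if (interpreter.AllocateTensors() != kTfLiteOk) { /* fail the test */ }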

const Model* GetSimpleMockModel() {
  static Model* model = nullptr;
  if (!model) {
    model = const_cast<Model*>(BuildSimpleMockModel());
  }
  return model;
}

const Model* GetSimpleMultipleInputsModel() {
  static Model* model = nullptr;
  if (!model) {
    model = const_cast<Model*>(BuildSimpleMultipleInputsModel());
  }
  return model;
}

const Model* GetComplexMockModel() {
  static Model* model = nullptr;
  if (!model) {
    model = const_cast<Model*>(BuildComplexMockModel());
  }
  return model;
}

const Model* GetSimpleModelWithBranch() {
  static Model* model = nullptr;
  if (!model) {
    model = const_cast<Model*>(BuildSimpleModelWithBranch());
  }
  return model;
}

const Model* GetModelWithOfflinePlanning(int num_tensors,
                                         const int32_t* metadata_buffer,
                                         NodeConnection* node_conn,
                                         int num_conns,
                                         int num_subgraph_inputs) {
  const Model* model = BuildModelWithOfflinePlanning(
      num_tensors, metadata_buffer, node_conn, num_conns, num_subgraph_inputs);
  return model;
}

const Model* GetSimpleStatefulModel() {
  static Model* model = nullptr;
  if (!model) {
    model = const_cast<Model*>(BuildSimpleStatefulModel());
  }
  return model;
}

const Tensor* Create1dFlatbufferTensor(int size, bool is_variable) {
  using flatbuffers::Offset;
  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();
  constexpr size_t tensor_shape_size = 1;
  const int32_t tensor_shape[tensor_shape_size] = {size};
  const Offset<Tensor> tensor_offset = CreateTensor(
      *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
      TensorType_INT32, 0, builder->CreateString("test_tensor"), 0,
      is_variable);
  builder->Finish(tensor_offset);
  void* tensor_pointer = builder->GetBufferPointer();
  const Tensor* tensor = flatbuffers::GetRoot<Tensor>(tensor_pointer);
  return tensor;
}

const Tensor* CreateQuantizedFlatbufferTensor(int size) {
  using flatbuffers::Offset;
  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();
  const Offset<QuantizationParameters> quant_params =
      CreateQuantizationParameters(
          *builder,
          /*min=*/builder->CreateVector<float>({0.1f}),
          /*max=*/builder->CreateVector<float>({0.2f}),
          /*scale=*/builder->CreateVector<float>({0.3f}),
          /*zero_point=*/builder->CreateVector<int64_t>({100ll}));

  constexpr size_t tensor_shape_size = 1;
  const int32_t tensor_shape[tensor_shape_size] = {size};
  const Offset<Tensor> tensor_offset = CreateTensor(
      *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
      TensorType_INT32, 0, builder->CreateString("test_tensor"), quant_params,
      false);
  builder->Finish(tensor_offset);
  void* tensor_pointer = builder->GetBufferPointer();
  const Tensor* tensor = flatbuffers::GetRoot<Tensor>(tensor_pointer);
  return tensor;
}

const Tensor* CreateMissingQuantizationFlatbufferTensor(int size) {
  using flatbuffers::Offset;
  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();
  const Offset<QuantizationParameters> quant_params =
      CreateQuantizationParameters(*builder, 0, 0, 0, 0,
                                   QuantizationDetails_NONE, 0, 0);
  constexpr size_t tensor_shape_size = 1;
  const int32_t tensor_shape[tensor_shape_size] = {size};
  const Offset<Tensor> tensor_offset = CreateTensor(
      *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
      TensorType_INT32, 0, builder->CreateString("test_tensor"), quant_params,
      false);
  builder->Finish(tensor_offset);
  void* tensor_pointer = builder->GetBufferPointer();
  const Tensor* tensor = flatbuffers::GetRoot<Tensor>(tensor_pointer);
  return tensor;
}

const flatbuffers::Vector<flatbuffers::Offset<Buffer>>*
CreateFlatbufferBuffers() {
  using flatbuffers::Offset;
  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();
  constexpr size_t buffers_size = 1;
  const Offset<Buffer> buffers[buffers_size] = {
      CreateBuffer(*builder),
  };
  const flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>>
      buffers_offset = builder->CreateVector(buffers, buffers_size);
  builder->Finish(buffers_offset);
  void* buffers_pointer = builder->GetBufferPointer();
  const flatbuffers::Vector<flatbuffers::Offset<Buffer>>* result =
      flatbuffers::GetRoot<flatbuffers::Vector<flatbuffers::Offset<Buffer>>>(
          buffers_pointer);
  return result;
}

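// Variant of strcmp that tolerates null arguments: returns -1 if either
// argument is nullptr (where strcmp would be undefined), otherwise the usual
// negative/zero/positive comparison result.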
int TestStrcmp(const char* a, const char* b) {
  if ((a == nullptr) || (b == nullptr)) {
    return -1;
  }
  while ((*a != 0) && (*a == *b)) {
    a++;
    b++;
  }
  return *reinterpret_cast<const unsigned char*>(a) -
         *reinterpret_cast<const unsigned char*>(b);
}

// Wrapper to forward kernel errors to the interpreter's error reporter.
void ReportOpError(struct TfLiteContext* context, const char* format, ...) {
#ifndef TF_LITE_STRIP_ERROR_STRINGS
  ErrorReporter* error_reporter = static_cast<ErrorReporter*>(context->impl_);
  va_list args;
  va_start(args, format);
  TF_LITE_REPORT_ERROR(error_reporter, format, args);
  va_end(args);
#endif
}

// Create a TfLiteIntArray from an array of ints.  The first element in the
// supplied array must be the size of the array expressed as an int.
TfLiteIntArray* IntArrayFromInts(const int* int_array) {
  return const_cast<TfLiteIntArray*>(
      reinterpret_cast<const TfLiteIntArray*>(int_array));
}
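
// For example, a rank-2 shape of {1, 3} is expressed as a length-prefixed
// array (the values here are illustrative, not from the original file):
//
//   int dims_data[] = {2, 1, 3};  // size first, then the two dimensions
//   TfLiteIntArray* dims = IntArrayFromInts(dims_data);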

// Create a TfLiteFloatArray from an array of floats.  The first element in the
// supplied array must be the size of the array expressed as a float.
TfLiteFloatArray* FloatArrayFromFloats(const float* floats) {
  static_assert(sizeof(float) == sizeof(int),
                "assumes sizeof(float) == sizeof(int) to perform casting");
  int size = static_cast<int>(floats[0]);
  *reinterpret_cast<int32_t*>(const_cast<float*>(floats)) = size;
  return reinterpret_cast<TfLiteFloatArray*>(const_cast<float*>(floats));
}
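
// For example, two scale values are expressed as (illustrative only):
//
//   float scales_data[] = {2, 0.5f, 0.25f};  // length first, then the values
//   TfLiteFloatArray* scales = FloatArrayFromFloats(scales_data);
//
// Note that the length slot is rewritten in place with its int
// representation, so the backing array must be mutable.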

TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized,
                                       TfLiteIntArray* dims, float input_scale,
                                       float weights_scale, bool is_variable) {
  float bias_scale = input_scale * weights_scale;
  tflite::SymmetricQuantize(data, quantized, ElementCount(*dims), bias_scale);

  // Quantized int32_t tensors always have a zero point of 0, since the range
  // of int32_t values is large, and because zero point costs extra cycles
  // during processing.
  TfLiteTensor result =
      CreateQuantizedTensor(quantized, dims, bias_scale, 0, is_variable);
  return result;
}
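
// As a worked example (illustrative numbers): with input_scale = 0.5f and
// weights_scale = 0.25f, bias_scale is 0.5f * 0.25f = 0.125f, so a float bias
// value of 1.0f symmetrically quantizes to 1.0f / 0.125f = 8.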

// Quantizes int32_t bias tensor with per-channel weights determined by input
// scale multiplied by weight scale for each channel.
TfLiteTensor CreatePerChannelQuantizedBiasTensor(
    const float* input, int32_t* quantized, TfLiteIntArray* dims,
    float input_scale, float* weight_scales, float* scales, int* zero_points,
    TfLiteAffineQuantization* affine_quant, int quantized_dimension,
    bool is_variable) {
  int input_size = ElementCount(*dims);
  int num_channels = dims->data[quantized_dimension];
  // The first element is reserved for the array length.
  zero_points[0] = num_channels;
  scales[0] = static_cast<float>(num_channels);
  float* scales_array = &scales[1];
  for (int i = 0; i < num_channels; i++) {
    scales_array[i] = input_scale * weight_scales[i];
    zero_points[i + 1] = 0;
  }

  SymmetricPerChannelQuantize<int32_t>(input, quantized, input_size,
                                       num_channels, scales_array);

  affine_quant->scale = FloatArrayFromFloats(scales);
  affine_quant->zero_point = IntArrayFromInts(zero_points);
  affine_quant->quantized_dimension = quantized_dimension;

  TfLiteTensor result = CreateTensor(quantized, dims, is_variable);
  result.quantization = {kTfLiteAffineQuantization, affine_quant};
  return result;
}
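
// Note for callers: element 0 of `scales` and `zero_points` holds the channel
// count (matching the length-prefixed convention of FloatArrayFromFloats() and
// IntArrayFromInts() above), so both arrays must provide room for
// num_channels + 1 elements.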

TfLiteTensor CreateSymmetricPerChannelQuantizedTensor(
    const float* input, int8_t* quantized, TfLiteIntArray* dims, float* scales,
    int* zero_points, TfLiteAffineQuantization* affine_quant,
    int quantized_dimension, bool is_variable) {
  int channel_count = dims->data[quantized_dimension];
  scales[0] = static_cast<float>(channel_count);
  zero_points[0] = channel_count;

  SignedSymmetricPerChannelQuantize(input, dims, quantized_dimension, quantized,
                                    &scales[1]);

  for (int i = 0; i < channel_count; i++) {
    zero_points[i + 1] = 0;
  }

  affine_quant->scale = FloatArrayFromFloats(scales);
  affine_quant->zero_point = IntArrayFromInts(zero_points);
  affine_quant->quantized_dimension = quantized_dimension;

  TfLiteTensor result = CreateTensor(quantized, dims, is_variable);
  result.quantization = {kTfLiteAffineQuantization, affine_quant};
  return result;
}

size_t GetModelTensorCount(const Model* model) {
  auto* subgraphs = model->subgraphs();
  if (subgraphs) {
    return (*subgraphs)[0]->tensors()->size();
  }
  return 0;
}

}  // namespace testing
}  // namespace tflite