/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/interpreter.h"

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <map>
#include <memory>
#include <new>
#include <string>
#include <utility>
#include <vector>

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "third_party/eigen3/Eigen/Core"
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/external_cpu_backend_context.h"
#include "tensorflow/lite/interpreter_test_util.h"
#include "tensorflow/lite/kernels/builtin_op_kernels.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/string_util.h"
#include "tensorflow/lite/testing/util.h"
#include "tensorflow/lite/util.h"

namespace tflite {

namespace ops {
namespace builtin {
TfLiteRegistration* Register_PADV2();
TfLiteRegistration* Register_NEG();
}  // namespace builtin
}  // namespace ops

namespace {

using ::testing::IsEmpty;

// Make an interpreter that has no tensors and no nodes
TEST(BasicInterpreter, ZeroInterpreter) {
  testing::internal::CaptureStderr();

  Interpreter interpreter;

#ifndef NDEBUG
  const char* kExpectedLog = "INFO: Initialized TensorFlow Lite runtime";
#else
  const char* kExpectedLog = "";
#endif
  EXPECT_THAT(testing::internal::GetCapturedStderr(),
              testing::HasSubstr(kExpectedLog));

  interpreter.SetInputs({});
  interpreter.SetOutputs({});
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);

  // Creating a new interpreter should not redundantly log runtime init.
  testing::internal::CaptureStderr();
  Interpreter interpreter2;
  EXPECT_THAT(testing::internal::GetCapturedStderr(), IsEmpty());
}

// Test various error conditions.
TEST(BasicInterpreter, InvokeInvalidModel) {
  Interpreter interpreter;
  ASSERT_NE(interpreter.Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
}

TEST(BasicInterpreter, TestAllocateTensorsResetVariableTensorsFloatAndHybrid) {
  Interpreter interpreter;
  int tensor_index;
  ASSERT_EQ(interpreter.AddTensors(1, &tensor_index), kTfLiteOk);
  constexpr int kTensorSize = 16;
  TfLiteQuantizationParams quant;
  interpreter.SetTensorParametersReadWrite(tensor_index, kTfLiteFloat32, "",
                                           {kTensorSize}, quant,
                                           /*is_variable=*/true);
  interpreter.SetVariables({tensor_index});
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  TfLiteTensor* tensor = interpreter.tensor(tensor_index);
  // Ensure that variable tensors are reset to zero.
  for (int i = 0; i < kTensorSize; ++i) {
    ASSERT_EQ(tensor->data.f[i], 0.0f);
  }
}

TEST(BasicInterpreter, TestAllocateTensorsResetVariableTensorsInt8) {
  Interpreter interpreter;
  int tensor_index;
  ASSERT_EQ(interpreter.AddTensors(1, &tensor_index), kTfLiteOk);
  constexpr int kTensorSize = 16;
  TfLiteQuantizationParams quant;
  quant.scale = 0.15;
  quant.zero_point = -3;
  interpreter.SetTensorParametersReadWrite(tensor_index, kTfLiteInt8, "",
                                           {kTensorSize}, quant,
                                           /*is_variable=*/true);
  interpreter.SetVariables({tensor_index});
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  TfLiteTensor* tensor = interpreter.tensor(tensor_index);
  // Ensure that variable tensors are reset to the zero point.
  for (int i = 0; i < kTensorSize; ++i) {
    ASSERT_EQ(tensor->data.int8[i], -3);
  }
}
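
// A minimal sketch (hypothetical helper, not part of the original suite) of
// the invariant the two tests above exercise: after AllocateTensors(), every
// element of a variable tensor equals its zero point, which is simply 0 for
// float tensors.
template <typename T>
void ExpectVariableTensorResetTo(const TfLiteTensor* tensor, T zero_point) {
  const T* data = reinterpret_cast<const T*>(tensor->data.raw);
  const size_t num_elements = tensor->bytes / sizeof(T);
  for (size_t i = 0; i < num_elements; ++i) {
    EXPECT_EQ(data[i], zero_point);
  }
}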

// Test size accessor functions.
TEST(BasicInterpreter, TestSizeFunctions) {
  Interpreter interpreter;
  int base_index;
  ASSERT_EQ(interpreter.nodes_size(), 0);
  ASSERT_EQ(interpreter.tensors_size(), 0);
  ASSERT_EQ(interpreter.AddTensors(2, &base_index), kTfLiteOk);
  ASSERT_EQ(interpreter.tensors_size(), 2);
  ASSERT_EQ(base_index, 0);
  ASSERT_EQ(interpreter.AddTensors(3, &base_index), kTfLiteOk);
  ASSERT_EQ(interpreter.tensors_size(), 5);
  ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
  ASSERT_EQ(interpreter.tensors_size(), 6);
  ASSERT_EQ(base_index, 2);
}

// Test if invalid indices make a model inconsistent (and conversely if
// valid indices keep a model consistent).
TEST(BasicInterpreter, InconsistentModel) {
  // Invalid inputs
  {
    Interpreter interpreter;
    ASSERT_NE(interpreter.SetInputs({5}), kTfLiteOk);
    ASSERT_NE(interpreter.AllocateTensors(), kTfLiteOk);
    ASSERT_NE(interpreter.Invoke(), kTfLiteOk);
    ASSERT_EQ(interpreter.inputs(), std::vector<int>());
  }
  // Invalid outputs
  {
    Interpreter interpreter;
    ASSERT_NE(interpreter.SetOutputs({5}), kTfLiteOk);
    ASSERT_NE(interpreter.AllocateTensors(), kTfLiteOk);
    ASSERT_NE(interpreter.Invoke(), kTfLiteOk);
    ASSERT_EQ(interpreter.outputs(), std::vector<int>());
  }
  // Invalid node inputs
  {
    Interpreter interpreter;
    TfLiteRegistration registration = {nullptr, nullptr, nullptr, nullptr};
    ASSERT_NE(interpreter.AddNodeWithParameters({3}, {0}, nullptr, 0, nullptr,
                                                &registration),
              kTfLiteOk);
    ASSERT_NE(interpreter.AllocateTensors(), kTfLiteOk);
    ASSERT_NE(interpreter.Invoke(), kTfLiteOk);
  }
  // Valid inputs and outputs and a node with valid inputs and outputs
  {
    Interpreter interpreter;
    ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
    TfLiteRegistration registration = {nullptr, nullptr, nullptr, nullptr};
    ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
    ASSERT_EQ(interpreter.SetOutputs({0}), kTfLiteOk);
    ASSERT_EQ(interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr,
                                                &registration),
              kTfLiteOk);
  }
}

// Make an interpreter that has tensors but no ops
TEST(BasicInterpreter, CheckAllocate) {
  struct {
    TfLiteType type;
    size_t size;
  } cases[] = {
      {kTfLiteFloat32, sizeof(float)},         {kTfLiteInt32, sizeof(int32_t)},
      {kTfLiteUInt32, sizeof(uint32_t)},       {kTfLiteUInt8, sizeof(uint8_t)},
      {kTfLiteInt64, sizeof(int64_t)},         {kTfLiteInt16, sizeof(int16_t)},
      {kTfLiteFloat16, sizeof(TfLiteFloat16)},
  };

  for (auto test : cases) {
    Interpreter interpreter;
    ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
    interpreter.SetInputs({0, 1});
    interpreter.SetOutputs({});
    TfLiteQuantizationParams quant;

    interpreter.SetTensorParametersReadWrite(0, test.type, "", {3}, quant);
    interpreter.SetTensorParametersReadWrite(1, test.type, "", {4}, quant);
    ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
    ASSERT_EQ(interpreter.tensor(0)->bytes, 3 * test.size);
    ASSERT_NE(interpreter.tensor(0)->data.raw, nullptr);
    ASSERT_EQ(interpreter.tensor(1)->bytes, 4 * test.size);
    ASSERT_NE(interpreter.tensor(1)->data.raw, nullptr);
  }
}

TEST(BasicInterpreter, CheckQuantization) {
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
  interpreter.SetInputs({0, 1});
  interpreter.SetOutputs({});
  TfLiteType tensor_type = kTfLiteInt8;
  const uint8_t int8s[] = {3, 4};
  float scale = 0.5f;
  int32_t zero_point = 12;

  TfLiteQuantization rw_quantization;
  rw_quantization.type = kTfLiteAffineQuantization;
  auto* rw_affine_quantization = static_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  rw_affine_quantization->scale = TfLiteFloatArrayCreate(1);
  rw_affine_quantization->zero_point = TfLiteIntArrayCreate(1);
  rw_affine_quantization->scale->data[0] = scale;
  rw_affine_quantization->zero_point->data[0] = zero_point;
  rw_quantization.params = rw_affine_quantization;

  TfLiteQuantization ro_quantization;
  ro_quantization.type = kTfLiteAffineQuantization;
  auto* ro_affine_quantization = static_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  ro_affine_quantization->scale = TfLiteFloatArrayCreate(1);
  ro_affine_quantization->zero_point = TfLiteIntArrayCreate(1);
  ro_affine_quantization->scale->data[0] = scale;
  ro_affine_quantization->zero_point->data[0] = zero_point;
  ro_quantization.params = ro_affine_quantization;

  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(0, tensor_type, "", {3},
                                                     rw_quantization),
            kTfLiteOk);
  ASSERT_EQ(interpreter.SetTensorParametersReadOnly(
                1, tensor_type, "", {2}, ro_quantization,
                reinterpret_cast<const char*>(int8s), 2),
            kTfLiteOk);
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  // Check that the legacy scale and zero_point are set correctly.
  ASSERT_EQ(interpreter.tensor(0)->params.scale, scale);
  ASSERT_EQ(interpreter.tensor(0)->params.zero_point, zero_point);
  ASSERT_EQ(interpreter.tensor(0)->quantization.type, rw_quantization.type);
  ASSERT_EQ(interpreter.tensor(1)->params.scale, scale);
  ASSERT_EQ(interpreter.tensor(1)->params.zero_point, zero_point);
  ASSERT_EQ(interpreter.tensor(1)->quantization.type, ro_quantization.type);
}
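
// A minimal sketch of how the duplicated setup above could be factored out.
// `MakeAffineQuantization` is a hypothetical helper, not a TFLite API;
// ownership of the malloc'd params is transferred to the interpreter when the
// TfLiteQuantization is passed to SetTensorParametersRead{Write,Only}.
TfLiteQuantization MakeAffineQuantization(float scale, int32_t zero_point) {
  TfLiteQuantization quantization;
  quantization.type = kTfLiteAffineQuantization;
  auto* affine = static_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  affine->scale = TfLiteFloatArrayCreate(1);
  affine->zero_point = TfLiteIntArrayCreate(1);
  affine->quantized_dimension = 0;
  affine->scale->data[0] = scale;
  affine->zero_point->data[0] = zero_point;
  quantization.params = affine;
  return quantization;
}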

TEST(BasicInterpreter, CheckResize) {
  const float floats[] = {-3., -4.};
  const int32_t int32s[] = {-3, -4};
  const uint32_t uint32s[] = {3, 4};
  const uint8_t uint8s[] = {3, 4};
  const int64_t int64s[] = {6, -7};
  const int16_t int16s[] = {8, -9};
  const Eigen::half float16s[] = {Eigen::half_impl::float_to_half_rtne(-3.f),
                                  Eigen::half_impl::float_to_half_rtne(-4.f)};

  struct {
    TfLiteType type;
    size_t size;
    const char* array;
  } cases[] = {
      {kTfLiteFloat32, sizeof(float), reinterpret_cast<const char*>(floats)},
      {kTfLiteInt32, sizeof(int32_t), reinterpret_cast<const char*>(int32s)},
      {kTfLiteUInt32, sizeof(uint32_t), reinterpret_cast<const char*>(uint32s)},
      {kTfLiteUInt8, sizeof(uint8_t), reinterpret_cast<const char*>(uint8s)},
      {kTfLiteInt64, sizeof(int64_t), reinterpret_cast<const char*>(int64s)},
      {kTfLiteInt16, sizeof(int16_t), reinterpret_cast<const char*>(int16s)},
      {kTfLiteFloat16, sizeof(TfLiteFloat16),
       reinterpret_cast<const char*>(float16s)},
  };

  for (auto test : cases) {
    Interpreter interpreter;

    ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
    interpreter.SetInputs({0, 1});
    interpreter.SetOutputs({});
    TfLiteQuantizationParams quant;

    ASSERT_EQ(
        interpreter.SetTensorParametersReadWrite(0, test.type, "", {3}, quant),
        kTfLiteOk);
    ASSERT_EQ(interpreter.SetTensorParametersReadOnly(
                  1, test.type, "", {2}, quant, test.array, 2 * test.size),
              kTfLiteOk);
    ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
    ASSERT_EQ(interpreter.ResizeInputTensor(0, {1, 2}), kTfLiteOk);
    // Resizing an mmapped tensor is not allowed and should produce an error.
    ASSERT_NE(interpreter.ResizeInputTensor(1, {3}), kTfLiteOk);
    // Set the tensor to be mmapped but with a buffer size that is insufficient
    // to match the dimensionality.
    ASSERT_NE(interpreter.SetTensorParametersReadOnly(
                  1, test.type, "", {2}, quant, test.array, 1 * test.size),
              kTfLiteOk);
    // Allocating should work since we should have our last correct array
    // values in place.
    ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  }
}

TEST(BasicInterpreter, CheckAlignment) {
  struct {
    TfLiteType type;
  } cases[] = {{kTfLiteFloat32}, {kTfLiteInt32}, {kTfLiteUInt32},
               {kTfLiteUInt8},   {kTfLiteInt64}, {kTfLiteInt16},
               {kTfLiteFloat16}};

  for (auto test : cases) {
    Interpreter interpreter;

    ASSERT_EQ(interpreter.AddTensors(4), kTfLiteOk);

    for (int i = 0; i < 4; i++) {
      TfLiteQuantizationParams quant;
      interpreter.SetTensorParametersReadWrite(i, test.type, "", {2 * i + 1},
                                               quant);
    }
    interpreter.AllocateTensors();
    for (int i = 0; i < 4; i++) {
      const TfLiteTensor& tensor = *interpreter.tensor(i);
      ASSERT_EQ(reinterpret_cast<intptr_t>(tensor.data.raw) % 4, 0);
    }
  }
}

TEST(BasicInterpreter, CheckArenaAllocation) {
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(10), kTfLiteOk);

  TfLiteQuantizationParams quant;
  TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};

  std::vector<int> sizes{2048, 4096, 1023, 2047, 1021,
                         2047, 1023, 2046, 0,    2048};
  for (size_t i = 0; i < sizes.size(); ++i) {
    interpreter.SetTensorParametersReadWrite(static_cast<int>(i), kTfLiteUInt8,
                                             "", {sizes[i]}, quant);
  }
  interpreter.SetInputs({0, 1});
  interpreter.SetOutputs({9, 4});
  interpreter.AddNodeWithParameters({0, 1}, {2, 3}, nullptr, 0, nullptr, &reg);
  interpreter.AddNodeWithParameters({2, 1}, {4, 5}, nullptr, 0, nullptr, &reg);
  interpreter.AddNodeWithParameters({4, 3}, {6, 7}, nullptr, 0, nullptr, &reg);
  interpreter.AddNodeWithParameters({6, 5}, {8}, nullptr, 0, nullptr, &reg);
  interpreter.AddNodeWithParameters({8, 7}, {9}, nullptr, 0, nullptr, &reg);

  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);

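  // Note: the arena planner assigns buffers in execution order and may reuse
  // a buffer whose previous owner is no longer needed, which is why tensors
  // 3/9 and 2/7 below share an address and why zero-byte tensor 8 gets no
  // buffer at all.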
  ASSERT_LT(interpreter.tensor(0)->data.raw, interpreter.tensor(1)->data.raw);
  ASSERT_LT(interpreter.tensor(1)->data.raw, interpreter.tensor(3)->data.raw);
  ASSERT_EQ(interpreter.tensor(3)->data.raw, interpreter.tensor(9)->data.raw);
  ASSERT_LT(interpreter.tensor(3)->data.raw, interpreter.tensor(5)->data.raw);
  ASSERT_LT(interpreter.tensor(5)->data.raw, interpreter.tensor(2)->data.raw);
  ASSERT_EQ(interpreter.tensor(2)->data.raw, interpreter.tensor(7)->data.raw);
  ASSERT_LT(interpreter.tensor(2)->data.raw, interpreter.tensor(4)->data.raw);
  // #4 is the one with the largest pointer.
  ASSERT_EQ(interpreter.tensor(8)->data.raw, nullptr);
}

TEST(BasicInterpreter, BufferAccess) {
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
  ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);

  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
                0, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()),
            kTfLiteOk);
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  // Verify we get a valid pointer.
  ASSERT_NE(interpreter.typed_tensor<float>(0), nullptr);
  // Verify that a request with the wrong type yields no pointer.
  ASSERT_EQ(interpreter.typed_tensor<int>(0), nullptr);
  // Verify that the raw C interface pointer matches the typed interface.
  ASSERT_EQ(interpreter.typed_tensor<float>(0), interpreter.tensor(0)->data.f);
}

TEST(BasicInterpreter, NoOpInterpreter) {
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
  ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
  ASSERT_EQ(interpreter.SetOutputs({0}), kTfLiteOk);

  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
                0, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()),
            kTfLiteOk);

  ASSERT_EQ(interpreter.ResizeInputTensor(interpreter.inputs()[0], {1, 2, 3}),
            kTfLiteOk);
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
}

TEST(BasicInterpreter, RedundantAllocateTensors) {
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
  ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);

  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
                0, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()),
            kTfLiteOk);

  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  const auto data_raw = interpreter.tensor(0)->data.raw;
  ASSERT_NE(data_raw, nullptr);

  // A redundant allocation request should have no impact.
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(interpreter.tensor(0)->data.raw, data_raw);
}

TEST(BasicInterpreter, RedundantAllocateTensorsWithDynamicInputs) {
  Interpreter interpreter;
  TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
  ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
  interpreter.SetInputs({0});
  interpreter.SetOutputs({1});
  interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr, &reg);

  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
                0, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()),
            kTfLiteOk);
  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
                1, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()),
            kTfLiteOk);

  // Configure the input tensor as dynamic.
  interpreter.tensor(0)->data.raw = nullptr;
  interpreter.tensor(0)->allocation_type = kTfLiteDynamic;

  ASSERT_EQ(interpreter.ResizeInputTensor(interpreter.inputs()[0], {1, 2, 3}),
            kTfLiteOk);
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  ASSERT_NE(interpreter.tensor(1)->data.raw, nullptr);

  // Reset the output tensor's buffer.
  interpreter.tensor(1)->data.raw = nullptr;

  // A redundant allocation request should be honored, as the input tensor
  // was marked dynamic.
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  ASSERT_NE(interpreter.tensor(1)->data.raw, nullptr);
}

TEST(BasicInterpreter, ResizingTensors) {
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
  ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
  ASSERT_EQ(interpreter.SetOutputs({0}), kTfLiteOk);

  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
                0, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()),
            kTfLiteOk);

  int t = interpreter.inputs()[0];
  TfLiteTensor* tensor = interpreter.tensor(t);

  ASSERT_EQ(interpreter.ResizeInputTensor(t, {1, 2, 3}), kTfLiteOk);
  EXPECT_EQ(tensor->bytes, 6 * sizeof(float));
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);

  tensor->data.f[5] = 0.123f;

  // Changing from kTfLiteArenaRw to kTfLiteDynamic is quite complicated: we
  // need to unset data.raw, otherwise Realloc will try to free that memory.
  tensor->data.raw = nullptr;
  tensor->allocation_type = kTfLiteDynamic;

  ASSERT_EQ(interpreter.ResizeInputTensor(t, {1, 2, 4}), kTfLiteOk);
  EXPECT_EQ(tensor->bytes, 8 * sizeof(float));
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);

  ASSERT_EQ(interpreter.ResizeInputTensor(t, {}), kTfLiteOk);
  EXPECT_EQ(tensor->bytes, 1 * sizeof(float));
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);

  ASSERT_EQ(interpreter.ResizeInputTensor(t, {0}), kTfLiteOk);
  EXPECT_EQ(tensor->bytes, 0);
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);

  ASSERT_EQ(interpreter.ResizeInputTensor(t, {1, 2, 0}), kTfLiteOk);
  EXPECT_EQ(tensor->bytes, 0);
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);

  // TODO(ahentz): We shouldn't have to force reallocation, but
  // ResizeInputTensor doesn't realloc dynamic tensors. Also note that
  // TfLiteTensorRealloc(tensor->bytes, tensor) is a no-op.
  TfLiteTensorRealloc(9 * sizeof(float), tensor);
  tensor->data.f[7] = 0.123f;

  ASSERT_EQ(interpreter.ResizeInputTensor(t, {2, 2, 4}), kTfLiteOk);
  EXPECT_EQ(tensor->bytes, 16 * sizeof(float));
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);

  // TODO(ahentz): We shouldn't have to force reallocation, but
  // ResizeInputTensor doesn't realloc dynamic tensors. Also note that
  // TfLiteTensorRealloc(tensor->bytes, tensor) is a no-op.
  TfLiteTensorRealloc(17 * sizeof(float), tensor);
  tensor->data.f[15] = 0.123f;
}
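
// A minimal sketch of the kTfLiteArenaRw -> kTfLiteDynamic transition used
// above. `MakeTensorDynamic` is a hypothetical helper, not a TFLite API:
// clearing data.raw first matters because the arena owns that buffer, and a
// later realloc of the now-dynamic tensor would otherwise try to free memory
// the tensor does not own.
void MakeTensorDynamic(TfLiteTensor* tensor) {
  tensor->data.raw = nullptr;
  tensor->allocation_type = kTfLiteDynamic;
}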

TEST(BasicInterpreter, NoopResizingTensors) {
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
  ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
  ASSERT_EQ(interpreter.SetOutputs({0}), kTfLiteOk);

  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
                0, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()),
            kTfLiteOk);

  int t = interpreter.inputs()[0];
  TfLiteTensor* tensor = interpreter.tensor(t);

  ASSERT_EQ(interpreter.ResizeInputTensor(t, {1, 2, 3}), kTfLiteOk);
  EXPECT_EQ(tensor->bytes, 6 * sizeof(float));
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  tensor->data.f[5] = 0.123f;

  // Resizing to the same size should not trigger re-allocation.
  ASSERT_EQ(interpreter.ResizeInputTensor(t, {1, 2, 3}), kTfLiteOk);
  EXPECT_EQ(tensor->bytes, 6 * sizeof(float));
  ASSERT_NE(tensor->data.raw, nullptr);
  ASSERT_EQ(tensor->data.f[5], 0.123f);

  // Explicitly allocating should be a no-op, as no resize was performed.
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  EXPECT_EQ(tensor->bytes, 6 * sizeof(float));
  ASSERT_NE(tensor->data.raw, nullptr);
  ASSERT_EQ(tensor->data.f[5], 0.123f);
}

TEST(BasicInterpreter, ResizingTensorsStrictInvalid) {
  // Tests ResizeInputTensorStrict where `dims_signature` is not specified.
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
  ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
  ASSERT_EQ(interpreter.SetOutputs({0}), kTfLiteOk);

  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
                0, kTfLiteFloat32, "", {1, 1, 3}, TfLiteQuantizationParams()),
            kTfLiteOk);

  int t = interpreter.inputs()[0];
  TfLiteTensor* tensor = interpreter.tensor(t);

  ASSERT_EQ(interpreter.ResizeInputTensorStrict(t, {1, 1, 3}), kTfLiteOk);
  EXPECT_EQ(tensor->bytes, 3 * sizeof(float));
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);

  // Invalid because `dims_signature` is not specified.
  ASSERT_EQ(interpreter.ResizeInputTensorStrict(t, {1, 2, 3}), kTfLiteError);
  EXPECT_EQ(tensor->bytes, 3 * sizeof(float));
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);

  // Assert that ResizeInputTensor works for this value.
  ASSERT_EQ(interpreter.ResizeInputTensor(t, {1, 2, 3}), kTfLiteOk);
  EXPECT_EQ(tensor->bytes, 6 * sizeof(float));
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
}

TEST(BasicInterpreter, ResizingTensorsStrict) {
  // Tests ResizeInputTensorStrict where `dims_signature` is specified.
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
  ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
  ASSERT_EQ(interpreter.SetOutputs({0}), kTfLiteOk);

  std::vector<int> dims_signature = {-1, -1, 3};
  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
                0, kTfLiteFloat32, "", {1, 1, 3}, TfLiteQuantizationParams(),
                false, &dims_signature),
            kTfLiteOk);

  int t = interpreter.inputs()[0];
  TfLiteTensor* tensor = interpreter.tensor(t);

  ASSERT_EQ(interpreter.ResizeInputTensorStrict(t, {1, 2, 3}), kTfLiteOk);
  EXPECT_EQ(tensor->bytes, 6 * sizeof(float));
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);

  ASSERT_EQ(interpreter.ResizeInputTensorStrict(t, {1, 2, 4}), kTfLiteError);
  EXPECT_EQ(tensor->bytes, 6 * sizeof(float));
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);

  // Assert that ResizeInputTensor works for this value.
  ASSERT_EQ(interpreter.ResizeInputTensor(t, {1, 2, 4}), kTfLiteOk);
  EXPECT_EQ(tensor->bytes, 8 * sizeof(float));
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
}

// Simple op that does input = output.
TfLiteRegistration GetPassthroughOpRegistration() {
  TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
  reg.init = [](TfLiteContext* context, const char*, size_t) -> void* {
    auto* first_new_tensor = new int;
    context->AddTensors(context, 2, first_new_tensor);
    return first_new_tensor;
  };
  reg.free = [](TfLiteContext* context, void* buffer) {
    delete static_cast<int*>(buffer);
  };
  reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
    auto* first_new_tensor = static_cast<int*>(node->user_data);

    const TfLiteTensor* tensor0;
    TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &tensor0));
    TfLiteTensor* tensor1;
    TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &tensor1));

    TfLiteIntArray* newSize = TfLiteIntArrayCopy(tensor0->dims);
    TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, tensor1, newSize));

    TfLiteIntArrayFree(node->temporaries);
    node->temporaries = TfLiteIntArrayCreate(2);
    for (int i = 0; i < 2; ++i) {
      node->temporaries->data[i] = *(first_new_tensor) + i;
    }

    auto setup_temporary = [&](int id) {
      TfLiteTensor* tmp = &context->tensors[id];
      tmp->type = kTfLiteFloat32;
      tmp->allocation_type = kTfLiteArenaRw;
      return context->ResizeTensor(context, tmp,
                                   TfLiteIntArrayCopy(tensor0->dims));
    };
    TF_LITE_ENSURE_STATUS(setup_temporary(node->temporaries->data[0]));
    TF_LITE_ENSURE_STATUS(setup_temporary(node->temporaries->data[1]));

    return kTfLiteOk;
  };
  reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
    const TfLiteTensor* a0;
    TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &a0));

    auto populate = [&](int id) {
      TfLiteTensor* t = &context->tensors[id];
      int num = a0->dims->data[0];
      for (int i = 0; i < num; i++) {
        t->data.f[i] = a0->data.f[i];
      }
    };

    populate(node->outputs->data[0]);
    populate(node->temporaries->data[0]);
    populate(node->temporaries->data[1]);
    return kTfLiteOk;
  };

  return reg;
}

TEST(BasicInterpreter, OneOpInterpreter) {
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
  ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
  ASSERT_EQ(interpreter.SetOutputs({1}), kTfLiteOk);

  TfLiteQuantizationParams quantized;
  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "in1",
                                                     {3}, quantized),
            kTfLiteOk);
  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(1, kTfLiteFloat32, "out0",
                                                     {3}, quantized),
            kTfLiteOk);

  ASSERT_EQ(interpreter.GetInputName(0), "in1");
  ASSERT_EQ(interpreter.GetOutputName(0), "out0");

  TfLiteRegistration reg = GetPassthroughOpRegistration();

  ASSERT_EQ(
      interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr, &reg),
      kTfLiteOk);
  ASSERT_EQ(interpreter.ResizeInputTensor(0, {3}), kTfLiteOk);
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);

  ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
}

TEST(BasicInterpreter, ReleaseNonPersistentMemory) {
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
  ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
  ASSERT_EQ(interpreter.SetOutputs({1}), kTfLiteOk);

  TfLiteQuantizationParams quantized;
  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "in1",
                                                     {3}, quantized),
            kTfLiteOk);
  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(1, kTfLiteFloat32, "out0",
                                                     {3}, quantized),
            kTfLiteOk);

  TfLiteRegistration reg = GetPassthroughOpRegistration();

  ASSERT_EQ(
      interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr, &reg),
      kTfLiteOk);
  ASSERT_EQ(interpreter.ResizeInputTensor(0, {3}), kTfLiteOk);

  // AllocateTensors() hasn't been called yet, so this should be a no-op.
  ASSERT_EQ(interpreter.ReleaseNonPersistentMemory(), kTfLiteOk);

  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);

  ASSERT_EQ(interpreter.ReleaseNonPersistentMemory(), kTfLiteOk);
  // Invoke() now fails because non-persistent arenas have been released.
  ASSERT_NE(interpreter.Invoke(), kTfLiteOk);

  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);

  // Calling ResizeInputTensor right after ReleaseNonPersistentMemory should
  // likewise require AllocateTensors before Invoke, without causing any
  // unexpected crashes.
  ASSERT_EQ(interpreter.ReleaseNonPersistentMemory(), kTfLiteOk);
  ASSERT_EQ(interpreter.ResizeInputTensor(0, {4}), kTfLiteOk);
  ASSERT_NE(interpreter.Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
}

// Forcefully divides tensor allocation into three steps: one before invocation
// and two more at invocation time. This happens because we use string tensors,
// whose sizes can't be determined until invocation time.
TEST(BasicInterpreter, ThreeStepAllocate) {
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(5), kTfLiteOk);
  ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
  ASSERT_EQ(interpreter.SetOutputs({4}), kTfLiteOk);

  TfLiteQuantizationParams quantized;

  // String tensor with one string of length 3
  union {
    char raw_bytes[15];
    struct {
      int32_t num_strs;
      int32_t offsets[2];
      char str_data[3];
    } tensor_data;
  } data;
  data.tensor_data = {1, {12, 15}, {'A', 'B', 'C'}};
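  // Layout of a serialized TFLite string tensor: an int32 string count, then
  // count + 1 int32 byte offsets (the last one being the total buffer size),
  // then the string bytes. Here: one string occupying bytes [12, 15), "ABC".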

  // Read-only string tensor.
  ASSERT_EQ(interpreter.SetTensorParametersReadOnly(0, kTfLiteString, "", {1},
                                                    quantized, data.raw_bytes,
                                                    sizeof(data.raw_bytes)),
            kTfLiteOk);
  // Read-write string tensor.
  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(1, kTfLiteString, "", {1},
                                                     quantized),
            kTfLiteOk);
  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(2, kTfLiteInt32, "", {1},
                                                     quantized),
            kTfLiteOk);
  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(3, kTfLiteString, "", {1},
                                                     quantized),
            kTfLiteOk);
  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(4, kTfLiteInt32, "", {1},
                                                     quantized),
            kTfLiteOk);

  // String-in String-out node.
  TfLiteRegistration reg_copy = {nullptr, nullptr, nullptr, nullptr};
  reg_copy.invoke = [](TfLiteContext* context, TfLiteNode* node) {
    const TfLiteTensor* input;
    TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
    TfLiteTensor* output;
    TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
    DynamicBuffer buf;
    StringRef str_ref = GetString(input, 0);
    buf.AddString(str_ref);
    buf.WriteToTensorAsVector(output);
    return kTfLiteOk;
  };

  // String-in Int-out node.
  TfLiteRegistration reg_len = {nullptr, nullptr, nullptr, nullptr};
  reg_len.prepare = [](TfLiteContext* context, TfLiteNode* node) {
    TfLiteTensor* output;
    TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
    TfLiteIntArray* outputSize = TfLiteIntArrayCreate(1);
    outputSize->data[0] = 1;
    return context->ResizeTensor(context, output, outputSize);
  };
  reg_len.invoke = [](TfLiteContext* context, TfLiteNode* node) {
    const TfLiteTensor* a0;
    TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &a0));
    TfLiteTensor* a1;
    TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &a1));
    a1->data.i32[0] = a0->bytes;
    return kTfLiteOk;
  };

  ASSERT_EQ(interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr,
                                              &reg_copy),
            kTfLiteOk);
  ASSERT_EQ(interpreter.AddNodeWithParameters({1}, {2}, nullptr, 0, nullptr,
                                              &reg_len),
            kTfLiteOk);
  ASSERT_EQ(interpreter.AddNodeWithParameters({0}, {3}, nullptr, 0, nullptr,
                                              &reg_copy),
            kTfLiteOk);
  ASSERT_EQ(interpreter.AddNodeWithParameters({3}, {4}, nullptr, 0, nullptr,
                                              &reg_len),
            kTfLiteOk);

  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);

  ASSERT_EQ(interpreter.tensor(0)->bytes, 15);
  ASSERT_NE(interpreter.tensor(0)->data.raw, nullptr);
  ASSERT_EQ(interpreter.tensor(1)->bytes, 15);
  ASSERT_NE(interpreter.tensor(1)->data.raw, nullptr);
  ASSERT_EQ(interpreter.tensor(3)->bytes, 15);
  ASSERT_NE(interpreter.tensor(4)->data.raw, nullptr);
  ASSERT_EQ(interpreter.tensor(2)->bytes, 4);
  ASSERT_EQ(interpreter.tensor(2)->data.i32[0], 15);
  ASSERT_EQ(interpreter.tensor(4)->bytes, 4);
  ASSERT_EQ(interpreter.tensor(4)->data.i32[0], 15);
}

TEST(BasicInterpreter, AllocateTwice) {
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
  ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
  ASSERT_EQ(interpreter.SetOutputs({1}), kTfLiteOk);

  TfLiteQuantizationParams quantized;
  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "", {3},
                                                     quantized),
            kTfLiteOk);
  ASSERT_EQ(interpreter.SetTensorParametersReadWrite(1, kTfLiteFloat32, "", {3},
                                                     quantized),
            kTfLiteOk);

  TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
  reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
    const TfLiteTensor* tensor0;
    TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &tensor0));
    TfLiteTensor* tensor1;
    TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &tensor1));
    TfLiteIntArray* newSize = TfLiteIntArrayCopy(tensor0->dims);
    return context->ResizeTensor(context, tensor1, newSize);
  };
  reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
    const TfLiteTensor* a0;
    TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &a0));
    TfLiteTensor* a1;
    TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &a1));
    int num = a0->dims->data[0];
    for (int i = 0; i < num; i++) {
      a1->data.f[i] = a0->data.f[i];
    }
    return kTfLiteOk;
  };
  ASSERT_EQ(
      interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr, &reg),
      kTfLiteOk);
  ASSERT_EQ(interpreter.ResizeInputTensor(0, {3}), kTfLiteOk);
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
  char* old_tensor0_ptr = interpreter.tensor(0)->data.raw;
  char* old_tensor1_ptr = interpreter.tensor(1)->data.raw;

  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
  ASSERT_EQ(old_tensor0_ptr, interpreter.tensor(0)->data.raw);
  ASSERT_EQ(old_tensor1_ptr, interpreter.tensor(1)->data.raw);
}

TEST(BasicInterpreter, TestNullErrorReporter) {
  TestErrorReporter reporter;
  Interpreter interpreter;
}

TEST(BasicInterpreter, TestCustomErrorReporter) {
  TestErrorReporter reporter;
  Interpreter interpreter(&reporter);
  ASSERT_NE(interpreter.Invoke(), kTfLiteOk);
  ASSERT_EQ(reporter.error_messages(),
            "Invoke called on model that is not ready.");
  ASSERT_EQ(reporter.num_calls(), 1);
}

TEST(BasicInterpreter, TestOverflow) {
  TestErrorReporter reporter;
  Interpreter interpreter(&reporter);
  TfLiteQuantizationParams quantized;

  ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
  ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
  ASSERT_EQ(interpreter.SetOutputs({0}), kTfLiteOk);
  // Overflow testing is pointer-word-size dependent.
  if (sizeof(size_t) == 8) {
    // #bits for byte count = 30 + 30 + 2 = 62 < 64
    ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
                  0, kTfLiteFloat32, "in1", {1 << 30, 1 << 30}, quantized),
              kTfLiteOk);
    // #bits for element count = 30 + 30 + 2 = 62 < 64 (no overflow)
    // #bits for byte count = 30 + 30 + 2 + 2 = 64 == 64 (overflow)
    ASSERT_NE(
        interpreter.SetTensorParametersReadWrite(
            0, kTfLiteFloat32, "in1", {1 << 30, 1 << 30, 1 << 2}, quantized),
        kTfLiteOk);
    EXPECT_THAT(
        reporter.error_messages(),
        testing::EndsWith("BytesRequired number of bytes overflowed.\n"));
    // #bits for element count = 30 + 30 + 2 + 4 = 66 > 64 (overflow).
    // #bits for byte count = 30 + 30 + 2 + 4 + 2 = 68 > 64 (overflow).
    reporter.Reset();
    ASSERT_NE(interpreter.SetTensorParametersReadWrite(
                  0, kTfLiteFloat32, "in1", {1 << 30, 1 << 30, 1 << 2, 1 << 4},
                  quantized),
              kTfLiteOk);
    EXPECT_THAT(
        reporter.error_messages(),
        testing::EndsWith("BytesRequired number of elements overflowed.\n"));

  } else if (sizeof(size_t) == 4) {
    // #bits for byte count = 14 + 14 + 2 = 30 < 32
    ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
                  0, kTfLiteFloat32, "in1", {1 << 14, 1 << 14}, quantized),
              kTfLiteOk);
    // #bits for element count = 14 + 14 + 3 = 31 < 32 (no overflow).
    // #bits for byte count = 14 + 14 + 3 + 2 = 33 > 32 (overflow).
    ASSERT_NE(
        interpreter.SetTensorParametersReadWrite(
            0, kTfLiteFloat32, "in1", {1 << 14, 1 << 14, 1 << 3}, quantized),
        kTfLiteOk);
    EXPECT_THAT(
        reporter.error_messages(),
        testing::EndsWith("BytesRequired number of bytes overflowed.\n"));
    // #bits for element count = 14 + 14 + 4 = 32 == 32 (overflow).
    // The byte count also overflows, but we don't get to that check.
    reporter.Reset();
    ASSERT_NE(
        interpreter.SetTensorParametersReadWrite(
            0, kTfLiteFloat32, "in1", {1 << 14, 1 << 14, 1 << 4}, quantized),
        kTfLiteOk);
    EXPECT_THAT(
        reporter.error_messages(),
        testing::EndsWith("BytesRequired number of elements overflowed.\n"));
  } else {
    // This test failing means that we are using a non-32/64-bit architecture.
    ASSERT_TRUE(false);
  }
}

TEST(BasicInterpreter, TestUnsupportedDelegateFunctions) {
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
  TfLiteRegistration registration = {nullptr, nullptr, nullptr, nullptr};
  // These functions are only supported inside a delegate's Prepare function.
  // The test verifies that they return `kTfLiteError` rather than returning
  // `kTfLiteOk` or crashing.
  registration.prepare = [](TfLiteContext* context, TfLiteNode* node) {
    {
      TfLiteIntArray* execution_plan;
      EXPECT_EQ(context->GetExecutionPlan(context, &execution_plan),
                kTfLiteError);
    }
    {
      TfLiteNode* node;
      TfLiteRegistration* registration;
      EXPECT_EQ(
          context->GetNodeAndRegistration(context, 0, &node, &registration),
          kTfLiteError);
    }
    {
      TfLiteRegistration delegate_registration = {nullptr, nullptr, nullptr,
                                                  nullptr};
      TfLiteIntArray nodes_to_replace;
      nodes_to_replace.size = 0;
      EXPECT_EQ(context->ReplaceNodeSubsetsWithDelegateKernels(
                    context, delegate_registration, &nodes_to_replace, nullptr),
                kTfLiteError);
    }
    return kTfLiteError;
  };
  ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
  ASSERT_EQ(interpreter.SetOutputs({0}), kTfLiteOk);
  ASSERT_EQ(interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr,
                                              &registration),
            kTfLiteOk);
  EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteError);
}

TEST(BasicInterpreter, DynamicTensorsResizeDescendants) {
  // Assemble a graph with a node that has dynamically sized output (via the
  // pad op), followed by a node with a standard element-wise op (negate).
  Interpreter interpreter;
  interpreter.AddTensors(4);
  interpreter.SetInputs({0, 1});
  interpreter.SetOutputs({3});
  TfLiteQuantizationParams quant;
  interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "", {2, 2, 1, 1},
                                           quant);
  interpreter.SetTensorParametersReadWrite(1, kTfLiteInt32, "", {4, 2}, quant);
  interpreter.SetTensorParametersReadWrite(2, kTfLiteFloat32, "", {}, quant);
  interpreter.SetTensorParametersReadWrite(3, kTfLiteFloat32, "", {}, quant);

  TfLiteRegistration* pad_op = tflite::ops::builtin::Register_PADV2();
  TfLiteRegistration* neg_op = tflite::ops::builtin::Register_NEG();
  interpreter.AddNodeWithParameters({0, 1}, {2}, nullptr, 0, nullptr, pad_op);
  interpreter.AddNodeWithParameters({2}, {3}, nullptr, 0, nullptr, neg_op);
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);

  // Configure [[2,2],[2,2]] padding and execute the graph.
  interpreter.typed_tensor<int>(1)[0] = 2;
  interpreter.typed_tensor<int>(1)[1] = 2;
  interpreter.typed_tensor<int>(1)[2] = 2;
  interpreter.typed_tensor<int>(1)[3] = 2;
  interpreter.typed_tensor<int>(1)[4] = 0;
  interpreter.typed_tensor<int>(1)[5] = 0;
  interpreter.typed_tensor<int>(1)[6] = 0;
  interpreter.typed_tensor<int>(1)[7] = 0;
  ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);

  // Both the output and intermediate tensor sizes should reflect the output
  // from the dynamic pad operation.
  ASSERT_EQ(interpreter.tensor(2)->bytes, sizeof(float) * 6 * 6);
  ASSERT_EQ(interpreter.tensor(3)->bytes, sizeof(float) * 6 * 6);

  // Now configure [[4,4],[6,6]] padding and execute the graph.
  interpreter.typed_tensor<int>(1)[0] = 4;
  interpreter.typed_tensor<int>(1)[1] = 4;
  interpreter.typed_tensor<int>(1)[2] = 6;
  interpreter.typed_tensor<int>(1)[3] = 6;
  interpreter.typed_tensor<int>(1)[4] = 0;
  interpreter.typed_tensor<int>(1)[5] = 0;
  interpreter.typed_tensor<int>(1)[6] = 0;
  interpreter.typed_tensor<int>(1)[7] = 0;
  ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);

  // Again, the output and intermediate tensor sizes should reflect the *new*
  // resize from the latest pad operation.
  ASSERT_EQ(interpreter.tensor(2)->bytes, sizeof(float) * 10 * 14);
  ASSERT_EQ(interpreter.tensor(3)->bytes, sizeof(float) * 10 * 14);
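  // (Arithmetic check: padded dims are 2+2+2 = 6 in both dimensions above,
  // and 2+4+4 = 10 by 2+6+6 = 14 here.)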
}

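// The interpreter reserves spare capacity in its tensors vector
// (kTensorsCapacityHeadroom beyond kTensorsReservedCapacity) so that a kernel
// adding a few temporaries during Prepare does not force a reallocation of
// context->tensors, which would invalidate any TfLiteTensor pointers the
// kernel is holding. The two tests below pin down both sides of that
// contract.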
TEST(InterpreterTensorsCapacityTest, TestWithinHeadroom) {
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(Interpreter::kTensorsReservedCapacity),
            kTfLiteOk);
  TfLiteRegistration registration = {nullptr, nullptr, nullptr, nullptr};
  registration.prepare = [](TfLiteContext* context, TfLiteNode* node) {
    TfLiteTensor* first_tensor = context->tensors;

    int new_tensor_index;
    context->AddTensors(context, Interpreter::kTensorsCapacityHeadroom,
                        &new_tensor_index);
    EXPECT_EQ(first_tensor, context->tensors);
    return kTfLiteOk;
  };
  ASSERT_EQ(interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr,
                                              &registration),
            kTfLiteOk);
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
}

TEST(InterpreterTensorsCapacityTest, TestExceedHeadroom) {
  Interpreter interpreter;
  ASSERT_EQ(interpreter.AddTensors(Interpreter::kTensorsReservedCapacity),
            kTfLiteOk);
  TfLiteRegistration registration = {nullptr, nullptr, nullptr, nullptr};
  registration.prepare = [](TfLiteContext* context, TfLiteNode* node) {
    TfLiteTensor* first_tensor = context->tensors;

    int new_tensor_index;
    // Add enough tensors to trigger buffer re-allocation.
    context->AddTensors(
        context,
        (context->tensors_size + Interpreter::kTensorsCapacityHeadroom + 1) * 2,
        &new_tensor_index);
    EXPECT_NE(first_tensor, context->tensors);
    return kTfLiteOk;
  };
  ASSERT_EQ(interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr,
                                              &registration),
            kTfLiteOk);
  ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
}

struct TestExternalContext : public TfLiteExternalContext {
  static constexpr TfLiteExternalContextType kType = kTfLiteGemmLowpContext;

  static TestExternalContext* Get(TfLiteContext* context) {
    return reinterpret_cast<TestExternalContext*>(
        context->GetExternalContext(context, kType));
  }

  static void Set(TfLiteContext* context, TestExternalContext* value) {
    context->SetExternalContext(context, kType, value);
  }

  int num_refreshes = 0;
};

TEST_F(InterpreterTest, GetSetResetExternalContexts) {
  auto* context = GetInterpreterContext();

  TestExternalContext external_context;
  external_context.Refresh = [](TfLiteContext* context) {
    auto* ptr = TestExternalContext::Get(context);
    if (ptr != nullptr) {
      ++ptr->num_refreshes;
    }
    return kTfLiteOk;
  };

  EXPECT_EQ(TestExternalContext::Get(context), nullptr);
  ASSERT_EQ(interpreter_.SetNumThreads(4), kTfLiteOk);

  TestExternalContext::Set(context, &external_context);
  EXPECT_EQ(TestExternalContext::Get(context), &external_context);
  ASSERT_EQ(interpreter_.SetNumThreads(4), kTfLiteOk);
  ASSERT_EQ(interpreter_.SetNumThreads(5), kTfLiteOk);
  EXPECT_EQ(external_context.num_refreshes, 2);

  // Reset the refresh count to 0.
  external_context.num_refreshes = 0;
  // An invalid thread count should not trigger an external context refresh.
  ASSERT_EQ(interpreter_.SetNumThreads(-2), kTfLiteError);
  EXPECT_EQ(external_context.num_refreshes, 0);

  ASSERT_EQ(interpreter_.SetNumThreads(-1), kTfLiteOk);
  EXPECT_EQ(external_context.num_refreshes, 1);

  TestExternalContext::Set(context, nullptr);
  EXPECT_EQ(TestExternalContext::Get(context), nullptr);
  ASSERT_EQ(interpreter_.SetNumThreads(4), kTfLiteOk);
}

struct TestCpuBackendContext : public TfLiteInternalBackendContext {
  // Count the number of calls to ClearCaches for the backend context.
  void ClearCaches() override { ++num_calls; }
  void SetMaxNumThreads(int num_threads) override {}
  int num_calls = 0;
};

TEST_F(InterpreterTest, ExternalBackendContextClearsCachesOnDelete) {
  ExternalCpuBackendContext external_cpu_context;
  TestCpuBackendContext* cpu_backend_context = new TestCpuBackendContext();
  external_cpu_context.set_internal_backend_context(
      std::unique_ptr<TfLiteInternalBackendContext>(cpu_backend_context));

  {
    // Create an interpreter with an external CPU backend context and ensure
    // it goes out of scope.
    Interpreter interpreter;
    interpreter.SetExternalContext(kTfLiteCpuBackendContext,
                                   &external_cpu_context);
    EXPECT_EQ(cpu_backend_context->num_calls, 0);
  }
  EXPECT_EQ(cpu_backend_context->num_calls, 1);
}
1174 
1175 // Test fixture that allows playing with execution plans. It creates a two
1176 // node graph that can be executed in either [0,1] order or [1,0] order.
1177 // The CopyOp records when it is invoked in the class member run_order_
1178 // so we can test whether the execution plan was honored.
1179 class TestExecutionPlan : public ::testing::Test {
1180   // Encapsulates the node ids and provides them to a C primitive data type
1181   // Allocatable with placement new, but never destructed, so make sure this
1182   // doesn't own any heap allocated data. This is then is used as op local
1183   // data to allow access to the test fixture data.
1184   class CallReporting {
1185    public:
CallReporting(int node_id,std::vector<int> * run_order)1186     CallReporting(int node_id, std::vector<int>* run_order)
1187         : node_id_(node_id), run_order_(run_order) {}
1188 
Record()1189     void Record() { run_order_->push_back(node_id_); }
1190 
1191    private:
1192     // The node id for this particular node
1193     int node_id_;
1194     // A pointer to the global run-order
1195     std::vector<int>* run_order_;
1196   };
1197 
1198   // Build a kernel registration for an op that copies its one input
1199   // to an output
CopyOpRegistration()1200   TfLiteRegistration CopyOpRegistration() {
1201     TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
1202 
1203     reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
1204       // Set output size to input size
1205       const TfLiteTensor* tensor0;
1206       TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &tensor0));
1207       TfLiteTensor* tensor1;
1208       TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &tensor1));
1209       TfLiteIntArray* newSize = TfLiteIntArrayCopy(tensor0->dims);
1210       return context->ResizeTensor(context, tensor1, newSize);
1211     };
1212 
1213     reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
1214       CallReporting* call_reporting =
1215           static_cast<CallReporting*>(node->builtin_data);
1216       // Copy input data to output data.
1217       const TfLiteTensor* a0;
1218       TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &a0));
1219       TfLiteTensor* a1;
1220       TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &a1));
1221       int num = a0->dims->data[0];
1222       for (int i = 0; i < num; i++) {
1223         a1->data.f[i] = a0->data.f[i];
1224       }
1225       call_reporting->Record();
1226       return kTfLiteOk;
1227     };
1228     return reg;
1229   }
1230 
1231   // Adds a copy node going from tensor `input` to output tensor `output`.
1232   // Note, input is used as the node_id. Inject run_order as op accessible
1233   // data. Note: this is a little strange of a way to do this, but it is
1234   // using op functionality to avoid static global variables.
MakeCopyNode(int input,int output)1235   void MakeCopyNode(int input, int output) {
1236     // Ownership of call_reporting is taken by interpreter (malloc is used due
1237     // to nodes being a C99 interface so free() is used).
1238     TfLiteRegistration copy_op = CopyOpRegistration();
1239     CallReporting* call_reporting_1 =
1240         static_cast<CallReporting*>(malloc(sizeof(CallReporting)));
1241     new (call_reporting_1) CallReporting(input, &run_order_);
1242     ASSERT_EQ(interpreter_.AddNodeWithParameters(
1243                   {0}, {2}, nullptr, 0, static_cast<void*>(call_reporting_1),
1244                   &copy_op),
1245               kTfLiteOk);
1246     ASSERT_EQ(interpreter_.ResizeInputTensor(input, {3}), kTfLiteOk);
1247   }
1248 
SetUp()1249   void SetUp() final {
1250     // Add two inputs and two outputs that don't depend on each other
1251     ASSERT_EQ(interpreter_.AddTensors(4), kTfLiteOk);
1252     interpreter_.SetInputs({0, 1});
1253     interpreter_.SetOutputs({2, 3});
1254     TfLiteQuantizationParams quantized;
1255     for (int tensor_index = 0; tensor_index < 4; tensor_index++) {
1256       ASSERT_EQ(interpreter_.SetTensorParametersReadWrite(
1257                     tensor_index, kTfLiteFloat32, "", {3}, quantized),
1258                 kTfLiteOk);
1259     }
1260 
1261     // Define two copy functions that also use the user_data to report that
1262     // they were called.
1263     // i.e. tensor[2] = copy(tensor[0]); tensor[3] = copy(tensor[1]);
1264     // thus we can reorder the two nodes arbitrarily and still satisfy
1265     // dependency order.
1266     MakeCopyNode(0, 2);
1267     MakeCopyNode(1, 3);
1268 
1269     ASSERT_EQ(interpreter_.AllocateTensors(), kTfLiteOk);
1270   }
1271 
1272  protected:
1273   Interpreter interpreter_;
1274 
1275   // list of node_ids that were run
1276   std::vector<int> run_order_;
1277 };
1278 
1279 TEST_F(TestExecutionPlan, DefaultExecutionPlan) {
1280   // Check default order
1281   ASSERT_EQ(interpreter_.Invoke(), kTfLiteOk);
1282   ASSERT_EQ(run_order_, std::vector<int>({0, 1}));
1283 }
1284 
1285 TEST_F(TestExecutionPlan, ReversedExecutionPlan) {
1286   // Check reversed order
1287   interpreter_.SetExecutionPlan({1, 0});
1288   ASSERT_EQ(interpreter_.Invoke(), kTfLiteOk);
1289   ASSERT_EQ(run_order_, std::vector<int>({1, 0}));
1290 }
1291 
1292 TEST_F(TestExecutionPlan, SubsetExecutionPlan) {
1293   // Check running only node index 1
1294   interpreter_.SetExecutionPlan({1});
1295   ASSERT_EQ(interpreter_.Invoke(), kTfLiteOk);
1296   ASSERT_EQ(run_order_, std::vector<int>({1}));
1297 }
1298 
1299 TEST_F(TestExecutionPlan, NullExecutionPlan) {
1300   // Check that nothing is executed.
1301   interpreter_.SetExecutionPlan({});
1302   ASSERT_EQ(interpreter_.Invoke(), kTfLiteOk);
1303   ASSERT_EQ(run_order_, std::vector<int>());
1304 }
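// Illustrative sketch (added for clarity, not part of the original suite):
// the execution plan can be re-set between invocations, and run_order_ is
// never cleared by the fixture, so it accumulates across Invoke() calls.
TEST_F(TestExecutionPlan, ResetExecutionPlanSketch) {
  interpreter_.SetExecutionPlan({1, 0});
  ASSERT_EQ(interpreter_.Invoke(), kTfLiteOk);
  interpreter_.SetExecutionPlan({0, 1});
  ASSERT_EQ(interpreter_.Invoke(), kTfLiteOk);
  // The reversed run is recorded first, followed by the default-order run.
  ASSERT_EQ(run_order_, std::vector<int>({1, 0, 0, 1}));
}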
1305 
1306 TEST(TestDelegateOwnership, ProperlyDisposed) {
1307   struct TfLiteInterpreterOwnedDelegate : public TfLiteDelegate {
1308     TfLiteInterpreterOwnedDelegate(bool* destroyed, bool* prepared)
1309         : destroyed(destroyed), prepared(prepared) {
1310       flags = kTfLiteDelegateFlagsNone;
1311       Prepare = [](TfLiteContext*, TfLiteDelegate* delegate) -> TfLiteStatus {
1312         *static_cast<TfLiteInterpreterOwnedDelegate*>(delegate)->prepared =
1313             true;
1314         return kTfLiteOk;
1315       };
1316     }
1317     ~TfLiteInterpreterOwnedDelegate() { *destroyed = true; }
1318 
1319     bool* destroyed;
1320     bool* prepared;
1321   };
1322 
1323   // Construct a delegate with flags for indicating preparation/destruction.
1324   bool destroyed = false;
1325   bool prepared = false;
1326   std::unique_ptr<TfLiteInterpreterOwnedDelegate> delegate(
1327       new TfLiteInterpreterOwnedDelegate(&destroyed, &prepared));
1328   {
1329     // Create an interpreter and assemble a simple graph.
1330     Interpreter interpreter;
1331     TfLiteRegistration registration = {nullptr, nullptr, nullptr, nullptr};
1332     ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
1333     ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
1334     ASSERT_EQ(interpreter.SetOutputs({1}), kTfLiteOk);
1335     ASSERT_EQ(interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr,
1336                                                 &registration),
1337               kTfLiteOk);
1338 
1339     // Pass delegate ownership to that interpreter.
1340     ASSERT_EQ(InterpreterTest::ModifyGraphWithDelegate(&interpreter,
1341                                                        std::move(delegate)),
1342               kTfLiteOk);
1343 
1344     // The delegate should be prepared as normal, and should be preserved.
1345     EXPECT_TRUE(prepared);
1346     EXPECT_FALSE(destroyed);
1347 
1348     // Interpreter interaction should not impact the delegate's validity.
1349     interpreter.AllocateTensors();
1350     interpreter.Invoke();
1351     EXPECT_FALSE(destroyed);
1352   }
1353 
1354   // Only after the interpreter is destroyed should the delegate be destroyed.
1355   EXPECT_TRUE(destroyed);
1356 }
1357 
1358 // CancellationData contains the data required to cancel a call to Invoke().
1359 struct CancellationData {
1360   bool is_cancelled = false;
1361 };
1362 
1363 // Indicates whether Invoke() has been cancelled based on the value of the
1364 // CancellationData object passed in.
1365 bool CheckCancellation(void* data) {
1366   CancellationData* cancellation_data =
1367       static_cast<struct CancellationData*>(data);
1368   return cancellation_data->is_cancelled;
1369 }
1370 
1371 static struct CancellationData cancellation_data_;
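// Note: file-static (rather than a fixture member) because the capture-free
// lambdas used for TfLiteRegistration::invoke below must convert to plain
// function pointers and therefore cannot capture fixture state.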
1372 
1373 // Test fixture to test cancellation within the Interpreter.
1374 class CancellationTest : public ::testing::Test {
1375  public:
1376   TfLiteStatus Invoke() { return interpreter_.Invoke(); }
1377   void Cancel() { cancellation_data_.is_cancelled = true; }
1378 
1379   // Adds a CancelOp with input tensor `input` and output tensor `output`.
1380   void MakeCancelNode(int input, int output) {
1381     TfLiteRegistration op = CancelOpRegistration();
1382     ASSERT_EQ(interpreter_.AddNodeWithParameters({input}, {output}, nullptr, 0,
1383                                                  nullptr, &op),
1384               kTfLiteOk);
1385     ASSERT_EQ(interpreter_.ResizeInputTensor(input, {3}), kTfLiteOk);
1386   }
1387 
1388   // Adds an OkOp with input tensor `input` and output tensor `output`.
1389   void MakeOkNode(int input, int output) {
1390     TfLiteRegistration op = OkOpRegistration();
1391     ASSERT_EQ(interpreter_.AddNodeWithParameters({input}, {output}, nullptr, 0,
1392                                                  nullptr, &op),
1393               kTfLiteOk);
1394     ASSERT_EQ(interpreter_.ResizeInputTensor(input, {3}), kTfLiteOk);
1395   }
1396 
1397   Interpreter interpreter_;
1398 
1399  private:
1400   // Build the kernel registration for an op that cancels the operation.
1401   TfLiteRegistration CancelOpRegistration() {
1402     TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
1403 
1404     // Set output size to the input size in CancelOp::Prepare(). The resize is
1405     // scaffolding only; the input and output tensors are not otherwise used.
1406     reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
1407       const TfLiteTensor* in_tensor;
1408       TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &in_tensor));
1409       TfLiteTensor* out_tensor;
1410       TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &out_tensor));
1411       TfLiteIntArray* new_size = TfLiteIntArrayCopy(in_tensor->dims);
1412       return context->ResizeTensor(context, out_tensor, new_size);
1413     };
1414 
1415     reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
1416       cancellation_data_.is_cancelled = true;
1417       return kTfLiteOk;
1418     };
1419     return reg;
1420   }
1421 
1422   // Build the kernel registration for an op that returns kTfLiteOk.
1423   TfLiteRegistration OkOpRegistration() {
1424     TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
1425 
1426     // Set output size to the input size in OkOp::Prepare(). The resize is
1427     // scaffolding only; the input and output tensors are not otherwise used.
1428     reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
1429       const TfLiteTensor* in_tensor;
1430       TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &in_tensor));
1431       TfLiteTensor* out_tensor;
1432       TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &out_tensor));
1433       TfLiteIntArray* new_size = TfLiteIntArrayCopy(in_tensor->dims);
1434       return context->ResizeTensor(context, out_tensor, new_size);
1435     };
1436 
1437     reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
1438       return kTfLiteOk;
1439     };
1440     return reg;
1441   }
1442 
1443   void SetUp() final {
1444     cancellation_data_.is_cancelled = false;
1445 
1446     // Set up the interpreter. Create the input and output tensors.
1447     int num_tensors = 3;
1448     ASSERT_EQ(interpreter_.AddTensors(num_tensors), kTfLiteOk);
1449     interpreter_.SetInputs({0});
1450     interpreter_.SetOutputs({2});
1451     TfLiteQuantizationParams quantized;
1452     for (int tensor_index = 0; tensor_index < num_tensors; tensor_index++) {
1453       ASSERT_EQ(interpreter_.SetTensorParametersReadWrite(
1454                     tensor_index, kTfLiteFloat32, "", {3}, quantized),
1455                 kTfLiteOk);
1456     }
1457     interpreter_.SetCancellationFunction(&cancellation_data_,
1458                                          &CheckCancellation);
1459   }
1460 };
1461 
1462 TEST_F(CancellationTest, CancelBeforeInvoke) {
1463   // Cancel prior to calling Invoke.
1464   CancellationTest::MakeOkNode(1, 2);
1465   ASSERT_EQ(interpreter_.AllocateTensors(), kTfLiteOk);
1466 
1467   CancellationTest::Cancel();
1468   TfLiteStatus invoke_error_code = CancellationTest::Invoke();
1469   ASSERT_EQ(invoke_error_code, kTfLiteError);
1470 }
1471 
1472 TEST_F(CancellationTest, CancelDuringInvoke) {
1473   // Tests a model that sets the cancellation flag mid-run to verify that
1474   // cancellation works between ops.
1475   //
1476   // The first op will set the cancellation bit to true. The second op returns
1477   // `kTfLiteOk` if executed.
1478   CancellationTest::MakeCancelNode(0, 1);
1479   CancellationTest::MakeOkNode(1, 2);
1480   ASSERT_EQ(interpreter_.AllocateTensors(), kTfLiteOk);
1481 
1482   TfLiteStatus invoke_error_code = CancellationTest::Invoke();
1483   ASSERT_EQ(invoke_error_code, kTfLiteError);
1484 }
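// A minimal usage sketch of the cancellation API exercised above, assuming an
// interpreter built elsewhere (variable names are hypothetical):
//
//   CancellationData data;
//   interpreter.SetCancellationFunction(&data, &CheckCancellation);
//   data.is_cancelled = true;                    // e.g. from another thread
//   TfLiteStatus status = interpreter.Invoke();  // returns kTfLiteError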
1485 
1486 // Tests functionality related to custom memory allocations in TFLite.
1487 class TestCustomAllocation : public ::testing::Test {
1488  protected:
1489   void SetUp() override {
1490     // Simple model with four builtin ADD nodes, each adding two float tensors.
1491     interpreter_.reset(new Interpreter);
1492     interpreter_->AddTensors(7);
1493     interpreter_->SetInputs({0, 1});
1494     interpreter_->SetOutputs({3, 4, 6});
1495     TfLiteQuantizationParams quant;
1496     interpreter_->SetTensorParametersReadWrite(0, kTfLiteFloat32, "", {3},
1497                                                quant);
1498     interpreter_->SetTensorParametersReadWrite(1, kTfLiteFloat32, "", {3},
1499                                                quant);
1500     interpreter_->SetTensorParametersReadWrite(2, kTfLiteFloat32, "", {3},
1501                                                quant);
1502     interpreter_->SetTensorParametersReadWrite(3, kTfLiteFloat32, "", {3},
1503                                                quant);
1504     interpreter_->SetTensorParametersReadWrite(4, kTfLiteFloat32, "", {3},
1505                                                quant);
1506     interpreter_->SetTensorParametersReadWrite(5, kTfLiteFloat32, "", {3},
1507                                                quant, /*is_variable=*/true);
1508     interpreter_->SetTensorParametersReadWrite(6, kTfLiteFloat32, "", {3},
1509                                                quant);
1510     auto* add_reg = ops::builtin::Register_ADD();
1511     TfLiteAddParams* builtin_data0 =
1512         reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
1513     TfLiteAddParams* builtin_data1 =
1514         reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
1515     TfLiteAddParams* builtin_data2 =
1516         reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
1517     TfLiteAddParams* builtin_data3 =
1518         reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
1519     builtin_data0->activation = kTfLiteActNone;
1520     builtin_data1->activation = kTfLiteActNone;
1521     builtin_data2->activation = kTfLiteActNone;
1522     builtin_data3->activation = kTfLiteActNone;
1523     interpreter_->AddNodeWithParameters({0, 0}, {2}, nullptr, 0, builtin_data0,
1524                                         add_reg);
1525     interpreter_->AddNodeWithParameters({1, 1}, {3}, nullptr, 0, builtin_data1,
1526                                         add_reg);
1527     interpreter_->AddNodeWithParameters({2, 1}, {4}, nullptr, 0, builtin_data2,
1528                                         add_reg);
1529     interpreter_->AddNodeWithParameters({0, 5}, {6}, nullptr, 0, builtin_data3,
1530                                         add_reg);
1531     interpreter_->SetVariables({5});
1532   }
1533 
1534   void AssignCustomAllocForTensor(int tensor_idx, int required_alignment) {
1535     const TfLiteTensor* tensor = interpreter_->tensor(tensor_idx);
1536     auto tensor_alloc = NewCustomAlloc(tensor->bytes, required_alignment);
1537     ASSERT_EQ(
1538         interpreter_->SetCustomAllocationForTensor(tensor_idx, tensor_alloc),
1539         kTfLiteOk);
1540   }
1541 
1542   void VerifyInvoke() {
1543     std::vector<float> input = {1.0f, 2.0f, 3.0f};
1544     std::vector<float> variable = {0.0f, 1.0f, 2.0f};
1545     std::vector<float> expected_output = {2.0f, 4.0f, 6.0f};
1546 
1547     // typed_tensor<...> should work irrespective of custom alloc, since it
1548     // accesses the tensor's data pointer directly.
1549     memcpy(interpreter_->typed_tensor<float>(interpreter_->variables()[0]),
1550            variable.data(), 3 * sizeof(float));
1551     memcpy(interpreter_->typed_tensor<float>(0), input.data(),
1552            3 * sizeof(float));
1553     memcpy(interpreter_->typed_tensor<float>(1), input.data(),
1554            3 * sizeof(float));
1555     ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
1556     TfLiteTensor* output_tensor =
1557         interpreter_->tensor(interpreter_->outputs()[0]);
1558     for (int i = 0; i < 3; ++i) {
1559       EXPECT_EQ(output_tensor->data.f[i], expected_output[i]) << i;
1560     }
1561   }
1562 
1563   // The actual allocation is larger than num_bytes, to account for
1564   // required_alignment.
1565   TfLiteCustomAllocation NewCustomAlloc(size_t num_bytes,
1566                                         int required_alignment) {
1567     // Extra memory to ensure alignment.
1568     char* new_alloc = new char[num_bytes + required_alignment];
1569     char* new_underlying_buffer_aligned_ptr = reinterpret_cast<char*>(
1570         AlignTo(required_alignment, reinterpret_cast<intptr_t>(new_alloc)));
1571     custom_alloc_buffers_.emplace_back(new_alloc);
1572 
1573     return TfLiteCustomAllocation(
1574         {new_underlying_buffer_aligned_ptr, num_bytes});
1575   }
1576 
1577   intptr_t AlignTo(size_t alignment, intptr_t offset) {
1578     return offset % alignment == 0 ? offset
1579                                    : offset + (alignment - offset % alignment);
1580   }
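  // Worked example of the round-up arithmetic above: AlignTo(64, 100) yields
  // 100 + (64 - 100 % 64) == 128, while an already-aligned offset is returned
  // unchanged, e.g. AlignTo(64, 128) == 128.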
1581 
1582   void TearDown() override {
1583     interpreter_.reset();
1584     custom_alloc_buffers_.clear();
1585   }
1586 
1587  protected:
1588   TfLiteAddParams add_params_;
1589   std::unique_ptr<Interpreter> interpreter_;
1590   std::vector<std::unique_ptr<char[]>> custom_alloc_buffers_;
1591 };
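// The tests below all follow the same custom-allocation flow; a condensed
// sketch using the fixture helpers defined above:
//
//   auto alloc = NewCustomAlloc(tensor->bytes, kDefaultTensorAlignment);
//   interpreter_->SetCustomAllocationForTensor(tensor_index, alloc);
//   interpreter_->AllocateTensors();  // validates alignment and size
//   VerifyInvoke();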
1592 
1593 TEST_F(TestCustomAllocation, InvalidAlignment) {
1594   const TfLiteTensor* input_tensor =
1595       interpreter_->tensor(interpreter_->inputs()[0]);
1596   intptr_t dummy_ptr = kDefaultTensorAlignment - 1;
1597   TfLiteCustomAllocation input_alloc{reinterpret_cast<void*>(dummy_ptr),
1598                                      input_tensor->bytes};
1599   ASSERT_EQ(interpreter_->SetCustomAllocationForTensor(
1600                 interpreter_->inputs()[0], input_alloc),
1601             kTfLiteError);
1602 
1603   // Allocate tensors & Invoke should still work.
1604   ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
1605   VerifyInvoke();
1606 }
1607 
1608 TEST_F(TestCustomAllocation, InsufficientBytes) {
1609   auto input_alloc = NewCustomAlloc(4, kDefaultTensorAlignment);
1610   ASSERT_EQ(interpreter_->SetCustomAllocationForTensor(
1611                 interpreter_->inputs()[0], input_alloc),
1612             kTfLiteError);
1613 
1614   // Allocate tensors & Invoke should still work.
1615   ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
1616   VerifyInvoke();
1617 }
1618 
1619 TEST_F(TestCustomAllocation, CustomInputAlloc) {
1620   // Set custom allocation for one input tensor.
1621   AssignCustomAllocForTensor(interpreter_->inputs()[0],
1622                              /*required_alignment=*/kDefaultTensorAlignment);
1623 
1624   ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
1625   VerifyInvoke();
1626 }
1627 
1628 TEST_F(TestCustomAllocation, CustomInputAlloc_MultipleAssigns) {
1629   // Set custom allocation for one input tensor.
1630   AssignCustomAllocForTensor(interpreter_->inputs()[0],
1631                              /*required_alignment=*/kDefaultTensorAlignment);
1632 
1633   AssignCustomAllocForTensor(interpreter_->inputs()[0],
1634                              /*required_alignment=*/kDefaultTensorAlignment);
1635   ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
1636   VerifyInvoke();
1637 
1638   AssignCustomAllocForTensor(interpreter_->inputs()[0],
1639                              /*required_alignment=*/kDefaultTensorAlignment);
1640   ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
1641   VerifyInvoke();
1642 }
1643 
1644 TEST_F(TestCustomAllocation, CustomInputAlloc_AllocateTensorsBefore) {
1645   // Allocate tensors.
1646   // Allocating now will cause TFLite to reserve some extra memory, but nothing
1647   // should break.
1648   ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
1649 
1650   AssignCustomAllocForTensor(interpreter_->inputs()[0],
1651                              /*required_alignment=*/kDefaultTensorAlignment);
1652 
1653   VerifyInvoke();
1654 }
1655 
1656 TEST_F(TestCustomAllocation, CustomInputAndOutputAllocs) {
1657   // Set custom allocations for all IO tensors.
1658   AssignCustomAllocForTensor(interpreter_->inputs()[0],
1659                              /*required_alignment=*/kDefaultTensorAlignment);
1660   AssignCustomAllocForTensor(interpreter_->inputs()[1],
1661                              /*required_alignment=*/kDefaultTensorAlignment);
1662   AssignCustomAllocForTensor(interpreter_->outputs()[0],
1663                              /*required_alignment=*/kDefaultTensorAlignment);
1664   AssignCustomAllocForTensor(interpreter_->outputs()[1],
1665                              /*required_alignment=*/kDefaultTensorAlignment);
1666 
1667   ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
1668   VerifyInvoke();
1669 }
1670 
1671 // Ensure that custom allocs work for tensors on persistent arena as well.
1672 TEST_F(TestCustomAllocation, CustomAlloc_VariableTensor) {
1673   // Set custom allocation for one input tensor.
1674   AssignCustomAllocForTensor(interpreter_->variables()[0],
1675                              /*required_alignment=*/kDefaultTensorAlignment);
1676 
1677   ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
1678   VerifyInvoke();
1679 
1680   AssignCustomAllocForTensor(interpreter_->variables()[0],
1681                              /*required_alignment=*/kDefaultTensorAlignment);
1682   ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
1683 
1684   std::vector<float> input = {2.0f, 3.0f, 4.0f};
1685   std::vector<float> variable = {1.0f, 2.0f, 3.0f};
1686   std::vector<float> expected_output = {3.0f, 5.0f, 7.0f};
1687   memcpy(interpreter_->typed_tensor<float>(interpreter_->variables()[0]),
1688          variable.data(), 3 * sizeof(float));
1689   memcpy(interpreter_->typed_tensor<float>(0), input.data(), 3 * sizeof(float));
1690   memcpy(interpreter_->typed_tensor<float>(1), input.data(), 3 * sizeof(float));
1691   ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
1692 
1693   // expected_output = input + variable
1694   TfLiteTensor* output_tensor =
1695       interpreter_->tensor(interpreter_->outputs()[2]);
1696   for (int i = 0; i < 3; ++i) {
1697     EXPECT_EQ(output_tensor->data.f[i], expected_output[i]) << i;
1698   }
1699 }
1700 
1701 TEST_F(TestCustomAllocation, ResizeTensorsWithoutEnoughMemory) {
1702   // Set custom allocations for all input tensors.
1703   AssignCustomAllocForTensor(interpreter_->inputs()[0],
1704                              /*required_alignment=*/kDefaultTensorAlignment);
1705   AssignCustomAllocForTensor(interpreter_->inputs()[1],
1706                              /*required_alignment=*/kDefaultTensorAlignment);
1707 
1708   ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
1709 
1710   // Now resize tensors to double the size.
1711   ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {2, 3}),
1712             kTfLiteOk);
1713   ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {2, 3}),
1714             kTfLiteOk);
1715 
1716   // Since the custom memory previously allocated isn't enough,
1717   // AllocateTensors() will fail.
1718   ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteError);
1719   // Interpreter should no longer be in invokable state, so expect failure.
1720   ASSERT_EQ(interpreter_->Invoke(), kTfLiteError);
1721 }
1722 
1723 TEST_F(TestCustomAllocation, ResizeTensorsWithEnoughMemory) {
1724   // Set custom allocations for all input tensors, with double the required
1725   // memory.
1726   const TfLiteTensor* input0_tensor =
1727       interpreter_->tensor(interpreter_->inputs()[0]);
1728   auto input0_alloc =
1729       NewCustomAlloc(2 * input0_tensor->bytes, kDefaultTensorAlignment);
1730   ASSERT_EQ(interpreter_->SetCustomAllocationForTensor(
1731                 interpreter_->inputs()[0], input0_alloc),
1732             kTfLiteOk);
1733   const TfLiteTensor* input1_tensor =
1734       interpreter_->tensor(interpreter_->inputs()[1]);
1735   auto input1_alloc =
1736       NewCustomAlloc(2 * input1_tensor->bytes, kDefaultTensorAlignment);
1737   ASSERT_EQ(interpreter_->SetCustomAllocationForTensor(
1738                 interpreter_->inputs()[1], input1_alloc),
1739             kTfLiteOk);
1740 
1741   ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
1742 
1743   // Now resize tensors to double the size.
1744   ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {6, 1}),
1745             kTfLiteOk);
1746   ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {6, 1}),
1747             kTfLiteOk);
1748 
1749   ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
1750 
1751   std::vector<float> input = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f};
1752   std::vector<float> expected_output = {2.0f, 4.0f, 6.0f, 8.0f, 10.0f, 12.0f};
1753   TfLiteTensor* tensor = interpreter_->tensor(interpreter_->outputs()[0]);
1754   memcpy(interpreter_->typed_tensor<float>(0), input.data(), 6 * sizeof(float));
1755   memcpy(interpreter_->typed_tensor<float>(1), input.data(), 6 * sizeof(float));
1756   ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
1757   for (int i = 0; i < 6; ++i) {
1758     EXPECT_EQ(tensor->data.f[i], expected_output[i]) << i;
1759   }
1760 
1761   ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {3, 1}),
1762             kTfLiteOk);
1763   ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {3, 1}),
1764             kTfLiteOk);
1765 
1766   ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
1767   VerifyInvoke();
1768 }
1769 
1770 // Tests related to lazy delegate providers that are primarily used for applying
1771 // TfLite delegates by default.
1772 class TestLazyDelegateProvider : public InterpreterTest {
1773  protected:
1774   struct DummyLazyDelegateProvider : public TfLiteDelegate {
1775     explicit DummyLazyDelegateProvider(int64_t support_flags) {
1776       data_ = static_cast<void*>(this);
1777       flags = support_flags;
1778       Prepare = [](TfLiteContext*, TfLiteDelegate* delegate) -> TfLiteStatus {
1779         return kTfLiteOk;
1780       };
1781     }
1782   };
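  // data_ points back at the object itself so the custom deleter passed to
  // TfLiteDelegatePtr below can recover the concrete type from the raw
  // TfLiteDelegate* and delete it correctly.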
1783 
1784   void InitWithLazyDelegate(int64_t delegate_flags,
1785                             bool create_dynamic_tensor = false,
1786                             bool return_error = false) {
1787     TfLiteRegistration reg = {nullptr};
1788     if (return_error) {
1789       reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
1790         return kTfLiteError;
1791       };
1792     }
1793     ASSERT_EQ(interpreter_.AddTensors(2), kTfLiteOk);
1794     interpreter_.SetInputs({0});
1795     interpreter_.SetOutputs({1});
1796     interpreter_.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr, &reg);
1797 
1798     Interpreter::TfLiteDelegatePtr delegate(
1799         new DummyLazyDelegateProvider(delegate_flags),
1800         [](TfLiteDelegate* delegate) {
1801           auto* dummy =
1802               static_cast<DummyLazyDelegateProvider*>(delegate->data_);
1803           delete dummy;
1804         });
1805     mutable_lazy_delegate_providers()->push_back(std::move(delegate));
1806 
1807     if (create_dynamic_tensor) {
1808       // Mark the output as a dynamic tensor.
1809       interpreter_.tensor(1)->data.raw = nullptr;
1810       interpreter_.tensor(1)->allocation_type = kTfLiteDynamic;
1811     }
1812   }
1813 };
1814 
1815 TEST_F(TestLazyDelegateProvider, ApplicationSuccess) {
1816   InitWithLazyDelegate(kTfLiteDelegateFlagsNone);
1817   EXPECT_EQ(kTfLiteOk, interpreter_.AllocateTensors());
1818   // We clear Interpreter::lazy_delegate_providers_ after they are tried out.
1819   EXPECT_TRUE(mutable_lazy_delegate_providers()->empty());
1820   EXPECT_TRUE(HasDelegates());
1821 }
1822 
1823 TEST_F(TestLazyDelegateProvider, ApplicationFailure) {
1824   InitWithLazyDelegate(kTfLiteDelegateFlagsNone,
1825                        false /* create_dynamic_tensor */,
1826                        true /* return_error */);
1827   EXPECT_EQ(kTfLiteError, interpreter_.AllocateTensors());
1828   // We clear Interpreter::lazy_delegate_providers_ after they are tried out.
1829   EXPECT_TRUE(mutable_lazy_delegate_providers()->empty());
1830   EXPECT_FALSE(HasDelegates());
1831 }
1832 
1833 TEST_F(TestLazyDelegateProvider, ApplicationSkipped) {
1834   InitWithLazyDelegate(kTfLiteDelegateFlagsNone,
1835                        true /* create_dynamic_tensor */);
1836   EXPECT_EQ(kTfLiteOk, interpreter_.AllocateTensors());
1837   EXPECT_TRUE(mutable_lazy_delegate_providers()->empty());
1838   // As the delegate doesn't support dynamic tensors, it won't be applied, so
1839   // the interpreter ends up with no delegates.
1840   EXPECT_FALSE(HasDelegates());
1841 }
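// Sketch of the skip condition involved (assumed semantics): a delegate must
// advertise kTfLiteDelegateFlagsAllowDynamicTensors to be applied to a graph
// containing dynamic tensors; the dummy provider above passes
// kTfLiteDelegateFlagsNone, so application is skipped rather than failed.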
1842 
1843 TEST_F(InterpreterTest, SingleSignature_get_signatures) {
1844   const char kMethodName[] = "test_method";
1845   const char kSignatureDefKey[] = "test_key";
1846   BuildSignature(kMethodName, kSignatureDefKey, {{"Input1", 0}, {"Input2", 1}},
1847                  {{"Output1", 5}});
1848   auto results = interpreter_.signature_def_names();
1849   ASSERT_EQ(1, results.size());
1850   EXPECT_EQ(kMethodName, *results[0]);
1851 }
1852 
1853 TEST_F(InterpreterTest, SingleSignature_get_inputs) {
1854   const char kMethodName[] = "test_method";
1855   const char kSignatureDefKey[] = "test_key";
1856   const std::map<std::string, uint32_t> inputs = {{"Input1", 0}, {"Input2", 1}};
1857   const std::map<std::string, uint32_t> outputs = {{"Output1", 5}};
1858   BuildSignature(kMethodName, kSignatureDefKey, inputs, outputs);
1859   EXPECT_THAT(interpreter_.signature_inputs(kMethodName), testing::Eq(inputs));
1860   EXPECT_THAT(interpreter_.signature_outputs(kMethodName),
1861               testing::Eq(outputs));
1862 }
1863 
1864 TEST_F(InterpreterTest, SingleSignature_validate_get_tensor) {
1865   const char kMethodName[] = "test_method";
1866   const char kSignatureDefKey[] = "test_key";
1867   const std::map<std::string, uint32_t> inputs = {{"Input1", 0}, {"Input2", 1}};
1868   const std::map<std::string, uint32_t> outputs = {{"Output1", 5}};
1869 
1870   BuildSignature(kMethodName, kSignatureDefKey, inputs, outputs);
1871   ASSERT_EQ(interpreter_.AddTensors(6), kTfLiteOk);
1872   ASSERT_EQ(interpreter_.SetInputs({0, 1}), kTfLiteOk);
1873   ASSERT_EQ(interpreter_.SetOutputs({5}), kTfLiteOk);
1874   ASSERT_EQ(interpreter_.SetTensorParametersReadWrite(
1875                 0, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()),
1876             kTfLiteOk);
1877   ASSERT_EQ(interpreter_.SetTensorParametersReadWrite(
1878                 1, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()),
1879             kTfLiteOk);
1880   ASSERT_EQ(interpreter_.ResizeInputTensor(interpreter_.inputs()[0], {1, 2, 3}),
1881             kTfLiteOk);
1882   ASSERT_EQ(interpreter_.ResizeInputTensor(interpreter_.inputs()[1], {1, 2, 3}),
1883             kTfLiteOk);
1884   ASSERT_EQ(interpreter_.AllocateTensors(), kTfLiteOk);
1885 
1886   EXPECT_TRUE(interpreter_.input_tensor_by_signature_name(
1887                   "Input1", kMethodName) != nullptr);
1888   EXPECT_TRUE(interpreter_.input_tensor_by_signature_name(
1889                   "Input2", kMethodName) != nullptr);
1890   EXPECT_TRUE(interpreter_.output_tensor_by_signature_name(
1891                   "Output1", kMethodName) != nullptr);
1892 
1893   // Invalid tensor
1894   EXPECT_EQ(interpreter_.input_tensor_by_signature_name("Input3", kMethodName),
1895             nullptr);
1896   EXPECT_EQ(interpreter_.output_tensor_by_signature_name("Input3", kMethodName),
1897             nullptr);
1898   // Invalid method
1899   EXPECT_EQ(
1900       interpreter_.input_tensor_by_signature_name("Input1", "InvalidMethod"),
1901       nullptr);
1902   EXPECT_EQ(
1903       interpreter_.output_tensor_by_signature_name("Output1", "InvalidMethod"),
1904       nullptr);
1905 }
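// Condensed sketch of the signature lookup API exercised above (names taken
// from the test): tensors are resolved by signature input/output name plus
// method name, and nullptr is returned for unknown names or methods:
//
//   TfLiteTensor* t =
//       interpreter_.input_tensor_by_signature_name("Input1", "test_method");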
1906 
1907 }  // namespace
1908 }  // namespace tflite
1909 
1910 int main(int argc, char** argv) {
1911   ::tflite::LogToStderr();
1912   ::testing::InitGoogleTest(&argc, argv);
1913   return RUN_ALL_TESTS();
1914 }
1915