/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/micro/micro_allocator.h"

#include <cstdint>

#include "tensorflow/lite/micro/memory_helpers.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/simple_memory_allocator.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/micro/testing/test_conv_model.h"

namespace tflite {
namespace testing {
namespace {

constexpr int kExpectedAlignment = 4;
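
// Tensor index aliases used to keep the NodeConnection lists in the
// offline-planner tests below readable.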
constexpr int t0 = 0;
constexpr int t1 = 1;
constexpr int t2 = 2;
constexpr int t3 = 3;
constexpr int t4 = 4;
constexpr int t5 = 5;

void VerifyMockTfLiteTensor(TfLiteTensor* tensor, bool is_variable = false) {
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, tensor->type);
  TF_LITE_MICRO_EXPECT_EQ(1, tensor->dims->size);
  TF_LITE_MICRO_EXPECT_EQ(1, tensor->dims->data[0]);
  TF_LITE_MICRO_EXPECT_EQ(is_variable, tensor->is_variable);
  TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(4), tensor->bytes);
  TF_LITE_MICRO_EXPECT_NE(nullptr, tensor->data.raw);
  TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(0),
                          (reinterpret_cast<std::uintptr_t>(tensor->data.raw) %
                           kExpectedAlignment));
}

void VerifyMockWeightTfLiteTensor(TfLiteTensor* tensor) {
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteUInt8, tensor->type);
  TF_LITE_MICRO_EXPECT_EQ(1, tensor->dims->size);
  TF_LITE_MICRO_EXPECT_EQ(1, tensor->dims->data[0]);
  TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(1), tensor->bytes);
  TF_LITE_MICRO_EXPECT_NE(nullptr, tensor->data.raw);
}

void VerifyMockTfLiteEvalTensor(TfLiteEvalTensor* tensor) {
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, tensor->type);
  TF_LITE_MICRO_EXPECT_EQ(1, tensor->dims->size);
  TF_LITE_MICRO_EXPECT_EQ(1, tensor->dims->data[0]);
  size_t buffer_size;
  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk, tflite::TfLiteEvalTensorByteLength(tensor, &buffer_size));
  TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(4), buffer_size);
  TF_LITE_MICRO_EXPECT_NE(nullptr, tensor->data.raw);
  TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(0),
                          (reinterpret_cast<std::uintptr_t>(tensor->data.raw) %
                           kExpectedAlignment));
}

void VerifyMockWeightTfLiteEvalTensor(TfLiteEvalTensor* tensor) {
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteUInt8, tensor->type);
  TF_LITE_MICRO_EXPECT_EQ(1, tensor->dims->size);
  TF_LITE_MICRO_EXPECT_EQ(1, tensor->dims->data[0]);
  size_t buffer_size;
  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk, tflite::TfLiteEvalTensorByteLength(tensor, &buffer_size));
  TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(1), buffer_size);
  TF_LITE_MICRO_EXPECT_NE(nullptr, tensor->data.raw);
}

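// Verifies both views of a mock tensor: the persistent TfLiteTensor allocated
// on demand and the corresponding TfLiteEvalTensor from the planned arena.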
void VerifyMockTensor(const Model* model, MicroAllocator* allocator,
                      TfLiteEvalTensor* eval_tensors, int tensor_idx,
                      bool is_variable = false) {
  VerifyMockTfLiteTensor(allocator->AllocatePersistentTfLiteTensor(
                             model, eval_tensors, tensor_idx),
                         is_variable);
  VerifyMockTfLiteEvalTensor(&eval_tensors[tensor_idx]);
}

void VerifyMockWeightTensor(const Model* model, MicroAllocator* allocator,
                            TfLiteEvalTensor* eval_tensors, int tensor_idx) {
  VerifyMockWeightTfLiteTensor(allocator->AllocatePersistentTfLiteTensor(
      model, eval_tensors, tensor_idx));
  VerifyMockWeightTfLiteEvalTensor(&eval_tensors[tensor_idx]);
}

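// Asserts that the buffer of the variable tensor at variable_tensor_idx is
// not shared with any other tensor in the model.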
void EnsureUniqueVariableTensorBuffer(const Model* model,
                                      TfLiteEvalTensor* eval_tensors,
                                      const int variable_tensor_idx) {
  for (size_t i = 0; i < GetModelTensorCount(model); i++) {
    if (i != static_cast<size_t>(variable_tensor_idx)) {
      TF_LITE_MICRO_EXPECT_NE(eval_tensors[variable_tensor_idx].data.raw,
                              eval_tensors[i].data.raw);
    }
  }
}

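// Checks that every prepared node received a registration and allocated
// input/output index arrays.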
void VerifyRegistrationAndNodeAllocation(
    NodeAndRegistration* node_and_registration, size_t count) {
  for (size_t i = 0; i < count; i++) {
    TF_LITE_MICRO_EXPECT_NE(nullptr, node_and_registration[i].registration);
    TF_LITE_MICRO_EXPECT_NE(nullptr, node_and_registration[i].node.inputs);
    TF_LITE_MICRO_EXPECT_NE(nullptr, node_and_registration[i].node.outputs);
  }
}

}  // namespace
}  // namespace testing
}  // namespace tflite

TF_LITE_MICRO_TESTS_BEGIN

TF_LITE_MICRO_TEST(TestInitializeRuntimeTensor) {
  constexpr size_t arena_size = 1024;
  uint8_t arena[arena_size];
  tflite::SimpleMemoryAllocator* simple_allocator =
      tflite::SimpleMemoryAllocator::Create(tflite::GetMicroErrorReporter(),
                                            arena, arena_size);

  const tflite::Tensor* tensor = tflite::testing::Create1dFlatbufferTensor(100);
  const flatbuffers::Vector<flatbuffers::Offset<tflite::Buffer>>* buffers =
      tflite::testing::CreateFlatbufferBuffers();

  TfLiteTensor allocated_tensor;
  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk,
      tflite::internal::InitializeTfLiteTensorFromFlatbuffer(
          simple_allocator, /*allocate_temp=*/false, *tensor, buffers,
          tflite::GetMicroErrorReporter(), &allocated_tensor));
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, allocated_tensor.type);
  TF_LITE_MICRO_EXPECT_EQ(1, allocated_tensor.dims->size);
  TF_LITE_MICRO_EXPECT_EQ(100, allocated_tensor.dims->data[0]);
  TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(400), allocated_tensor.bytes);
  TF_LITE_MICRO_EXPECT(nullptr == allocated_tensor.data.i32);
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteArenaRw, allocated_tensor.allocation_type);

  simple_allocator->~SimpleMemoryAllocator();
}

// TODO(b/162311891): Drop this test when InitializeTfLiteTensorFromFlatbuffer()
// always allocates from temp (interpreter returns buffers from
// TfLiteEvalTensor):
TF_LITE_MICRO_TEST(TestInitializeTempRuntimeTensor) {
  constexpr size_t arena_size = 1024;
  uint8_t arena[arena_size];
  tflite::SimpleMemoryAllocator* simple_allocator =
      tflite::SimpleMemoryAllocator::Create(tflite::GetMicroErrorReporter(),
                                            arena, arena_size);

  const tflite::Tensor* tensor = tflite::testing::Create1dFlatbufferTensor(100);
  const flatbuffers::Vector<flatbuffers::Offset<tflite::Buffer>>* buffers =
      tflite::testing::CreateFlatbufferBuffers();

  TfLiteTensor allocated_temp_tensor;
  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk, tflite::internal::InitializeTfLiteTensorFromFlatbuffer(
                     simple_allocator, /*allocate_temp=*/true, *tensor, buffers,
                     tflite::GetMicroErrorReporter(), &allocated_temp_tensor));
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, allocated_temp_tensor.type);
  TF_LITE_MICRO_EXPECT_EQ(1, allocated_temp_tensor.dims->size);
  TF_LITE_MICRO_EXPECT_EQ(100, allocated_temp_tensor.dims->data[0]);
  TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(400),
                          allocated_temp_tensor.bytes);
  TF_LITE_MICRO_EXPECT(nullptr == allocated_temp_tensor.data.i32);
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteArenaRw,
                          allocated_temp_tensor.allocation_type);

  simple_allocator->~SimpleMemoryAllocator();
}

TF_LITE_MICRO_TEST(TestInitializeQuantizedTensor) {
  constexpr size_t arena_size = 1024;
  uint8_t arena[arena_size];
  tflite::SimpleMemoryAllocator* simple_allocator =
      tflite::SimpleMemoryAllocator::Create(tflite::GetMicroErrorReporter(),
                                            arena, arena_size);

  const tflite::Tensor* tensor =
      tflite::testing::CreateQuantizedFlatbufferTensor(100);
  const flatbuffers::Vector<flatbuffers::Offset<tflite::Buffer>>* buffers =
      tflite::testing::CreateFlatbufferBuffers();

  TfLiteTensor allocated_tensor;
  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk,
      tflite::internal::InitializeTfLiteTensorFromFlatbuffer(
          simple_allocator, /*allocate_temp=*/false, *tensor, buffers,
          tflite::GetMicroErrorReporter(), &allocated_tensor));
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, allocated_tensor.type);
  TF_LITE_MICRO_EXPECT_EQ(1, allocated_tensor.dims->size);
  TF_LITE_MICRO_EXPECT_EQ(100, allocated_tensor.dims->data[0]);
  TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(400), allocated_tensor.bytes);
  TF_LITE_MICRO_EXPECT(nullptr == allocated_tensor.data.i32);
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteArenaRw, allocated_tensor.allocation_type);

  simple_allocator->~SimpleMemoryAllocator();
}

TF_LITE_MICRO_TEST(TestMissingQuantization) {
  constexpr size_t arena_size = 1024;
  uint8_t arena[arena_size];
  tflite::SimpleMemoryAllocator* simple_allocator =
      tflite::SimpleMemoryAllocator::Create(tflite::GetMicroErrorReporter(),
                                            arena, arena_size);

  const tflite::Tensor* tensor =
      tflite::testing::CreateMissingQuantizationFlatbufferTensor(100);
  const flatbuffers::Vector<flatbuffers::Offset<tflite::Buffer>>* buffers =
      tflite::testing::CreateFlatbufferBuffers();

  TfLiteTensor allocated_tensor;
  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk,
      tflite::internal::InitializeTfLiteTensorFromFlatbuffer(
          simple_allocator, /*allocate_temp=*/false, *tensor, buffers,
          tflite::GetMicroErrorReporter(), &allocated_tensor));
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, allocated_tensor.type);
  TF_LITE_MICRO_EXPECT_EQ(1, allocated_tensor.dims->size);
  TF_LITE_MICRO_EXPECT_EQ(100, allocated_tensor.dims->data[0]);
  TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(400), allocated_tensor.bytes);
  TF_LITE_MICRO_EXPECT(nullptr == allocated_tensor.data.i32);
}

TF_LITE_MICRO_TEST(TestFailsWhenModelStartsTwice) {
  const tflite::Model* model = tflite::testing::GetSimpleMockModel();
  TfLiteEvalTensor* eval_tensors = nullptr;
  tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver();
  tflite::NodeAndRegistration* node_and_registration;
  constexpr size_t arena_size = 1024;
  uint8_t arena[arena_size];
  tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
      arena, arena_size, tflite::GetMicroErrorReporter());
  TF_LITE_MICRO_EXPECT(nullptr != allocator);
  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk,
      allocator->StartModelAllocation(model, op_resolver,
                                      &node_and_registration, &eval_tensors));
  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteError,
      allocator->StartModelAllocation(model, op_resolver,
                                      &node_and_registration, &eval_tensors));
}

TF_LITE_MICRO_TEST(TestFailsWithWrongSequence) {
  const tflite::Model* model = tflite::testing::GetSimpleMockModel();
  TfLiteEvalTensor* eval_tensors = nullptr;
  tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr;
  tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver();
  tflite::NodeAndRegistration* node_and_registration;
  constexpr size_t arena_size = 1024;
  uint8_t arena[arena_size];
  tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
      arena, arena_size, tflite::GetMicroErrorReporter());
  TF_LITE_MICRO_EXPECT_NE(nullptr, allocator);

  // An allocation that was never started cannot be finished.
  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteError, allocator->FinishModelAllocation(model, eval_tensors,
                                                     &scratch_buffer_handles));

  // Starting twice is not allowed.
  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk,
      allocator->StartModelAllocation(model, op_resolver,
                                      &node_and_registration, &eval_tensors));
  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteError,
      allocator->StartModelAllocation(model, op_resolver,
                                      &node_and_registration, &eval_tensors));
}

TF_LITE_MICRO_TEST(TestMockModelAllocation) {
  const tflite::Model* model = tflite::testing::GetSimpleMockModel();
  TfLiteEvalTensor* eval_tensors = nullptr;
  tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr;
  tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver();
  tflite::NodeAndRegistration* node_and_registration;
  constexpr size_t arena_size = 1024;
  uint8_t arena[arena_size];
  tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
      arena, arena_size, tflite::GetMicroErrorReporter());
  TF_LITE_MICRO_EXPECT(nullptr != allocator);
  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk,
      allocator->StartModelAllocation(model, op_resolver,
                                      &node_and_registration, &eval_tensors));
  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk, allocator->FinishModelAllocation(model, eval_tensors,
                                                  &scratch_buffer_handles));

  size_t model_tensor_size = tflite::testing::GetModelTensorCount(model);
  TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(4), model_tensor_size);

  tflite::testing::VerifyMockTensor(model, allocator, eval_tensors, 0);
  tflite::testing::VerifyMockWeightTensor(model, allocator, eval_tensors, 1);
  tflite::testing::VerifyMockTensor(model, allocator, eval_tensors, 2);
  tflite::testing::VerifyMockTensor(model, allocator, eval_tensors, 3);

  TF_LITE_MICRO_EXPECT_NE(eval_tensors[1].data.raw, eval_tensors[0].data.raw);
  TF_LITE_MICRO_EXPECT_NE(eval_tensors[2].data.raw, eval_tensors[0].data.raw);
  TF_LITE_MICRO_EXPECT_NE(eval_tensors[1].data.raw, eval_tensors[2].data.raw);
  TF_LITE_MICRO_EXPECT_NE(eval_tensors[3].data.raw, eval_tensors[0].data.raw);
  TF_LITE_MICRO_EXPECT_NE(eval_tensors[3].data.raw, eval_tensors[1].data.raw);
  TF_LITE_MICRO_EXPECT_NE(eval_tensors[3].data.raw, eval_tensors[2].data.raw);
  TF_LITE_MICRO_EXPECT_LE(allocator->used_bytes(), 856 + 100);

  // SimpleMockModel has 2 operators:
  tflite::testing::VerifyRegistrationAndNodeAllocation(node_and_registration,
                                                       /*count=*/2);
}

TF_LITE_MICRO_TEST(TestMultiTenantAllocation) {
  // The `OpResolver` is shared among different models in this test for
  // simplicity, but in practice each model could use its own `OpResolver`.
  tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver();

  // Create a shared allocator.
  constexpr size_t arena_size = 4096;
  uint8_t arena[arena_size];
  tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
      arena, arena_size, tflite::GetMicroErrorReporter());
  TF_LITE_MICRO_EXPECT_NE(nullptr, allocator);
  TfLiteEvalTensor* eval_tensors = nullptr;
  tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr;

  // Allocate for model 1. We use ComplexMockModel here to cover the code path
  // that allocates variable tensors.
  const tflite::Model* model1 = tflite::testing::GetComplexMockModel();
  tflite::NodeAndRegistration* node_and_registration1;
  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk,
      allocator->StartModelAllocation(model1, op_resolver,
                                      &node_and_registration1, &eval_tensors));
  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk, allocator->FinishModelAllocation(model1, eval_tensors,
                                                  &scratch_buffer_handles));
  const size_t single_model_used_bytes = allocator->used_bytes();

  // Allocate for model 2.
  const tflite::Model* model2 = tflite::testing::GetComplexMockModel();
  tflite::NodeAndRegistration* node_and_registration2;
  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk,
      allocator->StartModelAllocation(model2, op_resolver,
                                      &node_and_registration2, &eval_tensors));
  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk, allocator->FinishModelAllocation(model2, eval_tensors,
                                                  &scratch_buffer_handles));

  // Allocating two instances of the same model takes less than twice the
  // single-model memory because the `head` of the arena is reused.
  TF_LITE_MICRO_EXPECT_LE(allocator->used_bytes(), 2 * single_model_used_bytes);
}

TF_LITE_MICRO_TEST(TestAllocationForModelsWithBranches) {
  const tflite::Model* model = tflite::testing::GetSimpleModelWithBranch();
  TfLiteEvalTensor* eval_tensors = nullptr;
  tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr;
  tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver();
  tflite::NodeAndRegistration* node_and_registration;
  constexpr size_t arena_size = 4096;
  uint8_t arena[arena_size];
  tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
      arena, arena_size, tflite::GetMicroErrorReporter());
  TF_LITE_MICRO_EXPECT_NE(nullptr, allocator);
  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk,
      allocator->StartModelAllocation(model, op_resolver,
                                      &node_and_registration, &eval_tensors));
  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk, allocator->FinishModelAllocation(model, eval_tensors,
                                                  &scratch_buffer_handles));

  uint8_t* start = eval_tensors[0].data.uint8;
  // See BuildSimpleModelWithBranch in test_helpers.cc for the model structure.
  // t0 is the first tensor, so it is placed at offset 0.
  TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[0].data.uint8 - start);
  // bytes = 2 * 2 * 3 * sizeof(float32) = 48; the same holds for the other
  // tensors.
  size_t buffer_size;
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, tflite::TfLiteEvalTensorByteLength(
                                         &eval_tensors[0], &buffer_size));
  TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(48), buffer_size);
  // t1 can't reuse any memory, as n0 requires both t0 and t1.
  TF_LITE_MICRO_EXPECT_EQ(96, eval_tensors[1].data.uint8 - start);
  // t2 can't reuse any memory, as n1 requires both t0 and t2. Also n2 requires
  // both t1 and t2.
  TF_LITE_MICRO_EXPECT_EQ(48, eval_tensors[2].data.uint8 - start);
  // t3 reuses t0's memory, since t0 is not an input to any remaining node.
  TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[3].data.uint8 - start);

  // SimpleModelWithBranch has 3 operators:
  tflite::testing::VerifyRegistrationAndNodeAllocation(node_and_registration,
                                                       /*count=*/3);
}

TF_LITE_MICRO_TEST(TestAllocationForComplexModelAllocation) {
  const tflite::Model* model = tflite::testing::GetComplexMockModel();
  TfLiteEvalTensor* eval_tensors = nullptr;
  tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr;
  tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver();
  tflite::NodeAndRegistration* node_and_registration;
  constexpr size_t arena_size = 2048;
  uint8_t arena[arena_size];
  tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
      arena, arena_size, tflite::GetMicroErrorReporter());
  TF_LITE_MICRO_EXPECT(nullptr != allocator);
  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk,
      allocator->StartModelAllocation(model, op_resolver,
                                      &node_and_registration, &eval_tensors));
  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk, allocator->FinishModelAllocation(model, eval_tensors,
                                                  &scratch_buffer_handles));

  size_t model_tensor_size = tflite::testing::GetModelTensorCount(model);
  TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(10), model_tensor_size);

  // NOTE: Tensor indexes match the values in GetComplexMockModel().
  tflite::testing::VerifyMockTensor(model, allocator, eval_tensors, 0);
  tflite::testing::VerifyMockTensor(model, allocator, eval_tensors, 1,
                                    /*is_variable=*/true);
  tflite::testing::VerifyMockWeightTensor(model, allocator, eval_tensors, 2);
  tflite::testing::VerifyMockTensor(model, allocator, eval_tensors, 3);
  tflite::testing::VerifyMockTensor(model, allocator, eval_tensors, 4,
                                    /*is_variable=*/true);
  tflite::testing::VerifyMockWeightTensor(model, allocator, eval_tensors, 5);
  tflite::testing::VerifyMockTensor(model, allocator, eval_tensors, 6);
  tflite::testing::VerifyMockTensor(model, allocator, eval_tensors, 7,
                                    /*is_variable=*/true);
  tflite::testing::VerifyMockWeightTensor(model, allocator, eval_tensors, 8);
  tflite::testing::VerifyMockTensor(model, allocator, eval_tensors, 9);

  // Ensure that the variable tensors have unique addresses.
  tflite::testing::EnsureUniqueVariableTensorBuffer(model, eval_tensors, 1);
  tflite::testing::EnsureUniqueVariableTensorBuffer(model, eval_tensors, 4);
  tflite::testing::EnsureUniqueVariableTensorBuffer(model, eval_tensors, 7);

  // ComplexMockModel has 3 operators:
  tflite::testing::VerifyRegistrationAndNodeAllocation(node_and_registration,
                                                       /*count=*/3);
}

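// The offline-planner tests below feed a metadata buffer that starts with a
// {version, subgraph, tensor count} header followed by one arena offset per
// tensor; an offset of -1 leaves that tensor to the online memory planner.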
TF_LITE_MICRO_TEST(OfflinePlannerBranchesAllOnline) {
  int version = 1;
  int subgraph = 0;
  constexpr int number_tensors = 4;
  tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver();
  tflite::NodeAndRegistration* node_and_registration;
  const int32_t metadata_buffer[tflite::testing::kOfflinePlannerHeaderSize +
                                number_tensors] = {version, subgraph,
                                                   number_tensors,  // header
                                                   // memory offsets:
                                                   -1, -1, -1, -1};

  // The structure is identical to the one in
  // TestAllocationForModelsWithBranches.
  int number_connections = 3;
  tflite::testing::NodeConnection node_list[3] = {{
                                                      {0},  // input
                                                      {1}   // output
                                                  },
                                                  {
                                                      {0},  // input
                                                      {2}   // output
                                                  },
                                                  {
                                                      {1, 2},  // input1, input2
                                                      {3}      // output
                                                  }};

  const tflite::Model* model = tflite::testing::GetModelWithOfflinePlanning(
      number_tensors, metadata_buffer, node_list, number_connections);

  TfLiteEvalTensor* eval_tensors = nullptr;
  tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr;

  constexpr size_t arena_size = 4096;
  uint8_t arena[arena_size];
  tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
      arena, arena_size, tflite::GetMicroErrorReporter());

  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk,
      allocator->StartModelAllocation(model, op_resolver,
                                      &node_and_registration, &eval_tensors));
  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk, allocator->FinishModelAllocation(model, eval_tensors,
                                                  &scratch_buffer_handles));

  // Since all of the tensors are online planned and the model structure is
  // identical to that in TestAllocationForModelsWithBranches, the offsets
  // should be identical to those in that test.
  uint8_t* start = eval_tensors[0].data.uint8;
  TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[0].data.uint8 - start);

  size_t buffer_size;
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, tflite::TfLiteEvalTensorByteLength(
                                         &eval_tensors[0], &buffer_size));
  TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(48), buffer_size);
  TF_LITE_MICRO_EXPECT_EQ(96, eval_tensors[1].data.uint8 - start);
  TF_LITE_MICRO_EXPECT_EQ(48, eval_tensors[2].data.uint8 - start);
  TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[3].data.uint8 - start);
}

TF_LITE_MICRO_TEST(OfflinePlannerBasic) {
  constexpr int number_tensors = 4;
  tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver();
  tflite::NodeAndRegistration* node_and_registration;
  const int32_t metadata_buffer[tflite::testing::kOfflinePlannerHeaderSize +
                                number_tensors] = {1, 0, number_tensors,
                                                   /*t0=*/0,
                                                   /*t1=*/48,
                                                   /*t2=*/0,
                                                   /*t3=*/48};
  constexpr int number_connections = 3;
  tflite::testing::NodeConnection node_list[number_connections] = {
      {/*input=*/{tflite::testing::t0},
       /*output=*/{tflite::testing::t1}},
      {/*input=*/{tflite::testing::t1},
       /*output=*/{tflite::testing::t2}},
      {/*input=*/{tflite::testing::t2},
       /*output=*/{tflite::testing::t3}}};

  const tflite::Model* model = tflite::testing::GetModelWithOfflinePlanning(
      number_tensors, metadata_buffer, node_list, number_connections);

  TfLiteEvalTensor* eval_tensors = nullptr;
  tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr;
  constexpr size_t arena_size = 4096;
  uint8_t arena[arena_size];
  tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
      arena, arena_size, tflite::GetMicroErrorReporter());

  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk,
      allocator->StartModelAllocation(model, op_resolver,
                                      &node_and_registration, &eval_tensors));
  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk, allocator->FinishModelAllocation(model, eval_tensors,
                                                  &scratch_buffer_handles));

  uint8_t* start = eval_tensors[0].data.uint8;
  TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[0].data.uint8 - start);
  TF_LITE_MICRO_EXPECT_EQ(48, eval_tensors[1].data.uint8 - start);
  TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[2].data.uint8 - start);
  TF_LITE_MICRO_EXPECT_EQ(48, eval_tensors[3].data.uint8 - start);
}

TF_LITE_MICRO_TEST(OfflinePlannerOverlappingAllocation) {
  constexpr int number_tensors = 4;
  tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver();
  tflite::NodeAndRegistration* node_and_registration;
  const int32_t metadata_buffer[tflite::testing::kOfflinePlannerHeaderSize +
                                number_tensors] = {/*version=*/1,
                                                   /*subgraph=*/0,
                                                   number_tensors,
                                                   /*t0=*/0,
                                                   /*t1=*/0,
                                                   /*t2=*/48,
                                                   /*t3=*/-1};

  int number_connections = 2;
  tflite::testing::NodeConnection node_list[2] = {
      {/*input, scratch=*/{tflite::testing::t0, tflite::testing::t1},
       /*output=*/{tflite::testing::t2}},
      {/*input=*/{tflite::testing::t2},
       /*output=*/{tflite::testing::t3}},
  };

  const tflite::Model* model = tflite::testing::GetModelWithOfflinePlanning(
      number_tensors, metadata_buffer, node_list, number_connections);

  TfLiteEvalTensor* eval_tensors = nullptr;
  tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr;
  constexpr size_t arena_size = 4096;
  uint8_t arena[arena_size];
  tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
      arena, arena_size, tflite::GetMicroErrorReporter());

  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk,
      allocator->StartModelAllocation(model, op_resolver,
                                      &node_and_registration, &eval_tensors));
  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk, allocator->FinishModelAllocation(model, eval_tensors,
                                                  &scratch_buffer_handles));

  uint8_t* start = eval_tensors[0].data.uint8;
  TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[0].data.uint8 - start);
  TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[1].data.uint8 - start);
  TF_LITE_MICRO_EXPECT_EQ(48, eval_tensors[2].data.uint8 - start);
  TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[3].data.uint8 - start);
  // TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(48), context.tensors[0].bytes);
}

TF_LITE_MICRO_TEST(OfflinePlannerOfflineOnline) {
  constexpr int number_tensors = 5;
  tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver();
  tflite::NodeAndRegistration* node_and_registration;
  const int32_t metadata_buffer[tflite::testing::kOfflinePlannerHeaderSize +
                                number_tensors] = {/*version=*/1,
                                                   /*subgraph=*/0,
                                                   number_tensors,
                                                   /*t0=*/0,
                                                   /*t1=*/48,
                                                   /*t2=*/-1,
                                                   /*t3=*/0,
                                                   /*t4=*/-1};

  constexpr int number_connections = 2;
  tflite::testing::NodeConnection node_list[number_connections] = {
      {
          /*input, scratch=*/{tflite::testing::t0, tflite::testing::t1},
          /*output=*/{tflite::testing::t2},
      },
      {
          /*input=*/{tflite::testing::t2},
          /*output1, output2=*/{tflite::testing::t3, tflite::testing::t4},
      },
  };

  const tflite::Model* model = tflite::testing::GetModelWithOfflinePlanning(
      number_tensors, metadata_buffer, node_list, number_connections);

  TfLiteEvalTensor* eval_tensors = nullptr;
  tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr;
  constexpr size_t arena_size = 4096;
  uint8_t arena[arena_size];
  tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
      arena, arena_size, tflite::GetMicroErrorReporter());

  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk,
      allocator->StartModelAllocation(model, op_resolver,
                                      &node_and_registration, &eval_tensors));
  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk, allocator->FinishModelAllocation(model, eval_tensors,
                                                  &scratch_buffer_handles));

  uint8_t* start = eval_tensors[0].data.uint8;
  TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[0].data.uint8 - start);
  TF_LITE_MICRO_EXPECT_EQ(48, eval_tensors[1].data.uint8 - start);
  TF_LITE_MICRO_EXPECT_EQ(96, eval_tensors[2].data.uint8 - start);
  TF_LITE_MICRO_EXPECT_EQ(48, eval_tensors[4].data.uint8 - start);
  TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[3].data.uint8 - start);
}

TF_LITE_MICRO_TEST(TestAllocatePersistentTfLiteTensor) {
  const tflite::Model* model = tflite::GetModel(kTestConvModelData);
  constexpr size_t arena_size = 1024 * 12;
  uint8_t arena[arena_size];
  tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
      arena, arena_size, tflite::GetMicroErrorReporter());
  TF_LITE_MICRO_EXPECT_NE(allocator, nullptr);

  TfLiteTensor* tensor1 = allocator->AllocatePersistentTfLiteTensor(
      model, /*eval_tensors=*/nullptr, /*tensor_index=*/1);
  TF_LITE_MICRO_EXPECT_NE(tensor1, nullptr);
  TF_LITE_MICRO_EXPECT_NE(tensor1->quantization.params, nullptr);
  TF_LITE_MICRO_EXPECT_FALSE(tensor1->is_variable);

  TfLiteTensor* tensor2 = allocator->AllocatePersistentTfLiteTensor(
      model, /*eval_tensors=*/nullptr, /*tensor_index=*/2);
  TF_LITE_MICRO_EXPECT_NE(tensor2, nullptr);
  TF_LITE_MICRO_EXPECT_NE(tensor2->quantization.params, nullptr);
  TF_LITE_MICRO_EXPECT_FALSE(tensor2->is_variable);

  // The address of tensor1 should be higher than the address of tensor2 since
  // persistent allocations take place in the tail, which grows downward.
  TF_LITE_MICRO_EXPECT_GT(tensor1, tensor2);
}

TF_LITE_MICRO_TEST(TestAllocateSingleTempTfLiteTensor) {
  const tflite::Model* model = tflite::testing::GetSimpleMockModel();
  constexpr size_t arena_size = 1024;
  uint8_t arena[arena_size];
  tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
      arena, arena_size, tflite::GetMicroErrorReporter());
  TF_LITE_MICRO_EXPECT_NE(allocator, nullptr);

  TfLiteTensor* tensor1 = allocator->AllocateTempTfLiteTensor(
      model, /*eval_tensors=*/nullptr, /*tensor_index=*/1);
  TF_LITE_MICRO_EXPECT_NE(tensor1, nullptr);
}

TF_LITE_MICRO_TEST(TestAllocateChainOfTfLiteTensor) {
  const tflite::Model* model = tflite::testing::GetSimpleMockModel();
  constexpr size_t arena_size = 1024;
  uint8_t arena[arena_size];
  tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
      arena, arena_size, tflite::GetMicroErrorReporter());
  TF_LITE_MICRO_EXPECT_NE(allocator, nullptr);

  TfLiteTensor* tensor1 = allocator->AllocateTempTfLiteTensor(
      model, /*eval_tensors=*/nullptr, /*tensor_index=*/1);
  TF_LITE_MICRO_EXPECT_NE(tensor1, nullptr);

  TfLiteTensor* tensor2 = allocator->AllocateTempTfLiteTensor(
      model, /*eval_tensors=*/nullptr, /*tensor_index=*/2);
  TF_LITE_MICRO_EXPECT_NE(tensor2, nullptr);

  // The address of tensor2 should be higher than the address of tensor1
  // (chained allocations):
  TF_LITE_MICRO_EXPECT_GT(tensor2, tensor1);
}

TF_LITE_MICRO_TEST(TestAllocateTfLiteTensorWithReset) {
  const tflite::Model* model = tflite::testing::GetSimpleMockModel();
  constexpr size_t arena_size = 1024;
  uint8_t arena[arena_size];
  tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
      arena, arena_size, tflite::GetMicroErrorReporter());
  TF_LITE_MICRO_EXPECT(allocator != nullptr);

  TfLiteTensor* tensor1 = allocator->AllocateTempTfLiteTensor(
      model, /*eval_tensors=*/nullptr, /*tensor_index=*/1);
  TF_LITE_MICRO_EXPECT(tensor1 != nullptr);

  allocator->ResetTempAllocations();

  TfLiteTensor* tensor2 = allocator->AllocateTempTfLiteTensor(
      model, /*eval_tensors=*/nullptr, /*tensor_index=*/2);
  TF_LITE_MICRO_EXPECT(tensor2 != nullptr);

  // The address of tensor2 should be equal to the address of tensor1 since
  // the allocations were not chained:
  TF_LITE_MICRO_EXPECT(tensor2 == tensor1);
}

TF_LITE_MICRO_TEST(TestOperatorInputsNotInSubgraphInputs) {
  constexpr int number_tensors = 5;
  tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver();
  tflite::NodeAndRegistration* node_and_registration;
  const int32_t metadata_buffer[tflite::testing::kOfflinePlannerHeaderSize +
                                number_tensors] = {/*version=*/1,
                                                   /*subgraph=*/0,
                                                   number_tensors,
                                                   /*t0=*/0,
                                                   /*t1=*/0,
                                                   /*t2=*/0,
                                                   /*t3=*/48,
                                                   /*t4=*/-1};

  constexpr int number_connections = 2;
  tflite::testing::NodeConnection node_list[number_connections] = {
      {// t0: input (in both the subgraph inputs and the operator inputs)
       // t1: scratch1 (only in operator inputs)
       // t2: scratch2 (only in operator inputs)
       {tflite::testing::t0, tflite::testing::t1, tflite::testing::t2},
       /*t3: output=*/{tflite::testing::t3}},
      {/*t3: input=*/{tflite::testing::t3},
       /*t4: output=*/{tflite::testing::t4}},
  };

  const tflite::Model* model = tflite::testing::GetModelWithOfflinePlanning(
      number_tensors, metadata_buffer, node_list, number_connections,
      /*Only first tensor (t0) is in subgraph input list=*/1);

  TfLiteEvalTensor* eval_tensors = nullptr;
  tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr;
  constexpr size_t arena_size = 4096;
  uint8_t arena[arena_size];
  tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
      arena, arena_size, tflite::GetMicroErrorReporter());

  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk,
      allocator->StartModelAllocation(model, op_resolver,
                                      &node_and_registration, &eval_tensors));
  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk, allocator->FinishModelAllocation(model, eval_tensors,
                                                  &scratch_buffer_handles));

  uint8_t* start = eval_tensors[0].data.uint8;
  TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[0].data.uint8 - start);
  TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[1].data.uint8 - start);
  TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[2].data.uint8 - start);
  TF_LITE_MICRO_EXPECT_EQ(48, eval_tensors[3].data.uint8 - start);
  TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[4].data.uint8 - start);
}

TF_LITE_MICRO_TEST(TestTypicalFirstOpAndSecondOpWithScratchTensors) {
  constexpr int number_tensors = 6;
  tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver();
  tflite::NodeAndRegistration* node_and_registration;
  const int32_t metadata_buffer[tflite::testing::kOfflinePlannerHeaderSize +
                                number_tensors] = {/*version=*/1,
                                                   /*subgraph=*/0,
                                                   number_tensors,
                                                   /*t0=*/0,
                                                   /*t1=*/0,
                                                   /*t2=*/0,
                                                   /*t3=*/0,
                                                   /*t4=*/48,
                                                   /*t5=*/-1};

  constexpr int number_connections = 3;
  tflite::testing::NodeConnection node_list[number_connections] = {
      {/*t0: input (subgraph and operator input)=*/{tflite::testing::t0},
       /*t1: output=*/{tflite::testing::t1}},
      {// t1: input
       // t2: scratch1 (only in operator inputs)
       // t3: scratch2 (only in operator inputs)
       {tflite::testing::t1, tflite::testing::t2, tflite::testing::t3},

       /*t4: output=*/{tflite::testing::t4}},
      {/*t4: input=*/{tflite::testing::t4},
       /*t5: output=*/{tflite::testing::t5}},
  };

  const tflite::Model* model = tflite::testing::GetModelWithOfflinePlanning(
      number_tensors, metadata_buffer, node_list, number_connections,
      /*Only first tensor (t0) is in subgraph input list=*/1);

  TfLiteEvalTensor* eval_tensors = nullptr;
  tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr;
  constexpr size_t arena_size = 4096;
  uint8_t arena[arena_size];
  tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
      arena, arena_size, tflite::GetMicroErrorReporter());

  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk,
      allocator->StartModelAllocation(model, op_resolver,
                                      &node_and_registration, &eval_tensors));
  TF_LITE_MICRO_EXPECT_EQ(
      kTfLiteOk, allocator->FinishModelAllocation(model, eval_tensors,
                                                  &scratch_buffer_handles));

  uint8_t* start = eval_tensors[0].data.uint8;
  TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[0].data.uint8 - start);
  TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[1].data.uint8 - start);
  TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[2].data.uint8 - start);
  TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[3].data.uint8 - start);
  TF_LITE_MICRO_EXPECT_EQ(48, eval_tensors[4].data.uint8 - start);
  TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[5].data.uint8 - start);
}

TF_LITE_MICRO_TESTS_END