/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/micro/recording_micro_allocator.h"

#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/micro/compatibility.h"
#include "tensorflow/lite/micro/micro_allocator.h"
#include "tensorflow/lite/micro/recording_simple_memory_allocator.h"

namespace tflite {

RecordingMicroAllocator::RecordingMicroAllocator(
    RecordingSimpleMemoryAllocator* recording_memory_allocator,
    ErrorReporter* error_reporter)
    : MicroAllocator(recording_memory_allocator, error_reporter),
      recording_memory_allocator_(recording_memory_allocator) {}

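// Creates a RecordingMicroAllocator that tracks how the supplied tensor arena
// is used. The allocator instance itself is placement-constructed in memory
// taken from the tail of the arena rather than from the heap.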
RecordingMicroAllocator* RecordingMicroAllocator::Create(
    uint8_t* tensor_arena, size_t arena_size, ErrorReporter* error_reporter) {
  TFLITE_DCHECK(error_reporter != nullptr);

  RecordingSimpleMemoryAllocator* simple_memory_allocator =
      RecordingSimpleMemoryAllocator::Create(error_reporter, tensor_arena,
                                             arena_size);
  TFLITE_DCHECK(simple_memory_allocator != nullptr);

  uint8_t* allocator_buffer = simple_memory_allocator->AllocateFromTail(
      sizeof(RecordingMicroAllocator), alignof(RecordingMicroAllocator));
  RecordingMicroAllocator* allocator = new (allocator_buffer)
      RecordingMicroAllocator(simple_memory_allocator, error_reporter);
  return allocator;
}

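// Returns the usage recorded so far for a single allocation category. An
// unrecognized type is reported through the error reporter and an empty
// RecordedAllocation is returned.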
RecordedAllocation RecordingMicroAllocator::GetRecordedAllocation(
    RecordedAllocationType allocation_type) const {
  switch (allocation_type) {
    case RecordedAllocationType::kTfLiteEvalTensorData:
      return recorded_tflite_eval_tensor_data_;
    case RecordedAllocationType::kPersistentTfLiteTensorData:
      return recorded_persistent_tflite_tensor_data_;
    case RecordedAllocationType::kPersistentTfLiteTensorQuantizationData:
      return recorded_persistent_tflite_tensor_quantization_data_;
    case RecordedAllocationType::kPersistentBufferData:
      return recorded_persistent_buffer_data_;
    case RecordedAllocationType::kTfLiteTensorVariableBufferData:
      return recorded_tflite_tensor_variable_buffer_data_;
    case RecordedAllocationType::kNodeAndRegistrationArray:
      return recorded_node_and_registration_array_data_;
    case RecordedAllocationType::kOpData:
      return recorded_op_data_;
  }
  TF_LITE_REPORT_ERROR(error_reporter(), "Invalid allocation type supplied: %d",
                       allocation_type);
  return RecordedAllocation();
}

const RecordingSimpleMemoryAllocator*
RecordingMicroAllocator::GetSimpleMemoryAllocator() const {
  return recording_memory_allocator_;
}

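// Logs the total, head, and tail usage of the arena, followed by a
// per-category breakdown of the recorded allocations.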
void RecordingMicroAllocator::PrintAllocations() const {
  TF_LITE_REPORT_ERROR(
      error_reporter(),
      "[RecordingMicroAllocator] Arena allocation total %d bytes",
      recording_memory_allocator_->GetUsedBytes());
  TF_LITE_REPORT_ERROR(
      error_reporter(),
      "[RecordingMicroAllocator] Arena allocation head %d bytes",
      recording_memory_allocator_->GetHeadUsedBytes());
  TF_LITE_REPORT_ERROR(
      error_reporter(),
      "[RecordingMicroAllocator] Arena allocation tail %d bytes",
      recording_memory_allocator_->GetTailUsedBytes());
  PrintRecordedAllocation(RecordedAllocationType::kTfLiteEvalTensorData,
                          "TfLiteEvalTensor data", "allocations");
  PrintRecordedAllocation(RecordedAllocationType::kPersistentTfLiteTensorData,
                          "Persistent TfLiteTensor data", "tensors");
  PrintRecordedAllocation(
      RecordedAllocationType::kPersistentTfLiteTensorQuantizationData,
      "Persistent TfLiteTensor quantization data", "allocations");
  PrintRecordedAllocation(RecordedAllocationType::kPersistentBufferData,
                          "Persistent buffer data", "allocations");
  PrintRecordedAllocation(
      RecordedAllocationType::kTfLiteTensorVariableBufferData,
      "TfLiteTensor variable buffer data", "allocations");
  PrintRecordedAllocation(RecordedAllocationType::kNodeAndRegistrationArray,
                          "NodeAndRegistration struct",
                          "NodeAndRegistration structs");
  PrintRecordedAllocation(RecordedAllocationType::kOpData,
                          "Operator runtime data", "OpData structs");
}

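// Each allocation override below follows the same pattern: snapshot the
// allocator counters, delegate to the MicroAllocator base class, then record
// the delta against the corresponding RecordedAllocation bucket.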
void* RecordingMicroAllocator::AllocatePersistentBuffer(size_t bytes) {
  RecordedAllocation allocations = SnapshotAllocationUsage();
  void* buffer = MicroAllocator::AllocatePersistentBuffer(bytes);
  RecordAllocationUsage(allocations, recorded_persistent_buffer_data_);

  return buffer;
}

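// Prints a single recorded category. The body is compiled out when
// TF_LITE_STRIP_ERROR_STRINGS is defined, and categories that recorded no
// usage are skipped.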
void RecordingMicroAllocator::PrintRecordedAllocation(
    RecordedAllocationType allocation_type, const char* allocation_name,
    const char* allocation_description) const {
#ifndef TF_LITE_STRIP_ERROR_STRINGS
  RecordedAllocation allocation = GetRecordedAllocation(allocation_type);
  if (allocation.used_bytes > 0 || allocation.requested_bytes > 0) {
    TF_LITE_REPORT_ERROR(
        error_reporter(),
        "[RecordingMicroAllocator] '%s' used %d bytes with alignment overhead "
        "(requested %d bytes for %d %s)",
        allocation_name, allocation.used_bytes, allocation.requested_bytes,
        allocation.count, allocation_description);
  }
#endif
}

TfLiteStatus RecordingMicroAllocator::AllocateNodeAndRegistrations(
    const Model* model, NodeAndRegistration** node_and_registrations) {
  RecordedAllocation allocations = SnapshotAllocationUsage();

  TfLiteStatus status = MicroAllocator::AllocateNodeAndRegistrations(
      model, node_and_registrations);

  RecordAllocationUsage(allocations,
                        recorded_node_and_registration_array_data_);
  // The allocation count recorded above will always be 1 because the parent
  // class makes a single large allocation covering every node in the graph
  // (i.e. sizeof(NodeAndRegistration) * num_nodes), which avoids extra
  // overhead and potential fragmentation. To provide better logging, adjust
  // the accounting by decrementing by 1 and adding the actual number of
  // operators used in the graph:
  recorded_node_and_registration_array_data_.count +=
      GetSubGraphFromModel(model)->operators()->size() - 1;
  return status;
}

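// Allocations made while parsing operator data from the flatbuffer are
// attributed to the kOpData category.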
TfLiteStatus
RecordingMicroAllocator::PrepareNodeAndRegistrationDataFromFlatbuffer(
    const Model* model, const MicroOpResolver& op_resolver,
    NodeAndRegistration* node_and_registrations) {
  RecordedAllocation allocations = SnapshotAllocationUsage();

  TfLiteStatus status =
      MicroAllocator::PrepareNodeAndRegistrationDataFromFlatbuffer(
          model, op_resolver, node_and_registrations);

  RecordAllocationUsage(allocations, recorded_op_data_);
  return status;
}

TfLiteStatus RecordingMicroAllocator::AllocateTfLiteEvalTensors(
    const Model* model, TfLiteEvalTensor** eval_tensors) {
  RecordedAllocation allocations = SnapshotAllocationUsage();

  TfLiteStatus status =
      MicroAllocator::AllocateTfLiteEvalTensors(model, eval_tensors);

  RecordAllocationUsage(allocations, recorded_tflite_eval_tensor_data_);
  // The allocation count recorded above will always be 1 because the parent
  // class makes a single large allocation covering every tensor in the graph
  // (i.e. sizeof(TfLiteEvalTensor) * num_tensors), which avoids extra
  // overhead and potential fragmentation. To provide better logging, adjust
  // the accounting by decrementing by 1 and adding the actual number of
  // tensors used in the graph:
  recorded_tflite_eval_tensor_data_.count +=
      GetSubGraphFromModel(model)->tensors()->size() - 1;
  return status;
}

TfLiteStatus RecordingMicroAllocator::AllocateVariables(
    const SubGraph* subgraph, TfLiteEvalTensor* eval_tensors) {
  RecordedAllocation allocations = SnapshotAllocationUsage();

  TfLiteStatus status =
      MicroAllocator::AllocateVariables(subgraph, eval_tensors);

  RecordAllocationUsage(allocations,
                        recorded_tflite_tensor_variable_buffer_data_);
  return status;
}

TfLiteTensor* RecordingMicroAllocator::AllocatePersistentTfLiteTensorInternal(
    const Model* model, TfLiteEvalTensor* eval_tensors, int tensor_index) {
  RecordedAllocation allocations = SnapshotAllocationUsage();

  TfLiteTensor* result = MicroAllocator::AllocatePersistentTfLiteTensorInternal(
      model, eval_tensors, tensor_index);

  RecordAllocationUsage(allocations, recorded_persistent_tflite_tensor_data_);
  return result;
}

TfLiteStatus RecordingMicroAllocator::PopulateTfLiteTensorFromFlatbuffer(
    const Model* model, const SubGraph* subgraph, TfLiteTensor* tensor,
    int tensor_index, bool allocate_temp) {
  RecordedAllocation allocations = SnapshotAllocationUsage();

  TfLiteStatus status = MicroAllocator::PopulateTfLiteTensorFromFlatbuffer(
      model, subgraph, tensor, tensor_index, allocate_temp);

  RecordAllocationUsage(allocations,
                        recorded_persistent_tflite_tensor_quantization_data_);
  return status;
}

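// Captures the allocator's current totals so that a later
// RecordAllocationUsage() call can attribute only the delta to a specific
// RecordedAllocation bucket.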
RecordedAllocation RecordingMicroAllocator::SnapshotAllocationUsage() const {
  return {/*requested_bytes=*/recording_memory_allocator_->GetRequestedBytes(),
          /*used_bytes=*/recording_memory_allocator_->GetUsedBytes(),
          /*count=*/recording_memory_allocator_->GetAllocatedCount()};
}

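// Adds the usage accumulated since the given snapshot to the supplied
// RecordedAllocation bucket.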
void RecordingMicroAllocator::RecordAllocationUsage(
    const RecordedAllocation& snapshotted_allocation,
    RecordedAllocation& recorded_allocation) {
  recorded_allocation.requested_bytes +=
      recording_memory_allocator_->GetRequestedBytes() -
      snapshotted_allocation.requested_bytes;
  recorded_allocation.used_bytes +=
      recording_memory_allocator_->GetUsedBytes() -
      snapshotted_allocation.used_bytes;
  recorded_allocation.count +=
      recording_memory_allocator_->GetAllocatedCount() -
      snapshotted_allocation.count;
}

}  // namespace tflite