/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_MICRO_MICRO_ALLOCATOR_H_
#define TENSORFLOW_LITE_MICRO_MICRO_ALLOCATOR_H_

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/flatbuffer_conversions.h"
#include "tensorflow/lite/micro/simple_memory_allocator.h"
#include "tensorflow/lite/schema/schema_generated.h"

namespace tflite {

// Namespace used for unittests.
namespace internal {
// Sets up all of the data structure members for a runtime tensor
// based on the contents of a serialized tensor.
TfLiteStatus InitializeRuntimeTensor(
    SimpleMemoryAllocator* allocator, const tflite::Tensor& flatbuffer_tensor,
    const flatbuffers::Vector<flatbuffers::Offset<Buffer>>* buffers,
    ErrorReporter* error_reporter, TfLiteTensor* result);
}  // namespace internal

typedef struct {
  TfLiteNode node;
  const TfLiteRegistration* registration;
} NodeAndRegistration;

// Allocator responsible for allocating memory for all intermediate tensors
// necessary to invoke a model.
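//
// A minimal construction sketch (illustrative only: kTensorArenaSize and the
// model, context and error reporter objects are assumed to be set up
// elsewhere, and alignas(16) is shown as one way to satisfy the 16-byte
// alignment note on the constructor below):
//
//   alignas(16) static uint8_t tensor_arena[kTensorArenaSize];
//   MicroAllocator allocator(&context, model, tensor_arena, kTensorArenaSize,
//                            &error_reporter);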
class MicroAllocator {
 public:
  // The lifetime of the model, tensor allocator and error reporter must be at
  // least as long as that of the allocator object, since the allocator needs
  // them to be accessible during its entire lifetime.

  // Note: Please make sure tensor_arena is 16-byte aligned (for example with
  // __declspec(align(16)) or alignas(16)); otherwise some headroom will be
  // wasted.
  MicroAllocator(TfLiteContext* context, const Model* model,
                 uint8_t* tensor_arena, size_t arena_size,
                 ErrorReporter* error_reporter);

  // Runs through the model and allocates all necessary input, output and
  // intermediate tensors.
  // WARNING: Doing any allocation after calling this method risks corrupting
  // tensor data, so this should be the last method called on this class.
  TfLiteStatus FinishTensorAllocation();

  // Runs through the model to allocate nodes and registrations. They must be
  // kept for the entire lifetime of the model to allow for persistent tensors.
  // This method must be called before FinishTensorAllocation; see the
  // call-order sketch below.
  TfLiteStatus AllocateNodeAndRegistrations(
      const OpResolver& op_resolver,
      NodeAndRegistration** node_and_registrations);
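  //
  // A call-order sketch (illustrative; `allocator` and `op_resolver` are
  // assumed to be set up elsewhere, as in the class comment above):
  //
  //   NodeAndRegistration* node_and_registrations = nullptr;
  //   allocator.AllocateNodeAndRegistrations(op_resolver,
  //                                          &node_and_registrations);
  //   allocator.FinishTensorAllocation();  // Must be the last allocation.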

 private:
  TfLiteStatus Init();

  const Model* model_;
  SimpleMemoryAllocator* memory_allocator_;
  ErrorReporter* error_reporter_;
  TfLiteContext* context_;
  // Indicates whether the allocator is ready for allocation.
  bool active_ = false;

  const SubGraph* subgraph_;
  const flatbuffers::Vector<flatbuffers::Offset<Operator>>* operators_;
  const flatbuffers::Vector<flatbuffers::Offset<Tensor>>* tensors_;
};

}  // namespace tflite
#endif  // TENSORFLOW_LITE_MICRO_MICRO_ALLOCATOR_H_