/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_SIMPLE_MEMORY_ARENA_H_
#define TENSORFLOW_LITE_SIMPLE_MEMORY_ARENA_H_

#include <stddef.h>

#include <cstdint>
#include <memory>
#include <vector>

#include "tensorflow/lite/c/common.h"
namespace tflite {

// This small structure holds the offset and size of a dynamic memory
// allocation in the memory arena, as well as the first_node and last_node
// that use the corresponding tensor. It means that a contiguous chunk of
// memory of this size needs to be allocated before the execution of the
// operation in first_node, and can be deallocated after the execution of the
// operation in last_node. When the arena is committed and the underlying
// buffer is set, the alloc can be resolved into an actual memory pointer.
struct ArenaAllocWithUsageInterval {
  ArenaAllocWithUsageInterval() { reset(); }

  size_t offset;
  size_t size;
  int32_t tensor;
  int32_t first_node;
  int32_t last_node;

  inline void reset() {
    offset = 0;
    size = 0;
    tensor = -1;
    first_node = -1;
    last_node = -1;
  }

  inline bool operator<(const ArenaAllocWithUsageInterval& other) const {
    return offset < other.offset;
  }
};
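
// Illustrative sketch (not part of the API): two tensors whose usage
// intervals do not overlap can share the same region of the arena, since the
// second allocation only needs the memory after the first is no longer live.
//
//   ArenaAllocWithUsageInterval a, b;
//   a.offset = 0; a.size = 1024; a.tensor = 0; a.first_node = 0; a.last_node = 2;
//   b.offset = 0; b.size = 512;  b.tensor = 1; b.first_node = 3; b.last_node = 5;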

// This small class is responsible for allocating, deallocating and reusing
// dynamic memory from a common underlying buffer. The arena can be used in
// scenarios where the pattern of memory allocations and deallocations is
// repetitive, e.g. running NN inference in multiple iterations. Note that
// zero-sized allocations are explicitly allowed, and will resolve to null.
class SimpleMemoryArena {
 public:
  explicit SimpleMemoryArena(size_t arena_alignment)
      : committed_(false),
        arena_alignment_(arena_alignment),
        high_water_mark_(0),
        underlying_buffer_size_(0),
        ordered_allocs_() {}

  // Schedules a memory allocation for a tensor of the given size, assuming
  // that it needs to be allocated before the execution of first_node, and can
  // be deallocated after the execution of last_node.
  TfLiteStatus Allocate(TfLiteContext* context, size_t alignment, size_t size,
                        int32_t tensor, int32_t first_node, int32_t last_node,
                        ArenaAllocWithUsageInterval* new_alloc);

  // Schedules deallocation for the given allocation, making its region of the
  // arena available for reuse by later allocations.
  TfLiteStatus Deallocate(TfLiteContext* context,
                          const ArenaAllocWithUsageInterval& alloc);

  // Returns the buffer size needed to hold all scheduled allocations,
  // including alignment slack and a small amount of padding.
  inline size_t RequiredBufferSize() {
    // Add in a small amount of padding to reduce the chance of resize events
    // for small allocations.
    size_t padding = arena_alignment_;
    return arena_alignment_ + high_water_mark_ + padding;
  }

  // Commits the allocation plan, (re)allocating the underlying buffer if
  // needed so that every scheduled allocation fits.
  TfLiteStatus Commit(TfLiteContext* context);

  // Resolves a committed allocation into an actual pointer within the
  // underlying buffer.
  TfLiteStatus ResolveAlloc(TfLiteContext* context,
                            const ArenaAllocWithUsageInterval& alloc,
                            char** output_ptr);

  // This clears allocation details but does not release the underlying buffer.
  // New allocations should be committed & resolved before using this arena
  // again.
  TfLiteStatus ClearPlan();

  // This releases the underlying buffer but does not clear the allocation
  // plan. Since all associated pointers are invalidated, the arena cannot be
  // used again until Commit() is called & tensor allocations are resolved.
  TfLiteStatus ReleaseBuffer();

  size_t GetBufferSize() { return underlying_buffer_size_; }

  std::intptr_t BasePointer() const {
    return reinterpret_cast<std::intptr_t>(underlying_buffer_aligned_ptr_);
  }

 private:
  bool committed_;
  size_t arena_alignment_;
  size_t high_water_mark_;
  std::unique_ptr<char[]> underlying_buffer_;
  size_t underlying_buffer_size_;
  char* underlying_buffer_aligned_ptr_;
  std::vector<ArenaAllocWithUsageInterval> ordered_allocs_;
};
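
// Example usage (an illustrative sketch, not part of the API): the typical
// lifecycle of the arena when planning memory for an inference graph,
// assuming `context` is a valid TfLiteContext* obtained elsewhere.
//
//   SimpleMemoryArena arena(/*arena_alignment=*/64);
//   ArenaAllocWithUsageInterval alloc0, alloc1;
//   // Tensor 0 is live during nodes [0, 2]; tensor 1 during nodes [1, 3].
//   arena.Allocate(context, /*alignment=*/4, /*size=*/1024, /*tensor=*/0,
//                  /*first_node=*/0, /*last_node=*/2, &alloc0);
//   arena.Allocate(context, /*alignment=*/4, /*size=*/2048, /*tensor=*/1,
//                  /*first_node=*/1, /*last_node=*/3, &alloc1);
//   arena.Commit(context);  // Sets up the underlying buffer.
//   char* ptr0 = nullptr;
//   arena.ResolveAlloc(context, alloc0, &ptr0);  // ptr0 points into the arena.
//   // ... run inference ...
//   arena.ClearPlan();      // Drop the plan but keep the buffer, or
//   arena.ReleaseBuffer();  // drop the buffer but keep the plan.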

}  // namespace tflite

#endif  // TENSORFLOW_LITE_SIMPLE_MEMORY_ARENA_H_