/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/simple_memory_arena.h"

#include <stddef.h>
#include <stdint.h>

#include <algorithm>
#include <cstring>
#include <iterator>
#include <limits>
#include <memory>
#include <vector>

#include "tensorflow/lite/c/common.h"

namespace {

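// Rounds `offset` up to the nearest multiple of `alignment`.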
template <typename T>
T AlignTo(size_t alignment, T offset) {
  return offset % alignment == 0 ? offset
                                 : offset + (alignment - offset % alignment);
}

}  // namespace

namespace tflite {
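
// Reserves `size` bytes for `tensor`, searching the ordered allocation list
// for the smallest gap that can hold it among allocations whose usage
// intervals overlap [first_node, last_node]; allocations that do not overlap
// are ignored so their space can be reused. If no suitable gap exists, the
// region is placed past the end of the existing allocations. Only the
// offset-based plan is updated here; no memory is touched until Commit().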
TfLiteStatus SimpleMemoryArena::Allocate(
    TfLiteContext* context, size_t alignment, size_t size, int32_t tensor,
    int32_t first_node, int32_t last_node,
    ArenaAllocWithUsageInterval* new_alloc) {
  TF_LITE_ENSURE(context, alignment <= arena_alignment_);
  new_alloc->tensor = tensor;
  new_alloc->first_node = first_node;
  new_alloc->last_node = last_node;
  new_alloc->size = size;
  if (size == 0) {
    new_alloc->offset = 0;
    return kTfLiteOk;
  }

  // If we don't find a better gap, just allocate at the end of the buffer.
  const size_t kOffsetNotAssigned = std::numeric_limits<size_t>::max();
  size_t best_offset = kOffsetNotAssigned;
  size_t best_offset_fit = kOffsetNotAssigned;

  // Go through the sorted allocs and look at the gaps between them.
  size_t current_offset = 0;
  for (const auto& alloc : ordered_allocs_) {
    if (alloc.last_node < first_node || alloc.first_node > last_node) {
      // Usage interval of alloc doesn't intersect with current tensor's usage
      // interval, so we skip it.
      continue;
    }
    size_t aligned_current_offset = AlignTo(alignment, current_offset);
    // If we found a gap large enough for the required size, and smaller than
    // the previous best fit, take it.
    if (aligned_current_offset + size <= alloc.offset &&
        alloc.offset - aligned_current_offset < best_offset_fit) {
      best_offset = aligned_current_offset;
      best_offset_fit = alloc.offset - current_offset;
    }
    current_offset = std::max(current_offset, alloc.offset + alloc.size);
  }
  if (best_offset == kOffsetNotAssigned) {
    best_offset = AlignTo(alignment, current_offset);
  }

  // Update the required buffer size.
  high_water_mark_ = std::max(high_water_mark_, best_offset + size);
  new_alloc->offset = best_offset;

  auto insertion_it = ordered_allocs_.begin();
  while (insertion_it != ordered_allocs_.end() && *insertion_it < *new_alloc) {
    ++insertion_it;
  }
  ordered_allocs_.insert(insertion_it, *new_alloc);
  return kTfLiteOk;
}

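// Removes the allocation recorded for `alloc.tensor` from the plan, if any.
// At most one matching entry is expected.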
TfLiteStatus SimpleMemoryArena::Deallocate(
    TfLiteContext* context, const ArenaAllocWithUsageInterval& alloc) {
  if (alloc.size == 0) {
    return kTfLiteOk;
  }

  int erased_allocs_count = 0;
  auto it = ordered_allocs_.begin();
  while (it != ordered_allocs_.end()) {
    if (it->tensor == alloc.tensor) {
      erased_allocs_count++;
      it = ordered_allocs_.erase(it);
    } else {
      ++it;
    }
  }
  TF_LITE_ENSURE(context, erased_allocs_count <= 1);
  return kTfLiteOk;
}

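// Ensures the underlying buffer is at least RequiredBufferSize() bytes,
// growing it and copying over the old contents when necessary. Recorded
// allocations stay valid because they store offsets relative to the aligned
// base pointer rather than absolute addresses.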
TfLiteStatus SimpleMemoryArena::Commit(TfLiteContext* context) {
  size_t required_size = RequiredBufferSize();
  if (required_size > underlying_buffer_size_) {
    char* new_alloc = new char[required_size];
    char* new_underlying_buffer_aligned_ptr = reinterpret_cast<char*>(
        AlignTo(arena_alignment_, reinterpret_cast<intptr_t>(new_alloc)));

    // If the arena had been previously allocated, copy over the old memory.
    // Since Alloc pointers are offset based, they will remain valid in the new
    // memory block.
    if (high_water_mark_ > 0 && underlying_buffer_size_ > 0) {
      size_t copy_amount = std::min(
          underlying_buffer_.get() + underlying_buffer_size_ -
              underlying_buffer_aligned_ptr_,
          new_alloc + required_size - new_underlying_buffer_aligned_ptr);
      memcpy(new_underlying_buffer_aligned_ptr, underlying_buffer_aligned_ptr_,
             copy_amount);
    }

    underlying_buffer_.reset(new_alloc);
    underlying_buffer_size_ = required_size;
    underlying_buffer_aligned_ptr_ = new_underlying_buffer_aligned_ptr;
  }
  committed_ = true;
  return underlying_buffer_ != nullptr ? kTfLiteOk : kTfLiteError;
}

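// Translates an offset-based allocation into an absolute pointer inside the
// committed buffer; zero-sized allocations resolve to nullptr.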
TfLiteStatus SimpleMemoryArena::ResolveAlloc(
    TfLiteContext* context, const ArenaAllocWithUsageInterval& alloc,
    char** output_ptr) {
  TF_LITE_ENSURE(context, committed_);
  TF_LITE_ENSURE(context, output_ptr != nullptr);
  TF_LITE_ENSURE(context,
                 underlying_buffer_size_ >= (alloc.offset + alloc.size));
  if (alloc.size == 0) {
    *output_ptr = nullptr;
  } else {
    *output_ptr = underlying_buffer_aligned_ptr_ + alloc.offset;
  }
  return kTfLiteOk;
}

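// Discards the current allocation plan while keeping the underlying buffer.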
TfLiteStatus SimpleMemoryArena::ClearPlan() {
  committed_ = false;
  high_water_mark_ = 0;
  ordered_allocs_.clear();
  return kTfLiteOk;
}

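// Frees the underlying buffer. The allocation plan (ordered allocs and high
// water mark) is left untouched.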
TfLiteStatus SimpleMemoryArena::ReleaseBuffer() {
  committed_ = false;
  underlying_buffer_size_ = 0;
  underlying_buffer_aligned_ptr_ = nullptr;
  underlying_buffer_.reset();
  return kTfLiteOk;
}

}  // namespace tflite