/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/litert/runtime_allocator.h"

namespace mindspore {
RuntimeAllocator::RuntimeAllocator(size_t aligned_size) { aligned_size_ = aligned_size; }

RuntimeAllocator::~RuntimeAllocator() {
  // Release the single backing buffer that holds all planned tensor data.
  if (data_ != nullptr) {
    free(data_);
    data_ = nullptr;
  }
}

void *RuntimeAllocator::MallocOptData() {
  // Lazily allocate one contiguous buffer large enough for the whole plan;
  // callers are expected to check for a nullptr return on allocation failure.
  if (data_ == nullptr) {
    data_ = malloc(total_size_);
  }
  return data_;
}

// Best-fit search: return the offset of the smallest free block that can hold
// `size` bytes, or total_size_ + 1 if no free block is large enough.
size_t RuntimeAllocator::FindMinFree(size_t size) {
  size_t min_size = total_size_ + 1;
  size_t min_addr = total_size_ + 1;
  for (auto const &itr : free_list_) {
    if (itr.second >= size && min_size > itr.second) {
      min_size = itr.second;
      min_addr = itr.first;
    }
  }
  return min_addr;
}

// Return a tensor's planned range to the free list and coalesce it with any
// adjacent free blocks, e.g. freeing [10, 15) between free blocks [0, 10) and
// [15, 35) leaves a single free block [0, 35).
void RuntimeAllocator::FreeTensorData(lite::Tensor *tensor) {
  size_t offset = offset_map_[tensor];
  free_list_[offset] = used_list_[offset];
  used_list_.erase(offset);

  size_t length = free_list_[offset];

  // Merge with the free block that starts right after this range, if any.
  size_t post_offset = offset + length;
  auto post_iter = free_list_.find(post_offset);
  if (post_iter != free_list_.end()) {
    size_t post_length = post_iter->second;
    free_list_[offset] = length + post_length;
    free_list_.erase(post_offset);
  }

  // Merge with the free block that ends right before this range, if any. Use
  // the already-merged length stored in free_list_[offset] so the preceding
  // merge is not lost when both neighbours are free.
  auto pre_iter = free_list_.lower_bound(offset);
  if (pre_iter != free_list_.begin()) {
    pre_iter--;
    size_t pre_offset = pre_iter->first;
    if ((pre_offset + free_list_[pre_offset]) == offset) {
      free_list_[pre_offset] += free_list_[offset];
      free_list_.erase(offset);
    }
  }
}

void RuntimeAllocator::SetDataOffset(lite::Tensor *tensor, size_t offset) { offset_map_[tensor] = offset; }

// Reset the plan: hand the tensors back to the default allocator, drop their
// stale data pointers, release the backing buffer, and empty all bookkeeping.
void RuntimeAllocator::Clear(AllocatorPtr default_allocator) {
  total_size_ = 0;
  for (auto &iter : offset_map_) {
    iter.first->set_allocator(default_allocator);
    iter.first->set_data(nullptr);
  }
  if (data_ != nullptr) {
    free(data_);
    data_ = nullptr;
  }
  offset_map_.clear();
  free_list_.clear();
  used_list_.clear();
}

// Reserve an offset for a tensor: prefer the best-fitting free block; if none
// fits, grow the plan, reusing a trailing free block when one touches the end.
void RuntimeAllocator::MallocTensorData(lite::Tensor *tensor) {
  size_t size = tensor->Size();
  size_t offset = FindMinFree(size);

  if (offset > total_size_) {
    // No free block is large enough; the plan has to grow.
    if (free_list_.empty()) {
      offset = total_size_;
    } else {
      offset = free_list_.rbegin()->first;
      if (offset + free_list_[offset] < total_size_) {
        // The last free block does not reach the end, so append after the end.
        offset = total_size_;
      } else {
        // The last free block touches the end; start there and grow past it.
        free_list_.erase(offset);
      }
    }
    total_size_ = offset + size;
  } else {
    // Carve the tensor out of the chosen free block, keeping any remainder.
    if (free_list_[offset] > size) {
      free_list_[offset + size] = free_list_[offset] - size;
    }
    free_list_.erase(offset);
  }

  used_list_[offset] = size;
  offset_map_[tensor] = offset;
}
}  // namespace mindspore
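For context, the standalone sketch below (not part of the MindSpore sources) illustrates the plan-then-commit pattern this allocator implements: offsets are reserved per tensor up front, one buffer is allocated once the total size is known, and each tensor's data pointer is the buffer base plus its planned offset. It uses only the standard library and plain byte sizes in place of lite::Tensor objects; inside MindSpore Lite the real caller works with lite::Tensor through MallocTensorData, FreeTensorData, and MallocOptData.

// Standalone illustration; tensor count and sizes are made up for the example.
#include <cstdlib>
#include <iostream>
#include <map>
#include <vector>

int main() {
  // Byte sizes of three hypothetical tensors, in execution order.
  std::vector<size_t> sizes = {1024, 2048, 512};

  // Planning phase: assign each tensor an offset. This naive plan places the
  // tensors back to back; the real allocator reuses freed ranges through its
  // best-fit free list, so its total is usually smaller.
  std::map<int, size_t> offsets;  // tensor index -> planned offset
  size_t total_size = 0;
  for (int i = 0; i < static_cast<int>(sizes.size()); ++i) {
    offsets[i] = total_size;
    total_size += sizes[i];
  }

  // Commit phase: one allocation for the whole plan (compare MallocOptData),
  // then every tensor's data pointer is base + planned offset.
  void *base = std::malloc(total_size);
  if (base == nullptr) {
    return 1;
  }
  for (const auto &kv : offsets) {
    void *tensor_data = static_cast<char *>(base) + kv.second;
    std::cout << "tensor " << kv.first << " -> offset " << kv.second
              << ", ptr " << tensor_data << std::endl;
  }
  std::free(base);
  return 0;
}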