/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/inner_allocator.h"
#include <utility>
#include "src/common/log_adapter.h"
#include "src/common/utils.h"

namespace mindspore {
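// Factory method: the default Allocator is the CPU memory pool implemented below.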
std::shared_ptr<Allocator> Allocator::Create() { return std::make_shared<DefaultAllocator>(); }
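// aligned_size is the byte alignment applied to every buffer Malloc hands out;
// the pool caps itself at the size reported by lite::GetMaxMallocSize().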
DefaultAllocator::DefaultAllocator(size_t aligned_size) {
  aligned_size_ = aligned_size;
  max_malloc_size_ = lite::GetMaxMallocSize();
}

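// Release every block still owned by the pool on destruction.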
DefaultAllocator::~DefaultAllocator() { Clear(); }

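// Adopt caller-supplied pool options: whether the pool must be thread-safe
// (lockFlag) and how much oversizing is tolerated when reusing free blocks
// (shiftFactor, see ReuseMemory below).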
void DefaultAllocator::SetContext(const AllocatorContext &ctx) {
  lockFlag_ = ctx.lockFlag;
  shiftFactor_ = static_cast<unsigned>(ctx.shiftFactor);
}

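// Lock/UnLock guard the pool's bookkeeping; the mutex is skipped entirely
// when lockFlag_ is false (single-threaded use).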
void DefaultAllocator::Lock() {
  if (lockFlag_) {
    lock_.lock();
  }
}

void DefaultAllocator::UnLock() {
  if (lockFlag_) {
    lock_.unlock();
  }
}

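// A free block is reused only if it is large enough for the request yet no
// larger than (size << shiftFactor_), which bounds the internal fragmentation
// from handing out oversized blocks. The UINT32_MAX guard avoids overflowing
// the shift for very large requests.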
bool DefaultAllocator::ReuseMemory(size_t free_size, size_t size) const {
  return free_size >= size &&
         (free_size <= (size >= UINT32_MAX / (1ul << shiftFactor_) ? UINT32_MAX : size << shiftFactor_));
}

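// Allocate `size` bytes from the pool: prefer recycling a block from the free
// list, otherwise fall back to the system allocator.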
void *DefaultAllocator::Malloc(size_t size) {
  if (size > max_malloc_size_) {
    MS_LOG(ERROR) << "MallocData out of max_size, size: " << size;
    return nullptr;
  }
  if (this->total_size_ >= max_malloc_size_) {
    MS_LOG(ERROR) << "Memory pool is exhausted";
    return nullptr;
  }
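  // Take the pool lock and look for the smallest free block that fits.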
  Lock();
  auto iter = freeList_.lower_bound(size);
  if (iter != freeList_.end() && ReuseMemory(iter->second->size, size)) {
    auto membuf = iter->second;
    membuf->ref_count_ = 0;
    (void)freeList_.erase(iter);
    allocatedList_[membuf->buf] = membuf;
    UnLock();
    return membuf->buf;
  }

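  // No reusable block: allocate the MemBuf header, the payload, and extra
  // alignment slack in a single system malloc.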
  std::unique_ptr<MemBuf> membuf(reinterpret_cast<MemBuf *>(malloc(sizeof(MemBuf) + size + aligned_size_)));
  if (membuf == nullptr) {
    MS_LOG(ERROR) << "malloc membuf return nullptr";
    UnLock();
    return nullptr;
  }
  this->total_size_ += size;
  membuf->ref_count_ = 0;
  membuf->size = size;
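  // Round the address just past the header up to the next multiple of
  // aligned_size_; this mask trick assumes aligned_size_ is a power of two.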
  membuf->buf = reinterpret_cast<char *>(
    (reinterpret_cast<uintptr_t>(membuf.get()) + sizeof(MemBuf) + aligned_size_ - 1) & (~(aligned_size_ - 1)));
  auto bufPtr = membuf->buf;
  allocatedList_[bufPtr] = membuf.release();
  UnLock();
  return bufPtr;
}

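// Return a buffer to the pool. Blocks owned by the pool go back onto the free
// list for reuse (their memory is only released in Clear()); pointers the
// pool does not recognize are passed straight to free().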
void DefaultAllocator::Free(void *buf) {
  if (buf == nullptr) {
    return;
  }
  Lock();
  auto iter = allocatedList_.find(buf);
  if (iter != allocatedList_.end()) {
    auto membuf = iter->second;
    membuf->ref_count_ = 0;
    (void)allocatedList_.erase(iter);
    (void)freeList_.insert(std::make_pair(membuf->size, membuf));
    UnLock();
    return;
  }
  UnLock();
  free(buf);
}

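// Read the reference count of an allocated block; returns -1 if buf is null
// or not owned by this pool.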
int DefaultAllocator::RefCount(void *buf) {
  if (buf == nullptr) {
    return -1;
  }
  Lock();
  auto iter = allocatedList_.find(buf);
  if (iter != allocatedList_.end()) {
    auto membuf = iter->second;
    int ref_count = std::atomic_load(&membuf->ref_count_);
    UnLock();
    return ref_count;
  }
  UnLock();
  return -1;
}
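// Overwrite the reference count of an allocated block; returns the new count,
// or -1 if buf is unknown.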
int DefaultAllocator::SetRefCount(void *buf, int ref_count) {
  if (buf == nullptr) {
    return -1;
  }
  Lock();
  auto iter = allocatedList_.find(buf);
  if (iter != allocatedList_.end()) {
    auto membuf = iter->second;
    std::atomic_store(&membuf->ref_count_, ref_count);
    UnLock();
    return ref_count;
  }
  UnLock();
  return -1;
}
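// Atomically add ref_count to the block's reference count and return the
// updated value, or -1 if buf is unknown.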
int DefaultAllocator::IncRefCount(void *buf, int ref_count) {
  if (buf == nullptr) {
    return -1;
  }
  Lock();
  auto iter = allocatedList_.find(buf);
  if (iter != allocatedList_.end()) {
    auto membuf = iter->second;
    auto ref = std::atomic_fetch_add(&membuf->ref_count_, ref_count);
    UnLock();
    return (ref + ref_count);
  }
  UnLock();
  return -1;
}
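// Atomically subtract ref_count from the block's reference count and return
// the updated value, or -1 if buf is unknown.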
int DefaultAllocator::DecRefCount(void *buf, int ref_count) {
  if (buf == nullptr) {
    return -1;
  }
  Lock();
  auto iter = allocatedList_.find(buf);
  if (iter != allocatedList_.end()) {
    auto membuf = iter->second;
    auto ref = std::atomic_fetch_sub(&membuf->ref_count_, ref_count);
    UnLock();
    return (ref - ref_count);
  }
  UnLock();
  return -1;
}
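// Release every block the pool still tracks, allocated or free, back to the
// system allocator.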
void DefaultAllocator::Clear() {
  Lock();

  for (auto &it : allocatedList_) {
    free(it.second);
  }
  allocatedList_.clear();

  for (auto &it : freeList_) {
    free(it.second);
  }
  freeList_.clear();
  UnLock();
}
}  // namespace mindspore