/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/litert/inner_allocator.h"
#include <utility>
#include "src/common/log_adapter.h"
#include "src/common/utils.h"

namespace mindspore {
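// aligned_size must be a power of two; Malloc() relies on that for the
// round-up arithmetic that aligns each returned buffer.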
DefaultAllocator::DefaultAllocator(size_t aligned_size) {
  aligned_size_ = aligned_size;
  max_malloc_size_ = lite::GetMaxMallocSize();
}

DefaultAllocator::~DefaultAllocator() { Clear(); }

void DefaultAllocator::SetContext(const AllocatorContext &ctx) {
  lockFlag_ = ctx.lockFlag;
  shiftFactor_ = static_cast<unsigned>(ctx.shiftFactor);
}

void DefaultAllocator::Lock() {
  if (lockFlag_) {
    lock_.lock();
  }
}

void DefaultAllocator::UnLock() {
  if (lockFlag_) {
    lock_.unlock();
  }
}

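// A cached block is reused only when it is large enough for the request but
// not excessively larger: at most (size << shiftFactor_), capped at UINT32_MAX
// to avoid overflowing the shift. This keeps large cached blocks from being
// wasted on small requests.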
bool DefaultAllocator::ReuseMemory(size_t free_size, size_t size) const {
  return free_size >= size &&
         (free_size <= (size >= UINT32_MAX / (1ul << shiftFactor_) ? UINT32_MAX : size << shiftFactor_));
}

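// Allocates `size` bytes, preferring a block recycled from the free list.
// freeList_ is keyed by block size, so lower_bound(size) yields the smallest
// cached block that can hold the request.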
void *DefaultAllocator::Malloc(size_t size) {
  if (size > max_malloc_size_) {
    MS_LOG(ERROR) << "MallocData out of max_size, size: " << size;
    return nullptr;
  }
  Lock();
  if (this->total_size_ >= max_malloc_size_) {
    MS_LOG(ERROR) << "Memory pool is exhausted";
    UnLock();
    return nullptr;
  }
  auto iter = freeList_.lower_bound(size);
  if (iter != freeList_.end() && ReuseMemory(iter->second->size, size)) {
    auto membuf = iter->second;
    membuf->ref_count_ = 0;
    (void)freeList_.erase(iter);
    allocatedList_[membuf->buf] = membuf;
    UnLock();
    return membuf->buf;
  }

  // No reusable block: allocate the MemBuf header, the payload, and the
  // alignment slack in a single chunk.
  std::unique_ptr<MemBuf> membuf(reinterpret_cast<MemBuf *>(malloc(sizeof(MemBuf) + size + aligned_size_)));
  if (membuf == nullptr) {
    MS_LOG(ERROR) << "malloc membuf return nullptr";
    UnLock();
    return nullptr;
  }
  this->total_size_ += size;
  membuf->ref_count_ = 0;
  membuf->size = size;
  // Round the payload pointer up to the next multiple of aligned_size_
  // (valid because aligned_size_ is a power of two).
  membuf->buf = reinterpret_cast<char *>(
    (reinterpret_cast<uintptr_t>(membuf.get()) + sizeof(MemBuf) + aligned_size_ - 1) & (~(aligned_size_ - 1)));
  auto bufPtr = membuf->buf;
  allocatedList_[bufPtr] = membuf.release();
  UnLock();
  return bufPtr;
}

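// Returns a buffer owned by this allocator to the free list for later reuse;
// pointers not found in allocatedList_ are assumed to come from plain malloc()
// and are released directly.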
void DefaultAllocator::Free(void *buf) {
  if (buf == nullptr) {
    return;
  }
  Lock();
  auto iter = allocatedList_.find(buf);
  if (iter != allocatedList_.end()) {
    auto membuf = iter->second;
    membuf->ref_count_ = 0;
    (void)allocatedList_.erase(iter);
    (void)freeList_.insert(std::make_pair(membuf->size, membuf));
    UnLock();
    return;
  }
  UnLock();
  free(buf);
}

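// Reference-count accessors. They operate only on buffers currently tracked in
// allocatedList_; a return value of -1 signals a null or unknown buffer.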
int DefaultAllocator::RefCount(void *buf) {
  if (buf == nullptr) {
    return -1;
  }
  Lock();
  auto iter = allocatedList_.find(buf);
  if (iter != allocatedList_.end()) {
    auto membuf = iter->second;
    int ref_count = std::atomic_load(&membuf->ref_count_);
    UnLock();
    return ref_count;
  }
  UnLock();
  return -1;
}

int DefaultAllocator::SetRefCount(void *buf, int ref_count) {
  if (buf == nullptr) {
    return -1;
  }
  Lock();
  auto iter = allocatedList_.find(buf);
  if (iter != allocatedList_.end()) {
    auto membuf = iter->second;
    std::atomic_store(&membuf->ref_count_, ref_count);
    UnLock();
    return ref_count;
  }
  UnLock();
  return -1;
}

int DefaultAllocator::IncRefCount(void *buf, int ref_count) {
  if (buf == nullptr) {
    return -1;
  }
  Lock();
  auto iter = allocatedList_.find(buf);
  if (iter != allocatedList_.end()) {
    auto membuf = iter->second;
    auto ref = std::atomic_fetch_add(&membuf->ref_count_, ref_count);
    UnLock();
    return (ref + ref_count);
  }
  UnLock();
  return -1;
}

int DefaultAllocator::DecRefCount(void *buf, int ref_count) {
  if (buf == nullptr) {
    return -1;
  }
  Lock();
  auto iter = allocatedList_.find(buf);
  if (iter != allocatedList_.end()) {
    auto membuf = iter->second;
    auto ref = std::atomic_fetch_sub(&membuf->ref_count_, ref_count);
    UnLock();
    return (ref - ref_count);
  }
  UnLock();
  return -1;
}

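// Releases every block the allocator owns, whether still handed out or cached
// in the free list. Called from the destructor.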
void DefaultAllocator::Clear() {
  Lock();

  for (auto &it : allocatedList_) {
    free(it.second);
  }
  allocatedList_.clear();

  for (auto &it : freeList_) {
    free(it.second);
  }
  freeList_.clear();
  UnLock();
}
}  // namespace mindspore
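/*
 * Minimal usage sketch (illustrative only; the 32-byte alignment and buffer
 * size are hypothetical values, not taken from this file):
 *
 *   mindspore::DefaultAllocator allocator(32);
 *   void *buf = allocator.Malloc(1024);
 *   if (buf != nullptr) {
 *     allocator.SetRefCount(buf, 1);
 *     // ... use the buffer ...
 *     allocator.Free(buf);  // recycled into the free list, not released
 *   }
 */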