/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "minddata/dataset/util/circular_pool.h"

#include <algorithm>
#include <limits>
#include <utility>
#include "./securec.h"
#include "minddata/dataset/util/log_adapter.h"
#include "minddata/dataset/util/system_pool.h"

namespace mindspore {
namespace dataset {
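// A CircularPool strings together fixed-size Arenas and services allocations
// round-robin, starting from the arena that most recently satisfied a request
// (the tail). When every arena is full and the pool is still under its size
// cap, a new arena is appended on demand.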
Status CircularPool::AddOneArena() {
  std::shared_ptr<Arena> b;
#ifdef ENABLE_GPUQUE
  RETURN_IF_NOT_OK(Arena::CreateArena(&b, arena_size_, is_cuda_malloc_));
#else
  RETURN_IF_NOT_OK(Arena::CreateArena(&b, arena_size_));
#endif
  tail_ = b.get();
  cur_size_in_mb_ += arena_size_;  // arena_size_ is expressed in MB
  mem_segments_.push_back(std::move(b));
  return Status::OK();
}

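// Returns the current arena and advances the cursor. Iteration begins at the
// arena referenced by tail_ (see Reset()), wraps past the end of the list back
// to index 0, and terminates once the cursor comes around to the start again.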
ListOfArenas::iterator CircularPool::CircularIterator::Next() {
  ListOfArenas::iterator it = dp_->mem_segments_.begin();
  uint32_t size = dp_->mem_segments_.size();
  // This is what we return
  it += cur_;
  // Prepare for the next round
  cur_++;
  if (cur_ == size) {
    if (start_ == 0) {
      has_next_ = false;
    } else {
      wrap_ = true;
      cur_ = 0;
    }
  } else if (cur_ == start_) {
    has_next_ = false;
  }
  return it;
}

bool CircularPool::CircularIterator::has_next() const { return has_next_; }

void CircularPool::CircularIterator::Reset() {
  wrap_ = false;
  has_next_ = false;
  if (!dp_->mem_segments_.empty()) {
    // Find the buddy arena that corresponds to the tail.
    cur_tail_ = dp_->tail_;
    auto list_end = dp_->mem_segments_.end();
    auto it = std::find_if(dp_->mem_segments_.begin(), list_end,
                           [this](const std::shared_ptr<Arena> &b) { return b.get() == cur_tail_; });
    MS_ASSERT(it != list_end);
    start_ = std::distance(dp_->mem_segments_.begin(), it);
    cur_ = start_;
    has_next_ = true;
  }
}

CircularPool::CircularIterator::CircularIterator(CircularPool *dp) : dp_(dp) { Reset(); }

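// Allocation strategy: hold the reader-writer lock in shared mode and probe
// the arenas round-robin, starting at the tail. If an arena reports
// out-of-memory, advance the tail (via compare-and-swap, since other threads
// may race to do the same) and try the next one. Only when a full pass fails
// do we upgrade to an exclusive lock and grow the pool by one arena.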
Status CircularPool::Allocate(size_t n, void **p) {
  if (p == nullptr) {
    RETURN_STATUS_UNEXPECTED("p is null");
  }
  Status rc;
  void *ptr = nullptr;
  do {
    SharedLock lock_s(&rw_lock_);
    int prevSzInMB = cur_size_in_mb_;
    bool move_tail = false;
    CircularIterator cirIt(this);
    while (cirIt.has_next()) {
      auto it = cirIt.Next();
      Arena *ba = it->get();
      // A request bigger than a single arena can hold can never succeed.
      if (ba->get_max_size() < n) {
        return Status(StatusCode::kMDOutOfMemory);
      }
      // If we are asked to move the tail forward
      if (move_tail) {
        Arena *expected = cirIt.cur_tail_;
        (void)atomic_compare_exchange_weak(&tail_, &expected, ba);
        move_tail = false;
      }
      rc = ba->Allocate(n, &ptr);
      if (rc.IsOk()) {
        *p = ptr;
        break;
      } else if (rc == StatusCode::kMDOutOfMemory) {
        // Make the next arena the new tail and continue.
        move_tail = true;
      } else {
        return rc;
      }
    }

    // Handle the case where we have completed one full round-robin pass.
    if (ptr == nullptr) {
      // If we have room to expand.
      if (unlimited_ || cur_size_in_mb_ < max_size_in_mb_) {
        // Lock in exclusive mode.
        lock_s.Upgrade();
        // Check again in case someone else has already expanded the pool.
        if (cur_size_in_mb_ == prevSzInMB) {
          RETURN_IF_NOT_OK(AddOneArena());
        }
        // Re-acquire the shared lock and try again.
        lock_s.Downgrade();
      } else {
        return Status(StatusCode::kMDOutOfMemory, __LINE__, __FILE__);
      }
    }
  } while (ptr == nullptr);
  return rc;
}

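// Note: arena_size_ is in MB, so the address-range check below scales it by
// 1048576 (bytes per MB) to compute each arena's span.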
void CircularPool::Deallocate(void *p) {
  // Lock the chain in shared mode and find out which
  // segment the pointer comes from.
  SharedLock lock(&rw_lock_);
  auto it = std::find_if(mem_segments_.begin(), mem_segments_.end(), [this, p](std::shared_ptr<Arena> &b) -> bool {
    char *q = reinterpret_cast<char *>(p);
    auto *base = reinterpret_cast<const char *>(b->get_base_addr());
    return (q > base && q < base + arena_size_ * 1048576L);
  });
  lock.Unlock();
  MS_ASSERT(it != mem_segments_.end());
  it->get()->Deallocate(p);
}

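// Reallocate first tries to resize in place within the owning arena. If that
// arena has no room for the bigger size, the data is moved: allocate from
// another arena (or a freshly added one), copy the old contents over, and
// release the original block.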
Status CircularPool::Reallocate(void **pp, size_t old_sz, size_t new_sz) {
  // Lock the chain in shared mode and find out which
  // segment the pointer comes from.
  if (pp == nullptr) {
    RETURN_STATUS_UNEXPECTED("pp is null");
  }
  void *p = *pp;
  SharedLock lock(&rw_lock_);
  auto it = std::find_if(mem_segments_.begin(), mem_segments_.end(), [this, p](std::shared_ptr<Arena> &b) -> bool {
    char *q = reinterpret_cast<char *>(p);
    auto *base = reinterpret_cast<const char *>(b->get_base_addr());
    return (q > base && q < base + arena_size_ * 1048576L);
  });
  lock.Unlock();
  MS_ASSERT(it != mem_segments_.end());
  Arena *ba = it->get();
  Status rc = ba->Reallocate(pp, old_sz, new_sz);
  if (rc == StatusCode::kMDOutOfMemory) {
    // The current arena has no room for the bigger size.
    // Allocate free space from another arena and copy
    // the content over.
    void *q = nullptr;
    rc = this->Allocate(new_sz, &q);
    RETURN_IF_NOT_OK(rc);
    errno_t err = memcpy_s(q, new_sz, p, old_sz);
    if (err) {
      this->Deallocate(q);
      RETURN_STATUS_UNEXPECTED(std::to_string(err));
    }
    *pp = q;
    ba->Deallocate(p);
  }
  // Propagate any non-OOM error from the in-place attempt; a successful
  // relocation leaves rc as OK.
  return rc;
}

uint64_t CircularPool::get_max_size() const { return mem_segments_.front()->get_max_size(); }

int CircularPool::PercentFree() const {
  int percent_free = 0;
  int num_arena = 0;
  for (auto const &p : mem_segments_) {
    percent_free += p->PercentFree();
    num_arena++;
  }
  if (num_arena) {
    return percent_free / num_arena;
  } else {
    return 100;
  }
}

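// Size bookkeeping: the cap is supplied in GB but tracked internally in MB
// (hence the * 1024), and a non-positive cap means the pool may grow without
// bound. arena_size is likewise in MB.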
#ifdef ENABLE_GPUQUE
CircularPool::CircularPool(int max_size_in_gb, int arena_size, bool is_cuda_malloc)
    : unlimited_(max_size_in_gb <= 0),
      max_size_in_mb_(unlimited_ ? std::numeric_limits<int32_t>::max() : max_size_in_gb * 1024),
      arena_size_(arena_size),
      is_cuda_malloc_(is_cuda_malloc),
      cur_size_in_mb_(0) {}
#else
CircularPool::CircularPool(int max_size_in_gb, int arena_size)
    : unlimited_(max_size_in_gb <= 0),
      max_size_in_mb_(unlimited_ ? std::numeric_limits<int32_t>::max() : max_size_in_gb * 1024),
      arena_size_(arena_size),
      cur_size_in_mb_(0) {}
#endif

#ifdef ENABLE_GPUQUE
Status CircularPool::CreateCircularPool(std::shared_ptr<MemoryPool> *out_pool, int max_size_in_gb, int arena_size,
                                        bool createOneArena, bool is_cuda_malloc) {
  Status rc;
  if (out_pool == nullptr) {
    RETURN_STATUS_UNEXPECTED("out_pool is null");
  }
  auto pool = new (std::nothrow) CircularPool(max_size_in_gb, arena_size, is_cuda_malloc);
  if (pool == nullptr) {
    return Status(StatusCode::kMDOutOfMemory);
  }
  if (createOneArena) {
    rc = pool->AddOneArena();
  }
  if (rc.IsOk()) {
    (*out_pool).reset(pool);
  } else {
    delete pool;
  }
  return rc;
}
#else
Status CircularPool::CreateCircularPool(std::shared_ptr<MemoryPool> *out_pool, int max_size_in_gb, int arena_size,
                                        bool createOneArena) {
  Status rc;
  if (out_pool == nullptr) {
    RETURN_STATUS_UNEXPECTED("out_pool is null");
  }
  auto pool = new (std::nothrow) CircularPool(max_size_in_gb, arena_size);
  if (pool == nullptr) {
    return Status(StatusCode::kMDOutOfMemory);
  }
  if (createOneArena) {
    rc = pool->AddOneArena();
  }
  if (rc.IsOk()) {
    (*out_pool).reset(pool);
  } else {
    delete pool;
  }
  return rc;
}
#endif
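
// A minimal usage sketch (illustrative only; the cap and arena sizes below
// are arbitrary example values):
//
//   std::shared_ptr<MemoryPool> pool;
//   Status rc = CircularPool::CreateCircularPool(&pool, 2 /* GB cap */, 16 /* MB per arena */, true);
//   if (rc.IsOk()) {
//     void *buf = nullptr;
//     if (pool->Allocate(1024, &buf).IsOk()) {
//       pool->Deallocate(buf);
//     }
//   }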

CircularPool::~CircularPool() = default;
}  // namespace dataset
}  // namespace mindspore