/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_CIRCULAR_POOL_H_
#define MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_CIRCULAR_POOL_H_

#include <atomic>
#include <memory>
#include <vector>
#include "minddata/dataset/util/memory_pool.h"
#include "minddata/dataset/util/arena.h"
#include "minddata/dataset/util/lock.h"

namespace mindspore {
namespace dataset {
using ListOfArenas = std::vector<std::shared_ptr<Arena>>;

// This is a dynamic memory pool built on top of memory
// segments, each of which is 4G in size. Initially we start
// with one segment, and gradually add segments (not
// guaranteed to be contiguous) until we reach 32G in total.
// The pool assumes that allocated memory is not held for the
// whole lifetime of the pool and will be released soon. Based
// on this assumption, memory is obtained from the tail of the
// pool while freed memory is returned to its head.
class CircularPool : public MemoryPool {
 public:
  class CircularIterator {
    friend class CircularPool;

   public:
    explicit CircularIterator(CircularPool *dp);

    ~CircularIterator() = default;

    bool has_next() const;

    ListOfArenas::iterator Next();

    void Reset();

   private:
    CircularPool *dp_;
    Arena *cur_tail_{};
    uint32_t start_{};
    uint32_t cur_{};
    bool wrap_{};
    bool has_next_{};
  };

  CircularPool(const CircularPool &) = delete;

  CircularPool &operator=(const CircularPool &) = delete;

  ~CircularPool() override;

  Status Allocate(size_t n, void **) override;

  Status Reallocate(void **, size_t old_size, size_t new_size) override;

  void Deallocate(void *) override;

  uint64_t get_max_size() const override;

  int PercentFree() const override;

  friend std::ostream &operator<<(std::ostream &os, const CircularPool &s) {
    int i = 0;
    for (auto it = s.mem_segments_.begin(); it != s.mem_segments_.end(); ++it, ++i) {
      os << "Dumping segment " << i << "\n" << *(it->get());
    }
    return os;
  }

#ifdef ENABLE_GPUQUE
  static Status CreateCircularPool(std::shared_ptr<MemoryPool> *out_pool, int max_size_in_gb = -1,
                                   int arena_size = 4096, bool create_one_arena = false, bool is_cuda_malloc = false);
#else
  static Status CreateCircularPool(std::shared_ptr<MemoryPool> *out_pool, int max_size_in_gb = -1,
                                   int arena_size = 4096, bool create_one_arena = false);
#endif

 private:
  ListOfArenas mem_segments_;
  std::atomic<Arena *> tail_{};
  bool unlimited_;
  int max_size_in_mb_;
  int arena_size_;
  int cur_size_in_mb_;
  RWLock rw_lock_;
#ifdef ENABLE_GPUQUE
  bool is_cuda_malloc_;

  // A negative or zero max_size_in_gb means the pool is unlimited.
  CircularPool(int max_size_in_gb, int arena_size, bool is_cuda_malloc);
#else

  // A negative or zero max_size_in_gb means the pool is unlimited.
  CircularPool(int max_size_in_gb, int arena_size);
#endif

  Status AddOneArena();
};
}  // namespace dataset
}  // namespace mindspore

#endif  // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_CIRCULAR_POOL_H_
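
// Usage sketch (illustrative only, not part of the original header): a minimal example of how a
// caller might create a circular pool and round-trip a buffer through it. The defaults
// (max_size_in_gb = -1, arena_size = 4096) come from the declaration above; the 1 KB request
// size and error handling via Status::IsOk() are assumptions made for illustration.
//
//   #include "minddata/dataset/util/circular_pool.h"
//
//   using mindspore::dataset::CircularPool;
//   using mindspore::dataset::MemoryPool;
//   using mindspore::dataset::Status;
//
//   std::shared_ptr<MemoryPool> pool;
//   Status rc = CircularPool::CreateCircularPool(&pool);  // unlimited pool, 4G arenas
//   if (rc.IsOk()) {
//     void *buf = nullptr;
//     rc = pool->Allocate(1024, &buf);   // memory is handed out from the tail arena
//     if (rc.IsOk()) {
//       pool->Deallocate(buf);           // freed memory is returned to the head of the pool
//     }
//   }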