/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_CACHE_NUMA_H_
17 #define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_CACHE_NUMA_H_
18 
19 #include <limits>
20 #include <map>
21 #include <memory>
22 #include <mutex>
23 #include <utility>
24 #include <vector>
25 #include "minddata/dataset/engine/cache/cache_hw.h"
26 #include "minddata/dataset/util/arena.h"
27 #include "minddata/dataset/util/memory_pool.h"
28 
29 namespace mindspore {
30 namespace dataset {
31 /// \brief A NumaMemoryPool is like a CircularPool but all the arenas have already been allocated
32 /// and each one comes from a numa socket. Memory is allocated using OnNode policy. That is,
33 /// it is solely comes from one particular numa node, and is not interleaved.
34 class NumaMemoryPool : public MemoryPool {
35  public:
36   explicit NumaMemoryPool(std::shared_ptr<CacheServerHW> hw, float memory_cap_ratio);
37   ~NumaMemoryPool() override;
38 
39   // As a derived class, we override the following functions
40   Status Allocate(size_t size, void **pVoid) override;
41   void Deallocate(void *pVoid) override;
Reallocate(void ** pVoid,size_t old_sz,size_t new_sz)42   Status Reallocate(void **pVoid, size_t old_sz, size_t new_sz) override { RETURN_STATUS_UNEXPECTED("Not supported"); }
get_max_size()43   uint64_t get_max_size() const override { return std::numeric_limits<uint64_t>::max(); }
44   int PercentFree() const override;
45 
46   /// \brief Return if the memory pool is numa aware
NumaAware()47   bool NumaAware() const { return CacheServerHW::numa_enabled(); }
48 
49   /// \brief. This returns all the numa nodes that we are able to allocate memory from.
50   std::vector<numa_id_t> GetAvailableNodes() const;
51 
52   /// \brief. Given a pointer (allocated from this pool), return the numa node where it is located.
53   /// \note. -1 is returned if not found.
FindNode(void * p)54   numa_id_t FindNode(void *p) const {
55     auto slot = Locate(p);
56     if (slot != -1) {
57       return nodes_.at(slot);
58     } else {
59       return -1;
60     }
61   }
62 
63   /// \brief Return maximum available memory
GetAvailableMemory()64   int64_t GetAvailableMemory() const { return memory_cap_; }
65 
66   /// \brief Return the configured or computed memory cap ratio
GetMemoryCapRatio()67   float GetMemoryCapRatio() const { return memory_cap_ratio_; }
68 
69  private:
70   std::shared_ptr<CacheServerHW> hw_;
71   float memory_cap_ratio_;
72   int64_t memory_cap_;
73   std::vector<std::pair<void *, int64_t>> memory_segments_;
74   std::vector<std::unique_ptr<ArenaImpl>> arena_list_;
75   std::unique_ptr<std::mutex[]> mux_;
76   std::vector<numa_id_t> nodes_;
77   std::map<numa_id_t, std::vector<int32_t>> numa_map_;
78 
79   /// \brief. Returns the slot that a given memory comes from.
80   /// \return slot from numa_segments. -1 if not found.
81   int32_t Locate(void *p) const;
82 
83   /// If numa library is not linked, or numa_availble() return -1, we will fall back to this method.
84   int32_t CreateMultipleArenas(int64_t segment_sz, numa_id_t node_id, int32_t repeat_count);
85 };
86 }  // namespace dataset
87 }  // namespace mindspore
88 #endif  // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_CACHE_NUMA_H_
89