• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2025 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #ifndef COMMON_COMPONENTS_COMMON_MEM_COMMON_H
17 #define COMMON_COMPONENTS_COMMON_MEM_COMMON_H
18 
19 #include <algorithm>
20 #include <mutex>
21 #include <thread>
22 #include <unordered_map>
23 
24 #include "common_components/common/page_pool.h"
25 
26 namespace common {
using pageID = unsigned long long;
// The maximum number of pages that PageCache can allocate
constexpr size_t MAX_NPAGES = 129;
// Calculate the page number and the starting address of the page using bitwise shift operations
constexpr size_t PAGE_SHIFT = 12;
// The maximum memory space that ThreadCache can allocate
constexpr size_t MAX_BYTES = 256 * 1024;

// Alignment granularities (bytes) for each size-class bucket.
// NOTE: ALIGN_8192 was previously `8 * 1028` (= 8224), which contradicts
// ALIGN_8192_SIFT == 13 (1 << 13 == 8192); corrected to 8 * 1024.
enum AlignNmu { ALIGN_8 = 8, ALIGN_16 = 16, ALIGN_128 = 128, ALIGN_1024 = 1024, ALIGN_8192 = 8 * 1024 };

// Upper bound (bytes) of the request size served by each alignment bucket.
enum MemberSize {
    ALING_8_BYTE = 128,
    ALING_16_BYTE = 1024,
    ALING_128_BYTE = 8 * 1024,
    ALING_1024_BYTE = 64 * 1024,
    ALING_8192_BYTE = 256 * 1024,
};

// log2 of each alignment granularity (8 -> 3, 16 -> 4, ..., 8192 -> 13).
enum AlignShift { ALIGN_8_SIFT = 3, ALIGN_16_SIFT = 4, ALIGN_128_SIFT = 7, ALIGN_1024_SIFT = 10, ALIGN_8192_SIFT = 13 };
46 
47 // Allocate memory resources from the system
SystemAlloc(size_t kpage)48 inline void* SystemAlloc(size_t kpage)
49 {
50     return PagePool::Instance().GetPage(kpage * COMMON_PAGE_SIZE);
51 }
52 
53 // Return a reference to the first 4 or 8 bytes of the passed-in space
NextObj(void * obj)54 inline void*& NextObj(void* obj)
55 {
56     CHECK_CC(obj != nullptr);
57     return *(void**)obj;
58 }
59 
60 // The free list used to store the small fixed-size memory blocks after splitting.
61 class FreeList {
62 public:
PushFront(void * obj)63     void PushFront(void* obj)
64     {
65         CHECK_CC(obj != nullptr);
66 
67         NextObj(obj) = freeList_;
68         freeList_ = obj;
69         ++size_;
70     }
PopFront()71     void* PopFront()
72     {
73         CHECK_CC(!Empty());
74 
75         void* obj = freeList_;
76         freeList_ = NextObj(obj);
77         --size_;
78 
79         return obj;
80     }
81 
PushAtFront(void * start,void * end,size_t n)82     void PushAtFront(void* start, void* end, size_t n)
83     {
84         CHECK_CC(start != nullptr);
85         CHECK_CC(end != nullptr);
86         CHECK_CC(n > 0);
87 
88         NextObj(end) = freeList_;
89         freeList_ = start;
90         size_ += n;
91     }
92 
PopAtFront(void * & start,size_t n)93     void PopAtFront(void*& start, size_t n)
94     {
95         CHECK_CC(n <= size_);
96 
97         start = freeList_;
98         void* end = freeList_;
99         for (size_t i = 0; i < n - 1; ++i) {
100             end = NextObj(end);
101         }
102         freeList_ = NextObj(end);
103         NextObj(end) = nullptr;
104         size_ -= n;
105     }
106 
Empty()107     bool Empty() { return size_ == 0; }
108 
GetAdjustSize()109     size_t& GetAdjustSize() { return adjustSize_; }
110 
GetSize()111     size_t GetSize() { return size_; }
112 
113 private:
114     size_t size_ = 0;       // The number of small fixed-size memory blocks
115     size_t adjustSize_ = 1; // The slow-start adjustment value for requesting memory from the central cache.
116     void* freeList_ = nullptr;
117 };
118 
// Span manages a large block of memory in units of pages.
struct Span {
    size_t pageNum = 0; // number of pages covered by this span
    pageID pageId = 0;  // page id of the first page
    Span* next = nullptr; // intrusive doubly-linked list link (managed by SpanList)
    Span* prev = nullptr; // intrusive doubly-linked list link (managed by SpanList)
    size_t useCount = 0;  // usage counter; presumably blocks handed out from this span — maintained outside this header
    void* freeBlocks = nullptr; // presumably head of the span's free-block chain — maintained outside this header
    bool isUse = false;   // whether the span is currently in use
};
129 
130 // The doubly linked circular list with a header node for managing spans.
131 class SpanList {
132 public:
SpanList()133     SpanList()
134     {
135         head_->prev = head_;
136         head_->next = head_;
137     }
138 
~SpanList()139     ~SpanList()
140     {
141         while (!Empty()) {
142             Span* span = PopFront();
143             delete span;
144         }
145         delete head_;
146     }
147 
Insert(Span * pos,Span * newSpan)148     void Insert(Span* pos, Span* newSpan)
149     {
150         CHECK_CC(pos != nullptr);
151         CHECK_CC(newSpan != nullptr);
152         Span* prev = pos->prev;
153         prev->next = newSpan;
154         newSpan->prev = prev;
155         newSpan->next = pos;
156         pos->prev = newSpan;
157     }
158 
Erase(Span * pos)159     void Erase(Span* pos)
160     {
161         CHECK_CC(pos != nullptr);
162         CHECK_CC(pos != head_); // The sentinel header node must not be deleted.
163         Span* prev = pos->prev;
164         Span* next = pos->next;
165         prev->next = next;
166         next->prev = prev;
167     }
168 
Begin()169     Span* Begin() { return head_->next; }
170 
End()171     Span* End() { return head_; }
172 
Empty()173     bool Empty() { return Begin() == head_; }
174 
PopFront()175     Span* PopFront()
176     {
177         CHECK_CC(!Empty());
178 
179         Span* ret = Begin();
180         Erase(Begin());
181         return ret;
182     }
183 
PushFront(Span * span)184     void PushFront(Span* span)
185     {
186         CHECK_CC(span != nullptr);
187 
188         Insert(Begin(), span);
189     }
190 
GetSpanListMutex()191     std::mutex& GetSpanListMutex() { return mtx_; }
192 
193 private:
194     Span* head_ = new Span;
195     std::mutex mtx_;
196 };
197 } // namespace common
198 #endif
199