/*
 * Copyright (c) 2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef ARK_COMM_RUNTIME_ALLOCATOR_PAGE_POOL_H
#define ARK_COMM_RUNTIME_ALLOCATOR_PAGE_POOL_H

#include <atomic>
#include <cerrno>
#include <limits>
#include <mutex>
#include <string>
#ifdef _WIN64
#include <errhandlingapi.h>
#include <handleapi.h>
#include <memoryapi.h>
#else
#include <sys/mman.h>
#endif

#include "common_components/base/globals.h"
#include "common_components/base/sys_call.h"
#include "common_components/heap/allocator/treap.h"
#include "common_components/platform/os.h"
#include "securec.h"
#if defined(_WIN64) || defined(__APPLE__)
#include "common_components/base/mem_utils.h"
#endif

namespace common {
// A page pool maintains a pool of free pages and serves page allocation and free requests.
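// Usage sketch (illustrative only; the pool name and page count below are hypothetical):
//     PagePool pool("example-pool");
//     pool.Init(1024);                  // reserve 1024 * COMMON_PAGE_SIZE bytes of address space
//     uint8_t* page = pool.GetPage();   // obtain one page from the pool
//     pool.ReturnPage(page);            // give the page back to the free tree
//     pool.Fini();                      // release the whole mapping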
class PagePool {
public:
    explicit PagePool(const char* name) : tag_(name) {}
    PagePool(PagePool const&) = delete;
    PagePool& operator=(const PagePool&) = delete;
    ~PagePool() {}
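    // Reserves pageCount pages of address space for the pool; pages are handed out by GetPage().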
    void Init(uint32_t pageCount)
    {
        totalPageCount_ = pageCount;
        smallPageUsed_ = 0;
        usedZone_ = 0;
        size_t size = static_cast<size_t>(totalPageCount_) * COMMON_PAGE_SIZE;
        freePagesTree_.Init(totalPageCount_);
        base_ = MapMemory(size, tag_);
        totalSize_ = size;
    }
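    // Releases the free-page tree and unmaps the memory reserved by Init().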
    void Fini()
    {
        freePagesTree_.Fini();
#ifdef _WIN64
        LOGE_IF(!VirtualFree(base_, 0, MEM_RELEASE)) <<
                "VirtualFree failed in PagePool destruction, errno: " << GetLastError();
#else
        LOGE_IF(munmap(base_, totalPageCount_ * COMMON_PAGE_SIZE) != EOK) <<
                "munmap failed in PagePool destruction, errno: " << errno;
#endif
    }

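    // Returns a block of pages large enough to hold `bytes`. Pages are taken from the free tree
    // first, then carved from the unused tail of the mapping; if the pool is exhausted, a fresh
    // mapping is created outside the pool.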
    uint8_t* GetPage(size_t bytes = COMMON_PAGE_SIZE)
    {
        uint32_t idx = 0;
        size_t count = (bytes + COMMON_PAGE_SIZE - 1) / COMMON_PAGE_SIZE;
        size_t pageSize = RoundUp(bytes, COMMON_PAGE_SIZE);
        LOGF_CHECK(count < std::numeric_limits<uint32_t>::max()) << "native memory out of memory!";
        {
            std::lock_guard<std::mutex> lg(freePagesMutex_);
            if (freePagesTree_.TakeUnits(static_cast<uint32_t>(count), idx, false)) {
                auto* ret = base_ + static_cast<size_t>(idx) * COMMON_PAGE_SIZE;
#ifdef _WIN64
                LOGE_IF(UNLIKELY_CC(!VirtualAlloc(ret, pageSize, MEM_COMMIT, PAGE_READWRITE))) <<
                    "VirtualAlloc commit failed in GetPage, errno: " << GetLastError();
#endif
                return ret;
            }
            if ((usedZone_ + pageSize) <= totalSize_ && base_ != nullptr) {
                size_t current = usedZone_;
                usedZone_ += pageSize;
#ifdef _WIN64
                LOGE_IF(UNLIKELY_CC(!VirtualAlloc(base_ + current, pageSize, MEM_COMMIT, PAGE_READWRITE))) <<
                    "VirtualAlloc commit failed in GetPage, errno: " << GetLastError();
#endif
                return base_ + current;
            }
        }
        return MapMemory(pageSize, tag_, true);
    }

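    // Returns pages to the pool. Pages that were mapped outside the pool are unmapped directly;
    // pages inside the pool are decommitted (Windows) or advised away (POSIX) and merged back
    // into the free tree.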
    void ReturnPage(uint8_t* page, size_t bytes = COMMON_PAGE_SIZE) noexcept
    {
        uint8_t* end = base_ + totalSize_;
        size_t num = (bytes + COMMON_PAGE_SIZE - 1) / COMMON_PAGE_SIZE;
        if (page < base_ || page >= end) {
#ifdef _WIN64
            LOGE_IF(UNLIKELY_CC(!VirtualFree(page, 0, MEM_RELEASE))) <<
                "VirtualFree failed in ReturnPage, errno: " << GetLastError();
#else
            LOGE_IF(UNLIKELY_CC(munmap(page, num * COMMON_PAGE_SIZE) != EOK)) <<
                "munmap failed in ReturnPage, errno: " << errno;
#endif
            return;
        }
        LOGF_CHECK(num < std::numeric_limits<uint32_t>::max()) << "native memory out of memory!";
        uint32_t idx = static_cast<uint32_t>((page - base_) / COMMON_PAGE_SIZE);
#if defined(_WIN64)
        LOGE_IF(UNLIKELY_CC(!VirtualFree(page, num * COMMON_PAGE_SIZE, MEM_DECOMMIT))) <<
            "VirtualFree failed in ReturnPage, errno: " << GetLastError();
#elif defined(__APPLE__)
        MemorySet(reinterpret_cast<uintptr_t>(page), num * COMMON_PAGE_SIZE, 0,
                  num * COMMON_PAGE_SIZE);
        (void)madvise(page, num * COMMON_PAGE_SIZE, MADV_DONTNEED);
#else
        (void)madvise(page, num * COMMON_PAGE_SIZE, MADV_DONTNEED);
#endif
        std::lock_guard<std::mutex> lg(freePagesMutex_);
        LOGF_CHECK(freePagesTree_.MergeInsert(idx, static_cast<uint32_t>(num), false)) <<
            "tid " << GetTid() << ": failed to return pages to freePagesTree [" <<
            idx << "+" << num << ", " << (idx + num) << ")";
    }

    // Returns unused pages to the OS (currently a no-op).
    void Trim() const {}

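    // Accessor for the shared PagePool instance.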
    PUBLIC_API static PagePool& Instance() noexcept;

protected:
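    // Maps `size` bytes of anonymous memory and tags the mapping with `memName` where the
    // platform supports it; logs a fatal error if the mapping cannot be created.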
    uint8_t* MapMemory(size_t size, const char* memName, bool isCommit = false) const
    {
#ifdef _WIN64
        void* result = VirtualAlloc(NULL, size, isCommit ? MEM_COMMIT : MEM_RESERVE, PAGE_READWRITE);
        if (result == NULL) { //LCOV_EXCL_BR_LINE
            LOG_COMMON(FATAL) << "allocate create page failed! Out of Memory!";
            UNREACHABLE_CC();
        }
        (void)memName;
#else
        void* result = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
        LOGF_CHECK(result != MAP_FAILED) << "allocate create page failed! Out of Memory!";
#if defined(__linux__) || defined(PANDA_TARGET_OHOS)
        (void)madvise(result, size, MADV_NOHUGEPAGE);
#endif
        (void)isCommit;
#endif

#if defined(__linux__) || defined(PANDA_TARGET_OHOS)
        COMMON_PRCTL(result, size, memName);
#endif
        os::PrctlSetVMA(result, size, (std::string("ArkTS Heap CMCGC PagePool ") + memName).c_str());
        return reinterpret_cast<uint8_t*>(result);
    }

    std::mutex freePagesMutex_;
    Treap freePagesTree_;
    uint8_t* base_ = nullptr; // start address of the mapped pages
    size_t totalSize_ = 0;    // total size of the mapped pages
    size_t usedZone_ = 0;     // used zone for native memory pool.
    const char* tag_ = nullptr;

private:
    std::atomic<uint32_t> smallPageUsed_ = { 0 };
    uint32_t totalPageCount_ = 0;
};
} // namespace common
#endif // ARK_COMM_RUNTIME_ALLOCATOR_PAGE_POOL_H