1 /*
2 * Copyright (c) 2022 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include <stddef.h> /* NULL */
17 #include <sys/mman.h> /* mmap */
18 #include <sched.h> /* sched_yield() */
19
20 #include "hilog/log_c.h"
21 #include "pm_util.h"
22 #include "ux_page_table_c.h"
23
24 #undef LOG_TAG
25 #define LOG_TAG "PurgeableMemC: UPT"
26
27 #if defined(USE_UXPT) && (USE_UXPT > 0) /* (USE_UXPT > 0) means using uxpt */
28
/*
 * using uint64_t as uxpte_t to avoid confusion on 32-bit and 64-bit systems.
 * Type uxpte_t may be modified to uint32_t in the future, so typedef is used.
 */
typedef uint64_t uxpte_t;

/* Per-buffer handle: a purgeable data range and its mapped uxpte array. */
typedef struct UserExtendPageTable {
    uint64_t dataAddr; /* start address of the purgeable data region */
    size_t dataSize;   /* size of the data region in bytes */
    uxpte_t *uxpte;    /* mmap'ed array of uxpte entries covering the region */
} UxPageTableStruct;

/* set once by the CheckUxpt() constructor; true when the kernel supports uxpt */
static bool g_supportUxpt = false;
42
/*
 * How a virtual address decomposes into uxpte coordinates:
 * -------------------------------------------------------------------------
 * | virtual page number                        |                           |
 * |--------------------------------------------| vaddr offset in virt page |
 * | uxpte page number | offset in uxpte page   |                           |
 * --------------------------------------------------------------------------
 * |                   | UXPTE_PER_PAGE_SHIFT   | PAGE_SHIFT                |
 */
static const size_t UXPTE_SIZE_SHIFT = 3; /* sizeof(uxpte_t) == 8 == 1 << 3 */
/* number of uxpte entries per page, expressed as a shift */
static const size_t UXPTE_PER_PAGE_SHIFT = PAGE_SHIFT - UXPTE_SIZE_SHIFT;
static const size_t UXPTE_PER_PAGE = 1 << UXPTE_PER_PAGE_SHIFT;
54
55 /* get virtual page number from virtual address */
/* get virtual page number from virtual address (drops the in-page offset) */
static inline uint64_t VirtPageNo_(uint64_t vaddr)
{
    return vaddr >> PAGE_SHIFT;
}
60
61 /* page number in user page table of uxpte for virtual address */
/*
 * page number in user page table of uxpte for virtual address:
 * each uxpt page holds UXPTE_PER_PAGE entries, one per virtual page.
 */
static inline uint64_t UxptePageNo_(uint64_t vaddr)
{
    return VirtPageNo_(vaddr) >> UXPTE_PER_PAGE_SHIFT;
}
66
67 /* uxpte offset in uxpte page for virtual address */
/* uxpte entry offset within its uxpte page for the given virtual address */
static inline uint64_t UxpteOffset_(uint64_t vaddr)
{
    return VirtPageNo_(vaddr) & (UXPTE_PER_PAGE - 1);
}
72
/* bit 0 of a uxpte is the present flag; the bits above it form a refcount */
static const size_t UXPTE_PRESENT_BIT = 1;
static const size_t UXPTE_PRESENT_MASK = (1 << UXPTE_PRESENT_BIT) - 1;
/* one refcount step (2), keeping the present bit untouched */
static const size_t UXPTE_REFCNT_ONE = 1 << UXPTE_PRESENT_BIT;
/* sentinel (-UXPTE_REFCNT_ONE) meaning the entry is being reclaimed —
 * NOTE(review): presumably written by the kernel during reclaim; confirm */
static const uxpte_t UXPTE_UNDER_RECLAIM = (uxpte_t)(-UXPTE_REFCNT_ONE);
77
IsUxptePresent_(uxpte_t pte)78 static inline bool IsUxptePresent_(uxpte_t pte)
79 {
80 return pte & (uxpte_t)UXPTE_PRESENT_MASK;
81 }
82
/* True when the entry equals the under-reclaim sentinel. */
static inline bool IsUxpteUnderReclaim_(uxpte_t pte)
{
    return pte == UXPTE_UNDER_RECLAIM;
}
87
/*
 * Size in bytes of the uxpt mapping needed to cover the data range
 * [dataAddr, dataAddr + dataSize): the count of uxpt pages spanned,
 * times PAGE_SIZE.
 */
static inline size_t GetUxPageSize_(uint64_t dataAddr, size_t dataSize)
{
    uint64_t firstUxptPage = UxptePageNo_(dataAddr);
    uint64_t lastUxptPage = UxptePageNo_(dataAddr + dataSize - 1);
    return (lastUxptPage - firstUxptPage + 1) * PAGE_SIZE;
}
92
/*
 * Round val up to the next multiple of align; align == 0 returns val
 * unchanged. Works for any non-zero align, not just powers of two.
 */
static inline uint64_t RoundUp(uint64_t val, size_t align)
{
    if (align == 0) {
        return val;
    }
    uint64_t rem = val % align;
    return (rem == 0) ? val : (val + (align - rem));
}
100
/*
 * Round val down to a multiple of align; align == 0 returns val unchanged.
 *
 * Fix: the previous form `val & (~(align - 1))` computed the mask in
 * size_t width, so on 32-bit targets `~(align - 1)` zero-extends to
 * 64 bits and wrongly clears the upper 32 bits of val. Subtracting the
 * masked-off low bits (with the mask widened explicitly) is width-safe
 * and yields the same result on 64-bit targets.
 */
static inline uint64_t RoundDown_(uint64_t val, size_t align)
{
    if (align == 0) {
        return val;
    }
    return val - (val & ((uint64_t)align - 1));
}
108
/* operations dispatched page-by-page by UxpteOps_() */
enum UxpteOp {
    UPT_GET = 0,        /* increment the refcount of each entry */
    UPT_PUT = 1,        /* decrement the refcount of each entry */
    UPT_CLEAR = 2,      /* force each entry to zero */
    UPT_IS_PRESENT = 3, /* check the present bit of each entry */
};
115
/* runs at load time to probe kernel support (sets g_supportUxpt) */
static void __attribute__((constructor)) CheckUxpt(void);
/* atomic refcount add/sub on a single uxpte */
static void UxpteAdd_(uxpte_t *pte, size_t incNum);
static void UxpteSub_(uxpte_t *pte, size_t decNum);

/* single-address helpers used by the range walker UxpteOps_() */
static void GetUxpteAt_(UxPageTableStruct *upt, uint64_t addr);
static void PutUxpteAt_(UxPageTableStruct *upt, uint64_t addr);
static bool IsPresentAt_(UxPageTableStruct *upt, uint64_t addr);
static PMState UxpteOps_(UxPageTableStruct *upt, uint64_t addr, size_t len, enum UxpteOp op);

/* map/unmap the uxpte array covering a data range */
static uxpte_t *MapUxptePages_(uint64_t dataAddr, size_t dataSize);
static int UnmapUxptePages_(uxpte_t *ptes, size_t size);
127
CheckUxpt(void)128 static void __attribute__((constructor)) CheckUxpt(void)
129 {
130 int prot = PROT_READ | PROT_WRITE;
131 int type = MAP_ANONYMOUS | MAP_PURGEABLE;
132 size_t dataSize = PAGE_SIZE;
133 /* try to mmap purgable page */
134 void *dataPtr = mmap(NULL, dataSize, prot, type, -1, 0);
135 if (dataPtr == MAP_FAILED) {
136 HILOG_ERROR(LOG_CORE, "%{public}s: not support MAP_PURG", __func__);
137 g_supportUxpt = false;
138 return;
139 }
140 /* try to mmap uxpt page */
141 type = MAP_ANONYMOUS | MAP_USEREXPTE;
142 size_t uptSize = GetUxPageSize_((uint64_t)dataPtr, dataSize);
143 void *ptes = mmap(NULL, uptSize, prot, type, -1, UxptePageNo_((uint64_t)dataPtr) * PAGE_SIZE);
144 if (ptes != MAP_FAILED) {
145 g_supportUxpt = true;
146 /* free uxpt */
147 if (munmap(ptes, uptSize) != 0) {
148 HILOG_ERROR(LOG_CORE, "%{public}s: unmap uxpt fail", __func__);
149 }
150 } else { /* MAP_FAILED */
151 g_supportUxpt = false;
152 HILOG_ERROR(LOG_CORE, "%{public}s: not support uxpt", __func__);
153 }
154 ptes = NULL;
155 /* free data */
156 if (munmap(dataPtr, dataSize) != 0) {
157 HILOG_ERROR(LOG_CORE, "%{public}s: unmap purg data fail", __func__);
158 }
159 dataPtr = NULL;
160 HILOG_INFO(LOG_CORE, "%{public}s: supportUxpt=%{public}s", __func__, (g_supportUxpt ? "1" : "0"));
161 return;
162 }
163
/* Whether uxpt is supported by the running kernel (probed at load time). */
bool UxpteIsEnabled(void)
{
    return g_supportUxpt;
}
168
/* Bytes a caller must allocate to hold one UxPageTableStruct. */
size_t UxPageTableSize(void)
{
    return sizeof(UxPageTableStruct);
}
173
InitUxPageTable(UxPageTableStruct * upt,uint64_t addr,size_t len)174 PMState InitUxPageTable(UxPageTableStruct *upt, uint64_t addr, size_t len)
175 {
176 if (!g_supportUxpt) {
177 HILOG_DEBUG(LOG_CORE, "%{public}s: not support uxpt", __func__);
178 return PM_OK;
179 }
180 upt->dataAddr = addr;
181 upt->dataSize = len;
182 upt->uxpte = MapUxptePages_(upt->dataAddr, upt->dataSize);
183 if (!(upt->uxpte)) {
184 return PM_MMAP_UXPT_FAIL;
185 }
186 UxpteClear(upt, addr, len);
187 return PM_OK;
188 }
189
DeinitUxPageTable(UxPageTableStruct * upt)190 PMState DeinitUxPageTable(UxPageTableStruct *upt)
191 {
192 if (!g_supportUxpt) {
193 HILOG_DEBUG(LOG_CORE, "%{public}s: not support uxpt", __func__);
194 return PM_OK;
195 }
196 size_t size = GetUxPageSize_(upt->dataAddr, upt->dataSize);
197 int unmapRet = 0;
198 if (upt->uxpte) {
199 unmapRet = UnmapUxptePages_(upt->uxpte, size);
200 if (unmapRet != 0) {
201 HILOG_ERROR(LOG_CORE, "%{public}s: unmap uxpt fail", __func__);
202 return PM_UNMAP_UXPT_FAIL;
203 }
204 upt->uxpte = NULL;
205 }
206 upt->dataAddr = 0;
207 upt->dataSize = 0;
208 return PM_OK;
209 }
210
/* Pin [addr, addr + len): increment the refcount of every covered entry. */
void UxpteGet(UxPageTableStruct *upt, uint64_t addr, size_t len)
{
    if (g_supportUxpt) {
        UxpteOps_(upt, addr, len, UPT_GET);
    }
}
218
/* Unpin [addr, addr + len): decrement the refcount of every covered entry. */
void UxptePut(UxPageTableStruct *upt, uint64_t addr, size_t len)
{
    if (g_supportUxpt) {
        UxpteOps_(upt, addr, len, UPT_PUT);
    }
}
226
/* Force every entry covering [addr, addr + len) to zero. */
void UxpteClear(UxPageTableStruct *upt, uint64_t addr, size_t len)
{
    if (g_supportUxpt) {
        UxpteOps_(upt, addr, len, UPT_CLEAR);
    }
}
234
UxpteIsPresent(UxPageTableStruct * upt,uint64_t addr,size_t len)235 bool UxpteIsPresent(UxPageTableStruct *upt, uint64_t addr, size_t len)
236 {
237 if (!g_supportUxpt) {
238 return true;
239 }
240 PMState ret = UxpteOps_(upt, addr, len, UPT_IS_PRESENT);
241 return ret == PM_OK;
242 }
243
/*
 * Load *uxpte after a full memory barrier so the freshest value written
 * by other threads (or the kernel) is observed.
 */
static inline uxpte_t UxpteLoad_(uxpte_t *uxpte)
{
    __sync_synchronize();
    return *uxpte;
}
249
/* Atomic compare-and-swap on an entry; true when the swap took place. */
static inline bool UxpteCAS_(uxpte_t *uxpte, uxpte_t old, uxpte_t newVal)
{
    return __sync_bool_compare_and_swap(uxpte, old, newVal);
}
254
/*
 * Atomically add incNum to *pte, waiting while the entry is marked
 * UXPTE_UNDER_RECLAIM.
 *
 * Fix: the old do/while used `continue` to retry on the reclaim marker,
 * but `continue` in a do/while jumps to the loop CONDITION, so the CAS
 * was still attempted with old == UXPTE_UNDER_RECLAIM; if the entry had
 * not changed, the CAS succeeded and wrote
 * UXPTE_UNDER_RECLAIM + incNum (== 0 for incNum == UXPTE_REFCNT_ONE),
 * destroying the reclaim marker. This loop only attempts the CAS for
 * non-reclaim values.
 */
static void UxpteAdd_(uxpte_t *pte, size_t incNum)
{
    for (;;) {
        uxpte_t old = UxpteLoad_(pte);
        if (IsUxpteUnderReclaim_(old)) {
            sched_yield(); /* let the reclaimer finish, then re-read */
            continue;
        }
        if (UxpteCAS_(pte, old, old + incNum)) {
            return;
        }
    }
}
266
/* Atomically subtract decNum from *pte, retrying until the CAS succeeds. */
static void UxpteSub_(uxpte_t *pte, size_t decNum)
{
    for (;;) {
        uxpte_t cur = UxpteLoad_(pte);
        if (UxpteCAS_(pte, cur, cur - decNum)) {
            break;
        }
    }
}
274
/*
 * Force *pte to zero. A non-zero value at this point is unexpected
 * (entries should already be balanced), so it is logged before clearing.
 */
static void UxpteClear_(uxpte_t *pte)
{
    uxpte_t val = UxpteLoad_(pte);
    if ((unsigned long long)val == 0) {
        return; /* has been set to zero */
    }
    HILOG_ERROR(LOG_CORE, "%{public}s: upte(0x%{public}llx) != 0", __func__, (unsigned long long)val);
    for (;;) {
        val = UxpteLoad_(pte);
        if (UxpteCAS_(pte, val, 0)) {
            break;
        }
    }
}
286
/*
 * Index of currAddr's entry within the mapped uxpte array, whose base
 * corresponds to the start of the uxpt page containing startAddr.
 */
static inline size_t GetIndexInUxpte_(uint64_t startAddr, uint64_t currAddr)
{
    return UxpteOffset_(startAddr) + (VirtPageNo_(currAddr) - VirtPageNo_(startAddr));
}
291
/* Increment the refcount of the entry covering addr. */
static void GetUxpteAt_(UxPageTableStruct *upt, uint64_t addr)
{
    size_t idx = GetIndexInUxpte_(upt->dataAddr, addr);
    uxpte_t *entry = &(upt->uxpte[idx]);
    UxpteAdd_(entry, UXPTE_REFCNT_ONE);

    HILOG_DEBUG(LOG_CORE, "%{public}s: addr(0x%{public}llx) upte=0x%{public}llx",
        __func__, (unsigned long long)addr, (unsigned long long)(*entry));
}
300
/* Decrement the refcount of the entry covering addr. */
static void PutUxpteAt_(UxPageTableStruct *upt, uint64_t addr)
{
    size_t idx = GetIndexInUxpte_(upt->dataAddr, addr);
    uxpte_t *entry = &(upt->uxpte[idx]);
    UxpteSub_(entry, UXPTE_REFCNT_ONE);

    HILOG_DEBUG(LOG_CORE, "%{public}s: addr(0x%{public}llx) upte=0x%{public}llx",
        __func__, (unsigned long long)addr, (unsigned long long)(*entry));
}
309
/* Zero the entry covering addr. */
static void ClearUxpteAt_(UxPageTableStruct *upt, uint64_t addr)
{
    UxpteClear_(&(upt->uxpte[GetIndexInUxpte_(upt->dataAddr, addr)]));
}
315
IsPresentAt_(UxPageTableStruct * upt,uint64_t addr)316 static bool IsPresentAt_(UxPageTableStruct *upt, uint64_t addr)
317 {
318 size_t index = GetIndexInUxpte_(upt->dataAddr, addr);
319
320 HILOG_DEBUG(LOG_CORE, "%{public}s: addr(0x%{public}llx) upte=0x%{public}llx PRESENT_MASK=0x%{public}zx",
321 __func__, (unsigned long long)addr, (unsigned long long)(upt->uxpte[index]), UXPTE_PRESENT_MASK);
322
323 return IsUxptePresent_(upt->uxpte[index]);
324 }
325
UxpteOps_(UxPageTableStruct * upt,uint64_t addr,size_t len,enum UxpteOp op)326 static PMState UxpteOps_(UxPageTableStruct *upt, uint64_t addr, size_t len, enum UxpteOp op)
327 {
328 if (upt == NULL) {
329 return PM_BUILDER_NULL;
330 }
331 uint64_t start = RoundDown_(addr, PAGE_SIZE);
332 uint64_t end = RoundUp(addr + len, PAGE_SIZE);
333 if (start < upt->dataAddr || end > (upt->dataAddr + upt->dataSize)) {
334 HILOG_ERROR(LOG_CORE, "%{public}s: addr(0x%{public}llx) start(0x%{public}llx) < dataAddr(0x%{public}llx)"
335 " || end(0x%{public}llx) > dataAddr+dataSize(0x%{public}llx) out of bound",
336 __func__, (unsigned long long)addr, (unsigned long long)start, (unsigned long long)(upt->dataAddr),
337 (unsigned long long)end, (unsigned long long)(upt->dataAddr + upt->dataSize));
338
339 return PM_UXPT_OUT_RANGE;
340 }
341
342 for (uint64_t off = start; off < end; off += PAGE_SIZE) {
343 switch (op) {
344 case UPT_GET: {
345 GetUxpteAt_(upt, off);
346 break;
347 }
348 case UPT_PUT: {
349 PutUxpteAt_(upt, off);
350 break;
351 }
352 case UPT_CLEAR: {
353 ClearUxpteAt_(upt, off);
354 break;
355 }
356 case UPT_IS_PRESENT: {
357 if (!IsPresentAt_(upt, off)) {
358 HILOG_ERROR(LOG_CORE, "%{public}s: addr(0x%{public}llx) not present", __func__,
359 (unsigned long long)addr);
360 return PM_UXPT_NO_PRESENT;
361 }
362 break;
363 }
364 default:
365 break;
366 }
367 }
368
369 return PM_OK;
370 }
371
MapUxptePages_(uint64_t dataAddr,size_t dataSize)372 static uxpte_t *MapUxptePages_(uint64_t dataAddr, size_t dataSize)
373 {
374 int prot = PROT_READ | PROT_WRITE;
375 int type = MAP_ANONYMOUS | MAP_USEREXPTE;
376 size_t size = GetUxPageSize_(dataAddr, dataSize);
377 uxpte_t *ptes = (uxpte_t*)mmap(NULL, size, prot, type, -1, UxptePageNo_(dataAddr) * PAGE_SIZE);
378 if (ptes == MAP_FAILED) {
379 HILOG_ERROR(LOG_CORE, "%{public}s: fail, return NULL", __func__);
380 ptes = NULL;
381 }
382
383 return ptes;
384 }
385
/* Unmap the uxpte array; returns munmap()'s result (0 on success). */
static int UnmapUxptePages_(uxpte_t *ptes, size_t size)
{
    return munmap(ptes, size);
}
390
#else /* !(defined(USE_UXPT) && (USE_UXPT > 0)), which means not using uxpt */
392
/* Zero-size placeholder when uxpt is compiled out (empty struct is a GNU
 * extension; UxPageTableSize() reports 0 so callers allocate nothing). */
typedef struct UserExtendPageTable {
    /* i am empty */
} UxPageTableStruct;
396
/* uxpt compiled out: never enabled. */
bool UxpteIsEnabled(void)
{
    return false;
}
401
/* uxpt compiled out: no storage needed for the handle. */
size_t UxPageTableSize(void)
{
    return 0;
}
406
/* uxpt compiled out: nothing to initialize, always succeeds. */
PMState InitUxPageTable(UxPageTableStruct *upt, uint64_t addr, size_t len)
{
    return PM_OK;
}
411
/* uxpt compiled out: nothing to release, always succeeds. */
PMState DeinitUxPageTable(UxPageTableStruct *upt)
{
    return PM_OK;
}
416
/* uxpt compiled out: pinning is a no-op. */
void UxpteGet(UxPageTableStruct *upt, uint64_t addr, size_t len) {}
418
/* uxpt compiled out: unpinning is a no-op. */
void UxptePut(UxPageTableStruct *upt, uint64_t addr, size_t len) {}
420
/* uxpt compiled out: data is never reclaimed, so it is always present. */
bool UxpteIsPresent(UxPageTableStruct *upt, uint64_t addr, size_t len)
{
    return true;
}
425
426 #endif /* USE_UXPT > 0 */
427