// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for The Physical Address Space
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-pa: " fmt

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/memory-tiers.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>

#include "../internal.h"
#include "ops-common.h"

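/*
 * rmap_walk() callback for clearing the accessed bit of each page table
 * entry that maps @folio in @vma, so that later checks can tell whether the
 * folio has been accessed since.
 */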
static bool damon_folio_mkold_one(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte)
			damon_ptep_mkold(pvmw.pte, vma, addr);
		else
			damon_pmdp_mkold(pvmw.pmd, vma, addr);
	}
	return true;
}

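/*
 * Clear the accessed bits of all page table entries mapping @folio.  Folios
 * having no mapping are simply marked as idle using the page idle flag.
 */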
static void damon_folio_mkold(struct folio *folio)
{
	struct rmap_walk_control rwc = {
		.rmap_one = damon_folio_mkold_one,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		folio_set_idle(folio);
		return;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		return;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);
}

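/*
 * Look up the folio of the given physical address and mark it as not
 * accessed (old), as a preparation of the next access check.
 */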
static void damon_pa_mkold(unsigned long paddr)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));

	if (!folio)
		return;

	damon_folio_mkold(folio);
	folio_put(folio);
}

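/*
 * Pick a random sampling address in the region and make the page containing
 * it old, so that the next access check can detect accesses to the region.
 */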
static void __damon_pa_prepare_access_check(struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_pa_mkold(r->sampling_addr);
}

static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			__damon_pa_prepare_access_check(r);
	}
}

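/*
 * rmap_walk() callback for checking whether @folio has been accessed, using
 * the accessed bits of the page table entries, the page idle flag, and the
 * mmu notifiers.  The result is stored in the boolean that @arg points to,
 * and the walk stops as soon as an access is found.
 */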
static bool damon_folio_young_one(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr, void *arg)
{
	bool *accessed = arg;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	*accessed = false;
	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			*accessed = pte_young(ptep_get(pvmw.pte)) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			*accessed = pmd_young(pmdp_get(pvmw.pmd)) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
#else
			WARN_ON_ONCE(1);
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
		}
		if (*accessed) {
			page_vma_mapped_walk_done(&pvmw);
			break;
		}
	}

	/* If accessed, stop walking */
	return *accessed == false;
}

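/*
 * Whether @folio has been accessed since the last call to
 * damon_folio_mkold() for it.  Unmapped folios are judged using only the
 * page idle flag.
 */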
static bool damon_folio_young(struct folio *folio)
{
	bool accessed = false;
	struct rmap_walk_control rwc = {
		.arg = &accessed,
		.rmap_one = damon_folio_young_one,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		if (folio_test_idle(folio))
			return false;
		else
			return true;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		return false;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);

	return accessed;
}

static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
	bool accessed;

	if (!folio)
		return false;

	accessed = damon_folio_young(folio);
	*folio_sz = folio_size(folio);
	folio_put(folio);
	return accessed;
}

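/*
 * Check whether the region's sampling address has been accessed and update
 * the region's access rate accordingly.  The result for the lastly checked
 * folio is cached in static variables and reused for regions whose sampling
 * addresses fall in the same folio.
 */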
static void __damon_pa_check_access(struct damon_region *r,
		struct damon_attrs *attrs)
{
	static unsigned long last_addr;
	static unsigned long last_folio_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (ALIGN_DOWN(last_addr, last_folio_sz) ==
			ALIGN_DOWN(r->sampling_addr, last_folio_sz)) {
		damon_update_region_access_rate(r, last_accessed, attrs);
		return;
	}

	last_accessed = damon_pa_young(r->sampling_addr, &last_folio_sz);
	damon_update_region_access_rate(r, last_accessed, attrs);

	last_addr = r->sampling_addr;
}

static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t) {
			__damon_pa_check_access(r, &ctx->attrs);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
	}

	return max_nr_accesses;
}

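/*
 * Return whether @folio should be filtered out of the scheme's action
 * according to @filter, honoring the filter's 'matching' setting.
 */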
static bool __damos_pa_filter_out(struct damos_filter *filter,
		struct folio *folio)
{
	bool matched = false;
	struct mem_cgroup *memcg;

	switch (filter->type) {
	case DAMOS_FILTER_TYPE_ANON:
		matched = folio_test_anon(folio);
		break;
	case DAMOS_FILTER_TYPE_MEMCG:
		rcu_read_lock();
		memcg = folio_memcg_check(folio);
		if (!memcg)
			matched = false;
		else
			matched = filter->memcg_id == mem_cgroup_id(memcg);
		rcu_read_unlock();
		break;
	case DAMOS_FILTER_TYPE_YOUNG:
		matched = damon_folio_young(folio);
		if (matched)
			damon_folio_mkold(folio);
		break;
	default:
		break;
	}

	return matched == filter->matching;
}

/*
 * damos_pa_filter_out - Return true if the page should be filtered out.
 */
static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
{
	struct damos_filter *filter;

	damos_for_each_filter(filter, scheme) {
		if (__damos_pa_filter_out(filter, folio))
			return true;
	}
	return false;
}

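/*
 * Apply the DAMOS_PAGEOUT action: isolate the folios of the region from the
 * LRU lists and reclaim them.  Unless the scheme already has a young-pages
 * filter, one is installed temporarily so that recently accessed pages are
 * re-checked and excluded at page granularity.  Returns the bytes reclaimed.
 */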
static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
{
	unsigned long addr, applied;
	LIST_HEAD(folio_list);
	bool install_young_filter = true;
	struct damos_filter *filter;

	/* check access in page level again by default */
	damos_for_each_filter(filter, s) {
		if (filter->type == DAMOS_FILTER_TYPE_YOUNG) {
			install_young_filter = false;
			break;
		}
	}
	if (install_young_filter) {
		filter = damos_new_filter(DAMOS_FILTER_TYPE_YOUNG, true);
		if (!filter)
			return 0;
		damos_add_filter(s, filter);
	}

	addr = r->ar.start;
	while (addr < r->ar.end) {
		struct folio *folio = damon_get_folio(PHYS_PFN(addr));

		if (!folio) {
			addr += PAGE_SIZE;
			continue;
		}

		if (damos_pa_filter_out(s, folio))
			goto put_folio;

		folio_clear_referenced(folio);
		folio_test_clear_young(folio);
		if (!folio_isolate_lru(folio))
			goto put_folio;
		if (folio_test_unevictable(folio))
			folio_putback_lru(folio);
		else
			list_add(&folio->lru, &folio_list);
put_folio:
		addr += folio_size(folio);
		folio_put(folio);
	}
	if (install_young_filter)
		damos_destroy_filter(filter);
	applied = reclaim_pages(&folio_list);
	cond_resched();
	return applied * PAGE_SIZE;
}

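/*
 * Apply DAMOS_LRU_PRIO or DAMOS_LRU_DEPRIO: mark each folio of the region as
 * accessed, or deactivate it, depending on @mark_accessed.  Returns the
 * total bytes of the folios that the action was applied to.
 */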
static inline unsigned long damon_pa_mark_accessed_or_deactivate(
		struct damon_region *r, struct damos *s, bool mark_accessed)
{
	unsigned long addr, applied = 0;

	addr = r->ar.start;
	while (addr < r->ar.end) {
		struct folio *folio = damon_get_folio(PHYS_PFN(addr));

		if (!folio) {
			addr += PAGE_SIZE;
			continue;
		}

		if (damos_pa_filter_out(s, folio))
			goto put_folio;

		if (mark_accessed)
			folio_mark_accessed(folio);
		else
			folio_deactivate(folio);
		applied += folio_nr_pages(folio);
put_folio:
		addr += folio_size(folio);
		folio_put(folio);
	}
	return applied * PAGE_SIZE;
}

static unsigned long damon_pa_mark_accessed(struct damon_region *r,
		struct damos *s)
{
	return damon_pa_mark_accessed_or_deactivate(r, s, true);
}

static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
		struct damos *s)
{
	return damon_pa_mark_accessed_or_deactivate(r, s, false);
}

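/*
 * Migrate the folios of @migrate_folios from the node of @pgdat to
 * @target_nid with lightweight, non-blocking allocations, and return the
 * number of successfully migrated pages.
 */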
static unsigned int __damon_pa_migrate_folio_list(
		struct list_head *migrate_folios, struct pglist_data *pgdat,
		int target_nid)
{
	unsigned int nr_succeeded = 0;
	nodemask_t allowed_mask = NODE_MASK_NONE;
	struct migration_target_control mtc = {
		/*
		 * Allocate from 'node', or fail quickly and quietly.
		 * When this happens, 'page' will likely just be discarded
		 * instead of migrated.
		 */
		.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) |
			__GFP_NOWARN | __GFP_NOMEMALLOC | GFP_NOWAIT,
		.nid = target_nid,
		.nmask = &allowed_mask
	};

	if (pgdat->node_id == target_nid || target_nid == NUMA_NO_NODE)
		return 0;

	if (list_empty(migrate_folios))
		return 0;

	/* Migration ignores all cpuset and mempolicy settings */
	migrate_pages(migrate_folios, alloc_migrate_folio, NULL,
			(unsigned long)&mtc, MIGRATE_ASYNC, MR_DAMON,
			&nr_succeeded);

	return nr_succeeded;
}

static unsigned int damon_pa_migrate_folio_list(struct list_head *folio_list,
		struct pglist_data *pgdat, int target_nid)
{
	unsigned int nr_migrated = 0;
	struct folio *folio;
	LIST_HEAD(ret_folios);
	LIST_HEAD(migrate_folios);

	while (!list_empty(folio_list)) {
		struct folio *folio;

		cond_resched();

		folio = lru_to_folio(folio_list);
		list_del(&folio->lru);

		if (!folio_trylock(folio))
			goto keep;

		/* Relocate its contents to another node. */
		list_add(&folio->lru, &migrate_folios);
		folio_unlock(folio);
		continue;
keep:
		list_add(&folio->lru, &ret_folios);
	}
	/* 'folio_list' is always empty here */

	/* Migrate folios selected for migration */
	nr_migrated += __damon_pa_migrate_folio_list(
			&migrate_folios, pgdat, target_nid);
	/*
	 * Folios that could not be migrated are still in @migrate_folios.
	 * Add those back on @folio_list
	 */
	if (!list_empty(&migrate_folios))
		list_splice_init(&migrate_folios, folio_list);

	try_to_unmap_flush();

	list_splice(&ret_folios, folio_list);

	while (!list_empty(folio_list)) {
		folio = lru_to_folio(folio_list);
		list_del(&folio->lru);
		folio_putback_lru(folio);
	}

	return nr_migrated;
}

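/*
 * Migrate the folios of @folio_list to @target_nid.  The folios are grouped
 * by their current node and migrated batch by batch, with reclaim disabled
 * for the migration target allocations.  Returns the number of migrated
 * pages.
 */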
static unsigned long damon_pa_migrate_pages(struct list_head *folio_list,
		int target_nid)
{
	int nid;
	unsigned long nr_migrated = 0;
	LIST_HEAD(node_folio_list);
	unsigned int noreclaim_flag;

	if (list_empty(folio_list))
		return nr_migrated;

	if (target_nid < 0 || target_nid >= MAX_NUMNODES ||
			!node_state(target_nid, N_MEMORY))
		return nr_migrated;

	noreclaim_flag = memalloc_noreclaim_save();

	nid = folio_nid(lru_to_folio(folio_list));
	do {
		struct folio *folio = lru_to_folio(folio_list);

		if (nid == folio_nid(folio)) {
			list_move(&folio->lru, &node_folio_list);
			continue;
		}

		nr_migrated += damon_pa_migrate_folio_list(&node_folio_list,
				NODE_DATA(nid), target_nid);
		nid = folio_nid(lru_to_folio(folio_list));
	} while (!list_empty(folio_list));

	nr_migrated += damon_pa_migrate_folio_list(&node_folio_list,
			NODE_DATA(nid), target_nid);

	memalloc_noreclaim_restore(noreclaim_flag);

	return nr_migrated;
}

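/*
 * Apply DAMOS_MIGRATE_HOT or DAMOS_MIGRATE_COLD: isolate the folios of the
 * region from the LRU lists and migrate them to the scheme's target node.
 * Returns the total bytes of the migrated pages.
 */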
static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s)
{
	unsigned long addr, applied;
	LIST_HEAD(folio_list);

	addr = r->ar.start;
	while (addr < r->ar.end) {
		struct folio *folio = damon_get_folio(PHYS_PFN(addr));

		if (!folio) {
			addr += PAGE_SIZE;
			continue;
		}

		if (damos_pa_filter_out(s, folio))
			goto put_folio;

		if (!folio_isolate_lru(folio))
			goto put_folio;
		list_add(&folio->lru, &folio_list);
put_folio:
		addr += folio_size(folio);
		folio_put(folio);
	}
	applied = damon_pa_migrate_pages(&folio_list, s->target_nid);
	cond_resched();
	return applied * PAGE_SIZE;
}

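/*
 * Apply the given DAMOS action to the region and return the total bytes that
 * the action was applied to.
 */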
static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pa_pageout(r, scheme);
	case DAMOS_LRU_PRIO:
		return damon_pa_mark_accessed(r, scheme);
	case DAMOS_LRU_DEPRIO:
		return damon_pa_deactivate_pages(r, scheme);
	case DAMOS_MIGRATE_HOT:
	case DAMOS_MIGRATE_COLD:
		return damon_pa_migrate(r, scheme);
	case DAMOS_STAT:
		break;
	default:
		/* DAMOS actions that are not yet supported by 'paddr'. */
		break;
	}
	return 0;
}

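/*
 * Return the priority score of the region for applying the scheme, using the
 * region's coldness for reclaim-like actions and its hotness for the
 * prioritizing ones.
 */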
static int damon_pa_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_cold_score(context, r, scheme);
	case DAMOS_LRU_PRIO:
		return damon_hot_score(context, r, scheme);
	case DAMOS_LRU_DEPRIO:
		return damon_cold_score(context, r, scheme);
	case DAMOS_MIGRATE_HOT:
		return damon_hot_score(context, r, scheme);
	case DAMOS_MIGRATE_COLD:
		return damon_cold_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

static int __init damon_pa_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_PADDR,
		.init = NULL,
		.update = NULL,
		.prepare_access_checks = damon_pa_prepare_access_checks,
		.check_accesses = damon_pa_check_accesses,
		.reset_aggregated = NULL,
		.target_valid = NULL,
		.cleanup = NULL,
		.apply_scheme = damon_pa_apply_scheme,
		.get_scheme_score = damon_pa_scheme_score,
	};

	return damon_register_ops(&ops);
}

subsys_initcall(damon_pa_initcall);