// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for Virtual Address Spaces
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#define pr_fmt(fmt) "damon-va: " fmt

#include <asm-generic/mman-common.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>

#include "prmtv-common.h"

#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

/*
 * 't->id' should be a pointer to the relevant 'struct pid', which holds a
 * reference count.  The caller must put the returned task, unless it is NULL.
 */
static inline struct task_struct *damon_get_task_struct(struct damon_target *t)
{
	return get_pid_task((struct pid *)t->id, PIDTYPE_PID);
}

/*
 * Get the mm_struct of the given target
 *
 * Caller _must_ put the mm_struct after use, unless it is NULL.
 *
 * Returns the mm_struct of the target on success, NULL on failure
 */
static struct mm_struct *damon_get_mm(struct damon_target *t)
{
	struct task_struct *task;
	struct mm_struct *mm;

	task = damon_get_task_struct(t);
	if (!task)
		return NULL;

	mm = get_task_mm(task);
	put_task_struct(task);
	return mm;
}

/*
 * Functions for the initial monitoring target regions construction
 */

/*
 * Size-evenly split a region into 'nr_pieces' small regions
 *
 * Returns 0 on success, or negative error code otherwise.
 */
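/*
 * Illustrative example (not from the original source): splitting a region
 * covering [0, 10 * DAMON_MIN_REGION) into nr_pieces == 3 results in
 * sz_piece == 3 * DAMON_MIN_REGION, so the pieces become [0, 3), [3, 6) and
 * [6, 10) in DAMON_MIN_REGION units; the last piece is extended to 'orig_end'
 * to absorb the rounding remainder.
 */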
static int damon_va_evenly_split_region(struct damon_target *t,
		struct damon_region *r, unsigned int nr_pieces)
{
	unsigned long sz_orig, sz_piece, orig_end;
	struct damon_region *n = NULL, *next;
	unsigned long start;

	if (!r || !nr_pieces)
		return -EINVAL;

	orig_end = r->ar.end;
	sz_orig = r->ar.end - r->ar.start;
	sz_piece = ALIGN_DOWN(sz_orig / nr_pieces, DAMON_MIN_REGION);

	if (!sz_piece)
		return -EINVAL;

	r->ar.end = r->ar.start + sz_piece;
	next = damon_next_region(r);
	for (start = r->ar.end; start + sz_piece <= orig_end;
			start += sz_piece) {
		n = damon_new_region(start, start + sz_piece);
		if (!n)
			return -ENOMEM;
		damon_insert_region(n, r, next, t);
		r = n;
	}
	/* complement last region for possible rounding error */
	if (n)
		n->ar.end = orig_end;

	return 0;
}

static unsigned long sz_range(struct damon_addr_range *r)
{
	return r->end - r->start;
}

/*
 * Find the three regions separated by the two biggest unmapped regions
 *
 * vma		the head vma of the target address space
 * regions	an array of three address ranges in which the results are saved
 *
 * This function receives an address space and finds three regions in it that
 * are separated by the two biggest unmapped regions in the space.  Please
 * refer to the comment of '__damon_va_init_regions()' below to see why this
 * is necessary.
 *
 * Returns 0 on success, or a negative error code otherwise.
 */
static int __damon_va_three_regions(struct vm_area_struct *vma,
				    struct damon_addr_range regions[3])
{
	struct damon_addr_range gap = {0}, first_gap = {0}, second_gap = {0};
	struct vm_area_struct *last_vma = NULL;
	unsigned long start = 0;
	struct rb_root rbroot;

	/* Find two biggest gaps so that first_gap > second_gap > others */
	for (; vma; vma = vma->vm_next) {
		if (!last_vma) {
			start = vma->vm_start;
			goto next;
		}

		if (vma->rb_subtree_gap <= sz_range(&second_gap)) {
			rbroot.rb_node = &vma->vm_rb;
			vma = rb_entry(rb_last(&rbroot),
					struct vm_area_struct, vm_rb);
			goto next;
		}

		gap.start = last_vma->vm_end;
		gap.end = vma->vm_start;
		if (sz_range(&gap) > sz_range(&second_gap)) {
			swap(gap, second_gap);
			if (sz_range(&second_gap) > sz_range(&first_gap))
				swap(second_gap, first_gap);
		}
next:
		last_vma = vma;
	}

	if (!sz_range(&second_gap) || !sz_range(&first_gap))
		return -EINVAL;

	/* Sort the two biggest gaps by address */
	if (first_gap.start > second_gap.start)
		swap(first_gap, second_gap);

	/* Store the result */
	regions[0].start = ALIGN(start, DAMON_MIN_REGION);
	regions[0].end = ALIGN(first_gap.start, DAMON_MIN_REGION);
	regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION);
	regions[1].end = ALIGN(second_gap.start, DAMON_MIN_REGION);
	regions[2].start = ALIGN(second_gap.end, DAMON_MIN_REGION);
	regions[2].end = ALIGN(last_vma->vm_end, DAMON_MIN_REGION);

	return 0;
}

/*
 * Get the three regions in the given target (task)
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int damon_va_three_regions(struct damon_target *t,
				  struct damon_addr_range regions[3])
{
	struct mm_struct *mm;
	int rc;

	mm = damon_get_mm(t);
	if (!mm)
		return -EINVAL;

	mmap_read_lock(mm);
	rc = __damon_va_three_regions(mm->mmap, regions);
	mmap_read_unlock(mm);

	mmput(mm);
	return rc;
}

/*
 * Initialize the monitoring target regions for the given target (task)
 *
 * t	the given target
 *
 * Because only a small number of portions of the entire address space are
 * actually mapped to memory and accessed, monitoring the unmapped regions is
 * wasteful.  On the other hand, because we can tolerate small amounts of
 * noise, tracking every mapping is not strictly required, and doing so could
 * even incur a high overhead if the mappings change frequently or the number
 * of mappings is high.  The adaptive regions adjustment mechanism will
 * further help to deal with the noise by simply identifying the unmapped
 * areas as regions that receive no access.  Moreover, applying the real
 * mappings, which could contain many unmapped areas inside, would make the
 * adaptive mechanism quite complex.  Nevertheless, excessively huge unmapped
 * areas inside the monitoring target should be removed so that the adaptive
 * mechanism does not waste time on them.
 *
 * For this reason, we convert the complex mappings into three distinct
 * regions that together cover every mapped area of the address space.  The
 * two gaps between the three regions are the two biggest unmapped areas in
 * the given address space.  In detail, this function first identifies the
 * start and the end of the mappings and the two biggest unmapped areas of
 * the address space.  Then, it constructs the three regions as below:
 *
 *     [mappings[0]->start, big_two_unmapped_areas[0]->start)
 *     [big_two_unmapped_areas[0]->end, big_two_unmapped_areas[1]->start)
 *     [big_two_unmapped_areas[1]->end, mappings[nr_mappings - 1]->end)
 *
 * Because the usual memory map of a process looks like the diagram below,
 * the gap between the heap and the uppermost mmap()-ed region, and the gap
 * between the lowermost mmap()-ed region and the stack, are usually the two
 * biggest unmapped regions.  Because these gaps are exceptionally huge in a
 * usual address space, excluding only these two biggest unmapped regions is
 * a sufficient trade-off.
 *
 *   <heap>
 *   <BIG UNMAPPED REGION 1>
 *   <uppermost mmap()-ed region>
 *   (other mmap()-ed regions and small unmapped regions)
 *   <lowermost mmap()-ed region>
 *   <BIG UNMAPPED REGION 2>
 *   <stack>
 */
static void __damon_va_init_regions(struct damon_ctx *ctx,
				    struct damon_target *t)
{
	struct damon_target *ti;
	struct damon_region *r;
	struct damon_addr_range regions[3];
	unsigned long sz = 0, nr_pieces;
	int i, tidx = 0;

	if (damon_va_three_regions(t, regions)) {
		damon_for_each_target(ti, ctx) {
			if (ti == t)
				break;
			tidx++;
		}
		pr_debug("Failed to get three regions of %dth target\n", tidx);
		return;
	}

	for (i = 0; i < 3; i++)
		sz += regions[i].end - regions[i].start;
	if (ctx->min_nr_regions)
		sz /= ctx->min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	/* Set the initial three regions of the target */
	for (i = 0; i < 3; i++) {
		r = damon_new_region(regions[i].start, regions[i].end);
		if (!r) {
			pr_err("%d'th init region creation failed\n", i);
			return;
		}
		damon_add_region(r, t);

		nr_pieces = (regions[i].end - regions[i].start) / sz;
		damon_va_evenly_split_region(t, r, nr_pieces);
	}
}

/* Initialize '->regions_list' of every target (task) */
static void damon_va_init(struct damon_ctx *ctx)
{
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		/* the user may set the target regions as they want */
		if (!damon_nr_regions(t))
			__damon_va_init_regions(ctx, t);
	}
}

/*
 * Functions for the dynamic monitoring target regions update
 */

/*
 * Check whether a region is intersecting an address range
 *
 * Returns true if it is.
 */
static bool damon_intersect(struct damon_region *r,
		struct damon_addr_range *re)
{
	return !(r->ar.end <= re->start || re->end <= r->ar.start);
}

/*
 * Update damon regions for the three big regions of the given target
 *
 * t		the given target
 * bregions	the three big regions of the target
 */
static void damon_va_apply_three_regions(struct damon_target *t,
		struct damon_addr_range bregions[3])
{
	struct damon_region *r, *next;
	unsigned int i;

	/* Remove regions which are not in the three big regions now */
	damon_for_each_region_safe(r, next, t) {
		for (i = 0; i < 3; i++) {
			if (damon_intersect(r, &bregions[i]))
				break;
		}
		if (i == 3)
			damon_destroy_region(r, t);
	}

	/* Adjust intersecting regions to fit with the three big regions */
	for (i = 0; i < 3; i++) {
		struct damon_region *first = NULL, *last;
		struct damon_region *newr;
		struct damon_addr_range *br;

		br = &bregions[i];
		/* Get the first and last regions which intersects with br */
		damon_for_each_region(r, t) {
			if (damon_intersect(r, br)) {
				if (!first)
					first = r;
				last = r;
			}
			if (r->ar.start >= br->end)
				break;
		}
		if (!first) {
			/* no damon_region intersects with this big region */
			newr = damon_new_region(
					ALIGN_DOWN(br->start,
						DAMON_MIN_REGION),
					ALIGN(br->end, DAMON_MIN_REGION));
			if (!newr)
				continue;
			damon_insert_region(newr, damon_prev_region(r), r, t);
		} else {
			first->ar.start = ALIGN_DOWN(br->start,
					DAMON_MIN_REGION);
			last->ar.end = ALIGN(br->end, DAMON_MIN_REGION);
		}
	}
}

/*
 * Update regions for current memory mappings
 */
static void damon_va_update(struct damon_ctx *ctx)
{
	struct damon_addr_range three_regions[3];
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		if (damon_va_three_regions(t, three_regions))
			continue;
		damon_va_apply_three_regions(t, three_regions);
	}
}

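/*
 * Page table walk callback which clears the accessed bit of the entry that
 * maps 'addr'.  If the PMD maps a huge page, the bit is cleared at the PMD
 * level; otherwise, the PTE mapping 'addr' is handled.
 */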
static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (!pmd_present(*pmd)) {
			spin_unlock(ptl);
			return 0;
		}

		if (pmd_huge(*pmd)) {
			damon_pmdp_mkold(pmd, walk->mm, addr);
			spin_unlock(ptl);
			return 0;
		}
		spin_unlock(ptl);
	}

	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return 0;
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte_present(*pte))
		goto out;
	damon_ptep_mkold(pte, walk->mm, addr);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}

#ifdef CONFIG_HUGETLB_PAGE
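/*
 * Clear the accessed bit of the hugetlb entry mapping 'addr' and mark the
 * backing page idle, similarly to what damon_ptep_mkold() does for regular
 * pages.  The MMU notifier is consulted so that accesses from secondary MMUs
 * are also taken into account.
 */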
static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
				struct vm_area_struct *vma, unsigned long addr)
{
	bool referenced = false;
	pte_t entry = huge_ptep_get(pte);
	struct page *page = pte_page(entry);

	if (!page)
		return;

	get_page(page);

	if (pte_young(entry)) {
		referenced = true;
		entry = pte_mkold(entry);
		set_huge_pte_at(mm, addr, pte, entry);
	}

#ifdef CONFIG_MMU_NOTIFIER
	if (mmu_notifier_clear_young(mm, addr,
				     addr + huge_page_size(hstate_vma(vma))))
		referenced = true;
#endif /* CONFIG_MMU_NOTIFIER */

	if (referenced)
		set_page_young(page);

	set_page_idle(page);
	put_page(page);
}

static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask,
				     unsigned long addr, unsigned long end,
				     struct mm_walk *walk)
{
	struct hstate *h = hstate_vma(walk->vma);
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(h, walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto out;

	damon_hugetlb_mkold(pte, walk->mm, walk->vma, addr);

out:
	spin_unlock(ptl);
	return 0;
}
#else
#define damon_mkold_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static const struct mm_walk_ops damon_mkold_ops = {
	.pmd_entry = damon_mkold_pmd_entry,
	.hugetlb_entry = damon_mkold_hugetlb_entry,
};

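/*
 * Clear the accessed bit of the page table entry mapping 'addr' in 'mm'.
 * Walking the one byte long range [addr, addr + 1) visits only the entry
 * covering the sampling address.
 */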
static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
{
	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL);
	mmap_read_unlock(mm);
}

/*
 * Functions for the access checking of the regions
 */

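/*
 * Prepare the access check of a region by picking a random sampling address
 * in it and clearing the accessed bit of the mapping for that address, so
 * that the following check_accesses() can tell whether it was accessed in
 * the meantime.
 */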
static void __damon_va_prepare_access_check(struct damon_ctx *ctx,
			struct mm_struct *mm, struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_va_mkold(mm, r->sampling_addr);
}

static void damon_va_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;
		damon_for_each_region(r, t)
			__damon_va_prepare_access_check(ctx, mm, r);
		mmput(mm);
	}
}

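/*
 * Private data for the "young" page table walk.  'page_sz' points to where
 * the size of the checked mapping (regular page, THP, or hugetlb page) is
 * stored when an access is found, and 'young' records whether the accessed
 * bit, the idle page flag, or an MMU notifier indicated an access.
 */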
struct damon_young_walk_private {
	unsigned long *page_sz;
	bool young;
};

static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	spinlock_t *ptl;
	struct page *page;
	struct damon_young_walk_private *priv = walk->private;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (!pmd_present(*pmd)) {
			spin_unlock(ptl);
			return 0;
		}

		if (!pmd_huge(*pmd)) {
			spin_unlock(ptl);
			goto regular_page;
		}
		page = damon_get_page(pmd_pfn(*pmd));
		if (!page)
			goto huge_out;
		if (pmd_young(*pmd) || !page_is_idle(page) ||
					mmu_notifier_test_young(walk->mm,
						addr)) {
			*priv->page_sz = ((1UL) << HPAGE_PMD_SHIFT);
			priv->young = true;
		}
		put_page(page);
huge_out:
		spin_unlock(ptl);
		return 0;
	}

regular_page:
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return -EINVAL;
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte_present(*pte))
		goto out;
	page = damon_get_page(pte_pfn(*pte));
	if (!page)
		goto out;
	if (pte_young(*pte) || !page_is_idle(page) ||
			mmu_notifier_test_young(walk->mm, addr)) {
		*priv->page_sz = PAGE_SIZE;
		priv->young = true;
	}
	put_page(page);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}

#ifdef CONFIG_HUGETLB_PAGE
static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
				     unsigned long addr, unsigned long end,
				     struct mm_walk *walk)
{
	struct damon_young_walk_private *priv = walk->private;
	struct hstate *h = hstate_vma(walk->vma);
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(h, walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto out;

	page = pte_page(entry);
	if (!page)
		goto out;

	get_page(page);

	if (pte_young(entry) || !page_is_idle(page) ||
	    mmu_notifier_test_young(walk->mm, addr)) {
		*priv->page_sz = huge_page_size(h);
		priv->young = true;
	}

	put_page(page);

out:
	spin_unlock(ptl);
	return 0;
}
#else
#define damon_young_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static const struct mm_walk_ops damon_young_ops = {
	.pmd_entry = damon_young_pmd_entry,
	.hugetlb_entry = damon_young_hugetlb_entry,
};

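/*
 * Check whether the mapping of 'addr' in 'mm' has been accessed since the
 * accessed bit was last cleared by damon_va_mkold().  Returns true if so,
 * and stores the size of the accessed mapping in '*page_sz'.
 */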
static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
		unsigned long *page_sz)
{
	struct damon_young_walk_private arg = {
		.page_sz = page_sz,
		.young = false,
	};

	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_young_ops, &arg);
	mmap_read_unlock(mm);
	return arg.young;
}

/*
 * Check whether the region was accessed after the last preparation
 *
 * mm	'mm_struct' for the given virtual address space
 * r	the region to be checked
 */
static void __damon_va_check_access(struct damon_ctx *ctx,
				struct mm_struct *mm, struct damon_region *r)
{
	static struct mm_struct *last_mm;
	static unsigned long last_addr;
	static unsigned long last_page_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (mm == last_mm && (ALIGN_DOWN(last_addr, last_page_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_page_sz))) {
		if (last_accessed)
			r->nr_accesses++;
		return;
	}

	last_accessed = damon_va_young(mm, r->sampling_addr, &last_page_sz);
	if (last_accessed)
		r->nr_accesses++;

	last_mm = mm;
	last_addr = r->sampling_addr;
}

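/*
 * Check the access status of every region of every target, updating each
 * region's 'nr_accesses' counter.  Returns the maximum 'nr_accesses' among
 * the checked regions.
 */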
static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;
		damon_for_each_region(r, t) {
			__damon_va_check_access(ctx, mm, r);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
		mmput(mm);
	}

	return max_nr_accesses;
}

/*
 * Functions for the target validity check and cleanup
 */

bool damon_va_target_valid(void *target)
{
	struct damon_target *t = target;
	struct task_struct *task;

	task = damon_get_task_struct(t);
	if (task) {
		put_task_struct(task);
		return true;
	}

	return false;
}

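/*
 * Apply the given madvise() 'behavior' to the address range of the region
 * 'r' of the given target.  Returns the number of bytes the advice was
 * applied to, or 0 on failure.  When the kernel is built without
 * CONFIG_ADVISE_SYSCALLS, this is a no-op that always reports 0 bytes.
 */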
#ifndef CONFIG_ADVISE_SYSCALLS
static unsigned long damos_madvise(struct damon_target *target,
			struct damon_region *r, int behavior)
{
	return 0;
}
#else
static unsigned long damos_madvise(struct damon_target *target,
			struct damon_region *r, int behavior)
{
	struct mm_struct *mm;
	unsigned long start = PAGE_ALIGN(r->ar.start);
	unsigned long len = PAGE_ALIGN(r->ar.end - r->ar.start);
	unsigned long applied;

	mm = damon_get_mm(target);
	if (!mm)
		return 0;

	applied = do_madvise(mm, start, len, behavior) ? 0 : len;
	mmput(mm);

	return applied;
}
#endif /* CONFIG_ADVISE_SYSCALLS */

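/*
 * Translate the DAMOS action of the scheme into the corresponding madvise()
 * behavior and apply it to the given region.  DAMOS_STAT and unknown actions
 * are no-ops.  Returns the number of bytes the action was applied to.
 */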
static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	int madv_action;

	switch (scheme->action) {
	case DAMOS_WILLNEED:
		madv_action = MADV_WILLNEED;
		break;
	case DAMOS_COLD:
		madv_action = MADV_COLD;
		break;
	case DAMOS_PAGEOUT:
		madv_action = MADV_PAGEOUT;
		break;
	case DAMOS_HUGEPAGE:
		madv_action = MADV_HUGEPAGE;
		break;
	case DAMOS_NOHUGEPAGE:
		madv_action = MADV_NOHUGEPAGE;
		break;
	case DAMOS_STAT:
		return 0;
	default:
		return 0;
	}

	return damos_madvise(t, r, madv_action);
}

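/*
 * Return the priority score of the region under the scheme.  Only
 * DAMOS_PAGEOUT currently has a dedicated scoring function
 * (damon_pageout_score()); every other action gets the default
 * DAMOS_MAX_SCORE.
 */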
static int damon_va_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pageout_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

void damon_va_set_primitives(struct damon_ctx *ctx)
{
	ctx->primitive.init = damon_va_init;
	ctx->primitive.update = damon_va_update;
	ctx->primitive.prepare_access_checks = damon_va_prepare_access_checks;
	ctx->primitive.check_accesses = damon_va_check_accesses;
	ctx->primitive.reset_aggregated = NULL;
	ctx->primitive.target_valid = damon_va_target_valid;
	ctx->primitive.cleanup = NULL;
	ctx->primitive.apply_scheme = damon_va_apply_scheme;
	ctx->primitive.get_scheme_score = damon_va_scheme_score;
}
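
/*
 * Minimal usage sketch (illustrative only; assumes the DAMON core API of
 * this tree, i.e. damon_new_ctx(), damon_set_targets() and damon_start(),
 * and a caller-provided 'pid_nr' of the process to monitor):
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *	unsigned long pid_id = (unsigned long)find_get_pid(pid_nr);
 *
 *	damon_va_set_primitives(ctx);
 *	damon_set_targets(ctx, &pid_id, 1);
 *	damon_start(&ctx, 1);
 */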

#include "vaddr-test.h"