/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/atomic.h>
#include <linux/huge_mm.h>
#include <linux/mm_types.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/userfaultfd_k.h>
#include <linux/swapops.h>

#undef CREATE_TRACE_POINTS
#include <trace/hooks/mm.h>

/**
 * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
 * @folio: The folio to test.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the folio is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 *
 * Return: An integer (not a boolean!) used to sort a folio onto the
 * right LRU list and to account folios correctly.
 * 1 if @folio is a regular filesystem backed page cache folio
 * or a lazily freed anonymous folio (e.g. via MADV_FREE).
 * 0 if @folio is a normal anonymous folio, a tmpfs folio or otherwise
 * a RAM- or swap-backed folio.
 */
static inline int folio_is_file_lru(struct folio *folio)
{
	return !folio_test_swapbacked(folio);
}

static inline int page_is_file_lru(struct page *page)
{
	return folio_is_file_lru(page_folio(page));
}
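
/*
 * Illustrative sketch (not part of this header): since the return value is
 * 0 for anon and 1 for file, callers can use it directly as an array
 * index; the counter array here is hypothetical:
 *
 *	unsigned long nr_scanned[ANON_AND_FILE];
 *
 *	nr_scanned[folio_is_file_lru(folio)] += folio_nr_pages(folio);
 */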

static __always_inline void __update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	lockdep_assert_held(&lruvec->lru_lock);
	WARN_ON_ONCE(nr_pages != (int)nr_pages);

	__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
	__mod_zone_page_state(&pgdat->node_zones[zid],
				NR_ZONE_LRU_BASE + lru, nr_pages);
}

static __always_inline void update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}
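
/*
 * Usage sketch (illustrative): callers pass a signed delta, positive when
 * adding folios to @lru and negative when removing them, e.g.:
 *
 *	update_lru_size(lruvec, lru, folio_zonenum(folio),
 *			folio_nr_pages(folio));		// addition
 *	update_lru_size(lruvec, lru, folio_zonenum(folio),
 *			-folio_nr_pages(folio));	// removal
 *
 * lruvec->lru_lock must be held; __update_lru_size() asserts this via
 * lockdep.
 */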

/**
 * __folio_clear_lru_flags - Clear page lru flags before releasing a page.
 * @folio: The folio that was on lru and now has a zero reference.
 */
static __always_inline void __folio_clear_lru_flags(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);

	__folio_clear_lru(folio);

	/* this shouldn't happen, so leave the flags to bad_page() */
	if (folio_test_active(folio) && folio_test_unevictable(folio))
		return;

	__folio_clear_active(folio);
	__folio_clear_unevictable(folio);
}

/**
 * folio_lru_list - Which LRU list should a folio be on?
 * @folio: The folio to test.
 *
 * Return: The LRU list a folio should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list folio_lru_list(struct folio *folio)
{
	enum lru_list lru;

	VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);

	if (folio_test_unevictable(folio))
		return LRU_UNEVICTABLE;

	lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
	if (folio_test_active(folio))
		lru += LRU_ACTIVE;

	return lru;
}
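
/*
 * For reference (assuming the usual enum lru_list layout from
 * <linux/mmzone.h>), the flag combinations map onto lists as follows:
 *
 *	unevictable		-> LRU_UNEVICTABLE
 *	anon, !PG_active	-> LRU_INACTIVE_ANON
 *	anon,  PG_active	-> LRU_ACTIVE_ANON
 *	file, !PG_active	-> LRU_INACTIVE_FILE
 *	file,  PG_active	-> LRU_ACTIVE_FILE
 *
 * The "lru += LRU_ACTIVE" step relies on each active list sitting exactly
 * LRU_ACTIVE entries after its inactive counterpart in enum lru_list.
 */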

#ifdef CONFIG_LRU_GEN

#ifdef CONFIG_LRU_GEN_ENABLED
static inline bool lru_gen_enabled(void)
{
	DECLARE_STATIC_KEY_TRUE(lru_gen_caps[NR_LRU_GEN_CAPS]);

	return static_branch_likely(&lru_gen_caps[LRU_GEN_CORE]);
}
#else
static inline bool lru_gen_enabled(void)
{
	DECLARE_STATIC_KEY_FALSE(lru_gen_caps[NR_LRU_GEN_CAPS]);

	return static_branch_unlikely(&lru_gen_caps[LRU_GEN_CORE]);
}
#endif

static inline bool lru_gen_in_fault(void)
{
	return current->in_lru_fault;
}

static inline int lru_gen_from_seq(unsigned long seq)
{
	return seq % MAX_NR_GENS;
}

static inline int lru_hist_from_seq(unsigned long seq)
{
	return seq % NR_HIST_GENS;
}
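
/*
 * Worked example (illustrative): generations are sequence counters folded
 * into a small ring. With the default MAX_NR_GENS of 4, seq values
 * 5, 6, 7, 8 map to gens 1, 2, 3, 0, so a gen slot is only reused once
 * the sequence has advanced by a full MAX_NR_GENS.
 */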

static inline int lru_tier_from_refs(int refs, bool workingset)
{
	VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH));

	/* see the comment on MAX_NR_TIERS */
	return workingset ? MAX_NR_TIERS - 1 : order_base_2(refs);
}
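
/*
 * Sketch of the mapping (illustrative): order_base_2() buckets the access
 * count logarithmically, so low counts are distinguished precisely while
 * high counts saturate slowly:
 *
 *	refs:	0  1  2  3  4  ...
 *	tier:	0  0  1  2  2  ...
 *
 * A folio with PG_workingset set is pinned to the top tier
 * (MAX_NR_TIERS - 1) regardless of its refs count.
 */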

static inline int folio_lru_refs(struct folio *folio)
{
	unsigned long flags = READ_ONCE(folio->flags);

	if (!(flags & BIT(PG_referenced)))
		return 0;
	/*
	 * Return the total number of accesses including PG_referenced. Also see
	 * the comment on LRU_REFS_FLAGS.
	 */
	return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + 1;
}

static inline int folio_lru_gen(struct folio *folio)
{
	unsigned long flags = READ_ONCE(folio->flags);

	return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}
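
/*
 * Note the bias in both decoders above: the gen field in folio->flags
 * stores gen + 1 (see lru_gen_add_folio() below), so a field of zero
 * decodes to -1 here, meaning "not on a multi-gen LRU list". Likewise
 * folio_lru_refs() counts the first access via PG_referenced and the
 * remainder via the LRU_REFS field, hence the + 1.
 */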

static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
{
	unsigned long max_seq = lruvec->lrugen.max_seq;

	VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);

	/* see the comment on MIN_NR_GENS */
	return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1);
}

static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *folio,
				       int old_gen, int new_gen)
{
	int type = folio_is_file_lru(folio);
	int zone = folio_zonenum(folio);
	int delta = folio_nr_pages(folio);
	enum lru_list lru = type * LRU_INACTIVE_FILE;
	struct lru_gen_folio *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(old_gen == -1 && new_gen == -1);

	if (old_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone],
			   lrugen->nr_pages[old_gen][type][zone] - delta);
	if (new_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone],
			   lrugen->nr_pages[new_gen][type][zone] + delta);

	/* addition */
	if (old_gen < 0) {
		if (lru_gen_is_active(lruvec, new_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, delta);
		return;
	}

	/* deletion */
	if (new_gen < 0) {
		if (lru_gen_is_active(lruvec, old_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, -delta);
		return;
	}

	/* promotion */
	if (!lru_gen_is_active(lruvec, old_gen) && lru_gen_is_active(lruvec, new_gen)) {
		__update_lru_size(lruvec, lru, zone, -delta);
		__update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta);
	}

	/* demotion requires isolation, e.g., lru_deactivate_fn() */
	VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
}
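
/*
 * Summary of the (old_gen, new_gen) transitions handled above
 * (illustrative):
 *
 *	(-1, gen)	addition: the folio joins gen
 *	(gen, -1)	deletion: the folio leaves gen
 *	(old, young)	promotion: accounted as inactive -> active
 *	(young, old)	demotion: must not happen here; it requires the
 *			folio to be isolated first, e.g. lru_deactivate_fn()
 *
 * The classic active/inactive counters are kept in sync so that interfaces
 * such as /proc/meminfo stay meaningful while MGLRU is in use.
 */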

static inline unsigned long lru_gen_folio_seq(struct lruvec *lruvec, struct folio *folio,
					      bool reclaiming)
{
	int gen;
	int type = folio_is_file_lru(folio);
	struct lru_gen_folio *lrugen = &lruvec->lrugen;

	/*
	 * +-----------------------------------+-----------------------------------+
	 * | Accessed through page tables and  | Accessed through file descriptors |
	 * | promoted by folio_update_gen()    | and protected by folio_inc_gen()  |
	 * +-----------------------------------+-----------------------------------+
	 * | PG_active (set while isolated)    |                                   |
	 * +-----------------+-----------------+-----------------+-----------------+
	 * | PG_workingset   | PG_referenced   | PG_workingset   | LRU_REFS_FLAGS  |
	 * +-----------------------------------+-----------------------------------+
	 * |<---------- MIN_NR_GENS ---------->|                                   |
	 * |<---------------------------- MAX_NR_GENS ---------------------------->|
	 */
	if (folio_test_active(folio))
		gen = MIN_NR_GENS - folio_test_workingset(folio);
	else if (reclaiming)
		gen = MAX_NR_GENS;
	else if ((!folio_is_file_lru(folio) && !folio_test_swapcache(folio)) ||
		 (folio_test_reclaim(folio) &&
		  (folio_test_dirty(folio) || folio_test_writeback(folio))))
		gen = MIN_NR_GENS;
	else
		gen = MAX_NR_GENS - folio_test_workingset(folio);

	return max(READ_ONCE(lrugen->max_seq) - gen + 1, READ_ONCE(lrugen->min_seq[type]));
}
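
/*
 * Worked example (illustrative, with the default MIN_NR_GENS == 2 and
 * MAX_NR_GENS == 4): if max_seq is 10, an active folio with PG_workingset
 * gets seq 10 (the youngest), a plain active folio seq 9, an ordinary
 * file folio without PG_workingset seq 7 (the oldest), and a folio added
 * while reclaiming also seq 7; the result is always clamped upward to
 * min_seq[type].
 */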

static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	unsigned long seq;
	unsigned long flags;
	int gen = folio_lru_gen(folio);
	int type = folio_is_file_lru(folio);
	int zone = folio_zonenum(folio);
	struct lru_gen_folio *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE_FOLIO(gen != -1, folio);

	if (folio_test_unevictable(folio) || !lrugen->enabled)
		return false;

	seq = lru_gen_folio_seq(lruvec, folio, reclaiming);
	gen = lru_gen_from_seq(seq);
	flags = (gen + 1UL) << LRU_GEN_PGOFF;
	/* see the comment on MIN_NR_GENS about PG_active */
	set_mask_bits(&folio->flags, LRU_GEN_MASK | BIT(PG_active), flags);

	lru_gen_update_size(lruvec, folio, -1, gen);
	/* for folio_rotate_reclaimable() */
	if (reclaiming)
		list_add_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
	else
		list_add(&folio->lru, &lrugen->folios[gen][type][zone]);

	return true;
}

static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	unsigned long flags;
	int gen = folio_lru_gen(folio);

	if (gen < 0)
		return false;

	VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
	VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);

	/* for folio_migrate_flags() */
	flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0;
	flags = set_mask_bits(&folio->flags, LRU_GEN_MASK, flags);
	gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;

	lru_gen_update_size(lruvec, folio, gen, -1);
	list_del(&folio->lru);

	return true;
}
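
/*
 * Pairing note: these two helpers are the MGLRU hooks used by
 * lruvec_add_folio() and lruvec_del_folio() below. lru_gen_add_folio()
 * returns false when MGLRU is disabled for this lruvec or the folio is
 * unevictable; lru_gen_del_folio() returns false when the folio is not on
 * a multi-gen list (a zero gen field). Either way the caller falls back
 * to the classic active/inactive lists.
 */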

static inline void folio_migrate_refs(struct folio *new, struct folio *old)
{
	unsigned long refs = READ_ONCE(old->flags) & LRU_REFS_MASK;

	set_mask_bits(&new->flags, LRU_REFS_MASK, refs);
}
#else /* !CONFIG_LRU_GEN */

static inline bool lru_gen_enabled(void)
{
	return false;
}

static inline bool lru_gen_in_fault(void)
{
	return false;
}

static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	return false;
}

static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	return false;
}

static inline void folio_migrate_refs(struct folio *new, struct folio *old)
{
}

#endif /* CONFIG_LRU_GEN */

static __always_inline
void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);
	bool skip = false;

	trace_android_vh_lruvec_add_folio(lruvec, folio, lru, false, &skip);
	if (skip)
		return;

	if (lru_gen_add_folio(lruvec, folio, false))
		return;

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	if (lru != LRU_UNEVICTABLE)
		list_add(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline
void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);
	bool skip = false;

	trace_android_vh_lruvec_add_folio(lruvec, folio, lru, true, &skip);
	if (skip)
		return;

	if (lru_gen_add_folio(lruvec, folio, true))
		return;

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	/* This is not expected to be used on LRU_UNEVICTABLE */
	list_add_tail(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline
void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);
	bool skip = false;

	trace_android_vh_lruvec_del_folio(lruvec, folio, lru, &skip);
	if (skip)
		return;

	if (lru_gen_del_folio(lruvec, folio, false))
		return;

	if (lru != LRU_UNEVICTABLE)
		list_del(&folio->lru);
	update_lru_size(lruvec, lru, folio_zonenum(folio),
			-folio_nr_pages(folio));
}
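
/*
 * Usage sketch (illustrative, simplified from callers such as the folio
 * activation path in mm/swap.c): moving a folio between lists is a
 * del/modify/add sequence under lruvec->lru_lock:
 *
 *	spin_lock_irq(&lruvec->lru_lock);
 *	lruvec_del_folio(lruvec, folio);
 *	folio_set_active(folio);
 *	lruvec_add_folio(lruvec, folio);
 *	spin_unlock_irq(&lruvec->lru_lock);
 */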

#ifdef CONFIG_ANON_VMA_NAME
/* mmap_lock should be read-locked */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_get(&anon_name->kref);
}

static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_put(&anon_name->kref, anon_vma_name_free);
}

static inline
struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
{
	/* Prevent anon_name refcount saturation early on */
	if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
		anon_vma_name_get(anon_name);
		return anon_name;
	}
	return anon_vma_name_alloc(anon_name->name);
}

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma)
{
	struct anon_vma_name *anon_name = anon_vma_name(orig_vma);

	if (anon_name)
		new_vma->anon_name = anon_vma_name_reuse(anon_name);
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
	/*
	 * Not using anon_vma_name because it generates a warning if mmap_lock
	 * is not held, which might be the case here.
	 */
	anon_vma_name_put(vma->anon_name);
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	if (anon_name1 == anon_name2)
		return true;

	return anon_name1 && anon_name2 &&
	       !strcmp(anon_name1->name, anon_name2->name);
}
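
/*
 * Lifecycle sketch (illustrative): when a named VMA is split or copied,
 * the name is shared by reference and each copy drops its reference on
 * teardown:
 *
 *	dup_anon_vma_name(orig_vma, new_vma);	// takes a reference
 *	...
 *	free_anon_vma_name(new_vma);		// drops it
 *
 * anon_vma_name_reuse() only falls back to a fresh allocation when the
 * refcount is about to saturate.
 */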

#else /* CONFIG_ANON_VMA_NAME */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma) {}
static inline void free_anon_vma_name(struct vm_area_struct *vma) {}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

#endif /* CONFIG_ANON_VMA_NAME */

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);
	/*
	 * The only time this value is relevant is when there are indeed pages
	 * to flush. And we'll only flush pages after changing them, which
	 * requires the PTL.
	 *
	 * So the ordering here is:
	 *
	 *	atomic_inc(&mm->tlb_flush_pending);
	 *	spin_lock(&ptl);
	 *	...
	 *	set_pte_at();
	 *	spin_unlock(&ptl);
	 *
	 *	spin_lock(&ptl)
	 *	mm_tlb_flush_pending();
	 *	....
	 *	spin_unlock(&ptl);
	 *
	 *	flush_tlb_range();
	 *	atomic_dec(&mm->tlb_flush_pending);
	 *
	 * Because the increment is constrained by the PTL unlock, the
	 * increment is guaranteed to be visible if the PTE modification is
	 * visible. After all, if there is no PTE modification, nobody cares
	 * about TLB flushes either.
	 *
	 * This very much relies on users (mm_tlb_flush_pending() and
	 * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
	 * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
	 * locks (PPC) the unlock of one doesn't order against the lock of
	 * another PTL.
	 *
	 * The decrement is ordered by the flush_tlb_range(), such that
	 * mm_tlb_flush_pending() will not return false unless all flushes
	 * have completed.
	 */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * See inc_tlb_flush_pending().
	 *
	 * This cannot be smp_mb__before_atomic() because smp_mb() simply does
	 * not order against TLB invalidate completion, which is what we need.
	 *
	 * Therefore we must rely on tlb_flush_*() to guarantee order.
	 */
	atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * Must be called after having acquired the PTL; orders against that
	 * PTL's release and therefore ensures that if we observe the modified
	 * PTE we must also observe the increment from inc_tlb_flush_pending().
	 *
	 * That is, it only guarantees to return true if there is a flush
	 * pending for _this_ PTL.
	 */
	return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
	/*
	 * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
	 * for which there is a TLB flush pending in order to guarantee
	 * we've seen both that PTE modification and the increment.
	 *
	 * (no requirement on actually still holding the PTL, that is irrelevant)
	 */
	return atomic_read(&mm->tlb_flush_pending) > 1;
}
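
/*
 * Typical caller pattern (illustrative, following the sketch in
 * inc_tlb_flush_pending() above):
 *
 *	inc_tlb_flush_pending(mm);
 *	// ... clear or modify PTEs under their page table locks ...
 *	flush_tlb_range(vma, start, end);
 *	dec_tlb_flush_pending(mm);
 *
 * A racing thread that has taken one of those PTLs can then use
 * mm_tlb_flush_pending(mm) to detect that stale TLB entries may still
 * exist for PTEs it has just observed.
 */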

#ifdef CONFIG_MMU
/*
 * Computes the pte marker to copy from the given source entry into dst_vma.
 * If no marker should be copied, returns 0.
 * The caller should insert a new pte created with make_pte_marker().
 */
static inline pte_marker copy_pte_marker(
		swp_entry_t entry, struct vm_area_struct *dst_vma)
{
	pte_marker srcm = pte_marker_get(entry);
	/* Always copy error entries. */
	pte_marker dstm = srcm & (PTE_MARKER_POISONED | PTE_MARKER_GUARD);

	/* Only copy PTE markers if UFFD register matches. */
	if ((srcm & PTE_MARKER_UFFD_WP) && userfaultfd_wp(dst_vma))
		dstm |= PTE_MARKER_UFFD_WP;

	return dstm;
}
#endif
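
/*
 * Usage sketch (illustrative, modeled on the fork path in mm/memory.c):
 *
 *	pte_marker marker = copy_pte_marker(entry, dst_vma);
 *
 *	if (marker)
 *		set_pte_at(dst_mm, addr, dst_pte, make_pte_marker(marker));
 *
 * A zero return simply means the destination gets no marker pte.
 */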

/*
 * If this pte is wr-protected by uffd-wp in any form, arm the special pte to
 * replace a none pte. NOTE! This should only be called when *pte is already
 * cleared so we will never accidentally replace something valuable. A none
 * pte also means we are not demoting the pte, so no TLB flush is needed:
 * when the pte was cleared, the caller should have taken care of the flush.
 *
 * Must be called with the pgtable lock held so that no thread will see the
 * none pte, and if they see it, they'll fault and serialize at the pgtable
 * lock.
 *
 * This function is a no-op if PTE_MARKER_UFFD_WP is not enabled.
 */
static inline void
pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *pte, pte_t pteval)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	bool arm_uffd_pte = false;

	/* The current status of the pte should be "cleared" before calling */
	WARN_ON_ONCE(!pte_none(ptep_get(pte)));

	/*
	 * NOTE: userfaultfd_wp_unpopulated() doesn't need this whole
	 * thing, because when zapping either it means it's dropping the
	 * page, or in TTU where the present pte will be quickly replaced
	 * with a swap pte. There's no way of leaking the bit.
	 */
	if (vma_is_anonymous(vma) || !userfaultfd_wp(vma))
		return;

	/* A uffd-wp wr-protected normal pte */
	if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval)))
		arm_uffd_pte = true;

	/*
	 * A uffd-wp wr-protected swap pte. Note: this should even cover an
	 * existing pte marker with uffd-wp bit set.
	 */
	if (unlikely(pte_swp_uffd_wp_any(pteval)))
		arm_uffd_pte = true;

	if (unlikely(arm_uffd_pte))
		set_pte_at(vma->vm_mm, addr, pte,
			   make_pte_marker(PTE_MARKER_UFFD_WP));
#endif
}
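
/*
 * Call-site sketch (illustrative, modeled on the zap path): after clearing
 * a pte, the old value is passed back in so any uffd-wp state can be
 * preserved as a pte marker:
 *
 *	pteval = ptep_get_and_clear(vma->vm_mm, addr, pte);
 *	// ... TLB bookkeeping for the old pte ...
 *	pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);
 */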

static inline bool vma_has_recency(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
		return false;

	if (vma->vm_file && (vma->vm_file->f_mode & FMODE_NOREUSE))
		return false;

	return true;
}
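
/*
 * Note (illustrative): VM_SEQ_READ and VM_RAND_READ are set by
 * madvise(MADV_SEQUENTIAL) and madvise(MADV_RANDOM), and FMODE_NOREUSE by
 * posix_fadvise(POSIX_FADV_NOREUSE); mappings marked this way are treated
 * as having no recency, so references through them do not promote folios
 * during LRU aging.
 */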

#endif