/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/atomic.h>
#include <linux/huge_mm.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/userfaultfd_k.h>
#include <linux/swapops.h>

/**
 * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
 * @folio: The folio to test.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the folio is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 *
 * Return: An integer (not a boolean!) used to sort a folio onto the
 * right LRU list and to account folios correctly.
 * 1 if @folio is a regular filesystem backed page cache folio
 * or a lazily freed anonymous folio (e.g. via MADV_FREE).
 * 0 if @folio is a normal anonymous folio, a tmpfs folio or any other
 * RAM- or swap-backed folio.
 */
static inline int folio_is_file_lru(struct folio *folio)
{
	return !folio_test_swapbacked(folio);
}
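
/*
 * Illustrative sketch (not part of this header): a lazily freed anonymous
 * folio (MADV_FREE) has PG_swapbacked cleared, after which
 * folio_is_file_lru() returns 1 and the folio is aged and reclaimed on the
 * file LRU, where a clean copy can be dropped without swap I/O:
 *
 *	folio_clear_swapbacked(folio);		// done by MADV_FREE handling
 *	VM_WARN_ON_ONCE(!folio_is_file_lru(folio));
 */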

static inline int page_is_file_lru(struct page *page)
{
	return folio_is_file_lru(page_folio(page));
}

static __always_inline void __update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	lockdep_assert_held(&lruvec->lru_lock);
	WARN_ON_ONCE(nr_pages != (int)nr_pages);

	__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
	__mod_zone_page_state(&pgdat->node_zones[zid],
				NR_ZONE_LRU_BASE + lru, nr_pages);
}

static __always_inline void update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}

/**
 * __folio_clear_lru_flags - Clear page lru flags before releasing a page.
 * @folio: The folio that was on lru and now has a zero reference.
 */
static __always_inline void __folio_clear_lru_flags(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);

	__folio_clear_lru(folio);

	/* this shouldn't happen, so leave the flags to bad_page() */
	if (folio_test_active(folio) && folio_test_unevictable(folio))
		return;

	__folio_clear_active(folio);
	__folio_clear_unevictable(folio);
}

/**
 * folio_lru_list - Which LRU list should a folio be on?
 * @folio: The folio to test.
 *
 * Return: The LRU list a folio should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list folio_lru_list(struct folio *folio)
{
	enum lru_list lru;

	VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);

	if (folio_test_unevictable(folio))
		return LRU_UNEVICTABLE;

	lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
#ifdef CONFIG_MEM_PURGEABLE
	if (folio_test_purgeable(folio))
		lru = LRU_INACTIVE_PURGEABLE;
#endif
	if (folio_test_active(folio))
		lru += LRU_ACTIVE;

	return lru;
}
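
/*
 * Illustrative sketch: folio_lru_list() relies on the layout of enum
 * lru_list, where each active list immediately follows its inactive
 * counterpart, so adding LRU_ACTIVE converts one into the other:
 *
 *	LRU_INACTIVE_ANON + LRU_ACTIVE == LRU_ACTIVE_ANON
 *	LRU_INACTIVE_FILE + LRU_ACTIVE == LRU_ACTIVE_FILE
 *
 * e.g. an active, swap-backed folio maps to LRU_ACTIVE_ANON.
 */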

#ifdef CONFIG_LRU_GEN

#ifdef CONFIG_LRU_GEN_ENABLED
static inline bool lru_gen_enabled(void)
{
	DECLARE_STATIC_KEY_TRUE(lru_gen_caps[NR_LRU_GEN_CAPS]);

	return static_branch_likely(&lru_gen_caps[LRU_GEN_CORE]);
}
#else
static inline bool lru_gen_enabled(void)
{
	DECLARE_STATIC_KEY_FALSE(lru_gen_caps[NR_LRU_GEN_CAPS]);

	return static_branch_unlikely(&lru_gen_caps[LRU_GEN_CORE]);
}
#endif

static inline bool lru_gen_in_fault(void)
{
	return current->in_lru_fault;
}

static inline int lru_gen_from_seq(unsigned long seq)
{
	return seq % MAX_NR_GENS;
}

static inline int lru_hist_from_seq(unsigned long seq)
{
	return seq % NR_HIST_GENS;
}

static inline int lru_tier_from_refs(int refs)
{
	VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH));

	/* see the comment in folio_lru_refs() */
	return order_base_2(refs + 1);
}
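
/*
 * Illustrative sketch: with order_base_2(refs + 1), accesses map onto tiers
 * roughly logarithmically, e.g.:
 *
 *	refs 0    -> tier 0
 *	refs 1    -> tier 1
 *	refs 2-3  -> tier 2
 *	refs 4-7  -> tier 3
 *
 * so tier n (n > 0) holds folios with about 2^(n-1) to 2^n - 1 extra accesses.
 */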

static inline int folio_lru_refs(struct folio *folio)
{
	unsigned long flags = READ_ONCE(folio->flags);
	bool workingset = flags & BIT(PG_workingset);

	/*
	 * Return the number of accesses beyond PG_referenced, i.e., N-1 if the
	 * total number of accesses is N>1, since N=0,1 both map to the first
	 * tier. lru_tier_from_refs() will account for this off-by-one. Also see
	 * the comment on MAX_NR_TIERS.
	 */
	return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + workingset;
}

static inline int folio_lru_gen(struct folio *folio)
{
	unsigned long flags = READ_ONCE(folio->flags);

	return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}
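
/*
 * Illustrative note: the generation number is stored in folio->flags with a
 * +1 bias (see lru_gen_add_folio()), so a zero field decodes to -1 here,
 * meaning the folio is not on a multi-gen LRU list:
 *
 *	set_mask_bits(&folio->flags, LRU_GEN_MASK, (gen + 1UL) << LRU_GEN_PGOFF);
 *	VM_WARN_ON_ONCE(folio_lru_gen(folio) != gen);
 */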

static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
{
	unsigned long max_seq = lruvec->lrugen.max_seq;

	VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);

	/* see the comment on MIN_NR_GENS */
	return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1);
}

static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *folio,
				       int old_gen, int new_gen)
{
	int type = folio_is_file_lru(folio);
	int zone = folio_zonenum(folio);
	int delta = folio_nr_pages(folio);
	enum lru_list lru = type * LRU_INACTIVE_FILE;
	struct lru_gen_folio *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(old_gen == -1 && new_gen == -1);

	if (old_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone],
			   lrugen->nr_pages[old_gen][type][zone] - delta);
	if (new_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone],
			   lrugen->nr_pages[new_gen][type][zone] + delta);

	/* addition */
	if (old_gen < 0) {
		if (lru_gen_is_active(lruvec, new_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, delta);
		return;
	}

	/* deletion */
	if (new_gen < 0) {
		if (lru_gen_is_active(lruvec, old_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, -delta);
		return;
	}

	/* promotion */
	if (!lru_gen_is_active(lruvec, old_gen) && lru_gen_is_active(lruvec, new_gen)) {
		__update_lru_size(lruvec, lru, zone, -delta);
		__update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta);
	}

	/* demotion requires isolation, e.g., lru_deactivate_fn() */
	VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
}
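
/*
 * Illustrative sketch: promoting a 4-page file folio from an older
 * generation to one of the two youngest shifts its pages from the
 * "inactive" to the "active" file counters, so the classic
 * inactive/active statistics stay meaningful under MGLRU:
 *
 *	// old_gen is not among the two youngest generations, new_gen is
 *	lru_gen_update_size(lruvec, folio, old_gen, new_gen);
 *	// NR_INACTIVE_FILE -= 4, NR_ACTIVE_FILE += 4
 */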

static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	unsigned long seq;
	unsigned long flags;
	int gen = folio_lru_gen(folio);
	int type = folio_is_file_lru(folio);
	int zone = folio_zonenum(folio);
	struct lru_gen_folio *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE_FOLIO(gen != -1, folio);

	if (folio_test_unevictable(folio) || !lrugen->enabled)
		return false;

	/*
	 * There are four common cases for this page:
	 * 1. If it's hot, i.e., freshly faulted in, add it to the youngest
	 *    generation, and it's protected over the rest below.
	 * 2. If it can't be evicted immediately, i.e., a dirty page pending
	 *    writeback, add it to the second youngest generation.
	 * 3. If it should be evicted first, e.g., cold and clean from
	 *    folio_rotate_reclaimable(), add it to the oldest generation.
	 * 4. Everything else falls between 2 & 3 above and is added to the
	 *    second oldest generation if it's considered inactive, or the
	 *    oldest generation otherwise. See lru_gen_is_active().
	 */
	if (folio_test_active(folio))
		seq = lrugen->max_seq;
	else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) ||
		 (folio_test_reclaim(folio) &&
		  (folio_test_dirty(folio) || folio_test_writeback(folio))))
		seq = lrugen->max_seq - 1;
	else if (reclaiming || lrugen->min_seq[type] + MIN_NR_GENS >= lrugen->max_seq)
		seq = lrugen->min_seq[type];
	else
		seq = lrugen->min_seq[type] + 1;

	gen = lru_gen_from_seq(seq);
	flags = (gen + 1UL) << LRU_GEN_PGOFF;
	/* see the comment on MIN_NR_GENS about PG_active */
	set_mask_bits(&folio->flags, LRU_GEN_MASK | BIT(PG_active), flags);

	lru_gen_update_size(lruvec, folio, -1, gen);
	/* for folio_rotate_reclaimable() */
	if (reclaiming)
		list_add_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
	else
		list_add(&folio->lru, &lrugen->folios[gen][type][zone]);

	return true;
}
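
/*
 * Illustrative sketch: assuming MAX_NR_GENS == 4 and lrugen->max_seq == 7, a
 * folio with PG_active set is added to gen = lru_gen_from_seq(7) == 3, i.e.
 * the youngest generation, and PG_active is cleared by the same
 * set_mask_bits() that records gen + 1 in folio->flags.
 */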

static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	unsigned long flags;
	int gen = folio_lru_gen(folio);

	if (gen < 0)
		return false;

	VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
	VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);

	/* for folio_migrate_flags() */
	flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0;
	flags = set_mask_bits(&folio->flags, LRU_GEN_MASK, flags);
	gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;

	lru_gen_update_size(lruvec, folio, gen, -1);
	list_del(&folio->lru);

	return true;
}

#else /* !CONFIG_LRU_GEN */

static inline bool lru_gen_enabled(void)
{
	return false;
}

static inline bool lru_gen_in_fault(void)
{
	return false;
}

static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	return false;
}

static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	return false;
}

#endif /* CONFIG_LRU_GEN */

static __always_inline
void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_add_folio(lruvec, folio, false))
		return;

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	if (lru != LRU_UNEVICTABLE)
		list_add(&folio->lru, &lruvec->lists[lru]);
}
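
/*
 * Illustrative sketch (a typical caller pattern, not part of this header):
 * folios are added under the lruvec lock with PG_lru set, e.g. roughly what
 * the LRU batching code in mm/swap.c does:
 *
 *	spin_lock_irq(&lruvec->lru_lock);
 *	folio_set_lru(folio);
 *	lruvec_add_folio(lruvec, folio);
 *	spin_unlock_irq(&lruvec->lru_lock);
 */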

static __always_inline
void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_add_folio(lruvec, folio, true))
		return;

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	/* This is not expected to be used on LRU_UNEVICTABLE */
	list_add_tail(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline
void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_del_folio(lruvec, folio, false))
		return;

	if (lru != LRU_UNEVICTABLE)
		list_del(&folio->lru);
	update_lru_size(lruvec, lru, folio_zonenum(folio),
			-folio_nr_pages(folio));
}
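
/*
 * Illustrative sketch (not part of this header): the usual isolation
 * pattern pairs lruvec_del_folio() with clearing PG_lru and taking a
 * reference, all under the lruvec lock, e.g. roughly what
 * folio_isolate_lru() does:
 *
 *	if (folio_test_clear_lru(folio)) {
 *		folio_get(folio);
 *		spin_lock_irq(&lruvec->lru_lock);
 *		lruvec_del_folio(lruvec, folio);
 *		spin_unlock_irq(&lruvec->lru_lock);
 *	}
 */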

#ifdef CONFIG_ANON_VMA_NAME
/*
 * mmap_lock should be read-locked when calling anon_vma_name(). The caller
 * should either keep holding the lock while using the returned pointer, or
 * raise the anon_vma_name refcount before releasing the lock.
 */
extern struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma);
extern struct anon_vma_name *anon_vma_name_alloc(const char *name);
extern void anon_vma_name_free(struct kref *kref);

/* mmap_lock should be read-locked */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_get(&anon_name->kref);
}

static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_put(&anon_name->kref, anon_vma_name_free);
}

static inline
struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
{
	/* Prevent anon_name refcount saturation early on */
	if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
		anon_vma_name_get(anon_name);
		return anon_name;
	}
	return anon_vma_name_alloc(anon_name->name);
}

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma)
{
	struct anon_vma_name *anon_name = anon_vma_name(orig_vma);

	if (anon_name)
		new_vma->anon_name = anon_vma_name_reuse(anon_name);
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
	/*
	 * Not using anon_vma_name because it generates a warning if mmap_lock
	 * is not held, which might be the case here.
	 */
	anon_vma_name_put(vma->anon_name);
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	if (anon_name1 == anon_name2)
		return true;

	return anon_name1 && anon_name2 &&
	       !strcmp(anon_name1->name, anon_name2->name);
}
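
/*
 * Illustrative sketch (not part of this header): the name attached via
 * prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...) is refcounted, so a typical
 * consumer under mmap_lock looks roughly like:
 *
 *	mmap_read_lock(mm);
 *	anon_name = anon_vma_name(vma);
 *	anon_vma_name_get(anon_name);		// pin it across the unlock
 *	mmap_read_unlock(mm);
 *	if (anon_name)
 *		pr_info("vma named %s\n", anon_name->name);
 *	anon_vma_name_put(anon_name);
 */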

#else /* CONFIG_ANON_VMA_NAME */
static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
{
	return NULL;
}

static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma) {}
static inline void free_anon_vma_name(struct vm_area_struct *vma) {}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

#endif /* CONFIG_ANON_VMA_NAME */

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);
	/*
	 * The only time this value is relevant is when there are indeed pages
	 * to flush. And we'll only flush pages after changing them, which
	 * requires the PTL.
	 *
	 * So the ordering here is:
	 *
	 *	atomic_inc(&mm->tlb_flush_pending);
	 *	spin_lock(&ptl);
	 *	...
	 *	set_pte_at();
	 *	spin_unlock(&ptl);
	 *
	 *	spin_lock(&ptl);
	 *	mm_tlb_flush_pending();
	 *	....
	 *	spin_unlock(&ptl);
	 *
	 *	flush_tlb_range();
	 *	atomic_dec(&mm->tlb_flush_pending);
	 *
	 * Where the increment is constrained by the PTL unlock, it thus
	 * ensures that the increment is visible if the PTE modification is
	 * visible. After all, if there is no PTE modification, nobody cares
	 * about TLB flushes either.
	 *
	 * This very much relies on users (mm_tlb_flush_pending() and
	 * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
	 * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
	 * locks (PPC) the unlock of one doesn't order against the lock of
	 * another PTL.
	 *
	 * The decrement is ordered by the flush_tlb_range(), such that
	 * mm_tlb_flush_pending() will not return false unless all flushes have
	 * completed.
	 */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * See inc_tlb_flush_pending().
	 *
	 * This cannot be smp_mb__before_atomic() because smp_mb() simply does
	 * not order against TLB invalidate completion, which is what we need.
	 *
	 * Therefore we must rely on tlb_flush_*() to guarantee order.
	 */
	atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * Must be called after having acquired the PTL; orders against that
	 * PTL's release and therefore ensures that if we observe the modified
	 * PTE we must also observe the increment from inc_tlb_flush_pending().
	 *
	 * That is, it only guarantees to return true if there is a flush
	 * pending for _this_ PTL.
	 */
	return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
	/*
	 * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
	 * for which there is a TLB flush pending in order to guarantee
	 * we've seen both that PTE modification and the increment.
	 *
	 * (no requirement on actually still holding the PTL, that is irrelevant)
	 */
	return atomic_read(&mm->tlb_flush_pending) > 1;
}
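
/*
 * Illustrative sketch (not part of this header): a thread that has just
 * inspected a PTE under its PTL can use mm_tlb_flush_pending() to decide
 * whether a stale TLB entry may still exist elsewhere and flush
 * defensively, e.g. roughly:
 *
 *	spin_lock(ptl);
 *	pte = ptep_get(ptep);
 *	if (pte_dirty(pte) || mm_tlb_flush_pending(mm))
 *		flush_tlb_range(vma, addr, addr + PAGE_SIZE);
 *	spin_unlock(ptl);
 */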

#ifdef CONFIG_MMU
/*
 * Computes the pte marker to copy from the given source entry into dst_vma.
 * If no marker should be copied, returns 0.
 * The caller should insert a new pte created with make_pte_marker().
 */
static inline pte_marker copy_pte_marker(
		swp_entry_t entry, struct vm_area_struct *dst_vma)
{
	pte_marker srcm = pte_marker_get(entry);
	/* Always copy error entries. */
	pte_marker dstm = srcm & PTE_MARKER_POISONED;

	/* Only copy PTE markers if UFFD register matches. */
	if ((srcm & PTE_MARKER_UFFD_WP) && userfaultfd_wp(dst_vma))
		dstm |= PTE_MARKER_UFFD_WP;

	return dstm;
}
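
/*
 * Illustrative sketch (not part of this header): fork-time copying of a
 * non-present marker pte would look roughly like:
 *
 *	pte_marker marker = copy_pte_marker(entry, dst_vma);
 *
 *	if (marker)
 *		set_pte_at(dst_mm, addr, dst_pte, make_pte_marker(marker));
 */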
#endif

/*
 * If this pte is wr-protected by uffd-wp in any form, arm the special pte to
 * replace a none pte. NOTE! This should only be called when *pte is already
 * cleared so we will never accidentally replace something valuable. Meanwhile
 * a none pte also means we are not demoting the pte, so a TLB flush is not
 * needed either: when the pte was cleared, the caller should have taken care
 * of the TLB flush.
 *
 * Must be called with pgtable lock held so that no thread will see the none
 * pte, and if they see it, they'll fault and serialize at the pgtable lock.
 *
 * This function is a no-op if PTE_MARKER_UFFD_WP is not enabled.
 */
static inline void
pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *pte, pte_t pteval)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	bool arm_uffd_pte = false;

	/* The current status of the pte should be "cleared" before calling */
	WARN_ON_ONCE(!pte_none(ptep_get(pte)));

	/*
	 * NOTE: userfaultfd_wp_unpopulated() doesn't need this whole
	 * thing, because when zapping either it means it's dropping the
	 * page, or in TTU where the present pte will be quickly replaced
	 * with a swap pte. There's no way of leaking the bit.
	 */
	if (vma_is_anonymous(vma) || !userfaultfd_wp(vma))
		return;

	/* A uffd-wp wr-protected normal pte */
	if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval)))
		arm_uffd_pte = true;

	/*
	 * A uffd-wp wr-protected swap pte. Note: this should even cover an
	 * existing pte marker with uffd-wp bit set.
	 */
	if (unlikely(pte_swp_uffd_wp_any(pteval)))
		arm_uffd_pte = true;

	if (unlikely(arm_uffd_pte))
		set_pte_at(vma->vm_mm, addr, pte,
			   make_pte_marker(PTE_MARKER_UFFD_WP));
#endif
}
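
/*
 * Illustrative sketch (not part of this header): a zap path that has just
 * cleared a pte can preserve the uffd-wp state roughly like this, assuming
 * the pgtable lock is held:
 *
 *	pte_t old = ptep_get_and_clear(vma->vm_mm, addr, pte);
 *
 *	// old was read before the clear; *pte is now none
 *	pte_install_uffd_wp_if_needed(vma, addr, pte, old);
 */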

static inline bool vma_has_recency(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
		return false;

	if (vma->vm_file && (vma->vm_file->f_mode & FMODE_NOREUSE))
		return false;

	return true;
}
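
/*
 * Illustrative sketch: madvise(MADV_SEQUENTIAL) sets VM_SEQ_READ on the
 * VMA, so vma_has_recency() returns false and accesses through such a
 * mapping are not treated as a sign of recency (e.g. they don't promote
 * folios), which is what a one-pass sequential reader wants.
 */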

#endif