/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
#include <linux/percpu-rwsem.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/interval_tree.h>
#include <linux/android_kabi.h>

struct mmu_notifier_subscriptions;
struct mmu_notifier;
struct mmu_notifier_range;
struct mmu_interval_notifier;

struct mmu_notifier_subscriptions_hdr {
	bool valid;
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
	struct percpu_rw_semaphore_atomic *mmu_notifier_lock;
#endif
};

/**
 * enum mmu_notifier_event - reason for the mmu notifier callback
 * @MMU_NOTIFY_UNMAP: either a munmap() that unmaps the range or a mremap()
 * that moves the range
 *
 * @MMU_NOTIFY_CLEAR: clear page table entry (there are many reasons for this,
 * such as madvise() or replacing a page with another one, ...).
 *
 * @MMU_NOTIFY_PROTECTION_VMA: update is due to a protection change for the
 * range, i.e. using the vma access permission (vm_page_prot) to update the
 * whole range is enough; there is no need to inspect changes to the CPU page
 * table (mprotect() syscall)
 *
 * @MMU_NOTIFY_PROTECTION_PAGE: update is due to a change in the read/write
 * flag for pages in the range, so to mirror those changes the user must
 * inspect the CPU page table (from the end callback).
 *
 * @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still the same page and the
 * same access flags). The user should soft-dirty the page in the end callback
 * to make sure that anyone relying on soft dirtiness catches pages that might
 * be written through non-CPU mappings.
 *
 * @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal
 * that the mm refcount is zero and the range is no longer accessible.
 *
 * @MMU_NOTIFY_MIGRATE: used during migrate_vma_collect() invalidate to signal
 * a device driver to possibly ignore the invalidation if the
 * migrate_pgmap_owner field matches the driver's device private pgmap owner.
 */
enum mmu_notifier_event {
	MMU_NOTIFY_UNMAP = 0,
	MMU_NOTIFY_CLEAR,
	MMU_NOTIFY_PROTECTION_VMA,
	MMU_NOTIFY_PROTECTION_PAGE,
	MMU_NOTIFY_SOFT_DIRTY,
	MMU_NOTIFY_RELEASE,
	MMU_NOTIFY_MIGRATE,
};

#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)

struct mmu_notifier_ops {
	/*
	 * Called either by mmu_notifier_unregister or when the mm is
	 * being destroyed by exit_mmap, always before all pages are
	 * freed. This can run concurrently with other mmu notifier
	 * methods (the ones invoked outside the mm context) and it
	 * should tear down all secondary mmu mappings and freeze the
	 * secondary mmu. If this method isn't implemented you have to
	 * be sure that nothing could possibly write to the pages
	 * through the secondary mmu by the time the last thread with
	 * tsk->mm == mm exits.
	 *
	 * As a side note: the pages freed after ->release returns could
	 * be immediately reallocated by the gart at an alias physical
	 * address with a different cache model, so if ->release isn't
	 * implemented because all _software_ driven memory accesses
	 * through the secondary mmu are terminated by the time the
	 * last thread of this mm quits, you also have to be sure that
	 * speculative _hardware_ operations can't allocate dirty
	 * cachelines in the cpu that could not be snooped and made
	 * coherent with the other read and write operations happening
	 * through the gart alias address, thus leading to memory
	 * corruption.
	 */
	void (*release)(struct mmu_notifier *subscription,
			struct mm_struct *mm);

	/*
	 * clear_flush_young is called after the VM test-and-clears the
	 * young/accessed bitflag in the pte. This way the VM will provide
	 * proper aging for accesses to the page through the secondary MMUs
	 * and not only for the ones through the Linux pte.
	 * Start-end is necessary in case the secondary MMU is mapping the page
	 * at a smaller granularity than the primary MMU.
	 */
	int (*clear_flush_young)(struct mmu_notifier *subscription,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/*
	 * clear_young is a lightweight version of clear_flush_young. Like the
	 * latter, it is supposed to test-and-clear the young/accessed bitflag
	 * in the secondary pte, but it may omit flushing the secondary tlb.
	 */
	int (*clear_young)(struct mmu_notifier *subscription,
			   struct mm_struct *mm,
			   unsigned long start,
			   unsigned long end);

	/*
	 * test_young is called to check the young/accessed bitflag in
	 * the secondary pte. This is used to know if the page is
	 * frequently used without actually clearing the flag or tearing
	 * down the secondary mapping on the page.
	 */
	int (*test_young)(struct mmu_notifier *subscription,
			  struct mm_struct *mm,
			  unsigned long address);

	/*
	 * change_pte is called when a pte's mapping to a page is changed:
	 * for example, when ksm remaps a pte to point to a new shared page.
	 */
	void (*change_pte)(struct mmu_notifier *subscription,
			   struct mm_struct *mm,
			   unsigned long address,
			   pte_t pte);

	/*
	 * invalidate_range_start() and invalidate_range_end() must be
	 * paired and are called only when the mmap_lock and/or the
	 * locks protecting the reverse maps are held. If the subsystem
	 * can't guarantee that no additional references are taken to
	 * the pages in the range, it has to implement the
	 * invalidate_range() notifier to remove any references taken
	 * after invalidate_range_start().
	 *
	 * Invalidation of multiple concurrent ranges may be
	 * optionally permitted by the driver. Either way the
	 * establishment of sptes is forbidden in the range passed to
	 * invalidate_range_start/end for the whole duration of the
	 * invalidate_range_start/end critical section.
	 *
	 * invalidate_range_start() is called when all pages in the
	 * range are still mapped and have at least a refcount of one.
	 *
	 * invalidate_range_end() is called when all pages in the
	 * range have been unmapped and the pages have been freed by
	 * the VM.
	 *
	 * The VM will remove the page table entries and potentially
	 * the page between invalidate_range_start() and
	 * invalidate_range_end(). If the page must not be freed
	 * because of pending I/O or other circumstances then the
	 * invalidate_range_start() callback (or the initial mapping
	 * by the driver) must make sure that the refcount is kept
	 * elevated.
	 *
	 * If the driver increases the refcount when the pages are
	 * initially mapped into an address space then either
	 * invalidate_range_start() or invalidate_range_end() may
	 * decrease the refcount. If the refcount is decreased on
	 * invalidate_range_start() then the VM can free pages as page
	 * table entries are removed. If the refcount is only
	 * dropped on invalidate_range_end() then the driver itself
	 * will drop the last refcount but it must take care to flush
	 * any secondary tlb before doing the final free on the
	 * page. Pages will no longer be referenced by the linux
	 * address space but may still be referenced by sptes until
	 * the last refcount is dropped.
	 *
	 * If the blockable argument is set to false then the callback cannot
	 * sleep and has to return with -EAGAIN if sleeping would be required.
	 * 0 should be returned otherwise. Please note that notifiers that can
	 * fail invalidate_range_start are not allowed to implement
	 * invalidate_range_end, as there is no mechanism for informing the
	 * notifier that its start failed.
	 */
	int (*invalidate_range_start)(struct mmu_notifier *subscription,
				      const struct mmu_notifier_range *range);
	void (*invalidate_range_end)(struct mmu_notifier *subscription,
				     const struct mmu_notifier_range *range);

	/*
	 * invalidate_range() is either called between
	 * invalidate_range_start() and invalidate_range_end() when the
	 * VM has to free pages that were unmapped, but before the
	 * pages are actually freed, or outside of _start()/_end() when
	 * a (remote) TLB flush is necessary.
	 *
	 * If invalidate_range() is used to manage a non-CPU TLB with
	 * shared page-tables, it is not necessary to implement the
	 * invalidate_range_start()/end() notifiers, as
	 * invalidate_range() already catches the points in time when an
	 * external TLB range needs to be flushed. For a more in-depth
	 * discussion on this see Documentation/vm/mmu_notifier.rst
	 *
	 * Note that this function might be called with just a sub-range
	 * of what was passed to invalidate_range_start()/end(), if
	 * called between those functions.
	 */
	void (*invalidate_range)(struct mmu_notifier *subscription,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/*
	 * These callbacks are used with the get/put interface to manage the
	 * lifetime of the mmu_notifier memory. alloc_notifier() returns a new
	 * notifier for use with the mm.
	 *
	 * free_notifier() is only called after the mmu_notifier has been
	 * fully put, calls to any ops callback are prevented and no ops
	 * callbacks are currently running. It is called from a SRCU callback
	 * and cannot sleep.
	 */
	struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
	void (*free_notifier)(struct mmu_notifier *subscription);

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);
};
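
/*
 * Illustrative sketch only (not part of this header): a driver mirroring
 * CPU page tables might wire up the blocking invalidation callback roughly
 * as below. struct my_mirror (with a ->lock mutex and an embedded
 * ->notifier) and my_zap_sptes() are hypothetical driver-side names.
 *
 *	static int my_invalidate_range_start(struct mmu_notifier *subscription,
 *			const struct mmu_notifier_range *range)
 *	{
 *		struct my_mirror *mirror =
 *			container_of(subscription, struct my_mirror, notifier);
 *
 *		if (!mmu_notifier_range_blockable(range))
 *			return -EAGAIN;
 *
 *		mutex_lock(&mirror->lock);
 *		my_zap_sptes(mirror, range->start, range->end);
 *		mutex_unlock(&mirror->lock);
 *		return 0;
 *	}
 *
 * Because this variant can fail with -EAGAIN, it must not be paired with an
 * invalidate_range_end() implementation (see the comment above).
 */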

/*
 * The notifier chains are protected by the mmap_lock and/or the reverse map
 * semaphores. Notifier chains are only changed when all reverse map locks and
 * the mmap_lock are taken.
 *
 * Therefore notifier chains can only be traversed when either
 *
 * 1. mmap_lock is held.
 * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
 * 3. No other concurrent thread can access the list (release).
 */
struct mmu_notifier {
	struct hlist_node hlist;
	const struct mmu_notifier_ops *ops;
	struct mm_struct *mm;
	struct rcu_head rcu;
	unsigned int users;

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
};

/**
 * struct mmu_interval_notifier_ops
 * @invalidate: Upon return the caller must stop using any SPTEs within this
 *              range. This function can sleep. Return false only if sleeping
 *              was required but mmu_notifier_range_blockable(range) is false.
 */
struct mmu_interval_notifier_ops {
	bool (*invalidate)(struct mmu_interval_notifier *interval_sub,
			   const struct mmu_notifier_range *range,
			   unsigned long cur_seq);
};

struct mmu_interval_notifier {
	struct interval_tree_node interval_tree;
	const struct mmu_interval_notifier_ops *ops;
	struct mm_struct *mm;
	struct hlist_node deferred_item;
	unsigned long invalidate_seq;
};

#ifdef CONFIG_MMU_NOTIFIER

#ifdef CONFIG_LOCKDEP
extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
#endif

struct mmu_notifier_range {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;
	unsigned flags;
	enum mmu_notifier_event event;
	void *migrate_pgmap_owner;
};

static inline
struct mmu_notifier_subscriptions_hdr *get_notifier_subscriptions_hdr(
						struct mm_struct *mm)
{
	/*
	 * container_of() can't be used here because mmu_notifier_subscriptions
	 * struct should be kept invisible to mm_struct, otherwise it
	 * introduces KMI CRC breakage. Therefore the callers don't know what
	 * members struct mmu_notifier_subscriptions contains and can't call
	 * container_of(), which requires a member name.
	 *
	 * WARNING: For this typecasting to work, mmu_notifier_subscriptions_hdr
	 * should be the first member of struct mmu_notifier_subscriptions.
	 */
	return (struct mmu_notifier_subscriptions_hdr *)mm->notifier_subscriptions;
}

static inline int mm_has_notifiers(struct mm_struct *mm)
{
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
	return unlikely(get_notifier_subscriptions_hdr(mm)->valid);
#else
	return unlikely(mm->notifier_subscriptions);
#endif
}

struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
					     struct mm_struct *mm);
static inline struct mmu_notifier *
mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
{
	struct mmu_notifier *ret;

	mmap_write_lock(mm);
	ret = mmu_notifier_get_locked(ops, mm);
	mmap_write_unlock(mm);
	return ret;
}
void mmu_notifier_put(struct mmu_notifier *subscription);
void mmu_notifier_synchronize(void);
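
/*
 * Illustrative sketch only: with the get/put interface a driver provides
 * alloc_notifier()/free_notifier() in its ops and obtains one reference per
 * mm; my_ops is a hypothetical ops table. mmu_notifier_get() returns an
 * ERR_PTR() on failure.
 *
 *	struct mmu_notifier *subscription;
 *
 *	subscription = mmu_notifier_get(&my_ops, current->mm);
 *	if (IS_ERR(subscription))
 *		return PTR_ERR(subscription);
 *	...
 *	mmu_notifier_put(subscription);
 */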

extern int mmu_notifier_register(struct mmu_notifier *subscription,
				 struct mm_struct *mm);
extern int __mmu_notifier_register(struct mmu_notifier *subscription,
				   struct mm_struct *mm);
extern void mmu_notifier_unregister(struct mmu_notifier *subscription,
				    struct mm_struct *mm);

unsigned long
mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub);
int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long length,
				 const struct mmu_interval_notifier_ops *ops);
int mmu_interval_notifier_insert_locked(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	unsigned long start, unsigned long length,
	const struct mmu_interval_notifier_ops *ops);
void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub);

/**
 * mmu_interval_set_seq - Save the invalidation sequence
 * @interval_sub: The subscription passed to invalidate
 * @cur_seq: The cur_seq passed to the invalidate() callback
 *
 * This must be called unconditionally from the invalidate callback of a
 * struct mmu_interval_notifier_ops under the same lock that is used to call
 * mmu_interval_read_retry(). It updates the sequence number for later use by
 * mmu_interval_read_retry(). The provided cur_seq will always be odd.
 *
 * If the caller does not call mmu_interval_read_begin() or
 * mmu_interval_read_retry() then this call is not required.
 */
static inline void
mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
		     unsigned long cur_seq)
{
	WRITE_ONCE(interval_sub->invalidate_seq, cur_seq);
}
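
/*
 * Illustrative sketch only: an invalidate() implementation takes the same
 * driver lock that the read side uses with mmu_interval_read_retry() and
 * calls mmu_interval_set_seq() under it. struct my_mirror (with a ->lock
 * mutex and an embedded ->notifier) and my_zap_mapping() are hypothetical
 * driver-side names.
 *
 *	static bool my_invalidate(struct mmu_interval_notifier *interval_sub,
 *			const struct mmu_notifier_range *range,
 *			unsigned long cur_seq)
 *	{
 *		struct my_mirror *mirror =
 *			container_of(interval_sub, struct my_mirror, notifier);
 *
 *		if (mmu_notifier_range_blockable(range))
 *			mutex_lock(&mirror->lock);
 *		else if (!mutex_trylock(&mirror->lock))
 *			return false;
 *
 *		mmu_interval_set_seq(interval_sub, cur_seq);
 *		my_zap_mapping(mirror, range->start, range->end);
 *		mutex_unlock(&mirror->lock);
 *		return true;
 *	}
 */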

/**
 * mmu_interval_read_retry - End a read side critical section against a VA range
 * @interval_sub: The subscription
 * @seq: The return of the paired mmu_interval_read_begin()
 *
 * This MUST be called under a user provided lock that is also held
 * unconditionally by op->invalidate() when it calls mmu_interval_set_seq().
 *
 * Each call should be paired with a single mmu_interval_read_begin() and
 * should be used to conclude the read side.
 *
 * Returns true if an invalidation collided with this critical section, and
 * the caller should retry.
 */
static inline bool
mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
			unsigned long seq)
{
	return interval_sub->invalidate_seq != seq;
}
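
/*
 * Illustrative sketch only: the read side pairs mmu_interval_read_begin()
 * with mmu_interval_read_retry() under the same driver lock taken by the
 * invalidate() callback sketched above; mirror->lock and mirror->notifier
 * are hypothetical names.
 *
 *	unsigned long seq;
 *
 * again:
 *	seq = mmu_interval_read_begin(&mirror->notifier);
 *	... fault in and snapshot the CPU page tables for the range ...
 *	mutex_lock(&mirror->lock);
 *	if (mmu_interval_read_retry(&mirror->notifier, seq)) {
 *		mutex_unlock(&mirror->lock);
 *		goto again;
 *	}
 *	... program the device page tables, still under mirror->lock ...
 *	mutex_unlock(&mirror->lock);
 */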

/**
 * mmu_interval_check_retry - Test if a collision has occurred
 * @interval_sub: The subscription
 * @seq: The return of the matching mmu_interval_read_begin()
 *
 * This can be used in the critical section between mmu_interval_read_begin()
 * and mmu_interval_read_retry(). A return of true indicates an invalidation
 * has collided with this critical region and a future
 * mmu_interval_read_retry() will return true.
 *
 * False is not reliable and only suggests a collision may not have
 * occurred. It can be called many times and does not have to hold the user
 * provided lock.
 *
 * This call can be used as part of loops and other expensive operations to
 * expedite a retry.
 */
static inline bool
mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
			 unsigned long seq)
{
	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
	return READ_ONCE(interval_sub->invalidate_seq) != seq;
}

extern void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end);
extern int __mmu_notifier_clear_young(struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
				     unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
				      unsigned long address, pte_t pte);
extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
						bool only_end);
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end);
extern bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_release(mm);
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_flush_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_clear_young(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_test_young(mm, address);
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_change_pte(mm, address, pte);
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	might_sleep();

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
		__mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	int ret = 0;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
		ret = __mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	return ret;
}

static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
	if (mmu_notifier_range_blockable(range))
		might_sleep();

	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range, false);
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range, true);
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range(mm, start, end);
}

#ifdef CONFIG_SPECULATIVE_PAGE_FAULT

extern bool mmu_notifier_subscriptions_init(struct mm_struct *mm);
extern void mmu_notifier_subscriptions_destroy(struct mm_struct *mm);

static inline bool mmu_notifier_trylock(struct mm_struct *mm)
{
	return percpu_down_read_trylock(
		&get_notifier_subscriptions_hdr(mm)->mmu_notifier_lock->rw_sem);
}

static inline void mmu_notifier_unlock(struct mm_struct *mm)
{
	percpu_up_read(
		&get_notifier_subscriptions_hdr(mm)->mmu_notifier_lock->rw_sem);
}

#else /* CONFIG_SPECULATIVE_PAGE_FAULT */

static inline bool mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
	mm->notifier_subscriptions = NULL;
	return true;
}

static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_subscriptions_destroy(mm);
}

static inline bool mmu_notifier_trylock(struct mm_struct *mm)
{
	return true;
}

static inline void mmu_notifier_unlock(struct mm_struct *mm)
{
}

#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */

static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
					   enum mmu_notifier_event event,
					   unsigned flags,
					   struct vm_area_struct *vma,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	range->vma = vma;
	range->event = event;
	range->mm = mm;
	range->start = start;
	range->end = end;
	range->flags = flags;
}

static inline void mmu_notifier_range_init_migrate(
			struct mmu_notifier_range *range, unsigned int flags,
			struct vm_area_struct *vma, struct mm_struct *mm,
			unsigned long start, unsigned long end, void *pgmap)
{
	mmu_notifier_range_init(range, MMU_NOTIFY_MIGRATE, flags, vma, mm,
				start, end);
	range->migrate_pgmap_owner = pgmap;
}
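
/*
 * Illustrative sketch only: core mm code that is about to modify page
 * tables brackets the modification with the notifier calls, roughly:
 *
 *	struct mmu_notifier_range range;
 *
 *	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
 *				start, end);
 *	mmu_notifier_invalidate_range_start(&range);
 *	... clear and flush the CPU page table entries in [start, end) ...
 *	mmu_notifier_invalidate_range_end(&range);
 */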

#define ptep_clear_flush_young_notify(__vma, __address, __ptep)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PMD_SIZE);	\
	__young;							\
})

#define ptep_clear_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PMD_SIZE);	\
	__young;							\
})

#define ptep_clear_flush_notify(__vma, __address, __ptep)		\
({									\
	unsigned long ___addr = __address & PAGE_MASK;			\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pte_t ___pte;							\
									\
	___pte = ptep_clear_flush(__vma, __address, __ptep);		\
	mmu_notifier_invalidate_range(___mm, ___addr,			\
				      ___addr + PAGE_SIZE);		\
									\
	___pte;								\
})

#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pmd_t ___pmd;							\
									\
	___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PMD_SIZE);	\
									\
	___pmd;								\
})

#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PUD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pud_t ___pud;							\
									\
	___pud = pudp_huge_clear_flush(__vma, __haddr, __pud);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PUD_SIZE);	\
									\
	___pud;								\
})

/*
 * set_pte_at_notify() sets the pte _after_ running the notifier.
 * It is safe to start by updating the secondary MMUs, because the primary
 * MMU pte invalidation must already have happened with a ptep_clear_flush()
 * before set_pte_at_notify() is invoked. Updating the secondary MMUs first
 * is required when we change both the protection of the mapping from
 * read-only to read-write and the pfn (like during copy-on-write page
 * faults). Otherwise the old page would remain mapped read-only in the
 * secondary MMUs after the new page is already writable by some CPU through
 * the primary MMU.
 */
#define set_pte_at_notify(__mm, __address, __ptep, __pte)		\
({									\
	struct mm_struct *___mm = __mm;					\
	unsigned long ___address = __address;				\
	pte_t ___pte = __pte;						\
									\
	mmu_notifier_change_pte(___mm, ___address, ___pte);		\
	set_pte_at(___mm, ___address, __ptep, ___pte);			\
})
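
/*
 * Illustrative sketch only: a copy-on-write fault roughly follows this
 * order, which is why updating the secondary MMUs before the primary pte
 * is safe here:
 *
 *	entry = ptep_clear_flush(vma, address, ptep);
 *	... allocate new_page, copy the old contents into it, build a
 *	    writable new_pte pointing at new_page ...
 *	set_pte_at_notify(mm, address, ptep, new_pte);
 */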

#else /* CONFIG_MMU_NOTIFIER */

struct mmu_notifier_range {
	unsigned long start;
	unsigned long end;
};

static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
					    unsigned long start,
					    unsigned long end)
{
	range->start = start;
	range->end = end;
}

#define mmu_notifier_range_init(range,event,flags,vma,mm,start,end)	\
	_mmu_notifier_range_init(range, start, end)
#define mmu_notifier_range_init_migrate(range, flags, vma, mm, start, end, \
					pgmap)				\
	_mmu_notifier_range_init(range, start, end)

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return true;
}

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return 0;
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	return 0;
}

static inline
void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
}

static inline bool mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
	return true;
}

static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
}

static inline bool mmu_notifier_trylock(struct mm_struct *mm)
{
	return true;
}

static inline void mmu_notifier_unlock(struct mm_struct *mm)
{
}

#define mmu_notifier_range_update_to_read_only(r) false

#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define set_pte_at_notify set_pte_at

static inline void mmu_notifier_synchronize(void)
{
}

#endif /* CONFIG_MMU_NOTIFIER */

#endif /* _LINUX_MMU_NOTIFIER_H */
