/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
#include <linux/percpu-rwsem.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/interval_tree.h>
#include <linux/android_kabi.h>

struct mmu_notifier_subscriptions;
struct mmu_notifier;
struct mmu_notifier_range;
struct mmu_interval_notifier;

/**
 * enum mmu_notifier_event - reason for the mmu notifier callback
 * @MMU_NOTIFY_UNMAP: either a munmap() that unmaps the range or a mremap()
 * that moves the range
 *
 * @MMU_NOTIFY_CLEAR: clear page table entry (many reasons for this, such as
 * madvise() or replacing a page by another one, ...).
 *
 * @MMU_NOTIFY_PROTECTION_VMA: update is due to a protection change for the
 * range, i.e. using the vma access permission (vm_page_prot) to update the
 * whole range is enough; there is no need to inspect changes to the CPU page
 * table (mprotect() syscall).
 *
 * @MMU_NOTIFY_PROTECTION_PAGE: update is due to a change in the read/write
 * flag for pages in the range, so to mirror those changes the user must
 * inspect the CPU page table (from the end callback).
 *
 * @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still the same page and the
 * same access flags). The user should soft dirty the page in the end callback
 * to make sure that anyone relying on soft dirtiness catches pages that might
 * be written through non-CPU mappings.
 *
 * @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal
 * that the mm refcount is zero and the range is no longer accessible.
 *
 * @MMU_NOTIFY_MIGRATE: used during migrate_vma_collect() invalidate to signal
 * a device driver that it may ignore the invalidation if the
 * owner field matches the driver's device private pgmap owner.
 *
 * @MMU_NOTIFY_EXCLUSIVE: signals a device driver that the device will no
 * longer have exclusive access to the page. When sent during creation of an
 * exclusive range the owner will be initialised to the value provided by the
 * caller of make_device_exclusive_range(), otherwise the owner will be NULL.
 */
enum mmu_notifier_event {
	MMU_NOTIFY_UNMAP = 0,
	MMU_NOTIFY_CLEAR,
	MMU_NOTIFY_PROTECTION_VMA,
	MMU_NOTIFY_PROTECTION_PAGE,
	MMU_NOTIFY_SOFT_DIRTY,
	MMU_NOTIFY_RELEASE,
	MMU_NOTIFY_MIGRATE,
	MMU_NOTIFY_EXCLUSIVE,
};

#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)

struct mmu_notifier_ops {
	/*
	 * Called either by mmu_notifier_unregister or when the mm is
	 * being destroyed by exit_mmap, always before all pages are
	 * freed. This can run concurrently with other mmu notifier
	 * methods (the ones invoked outside the mm context) and it
	 * should tear down all secondary mmu mappings and freeze the
	 * secondary mmu. If this method isn't implemented you have to
	 * be sure that nothing could possibly write to the pages
	 * through the secondary mmu by the time the last thread with
	 * tsk->mm == mm exits.
	 *
	 * As a side note: the pages freed after ->release returns could
	 * be immediately reallocated by the gart at an alias physical
	 * address with a different cache model, so if ->release isn't
	 * implemented because all _software_ driven memory accesses
	 * through the secondary mmu are terminated by the time the
	 * last thread of this mm quits, you also have to be sure that
	 * speculative _hardware_ operations can't allocate dirty
	 * cachelines in the cpu that could not be snooped and made
	 * coherent with the other read and write operations happening
	 * through the gart alias address, leading to memory
	 * corruption.
	 */
	void (*release)(struct mmu_notifier *subscription,
			struct mm_struct *mm);

	/*
	 * clear_flush_young is called after the VM is
	 * test-and-clearing the young/accessed bitflag in the
	 * pte. This way the VM will provide proper aging to the
	 * accesses to the page through the secondary MMUs and not
	 * only to the ones through the Linux pte.
	 * Start-end is necessary in case the secondary MMU is mapping the page
	 * at a smaller granularity than the primary MMU.
	 */
	int (*clear_flush_young)(struct mmu_notifier *subscription,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/*
	 * clear_young is a lightweight version of clear_flush_young. Like the
	 * latter, it is supposed to test-and-clear the young/accessed bitflag
	 * in the secondary pte, but it may omit flushing the secondary tlb.
	 */
	int (*clear_young)(struct mmu_notifier *subscription,
			   struct mm_struct *mm,
			   unsigned long start,
			   unsigned long end);

	/*
	 * test_young is called to check the young/accessed bitflag in
	 * the secondary pte. This is used to know if the page is
	 * frequently used without actually clearing the flag or tearing
	 * down the secondary mapping on the page.
	 */
	int (*test_young)(struct mmu_notifier *subscription,
			  struct mm_struct *mm,
			  unsigned long address);

	/*
	 * change_pte is called in cases where a pte mapping a page is
	 * changed: for example, when ksm remaps a pte to point to a new
	 * shared page.
	 */
	void (*change_pte)(struct mmu_notifier *subscription,
			   struct mm_struct *mm,
			   unsigned long address,
			   pte_t pte);

	/*
	 * invalidate_range_start() and invalidate_range_end() must be
	 * paired and are called only when the mmap_lock and/or the
	 * locks protecting the reverse maps are held. If the subsystem
	 * can't guarantee that no additional references are taken to
	 * the pages in the range, it has to implement the
	 * invalidate_range() notifier to remove any references taken
	 * after invalidate_range_start().
	 *
	 * Invalidation of multiple concurrent ranges may be
	 * optionally permitted by the driver. Either way the
	 * establishment of sptes is forbidden in the range passed to
	 * invalidate_range_begin/end for the whole duration of the
	 * invalidate_range_begin/end critical section.
	 *
	 * invalidate_range_start() is called when all pages in the
	 * range are still mapped and have at least a refcount of one.
	 *
	 * invalidate_range_end() is called when all pages in the
	 * range have been unmapped and the pages have been freed by
	 * the VM.
	 *
	 * The VM will remove the page table entries and potentially
	 * the page between invalidate_range_start() and
	 * invalidate_range_end(). If the page must not be freed
	 * because of pending I/O or other circumstances then the
	 * invalidate_range_start() callback (or the initial mapping
	 * by the driver) must make sure that the refcount is kept
	 * elevated.
	 *
	 * If the driver increases the refcount when the pages are
	 * initially mapped into an address space then either
	 * invalidate_range_start() or invalidate_range_end() may
	 * decrease the refcount. If the refcount is decreased on
	 * invalidate_range_start() then the VM can free pages as page
	 * table entries are removed.  If the refcount is only
	 * dropped on invalidate_range_end() then the driver itself
	 * will drop the last refcount but it must take care to flush
	 * any secondary tlb before doing the final free on the
	 * page. Pages will no longer be referenced by the linux
	 * address space but may still be referenced by sptes until
	 * the last refcount is dropped.
	 *
	 * If the blockable argument is set to false then the callback cannot
	 * sleep and has to return -EAGAIN if sleeping would be required.
	 * 0 should be returned otherwise. Please note that notifiers that can
	 * fail invalidate_range_start are not allowed to implement
	 * invalidate_range_end, as there is no mechanism for informing the
	 * notifier that its start failed.
	 */
	int (*invalidate_range_start)(struct mmu_notifier *subscription,
				      const struct mmu_notifier_range *range);
	void (*invalidate_range_end)(struct mmu_notifier *subscription,
				     const struct mmu_notifier_range *range);

	/*
	 * invalidate_range() is either called between
	 * invalidate_range_start() and invalidate_range_end() when the
	 * VM has to free pages that were unmapped, but before the
	 * pages are actually freed, or outside of _start()/_end() when
	 * a (remote) TLB flush is necessary.
	 *
	 * If invalidate_range() is used to manage a non-CPU TLB with
	 * shared page-tables, it is not necessary to implement the
	 * invalidate_range_start()/end() notifiers, as
	 * invalidate_range() already catches the points in time when an
	 * external TLB range needs to be flushed. For a more in-depth
	 * discussion on this see Documentation/vm/mmu_notifier.rst
	 *
	 * Note that this function might be called with just a sub-range
	 * of what was passed to invalidate_range_start()/end(), if
	 * called between those functions.
	 */
	void (*invalidate_range)(struct mmu_notifier *subscription,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/*
	 * These callbacks are used with the get/put interface to manage the
	 * lifetime of the mmu_notifier memory. alloc_notifier() returns a new
	 * notifier for use with the mm.
	 *
	 * free_notifier() is only called after the mmu_notifier has been
	 * fully put, calls to any ops callback are prevented and no ops
	 * callbacks are currently running. It is called from a SRCU callback
	 * and cannot sleep.
	 */
	struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
	void (*free_notifier)(struct mmu_notifier *subscription);

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);
};
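
/*
 * Hedged sketch (not part of this header): a minimal mmu_notifier_ops whose
 * invalidate_range_start() follows the rules documented above.  It skips
 * invalidations the driver itself triggered (MMU_NOTIFY_MIGRATE with a
 * matching owner) and refuses non-blockable invalidations because it needs a
 * sleeping lock.  All my_* names are hypothetical.
 *
 *	static int my_invalidate_start(struct mmu_notifier *sub,
 *				       const struct mmu_notifier_range *range)
 *	{
 *		struct my_dev *dev = container_of(sub, struct my_dev, notifier);
 *
 *		if (range->event == MMU_NOTIFY_MIGRATE &&
 *		    range->owner == dev->pgmap_owner)
 *			return 0;
 *		if (!mmu_notifier_range_blockable(range))
 *			return -EAGAIN;
 *		mutex_lock(&dev->mmu_lock);
 *		my_dev_unmap(dev, range->start, range->end);
 *		mutex_unlock(&dev->mmu_lock);
 *		return 0;
 *	}
 *
 *	static const struct mmu_notifier_ops my_notifier_ops = {
 *		.invalidate_range_start = my_invalidate_start,
 *	};
 *
 * Per the comment above, an implementation that can fail
 * invalidate_range_start() like this must not also provide
 * invalidate_range_end().
 */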

/*
 * The notifier chains are protected by mmap_lock and/or the reverse map
 * semaphores. Notifier chains are only changed when all reverse maps and
 * the mmap_lock locks are taken.
 *
 * Therefore notifier chains can only be traversed when either
 *
 * 1. mmap_lock is held.
 * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
 * 3. No other concurrent thread can access the list (release).
 */
struct mmu_notifier {
	struct hlist_node hlist;
	const struct mmu_notifier_ops *ops;
	struct mm_struct *mm;
	struct rcu_head rcu;
	unsigned int users;

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
};

/**
 * struct mmu_interval_notifier_ops
 * @invalidate: Upon return the caller must stop using any SPTEs within this
 *              range. This function can sleep. Return false only if sleeping
 *              was required but mmu_notifier_range_blockable(range) is false.
 */
struct mmu_interval_notifier_ops {
	bool (*invalidate)(struct mmu_interval_notifier *interval_sub,
			   const struct mmu_notifier_range *range,
			   unsigned long cur_seq);
};

struct mmu_interval_notifier {
	struct interval_tree_node interval_tree;
	const struct mmu_interval_notifier_ops *ops;
	struct mm_struct *mm;
	struct hlist_node deferred_item;
	unsigned long invalidate_seq;
};

#ifdef CONFIG_MMU_NOTIFIER

#ifdef CONFIG_LOCKDEP
extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
#endif

struct mmu_notifier_range {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;
	unsigned flags;
	enum mmu_notifier_event event;
	void *owner;
};

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return unlikely(mm->notifier_subscriptions);
}

struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
					     struct mm_struct *mm);
static inline struct mmu_notifier *
mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
{
	struct mmu_notifier *ret;

	mmap_write_lock(mm);
	ret = mmu_notifier_get_locked(ops, mm);
	mmap_write_unlock(mm);
	return ret;
}
void mmu_notifier_put(struct mmu_notifier *subscription);
void mmu_notifier_synchronize(void);
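
/*
 * Hedged usage sketch for the get/put interface (my_notifier_ops is the
 * hypothetical ops structure from the example above): a driver that wants at
 * most one notifier per mm provides alloc_notifier()/free_notifier() in its
 * ops and then does something like:
 *
 *	struct mmu_notifier *sub;
 *
 *	sub = mmu_notifier_get(&my_notifier_ops, current->mm);
 *	if (IS_ERR(sub))
 *		return PTR_ERR(sub);
 *	...
 *	mmu_notifier_put(sub);
 *
 * mmu_notifier_get() returns the existing subscription if one with the same
 * ops is already registered on this mm, otherwise it calls alloc_notifier().
 * free_notifier() runs later from an SRCU callback; module exit paths use
 * mmu_notifier_synchronize() to wait for it before the ops memory goes away.
 */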

extern int mmu_notifier_register(struct mmu_notifier *subscription,
				 struct mm_struct *mm);
extern int __mmu_notifier_register(struct mmu_notifier *subscription,
				   struct mm_struct *mm);
extern void mmu_notifier_unregister(struct mmu_notifier *subscription,
				    struct mm_struct *mm);

unsigned long
mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub);
int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long length,
				 const struct mmu_interval_notifier_ops *ops);
int mmu_interval_notifier_insert_locked(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	unsigned long start, unsigned long length,
	const struct mmu_interval_notifier_ops *ops);
void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub);

/**
 * mmu_interval_set_seq - Save the invalidation sequence
 * @interval_sub: The subscription passed to invalidate
 * @cur_seq: The cur_seq passed to the invalidate() callback
 *
 * This must be called unconditionally from the invalidate callback of a
 * struct mmu_interval_notifier_ops under the same lock that is used to call
 * mmu_interval_read_retry(). It updates the sequence number for later use by
 * mmu_interval_read_retry(). The provided cur_seq will always be odd.
 *
 * If the caller does not call mmu_interval_read_begin() or
 * mmu_interval_read_retry() then this call is not required.
 */
static inline void
mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
		     unsigned long cur_seq)
{
	WRITE_ONCE(interval_sub->invalidate_seq, cur_seq);
}

/**
 * mmu_interval_read_retry - End a read side critical section against a VA range
 * @interval_sub: The subscription
 * @seq: The return of the paired mmu_interval_read_begin()
 *
 * This MUST be called under a user provided lock that is also held
 * unconditionally by op->invalidate() when it calls mmu_interval_set_seq().
 *
 * Each call should be paired with a single mmu_interval_read_begin() and
 * should be used to conclude the read side.
 *
 * Returns true if an invalidation collided with this critical section, and
 * the caller should retry.
 */
static inline bool
mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
			unsigned long seq)
{
	return interval_sub->invalidate_seq != seq;
}

/**
 * mmu_interval_check_retry - Test if a collision has occurred
 * @interval_sub: The subscription
 * @seq: The return of the matching mmu_interval_read_begin()
 *
 * This can be used in the critical section between mmu_interval_read_begin()
 * and mmu_interval_read_retry().  A return of true indicates an invalidation
 * has collided with this critical region and a future
 * mmu_interval_read_retry() will return true.
 *
 * False is not reliable and only suggests a collision may not have
 * occurred. It can be called many times and does not have to hold the user
 * provided lock.
 *
 * This call can be used as part of loops and other expensive operations to
 * expedite a retry.
 */
static inline bool
mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
			 unsigned long seq)
{
	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
	return READ_ONCE(interval_sub->invalidate_seq) != seq;
}
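
/*
 * Hedged sketch of the read-side pattern the helpers above implement
 * (similar to the example in Documentation/vm/hmm.rst; driver_lock, my_sub
 * and the device-programming steps are hypothetical):
 *
 *	unsigned long seq;
 *
 * again:
 *	seq = mmu_interval_read_begin(&my_sub);
 *	mmap_read_lock(mm);
 *	... snapshot the CPU page tables for the range ...
 *	mmap_read_unlock(mm);
 *
 *	spin_lock(&driver_lock);
 *	if (mmu_interval_read_retry(&my_sub, seq)) {
 *		spin_unlock(&driver_lock);
 *		goto again;
 *	}
 *	... program the device page tables from the snapshot ...
 *	spin_unlock(&driver_lock);
 *
 * The matching invalidate() callback must take the same driver_lock and call
 * mmu_interval_set_seq() unconditionally:
 *
 *	static bool my_invalidate(struct mmu_interval_notifier *sub,
 *				  const struct mmu_notifier_range *range,
 *				  unsigned long cur_seq)
 *	{
 *		spin_lock(&driver_lock);
 *		mmu_interval_set_seq(sub, cur_seq);
 *		... tear down device mappings for range->start..range->end ...
 *		spin_unlock(&driver_lock);
 *		return true;
 *	}
 */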

extern void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end);
extern int __mmu_notifier_clear_young(struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
				     unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
				      unsigned long address, pte_t pte);
extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
				  bool only_end);
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end);
extern bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_release(mm);
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_flush_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_clear_young(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_test_young(mm, address);
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_change_pte(mm, address, pte);
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	might_sleep();

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
		__mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	int ret = 0;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
		ret = __mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	return ret;
}

static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
	if (mmu_notifier_range_blockable(range))
		might_sleep();

	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range, false);
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range, true);
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range(mm, start, end);
}

static inline bool mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
	mm->mmu_notifier_lock = kzalloc(sizeof(struct percpu_rw_semaphore), GFP_KERNEL);
	if (!mm->mmu_notifier_lock)
		return false;
	if (percpu_init_rwsem(mm->mmu_notifier_lock)) {
		kfree(mm->mmu_notifier_lock);
		return false;
	}
#endif

	mm->notifier_subscriptions = NULL;
	return true;
}

static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_subscriptions_destroy(mm);

#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
	if (!in_atomic()) {
		percpu_free_rwsem(mm->mmu_notifier_lock);
		kfree(mm->mmu_notifier_lock);
	} else {
		percpu_rwsem_async_destroy(mm->mmu_notifier_lock);
	}
#endif
}

static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
					   enum mmu_notifier_event event,
					   unsigned flags,
					   struct vm_area_struct *vma,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	range->vma = vma;
	range->event = event;
	range->mm = mm;
	range->start = start;
	range->end = end;
	range->flags = flags;
}

static inline void mmu_notifier_range_init_owner(
			struct mmu_notifier_range *range,
			enum mmu_notifier_event event, unsigned int flags,
			struct vm_area_struct *vma, struct mm_struct *mm,
			unsigned long start, unsigned long end, void *owner)
{
	mmu_notifier_range_init(range, event, flags, vma, mm, start, end);
	range->owner = owner;
}
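
/*
 * Hedged sketch of how the range helpers above are typically driven when
 * core mm code tears down PTEs (simplified; real callers such as
 * zap_page_range() add TLB batching and page table locking around this):
 *
 *	struct mmu_notifier_range range;
 *
 *	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
 *				start, end);
 *	mmu_notifier_invalidate_range_start(&range);
 *	... clear the CPU page table entries for [start, end) ...
 *	mmu_notifier_invalidate_range_end(&range);
 *
 * Callers that cannot sleep use mmu_notifier_invalidate_range_start_nonblock()
 * instead and must back off if it returns -EAGAIN.
 */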

#define ptep_clear_flush_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PMD_SIZE);	\
	__young;							\
})

#define ptep_clear_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PMD_SIZE);	\
	__young;							\
})

#define	ptep_clear_flush_notify(__vma, __address, __ptep)		\
({									\
	unsigned long ___addr = __address & PAGE_MASK;			\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pte_t ___pte;							\
									\
	___pte = ptep_clear_flush(__vma, __address, __ptep);		\
	mmu_notifier_invalidate_range(___mm, ___addr,			\
					___addr + PAGE_SIZE);		\
									\
	___pte;								\
})

#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pmd_t ___pmd;							\
									\
	___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PMD_SIZE);	\
									\
	___pmd;								\
})

#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PUD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pud_t ___pud;							\
									\
	___pud = pudp_huge_clear_flush(__vma, __haddr, __pud);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PUD_SIZE);	\
									\
	___pud;								\
})

/*
 * set_pte_at_notify() sets the pte _after_ running the notifier.
 * It is safe to start by updating the secondary MMUs, because the primary MMU
 * pte invalidate must have already happened with a ptep_clear_flush() before
 * set_pte_at_notify() has been invoked.  Updating the secondary MMUs first is
 * required when we change both the protection of the mapping from read-only to
 * read-write and the pfn (like during copy on write page faults). Otherwise the
 * old page would remain mapped readonly in the secondary MMUs after the new
 * page is already writable by some CPU through the primary MMU.
 */
#define set_pte_at_notify(__mm, __address, __ptep, __pte)		\
({									\
	struct mm_struct *___mm = __mm;					\
	unsigned long ___address = __address;				\
	pte_t ___pte = __pte;						\
									\
	mmu_notifier_change_pte(___mm, ___address, ___pte);		\
	set_pte_at(___mm, ___address, __ptep, ___pte);			\
})
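
/*
 * Hedged illustration of the ordering above: a stripped-down copy-on-write
 * style replacement (the locking, refcounting and invalidate_range_start()/
 * end() bracket that real callers such as wp_page_copy() use are omitted):
 *
 *	entry = ptep_clear_flush_notify(vma, address, ptep);
 *	... copy the old page into new_page and build a writable new_pte ...
 *	set_pte_at_notify(mm, address, ptep, new_pte);
 *
 * The primary pte is cleared and the secondary TLBs invalidated before the
 * secondary MMUs learn about new_pte, so the old page cannot stay mapped
 * read-only in a secondary MMU once the new page is writable through the
 * primary MMU.
 */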

#else /* CONFIG_MMU_NOTIFIER */

struct mmu_notifier_range {
	unsigned long start;
	unsigned long end;
};

static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
					    unsigned long start,
					    unsigned long end)
{
	range->start = start;
	range->end = end;
}

#define mmu_notifier_range_init(range,event,flags,vma,mm,start,end)  \
	_mmu_notifier_range_init(range, start, end)
#define mmu_notifier_range_init_owner(range, event, flags, vma, mm, start, \
					end, owner) \
	_mmu_notifier_range_init(range, start, end)

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return true;
}

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return 0;
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end)
{
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	return 0;
}

static inline
void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
}

static inline bool mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
	return true;
}

static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
}

#define mmu_notifier_range_update_to_read_only(r) false

#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define	ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define set_pte_at_notify set_pte_at

static inline void mmu_notifier_synchronize(void)
{
}

#endif /* CONFIG_MMU_NOTIFIER */

#if defined(CONFIG_MMU_NOTIFIER) && defined(CONFIG_SPECULATIVE_PAGE_FAULT)

static inline bool mmu_notifier_trylock(struct mm_struct *mm)
{
	return percpu_down_read_trylock(mm->mmu_notifier_lock);
}

static inline void mmu_notifier_unlock(struct mm_struct *mm)
{
	percpu_up_read(mm->mmu_notifier_lock);
}

#else

static inline bool mmu_notifier_trylock(struct mm_struct *mm)
{
	return true;
}

static inline void mmu_notifier_unlock(struct mm_struct *mm)
{
}

#endif
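
/*
 * Hedged sketch (Android-specific; function and return values below are
 * illustrative, not taken from this header): the speculative page fault path
 * is expected to take the per-mm percpu rwsem for read before making page
 * table changes without mmap_lock, and to fall back to the classic fault
 * path when it cannot:
 *
 *	if (!mmu_notifier_trylock(mm))
 *		return VM_FAULT_RETRY;
 *	... install the pte, calling the *_notify() helpers as needed ...
 *	mmu_notifier_unlock(mm);
 */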

#endif /* _LINUX_MMU_NOTIFIER_H */