/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
#include <linux/srcu.h>
#include <linux/interval_tree.h>
#include <linux/android_kabi.h>

struct mmu_notifier_subscriptions;
struct mmu_notifier;
struct mmu_notifier_range;
struct mmu_interval_notifier;

/**
 * enum mmu_notifier_event - reason for the mmu notifier callback
 * @MMU_NOTIFY_UNMAP: either a munmap() that unmaps the range or a mremap()
 * that moves the range.
 *
 * @MMU_NOTIFY_CLEAR: clear page table entry (many reasons for this, such as
 * madvise() or replacing a page with another one, ...).
 *
 * @MMU_NOTIFY_PROTECTION_VMA: the update is due to a protection change for
 * the range, i.e. using the vma access permissions (vm_page_prot) to update
 * the whole range is enough; there is no need to inspect changes to the CPU
 * page table (mprotect() syscall).
 *
 * @MMU_NOTIFY_PROTECTION_PAGE: the update is due to a change in the
 * read/write flags of pages in the range, so to mirror those changes the
 * user must inspect the CPU page table (from the end callback).
 *
 * @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still the same page and the
 * same access flags). The user should soft-dirty the page in the end callback
 * to make sure that anyone relying on soft dirtiness catches pages that might
 * be written through non-CPU mappings.
 *
 * @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal
 * that the mm refcount is zero and the range is no longer accessible.
 *
 * @MMU_NOTIFY_MIGRATE: used during migrate_vma_collect() invalidate to signal
 * a device driver that it may ignore the invalidation if the
 * owner field matches the driver's device private pgmap owner.
 *
 * @MMU_NOTIFY_EXCLUSIVE: to signal a device driver that the device will no
 * longer have exclusive access to the page. When sent during creation of an
 * exclusive range the owner will be initialised to the value provided by the
 * caller of make_device_exclusive_range(), otherwise the owner will be NULL.
 */
enum mmu_notifier_event {
	MMU_NOTIFY_UNMAP = 0,
	MMU_NOTIFY_CLEAR,
	MMU_NOTIFY_PROTECTION_VMA,
	MMU_NOTIFY_PROTECTION_PAGE,
	MMU_NOTIFY_SOFT_DIRTY,
	MMU_NOTIFY_RELEASE,
	MMU_NOTIFY_MIGRATE,
	MMU_NOTIFY_EXCLUSIVE,
};
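
/*
 * Illustrative sketch (not part of this header): a driver's invalidate
 * callback typically looks at range->event and range->owner to decide
 * whether an invalidation concerns it. For example, a driver migrating its
 * own device-private pages may skip the invalidation it triggered itself:
 *
 *	if (range->event == MMU_NOTIFY_MIGRATE &&
 *	    range->owner == mydev->pgmap_owner)
 *		return true;	// nothing to do, we initiated this one
 *
 * Here "mydev->pgmap_owner" is a hypothetical per-driver cookie that the
 * driver also supplies as the pgmap owner when setting up the migration.
 */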

#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)

struct mmu_notifier_ops {
	/*
	 * Called either by mmu_notifier_unregister or when the mm is
	 * being destroyed by exit_mmap, always before all pages are
	 * freed. This can run concurrently with other mmu notifier
	 * methods (the ones invoked outside the mm context) and it
	 * should tear down all secondary mmu mappings and freeze the
	 * secondary mmu. If this method isn't implemented you have to
	 * be sure that nothing could possibly write to the pages
	 * through the secondary mmu by the time the last thread with
	 * tsk->mm == mm exits.
	 *
	 * As a side note: the pages freed after ->release returns could
	 * be immediately reallocated by the gart at an alias physical
	 * address with a different cache model. So if ->release isn't
	 * implemented because all _software_ driven memory accesses
	 * through the secondary mmu are terminated by the time the
	 * last thread of this mm quits, you also have to be sure that
	 * speculative _hardware_ operations can't allocate dirty
	 * cachelines in the cpu that could not be snooped and made
	 * coherent with the other read and write operations happening
	 * through the gart alias address, which would lead to memory
	 * corruption.
	 */
	void (*release)(struct mmu_notifier *subscription,
			struct mm_struct *mm);

	/*
	 * clear_flush_young is called after the VM test-and-clears the
	 * young/accessed bitflag in the pte. This way the VM will provide
	 * proper aging for accesses to the page through the secondary MMUs
	 * and not only for the ones through the Linux pte.
	 * Start-end is necessary in case the secondary MMU is mapping the page
	 * at a smaller granularity than the primary MMU.
	 */
	int (*clear_flush_young)(struct mmu_notifier *subscription,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/*
	 * clear_young is a lightweight version of clear_flush_young. Like the
	 * latter, it is supposed to test-and-clear the young/accessed bitflag
	 * in the secondary pte, but it may omit flushing the secondary tlb.
	 */
	int (*clear_young)(struct mmu_notifier *subscription,
			   struct mm_struct *mm,
			   unsigned long start,
			   unsigned long end);

	/*
	 * test_young is called to check the young/accessed bitflag in
	 * the secondary pte. This is used to know if the page is
	 * frequently used without actually clearing the flag or tearing
	 * down the secondary mapping on the page.
	 */
	int (*test_young)(struct mmu_notifier *subscription,
			  struct mm_struct *mm,
			  unsigned long address);

	/*
	 * invalidate_range_start() and invalidate_range_end() must be
	 * paired and are called only when the mmap_lock and/or the
	 * locks protecting the reverse maps are held. If the subsystem
	 * can't guarantee that no additional references are taken to
	 * the pages in the range, it has to implement the
	 * invalidate_range() notifier to remove any references taken
	 * after invalidate_range_start().
	 *
	 * Invalidation of multiple concurrent ranges may be
	 * optionally permitted by the driver. Either way the
	 * establishment of sptes is forbidden in the range passed to
	 * invalidate_range_start()/end() for the whole duration of the
	 * invalidate_range_start()/end() critical section.
	 *
	 * invalidate_range_start() is called when all pages in the
	 * range are still mapped and have at least a refcount of one.
	 *
	 * invalidate_range_end() is called when all pages in the
	 * range have been unmapped and the pages have been freed by
	 * the VM.
	 *
	 * The VM will remove the page table entries and potentially
	 * the page between invalidate_range_start() and
	 * invalidate_range_end(). If the page must not be freed
	 * because of pending I/O or other circumstances then the
	 * invalidate_range_start() callback (or the initial mapping
	 * by the driver) must make sure that the refcount is kept
	 * elevated.
	 *
	 * If the driver increases the refcount when the pages are
	 * initially mapped into an address space then either
	 * invalidate_range_start() or invalidate_range_end() may
	 * decrease the refcount. If the refcount is decreased on
	 * invalidate_range_start() then the VM can free pages as page
	 * table entries are removed. If the refcount is only
	 * dropped on invalidate_range_end() then the driver itself
	 * will drop the last refcount, but it must take care to flush
	 * any secondary tlb before doing the final free on the
	 * page. Pages will no longer be referenced by the linux
	 * address space but may still be referenced by sptes until
	 * the last refcount is dropped.
	 *
	 * If the blockable argument is set to false then the callback cannot
	 * sleep and has to return -EAGAIN if sleeping would be required.
	 * 0 should be returned otherwise. Please note that notifiers that can
	 * fail invalidate_range_start are not allowed to implement
	 * invalidate_range_end, as there is no mechanism for informing the
	 * notifier that its start failed.
	 */
	int (*invalidate_range_start)(struct mmu_notifier *subscription,
				      const struct mmu_notifier_range *range);
	void (*invalidate_range_end)(struct mmu_notifier *subscription,
				     const struct mmu_notifier_range *range);

	/*
	 * arch_invalidate_secondary_tlbs() is used to manage a non-CPU TLB
	 * which shares page-tables with the CPU. The
	 * invalidate_range_start()/end() callbacks should not be implemented as
	 * arch_invalidate_secondary_tlbs() already catches the points in time
	 * when an external TLB needs to be flushed.
	 *
	 * This requires arch_invalidate_secondary_tlbs() to be called while
	 * holding the ptl spin-lock and therefore this callback is not allowed
	 * to sleep.
	 *
	 * This is called by architecture code whenever invalidating a TLB
	 * entry. It is assumed that any secondary TLB has the same rules for
	 * when invalidations are required. If this is not the case, architecture
	 * code will need to call this explicitly when required for secondary
	 * TLB invalidation.
	 */
	void (*arch_invalidate_secondary_tlbs)(
					struct mmu_notifier *subscription,
					struct mm_struct *mm,
					unsigned long start,
					unsigned long end);

	/*
	 * These callbacks are used with the get/put interface to manage the
	 * lifetime of the mmu_notifier memory. alloc_notifier() returns a new
	 * notifier for use with the mm.
	 *
	 * free_notifier() is only called after the mmu_notifier has been
	 * fully put, calls to any ops callback are prevented and no ops
	 * callbacks are currently running. It is called from a SRCU callback
	 * and cannot sleep.
	 */
	struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
	void (*free_notifier)(struct mmu_notifier *subscription);

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);
};
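
/*
 * Illustrative sketch (hypothetical driver code, not part of this header):
 * a minimal mmu_notifier_ops for a driver that shadows CPU page tables and
 * must drop its shadow entries when the VM invalidates a range. The
 * "my_mirror" type and helpers are assumptions.
 *
 *	static int my_invalidate_range_start(struct mmu_notifier *sub,
 *					     const struct mmu_notifier_range *range)
 *	{
 *		struct my_mirror *m = container_of(sub, struct my_mirror, notifier);
 *
 *		if (!mmu_notifier_range_blockable(range))
 *			return -EAGAIN;	// we would have to sleep below
 *
 *		mutex_lock(&m->lock);
 *		my_mirror_unmap(m, range->start, range->end);
 *		mutex_unlock(&m->lock);
 *		return 0;
 *	}
 *
 *	static void my_release(struct mmu_notifier *sub, struct mm_struct *mm)
 *	{
 *		struct my_mirror *m = container_of(sub, struct my_mirror, notifier);
 *
 *		my_mirror_unmap(m, 0, ULONG_MAX);
 *	}
 *
 *	static const struct mmu_notifier_ops my_mirror_ops = {
 *		.release		= my_release,
 *		.invalidate_range_start	= my_invalidate_range_start,
 *	};
 *
 * Note that an ops structure which can fail invalidate_range_start() (as
 * above) must not implement invalidate_range_end(), per the rules documented
 * for those callbacks. Such an ops structure would typically be registered
 * with mmu_notifier_register(), or via mmu_notifier_get() if it also
 * implements alloc_notifier()/free_notifier().
 */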

/*
 * The notifier chains are protected by the mmap_lock and/or the reverse map
 * semaphores. Notifier chains are only changed when all reverse map locks and
 * the mmap_lock are taken.
 *
 * Therefore notifier chains can only be traversed when either
 *
 * 1. mmap_lock is held.
 * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
 * 3. No other concurrent thread can access the list (release).
 */
struct mmu_notifier {
	struct hlist_node hlist;
	const struct mmu_notifier_ops *ops;
	struct mm_struct *mm;
	struct rcu_head rcu;
	unsigned int users;

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
};

/**
 * struct mmu_interval_notifier_ops
 * @invalidate: Upon return the caller must stop using any SPTEs within this
 *              range. This function can sleep. Return false only if sleeping
 *              was required but mmu_notifier_range_blockable(range) is false.
 */
struct mmu_interval_notifier_ops {
	bool (*invalidate)(struct mmu_interval_notifier *interval_sub,
			   const struct mmu_notifier_range *range,
			   unsigned long cur_seq);
};
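
/*
 * Illustrative sketch of an invalidate() implementation (the my_* names are
 * hypothetical): the callback takes the same driver lock that serialises
 * mmu_interval_read_retry(), records the sequence with mmu_interval_set_seq()
 * and drops the device mappings for the range.
 *
 *	static bool my_interval_invalidate(struct mmu_interval_notifier *mni,
 *					   const struct mmu_notifier_range *range,
 *					   unsigned long cur_seq)
 *	{
 *		struct my_range *mrange = container_of(mni, struct my_range, notifier);
 *
 *		if (mmu_notifier_range_blockable(range))
 *			mutex_lock(&mrange->lock);
 *		else if (!mutex_trylock(&mrange->lock))
 *			return false;	// would have to sleep, not allowed
 *
 *		mmu_interval_set_seq(mni, cur_seq);
 *		my_device_unmap(mrange, range->start, range->end);
 *		mutex_unlock(&mrange->lock);
 *		return true;
 *	}
 *
 *	static const struct mmu_interval_notifier_ops my_interval_ops = {
 *		.invalidate = my_interval_invalidate,
 *	};
 */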

struct mmu_interval_notifier {
	struct interval_tree_node interval_tree;
	const struct mmu_interval_notifier_ops *ops;
	struct mm_struct *mm;
	struct hlist_node deferred_item;
	unsigned long invalidate_seq;
};

#ifdef CONFIG_MMU_NOTIFIER

#ifdef CONFIG_LOCKDEP
extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
#endif

struct mmu_notifier_range {
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;
	unsigned flags;
	enum mmu_notifier_event event;
	void *owner;
};

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return unlikely(mm->notifier_subscriptions);
}

struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
					     struct mm_struct *mm);
static inline struct mmu_notifier *
mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
{
	struct mmu_notifier *ret;

	mmap_write_lock(mm);
	ret = mmu_notifier_get_locked(ops, mm);
	mmap_write_unlock(mm);
	return ret;
}
void mmu_notifier_put(struct mmu_notifier *subscription);
void mmu_notifier_synchronize(void);
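
/*
 * Illustrative lifetime sketch for the get/put interface (the my_* names are
 * hypothetical, and my_mm_ops is assumed to provide alloc_notifier() and
 * free_notifier()): mmu_notifier_get() returns the shared, refcounted
 * notifier for the mm, which the driver typically embeds in its own per-mm
 * context and recovers with container_of().
 *
 *	struct mmu_notifier *mn;
 *	struct my_mm_ctx *ctx;
 *
 *	mn = mmu_notifier_get(&my_mm_ops, current->mm);
 *	if (IS_ERR(mn))
 *		return PTR_ERR(mn);
 *	ctx = container_of(mn, struct my_mm_ctx, notifier);
 *
 *	// ... use ctx to mirror the address space ...
 *
 *	mmu_notifier_put(mn);		// drop the reference when done
 *	mmu_notifier_synchronize();	// e.g. before module unload, to wait
 *					// until free_notifier() has finished
 */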

extern int mmu_notifier_register(struct mmu_notifier *subscription,
				 struct mm_struct *mm);
extern int __mmu_notifier_register(struct mmu_notifier *subscription,
				   struct mm_struct *mm);
extern void mmu_notifier_unregister(struct mmu_notifier *subscription,
				    struct mm_struct *mm);

unsigned long
mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub);
int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long length,
				 const struct mmu_interval_notifier_ops *ops);
int mmu_interval_notifier_insert_locked(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	unsigned long start, unsigned long length,
	const struct mmu_interval_notifier_ops *ops);
void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub);

/**
 * mmu_interval_set_seq - Save the invalidation sequence
 * @interval_sub: The subscription passed to invalidate
 * @cur_seq: The cur_seq passed to the invalidate() callback
 *
 * This must be called unconditionally from the invalidate callback of a
 * struct mmu_interval_notifier_ops under the same lock that is used to call
 * mmu_interval_read_retry(). It updates the sequence number for later use by
 * mmu_interval_read_retry(). The provided cur_seq will always be odd.
 *
 * If the caller does not call mmu_interval_read_begin() or
 * mmu_interval_read_retry() then this call is not required.
 */
static inline void
mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
		     unsigned long cur_seq)
{
	WRITE_ONCE(interval_sub->invalidate_seq, cur_seq);
}

/**
 * mmu_interval_read_retry - End a read side critical section against a VA range
 * @interval_sub: The subscription
 * @seq: The return of the paired mmu_interval_read_begin()
 *
 * This MUST be called under a user provided lock that is also held
 * unconditionally by op->invalidate() when it calls mmu_interval_set_seq().
 *
 * Each call should be paired with a single mmu_interval_read_begin() and
 * should be used to conclude the read side.
 *
 * Returns true if an invalidation collided with this critical section, and
 * the caller should retry.
 */
static inline bool
mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
			unsigned long seq)
{
	return interval_sub->invalidate_seq != seq;
}
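
/*
 * Illustrative read-side pattern (a sketch; my_collect_cpu_ptes() and
 * my_device_map() are hypothetical helpers): collect the CPU mappings
 * outside the driver lock, then take the same lock the invalidate()
 * callback uses and check for a collision before programming the device.
 *
 *	unsigned long seq;
 *	int ret;
 *
 *again:
 *	seq = mmu_interval_read_begin(&mrange->notifier);
 *
 *	mmap_read_lock(mm);
 *	ret = my_collect_cpu_ptes(mrange);	// e.g. a page table walk
 *	mmap_read_unlock(mm);
 *	if (ret)
 *		return ret;
 *
 *	mutex_lock(&mrange->lock);
 *	if (mmu_interval_read_retry(&mrange->notifier, seq)) {
 *		// an invalidation ran in between; start over
 *		mutex_unlock(&mrange->lock);
 *		goto again;
 *	}
 *	my_device_map(mrange);			// no collision, result is valid
 *	mutex_unlock(&mrange->lock);
 */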

/**
 * mmu_interval_check_retry - Test if a collision has occurred
 * @interval_sub: The subscription
 * @seq: The return of the matching mmu_interval_read_begin()
 *
 * This can be used in the critical section between mmu_interval_read_begin()
 * and mmu_interval_read_retry().  A return of true indicates an invalidation
 * has collided with this critical region and a future
 * mmu_interval_read_retry() will return true.
 *
 * False is not reliable and only suggests a collision may not have
 * occurred. It can be called many times and does not have to hold the user
 * provided lock.
 *
 * This call can be used as part of loops and other expensive operations to
 * expedite a retry.
 */
static inline bool
mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
			 unsigned long seq)
{
	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
	return READ_ONCE(interval_sub->invalidate_seq) != seq;
}
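
/*
 * Illustrative use of mmu_interval_check_retry() (sketch, hypothetical
 * helper): bail out early from an expensive per-page loop once an
 * invalidation has already made the result useless; the final, authoritative
 * check is still mmu_interval_read_retry() under the lock.
 *
 *	for (i = 0; i < npages; i++) {
 *		if (mmu_interval_check_retry(&mrange->notifier, seq))
 *			return -EBUSY;	// caller restarts from read_begin()
 *		my_prepare_page(mrange, i);
 *	}
 */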

extern void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end);
extern int __mmu_notifier_clear_young(struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
				     unsigned long address);
extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r);
extern void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
					unsigned long start, unsigned long end);
extern bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_release(mm);
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_flush_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_clear_young(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_test_young(mm, address);
	return 0;
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	might_sleep();

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
		__mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}

/*
 * This version of mmu_notifier_invalidate_range_start() avoids blocking, but it
 * can return an error if a notifier can't proceed without blocking, in which
 * case you're not allowed to modify PTEs in the specified range.
 *
 * This is mainly intended for OOM handling.
 */
static inline int __must_check
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	int ret = 0;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
		ret = __mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	return ret;
}
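
/*
 * Illustrative caller-side sketch for the nonblocking variant (hypothetical
 * helper name): if any notifier would need to block, give up rather than
 * touch the page tables, in the style of the OOM reaper.
 *
 *	struct mmu_notifier_range range;
 *
 *	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, mm, start, end);
 *	if (mmu_notifier_invalidate_range_start_nonblock(&range))
 *		return false;	// -EAGAIN: skip this range entirely
 *	my_unmap_page_range(vma, start, end);
 *	mmu_notifier_invalidate_range_end(&range);
 *	return true;
 */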

static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
	if (mmu_notifier_range_blockable(range))
		might_sleep();

	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range);
}

static inline void mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
}

static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
	mm->notifier_subscriptions = NULL;
}

static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_subscriptions_destroy(mm);
}

static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
					   enum mmu_notifier_event event,
					   unsigned flags,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	range->event = event;
	range->mm = mm;
	range->start = start;
	range->end = end;
	range->flags = flags;
}

static inline void mmu_notifier_range_init_owner(
			struct mmu_notifier_range *range,
			enum mmu_notifier_event event, unsigned int flags,
			struct mm_struct *mm, unsigned long start,
			unsigned long end, void *owner)
{
	mmu_notifier_range_init(range, event, flags, mm, start, end);
	range->owner = owner;
}
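
/*
 * Illustrative caller-side sketch (hypothetical helper name): callers bracket
 * page table changes with a start/end pair built from an on-stack range, so
 * registered notifiers can drop their secondary mappings first.
 *
 *	struct mmu_notifier_range range;
 *
 *	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
 *				start, end);
 *	mmu_notifier_invalidate_range_start(&range);
 *	my_clear_ptes(vma, start, end);		// the actual page table update
 *	mmu_notifier_invalidate_range_end(&range);
 */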

#define ptep_clear_flush_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PMD_SIZE);	\
	__young;							\
})

#define ptep_clear_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PMD_SIZE);	\
	__young;							\
})

#else /* CONFIG_MMU_NOTIFIER */

struct mmu_notifier_range {
	unsigned long start;
	unsigned long end;
};

static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
					    unsigned long start,
					    unsigned long end)
{
	range->start = start;
	range->end = end;
}

#define mmu_notifier_range_init(range, event, flags, mm, start, end)  \
	_mmu_notifier_range_init(range, start, end)
#define mmu_notifier_range_init_owner(range, event, flags, mm, start, \
					end, owner) \
	_mmu_notifier_range_init(range, start, end)

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return true;
}

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return 0;
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end)
{
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	return 0;
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	return 0;
}

static inline
void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
}

static inline void mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
}

static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
}

#define mmu_notifier_range_update_to_read_only(r) false

#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush

static inline void mmu_notifier_synchronize(void)
{
}

#endif /* CONFIG_MMU_NOTIFIER */

#endif /* _LINUX_MMU_NOTIFIER_H */