/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/srcu.h>
#include <linux/android_kabi.h>

struct mmu_notifier;
struct mmu_notifier_ops;

/**
 * enum mmu_notifier_event - reason for the mmu notifier callback
 * @MMU_NOTIFY_UNMAP: either a munmap() that unmaps the range or a mremap()
 * that moves the range.
 *
 * @MMU_NOTIFY_CLEAR: clear page table entry (for many possible reasons, such
 * as madvise() or replacing a page with another one, ...).
 *
 * @MMU_NOTIFY_PROTECTION_VMA: the update is due to a protection change for
 * the range, i.e. using the vma access permission (vm_page_prot) to update
 * the whole range is enough; there is no need to inspect changes to the CPU
 * page table (mprotect() syscall).
 *
 * @MMU_NOTIFY_PROTECTION_PAGE: the update is due to a change in the
 * read/write flags of pages in the range, so to mirror those changes the user
 * must inspect the CPU page table (from the end callback).
 *
 * @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still the same page and the
 * same access flags). The user should soft dirty the page in the end callback
 * to make sure that anyone relying on soft dirtiness catches pages that might
 * be written through non-CPU mappings.
 */
enum mmu_notifier_event {
	MMU_NOTIFY_UNMAP = 0,
	MMU_NOTIFY_CLEAR,
	MMU_NOTIFY_PROTECTION_VMA,
	MMU_NOTIFY_PROTECTION_PAGE,
	MMU_NOTIFY_SOFT_DIRTY,
};

#ifdef CONFIG_MMU_NOTIFIER

#ifdef CONFIG_LOCKDEP
extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
#endif

/*
 * The mmu notifier_mm structure is allocated and installed in
 * mm->mmu_notifier_mm inside the mm_take_all_locks() protected
 * critical section and it's released only when mm_count reaches zero
 * in mmdrop().
 */
struct mmu_notifier_mm {
	/* all mmu notifiers registered in this mm are queued in this list */
	struct hlist_head list;
	/* to serialize the list modifications and hlist_unhashed */
	spinlock_t lock;
};

#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)

struct mmu_notifier_range {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;
	unsigned flags;
	enum mmu_notifier_event event;
};

struct mmu_notifier_ops {
	/*
	 * Called either by mmu_notifier_unregister or when the mm is
	 * being destroyed by exit_mmap, always before all pages are
	 * freed. This can run concurrently with other mmu notifier
	 * methods (the ones invoked outside the mm context) and it
	 * should tear down all secondary mmu mappings and freeze the
	 * secondary mmu. If this method isn't implemented you have to
	 * be sure that nothing could possibly write to the pages
	 * through the secondary mmu by the time the last thread with
	 * tsk->mm == mm exits.
	 *
	 * As a side note: the pages freed after ->release returns could
	 * be immediately reallocated by the gart at an alias physical
	 * address with a different cache model, so if ->release isn't
	 * implemented because all _software_ driven memory accesses
	 * through the secondary mmu are terminated by the time the
	 * last thread of this mm quits, you also have to be sure that
	 * speculative _hardware_ operations can't allocate dirty
	 * cachelines in the cpu that could not be snooped and made
	 * coherent with the other read and write operations happening
	 * through the gart alias address, so leading to memory
	 * corruption.
	 */
	void (*release)(struct mmu_notifier *mn,
			struct mm_struct *mm);

	/*
	 * clear_flush_young is called after the VM is
	 * test-and-clearing the young/accessed bitflag in the
	 * pte. This way the VM will provide proper aging to the
	 * accesses to the page through the secondary MMUs and not
	 * only to the ones through the Linux pte.
	 * Start-end is necessary in case the secondary MMU is mapping the page
	 * at a smaller granularity than the primary MMU.
	 */
	int (*clear_flush_young)(struct mmu_notifier *mn,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/*
	 * clear_young is a lightweight version of clear_flush_young. Like the
	 * latter, it is supposed to test-and-clear the young/accessed bitflag
	 * in the secondary pte, but it may omit flushing the secondary tlb.
	 */
	int (*clear_young)(struct mmu_notifier *mn,
			   struct mm_struct *mm,
			   unsigned long start,
			   unsigned long end);

	/*
	 * test_young is called to check the young/accessed bitflag in
	 * the secondary pte. This is used to know if the page is
	 * frequently used without actually clearing the flag or tearing
	 * down the secondary mapping on the page.
	 */
	int (*test_young)(struct mmu_notifier *mn,
			  struct mm_struct *mm,
			  unsigned long address);

	/*
	 * change_pte is called in cases where the pte mapping of a page is changed:
	 * for example, when ksm remaps pte to point to a new shared page.
	 */
	void (*change_pte)(struct mmu_notifier *mn,
			   struct mm_struct *mm,
			   unsigned long address,
			   pte_t pte);

	/*
	 * invalidate_range_start() and invalidate_range_end() must be
	 * paired and are called only when the mmap_sem and/or the
	 * locks protecting the reverse maps are held. If the subsystem
	 * can't guarantee that no additional references are taken to
	 * the pages in the range, it has to implement the
	 * invalidate_range() notifier to remove any references taken
	 * after invalidate_range_start().
	 *
	 * Invalidation of multiple concurrent ranges may be
	 * optionally permitted by the driver. Either way the
	 * establishment of sptes is forbidden in the range passed to
	 * invalidate_range_begin/end for the whole duration of the
	 * invalidate_range_begin/end critical section.
	 *
	 * invalidate_range_start() is called when all pages in the
	 * range are still mapped and have at least a refcount of one.
	 *
	 * invalidate_range_end() is called when all pages in the
	 * range have been unmapped and the pages have been freed by
	 * the VM.
	 *
	 * The VM will remove the page table entries and potentially
	 * the page between invalidate_range_start() and
	 * invalidate_range_end(). If the page must not be freed
	 * because of pending I/O or other circumstances then the
	 * invalidate_range_start() callback (or the initial mapping
	 * by the driver) must make sure that the refcount is kept
	 * elevated.
	 *
	 * If the driver increases the refcount when the pages are
	 * initially mapped into an address space then either
	 * invalidate_range_start() or invalidate_range_end() may
	 * decrease the refcount. If the refcount is decreased on
	 * invalidate_range_start() then the VM can free pages as page
	 * table entries are removed.  If the refcount is only
	 * dropped on invalidate_range_end() then the driver itself
	 * will drop the last refcount but it must take care to flush
	 * any secondary tlb before doing the final free on the
	 * page. Pages will no longer be referenced by the linux
	 * address space but may still be referenced by sptes until
	 * the last refcount is dropped.
	 *
	 * If blockable argument is set to false then the callback cannot
	 * sleep and has to return with -EAGAIN. 0 should be returned
	 * otherwise. Please note that if invalidate_range_start approves
	 * a non-blocking behavior then the same applies to
	 * invalidate_range_end.
	 *
	 */
	int (*invalidate_range_start)(struct mmu_notifier *mn,
				      const struct mmu_notifier_range *range);
	void (*invalidate_range_end)(struct mmu_notifier *mn,
				     const struct mmu_notifier_range *range);

	/*
	 * invalidate_range() is either called between
	 * invalidate_range_start() and invalidate_range_end() when the
	 * VM has to free pages that were unmapped, but before the
	 * pages are actually freed, or outside of _start()/_end() when
	 * a (remote) TLB is necessary.
	 *
	 * If invalidate_range() is used to manage a non-CPU TLB with
	 * shared page-tables, it is not necessary to implement the
	 * invalidate_range_start()/end() notifiers, as
	 * invalidate_range() already catches the points in time when an
	 * external TLB range needs to be flushed. For more in-depth
	 * discussion on this see Documentation/vm/mmu_notifier.rst
	 *
	 * Note that this function might be called with just a sub-range
	 * of what was passed to invalidate_range_start()/end(), if
	 * called between those functions.
	 */
	void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
				 unsigned long start, unsigned long end);

	/*
	 * These callbacks are used with the get/put interface to manage the
	 * lifetime of the mmu_notifier memory. alloc_notifier() returns a new
	 * notifier for use with the mm.
	 *
	 * free_notifier() is only called after the mmu_notifier has been
	 * fully put, calls to any ops callback are prevented and no ops
	 * callbacks are currently running. It is called from a SRCU callback
	 * and cannot sleep.
	 */
	struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
	void (*free_notifier)(struct mmu_notifier *mn);

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);
};
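
/*
 * Illustrative sketch (not part of the original header): a driver that
 * mirrors CPU page tables into a device MMU might wire up the ops roughly
 * like this. The names my_mirror and my_mirror_invalidate are hypothetical;
 * only the mmu_notifier_range_blockable()/-EAGAIN convention is taken from
 * the comments above. A real implementation would also have to prevent new
 * sptes from being established until invalidate_range_end() has run, e.g.
 * with a sequence count.
 *
 *	struct my_mirror {
 *		struct mmu_notifier notifier;
 *		struct mutex lock;
 *	};
 *
 *	static int my_invalidate_range_start(struct mmu_notifier *mn,
 *				const struct mmu_notifier_range *range)
 *	{
 *		struct my_mirror *m = container_of(mn, struct my_mirror,
 *						   notifier);
 *
 *		if (mmu_notifier_range_blockable(range))
 *			mutex_lock(&m->lock);
 *		else if (!mutex_trylock(&m->lock))
 *			return -EAGAIN;
 *		my_mirror_invalidate(m, range->start, range->end);
 *		mutex_unlock(&m->lock);
 *		return 0;
 *	}
 *
 *	static const struct mmu_notifier_ops my_mirror_ops = {
 *		.invalidate_range_start	= my_invalidate_range_start,
 *	};
 */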

/*
 * The notifier chains are protected by mmap_sem and/or the reverse map
 * semaphores. Notifier chains are only changed when all reverse maps and
 * the mmap_sem locks are taken.
 *
 * Therefore notifier chains can only be traversed when either
 *
 * 1. mmap_sem is held.
 * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
 * 3. No other concurrent thread can access the list (release)
 */
struct mmu_notifier {
	struct hlist_node hlist;
	const struct mmu_notifier_ops *ops;
	struct mm_struct *mm;
	struct rcu_head rcu;
	unsigned int users;

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
};

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return unlikely(mm->mmu_notifier_mm);
}

struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
					     struct mm_struct *mm);
static inline struct mmu_notifier *
mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
{
	struct mmu_notifier *ret;

	down_write(&mm->mmap_sem);
	ret = mmu_notifier_get_locked(ops, mm);
	up_write(&mm->mmap_sem);
	return ret;
}
void mmu_notifier_put(struct mmu_notifier *mn);
void mmu_notifier_synchronize(void);
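
/*
 * Illustrative sketch (not part of the original header): with the get/put
 * interface a driver embeds the mmu_notifier in its own per-mm state and
 * lets alloc_notifier()/free_notifier() (set in its mmu_notifier_ops) manage
 * that allocation. The my_mirror/my_mirror_ops names are hypothetical and
 * continue the sketch above.
 *
 *	static struct mmu_notifier *my_alloc_notifier(struct mm_struct *mm)
 *	{
 *		struct my_mirror *m = kzalloc(sizeof(*m), GFP_KERNEL);
 *
 *		if (!m)
 *			return ERR_PTR(-ENOMEM);
 *		mutex_init(&m->lock);
 *		return &m->notifier;
 *	}
 *
 *	static void my_free_notifier(struct mmu_notifier *mn)
 *	{
 *		kfree(container_of(mn, struct my_mirror, notifier));
 *	}
 *
 *	mn = mmu_notifier_get(&my_mirror_ops, current->mm);
 *	if (IS_ERR(mn))
 *		return PTR_ERR(mn);
 *	...
 *	mmu_notifier_put(mn);
 *	mmu_notifier_synchronize();	// e.g. before module unload
 */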

extern int mmu_notifier_register(struct mmu_notifier *mn,
				 struct mm_struct *mm);
extern int __mmu_notifier_register(struct mmu_notifier *mn,
				   struct mm_struct *mm);
extern void mmu_notifier_unregister(struct mmu_notifier *mn,
				    struct mm_struct *mm);
extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end);
extern int __mmu_notifier_clear_young(struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
				     unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
				      unsigned long address, pte_t pte);
extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
				  bool only_end);
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end);
extern bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_release(mm);
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_flush_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_clear_young(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_test_young(mm, address);
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_change_pte(mm, address, pte);
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	might_sleep();

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
		__mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	int ret = 0;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
		ret = __mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	return ret;
}

static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
	if (mmu_notifier_range_blockable(range))
		might_sleep();

	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range, false);
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range, true);
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range(mm, start, end);
}

static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
	mm->mmu_notifier_mm = NULL;
}

static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_mm_destroy(mm);
}


static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
					   enum mmu_notifier_event event,
					   unsigned flags,
					   struct vm_area_struct *vma,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	range->vma = vma;
	range->event = event;
	range->mm = mm;
	range->start = start;
	range->end = end;
	range->flags = flags;
}
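
/*
 * Illustrative sketch (not part of the original header): the common caller
 * pattern in the core mm is to describe the affected range, bracket the page
 * table changes with start/end, and use the _nonblock variant when the
 * context must not sleep. Roughly:
 *
 *	struct mmu_notifier_range range;
 *
 *	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
 *				start, end);
 *	mmu_notifier_invalidate_range_start(&range);
 *	... clear/replace the page table entries for [start, end) ...
 *	mmu_notifier_invalidate_range_end(&range);
 *
 * A caller that cannot sleep uses
 * mmu_notifier_invalidate_range_start_nonblock() instead and must back off
 * if it returns -EAGAIN, i.e. if some registered notifier refused to run in
 * non-blocking mode.
 */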

#define ptep_clear_flush_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PMD_SIZE);	\
	__young;							\
})

#define ptep_clear_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PMD_SIZE);	\
	__young;							\
})
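
/*
 * Illustrative use (not part of the original header): page-aging code such
 * as the rmap walk behind page_referenced() combines the CPU young bit with
 * any secondary-MMU young bits through these wrappers, roughly:
 *
 *	if (ptep_clear_flush_young_notify(vma, address, pte))
 *		referenced++;
 */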

#define	ptep_clear_flush_notify(__vma, __address, __ptep)		\
({									\
	unsigned long ___addr = __address & PAGE_MASK;			\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pte_t ___pte;							\
									\
	___pte = ptep_clear_flush(__vma, __address, __ptep);		\
	mmu_notifier_invalidate_range(___mm, ___addr,			\
					___addr + PAGE_SIZE);		\
									\
	___pte;								\
})

#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pmd_t ___pmd;							\
									\
	___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PMD_SIZE);	\
									\
	___pmd;								\
})

#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PUD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pud_t ___pud;							\
									\
	___pud = pudp_huge_clear_flush(__vma, __haddr, __pud);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PUD_SIZE);	\
									\
	___pud;								\
})

/*
 * set_pte_at_notify() sets the pte _after_ running the notifier.
 * This is safe to start by updating the secondary MMUs, because the primary MMU
 * pte invalidate must have already happened with a ptep_clear_flush() before
 * set_pte_at_notify() has been invoked.  Updating the secondary MMUs first is
 * required when we change both the protection of the mapping from read-only to
 * read-write and the pfn (like during copy on write page faults). Otherwise the
 * old page would remain mapped readonly in the secondary MMUs after the new
 * page is already writable by some CPU through the primary MMU.
 */
#define set_pte_at_notify(__mm, __address, __ptep, __pte)		\
({									\
	struct mm_struct *___mm = __mm;					\
	unsigned long ___address = __address;				\
	pte_t ___pte = __pte;						\
									\
	mmu_notifier_change_pte(___mm, ___address, ___pte);		\
	set_pte_at(___mm, ___address, __ptep, ___pte);			\
})
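
/*
 * Illustrative ordering sketch (not part of the original header), in the
 * spirit of the copy-on-write fault path the comment above refers to: the
 * primary pte is cleared and flushed first, then the secondary MMUs are
 * pointed at the new page via change_pte before the new primary pte is
 * installed. new_page/ptep/addr are placeholders, and rmap/LRU handling is
 * omitted.
 *
 *	pte_t entry = mk_pte(new_page, vma->vm_page_prot);
 *
 *	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 *	ptep_clear_flush_notify(vma, addr, ptep);
 *	set_pte_at_notify(vma->vm_mm, addr, ptep, entry);
 */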

#else /* CONFIG_MMU_NOTIFIER */

struct mmu_notifier_range {
	unsigned long start;
	unsigned long end;
};

static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
					    unsigned long start,
					    unsigned long end)
{
	range->start = start;
	range->end = end;
}

#define mmu_notifier_range_init(range,event,flags,vma,mm,start,end)  \
	_mmu_notifier_range_init(range, start, end)

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return true;
}

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return 0;
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end)
{
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	return 0;
}

static inline
void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
}

static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
}

#define mmu_notifier_range_update_to_read_only(r) false

#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define	ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define set_pte_at_notify set_pte_at

static inline void mmu_notifier_synchronize(void)
{
}

#endif /* CONFIG_MMU_NOTIFIER */

#endif /* _LINUX_MMU_NOTIFIER_H */