/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
#include <linux/srcu.h>
#include <linux/interval_tree.h>
#include <linux/android_kabi.h>

struct mmu_notifier_subscriptions;
struct mmu_notifier;
struct mmu_notifier_range;
struct mmu_interval_notifier;

/**
 * enum mmu_notifier_event - reason for the mmu notifier callback
 * @MMU_NOTIFY_UNMAP: either a munmap() that unmaps the range or a mremap()
 * that moves the range.
 *
 * @MMU_NOTIFY_CLEAR: clear page table entry (many reasons for this, like
 * madvise() or replacing a page by another one, ...).
 *
 * @MMU_NOTIFY_PROTECTION_VMA: update is due to a protection change for the
 * range, i.e. using the vma access permission (vm_page_prot) to update the
 * whole range is enough; there is no need to inspect changes to the CPU page
 * table (mprotect() syscall).
 *
 * @MMU_NOTIFY_PROTECTION_PAGE: update is due to a change in the read/write
 * flag for pages in the range, so to mirror those changes the user must
 * inspect the CPU page table (from the end callback).
 *
 * @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still the same page and the
 * same access flags). The user should soft dirty the page in the end callback
 * to make sure that anyone relying on soft dirtiness catches pages that might
 * be written through non-CPU mappings.
 *
 * @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal
 * that the mm refcount is zero and the range is no longer accessible.
 *
 * @MMU_NOTIFY_MIGRATE: used during migrate_vma_collect() invalidate to signal
 * a device driver that it may ignore the invalidation if the owner field
 * matches the driver's device private pgmap owner.
 *
 * @MMU_NOTIFY_EXCLUSIVE: to signal a device driver that the device will no
 * longer have exclusive access to the page. When sent during creation of an
 * exclusive range the owner will be initialised to the value provided by the
 * caller of make_device_exclusive_range(); otherwise the owner will be NULL.
 */
enum mmu_notifier_event {
        MMU_NOTIFY_UNMAP = 0,
        MMU_NOTIFY_CLEAR,
        MMU_NOTIFY_PROTECTION_VMA,
        MMU_NOTIFY_PROTECTION_PAGE,
        MMU_NOTIFY_SOFT_DIRTY,
        MMU_NOTIFY_RELEASE,
        MMU_NOTIFY_MIGRATE,
        MMU_NOTIFY_EXCLUSIVE,
};
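
/*
 * Example (illustrative sketch, not part of this header): a hypothetical
 * device driver can use the event and owner fields to filter invalidations
 * it does not care about, e.g. to skip invalidations caused by its own
 * device-private page migrations.  "my_pgmap_owner" and
 * "my_invalidate_device_ptes" are assumed driver-side names.
 *
 *	static bool my_invalidate(struct mmu_interval_notifier *sub,
 *				  const struct mmu_notifier_range *range,
 *				  unsigned long cur_seq)
 *	{
 *		if (range->event == MMU_NOTIFY_MIGRATE &&
 *		    range->owner == my_pgmap_owner)
 *			return true;
 *		return my_invalidate_device_ptes(sub, range, cur_seq);
 *	}
 */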

#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)

struct mmu_notifier_ops {
        /*
         * Called either by mmu_notifier_unregister or when the mm is
         * being destroyed by exit_mmap, always before all pages are
         * freed. This can run concurrently with other mmu notifier
         * methods (the ones invoked outside the mm context) and it
         * should tear down all secondary mmu mappings and freeze the
         * secondary mmu. If this method isn't implemented you have to
         * be sure that nothing could possibly write to the pages
         * through the secondary mmu by the time the last thread with
         * tsk->mm == mm exits.
         *
         * As a side note: the pages freed after ->release returns could
         * be immediately reallocated by the gart at an alias physical
         * address with a different cache model, so if ->release isn't
         * implemented because all _software_ driven memory accesses
         * through the secondary mmu are terminated by the time the
         * last thread of this mm quits, you also have to be sure that
         * speculative _hardware_ operations can't allocate dirty
         * cachelines in the cpu that could not be snooped and made
         * coherent with the other read and write operations happening
         * through the gart alias address, thus leading to memory
         * corruption.
         */
        void (*release)(struct mmu_notifier *subscription,
                        struct mm_struct *mm);

        /*
         * clear_flush_young is called after the VM test-and-clears the
         * young/accessed bitflag in the pte. This way the VM will provide
         * proper aging for accesses to the page through the secondary MMUs
         * and not only for the ones through the Linux pte.
         * Start-end is necessary in case the secondary MMU is mapping the page
         * at a smaller granularity than the primary MMU.
         */
        int (*clear_flush_young)(struct mmu_notifier *subscription,
                                 struct mm_struct *mm,
                                 unsigned long start,
                                 unsigned long end);

        /*
         * clear_young is a lightweight version of clear_flush_young. Like the
         * latter, it is supposed to test-and-clear the young/accessed bitflag
         * in the secondary pte, but it may omit flushing the secondary tlb.
         */
        int (*clear_young)(struct mmu_notifier *subscription,
                           struct mm_struct *mm,
                           unsigned long start,
                           unsigned long end);

        /*
         * test_young is called to check the young/accessed bitflag in
         * the secondary pte. This is used to know if the page is
         * frequently used without actually clearing the flag or tearing
         * down the secondary mapping on the page.
         */
        int (*test_young)(struct mmu_notifier *subscription,
                          struct mm_struct *mm,
                          unsigned long address);

        /*
         * change_pte is called in cases where a pte mapping to a page is
         * changed: for example, when ksm remaps a pte to point to a new
         * shared page.
         */
        void (*change_pte)(struct mmu_notifier *subscription,
                           struct mm_struct *mm,
                           unsigned long address,
                           pte_t pte);

        /*
         * invalidate_range_start() and invalidate_range_end() must be
         * paired and are called only when the mmap_lock and/or the
         * locks protecting the reverse maps are held. If the subsystem
         * can't guarantee that no additional references are taken to
         * the pages in the range, it has to implement the
         * invalidate_range() notifier to remove any references taken
         * after invalidate_range_start().
         *
         * Invalidation of multiple concurrent ranges may be
         * optionally permitted by the driver. Either way the
         * establishment of sptes is forbidden in the range passed to
         * invalidate_range_start/end for the whole duration of the
         * invalidate_range_start/end critical section.
         *
         * invalidate_range_start() is called when all pages in the
         * range are still mapped and have at least a refcount of one.
         *
         * invalidate_range_end() is called when all pages in the
         * range have been unmapped and the pages have been freed by
         * the VM.
         *
         * The VM will remove the page table entries and potentially
         * the page between invalidate_range_start() and
         * invalidate_range_end(). If the page must not be freed
         * because of pending I/O or other circumstances then the
         * invalidate_range_start() callback (or the initial mapping
         * by the driver) must make sure that the refcount is kept
         * elevated.
         *
         * If the driver increases the refcount when the pages are
         * initially mapped into an address space then either
         * invalidate_range_start() or invalidate_range_end() may
         * decrease the refcount. If the refcount is decreased on
         * invalidate_range_start() then the VM can free pages as page
         * table entries are removed. If the refcount is only
         * dropped on invalidate_range_end() then the driver itself
         * will drop the last refcount, but it must take care to flush
         * any secondary tlb before doing the final free on the
         * page. Pages will no longer be referenced by the linux
         * address space but may still be referenced by sptes until
         * the last refcount is dropped.
         *
         * If the blockable argument is set to false then the callback cannot
         * sleep and has to return -EAGAIN if sleeping would be required.
         * 0 should be returned otherwise. Please note that notifiers that can
         * fail invalidate_range_start are not allowed to implement
         * invalidate_range_end, as there is no mechanism for informing the
         * notifier that its start failed.
         */
        int (*invalidate_range_start)(struct mmu_notifier *subscription,
                                      const struct mmu_notifier_range *range);
        void (*invalidate_range_end)(struct mmu_notifier *subscription,
                                     const struct mmu_notifier_range *range);

        /*
         * arch_invalidate_secondary_tlbs() is used to manage a non-CPU TLB
         * which shares page-tables with the CPU. The
         * invalidate_range_start()/end() callbacks should not be implemented
         * as arch_invalidate_secondary_tlbs() already catches the points in
         * time when an external TLB needs to be flushed.
         *
         * This requires arch_invalidate_secondary_tlbs() to be called while
         * holding the ptl spin-lock and therefore this callback is not allowed
         * to sleep.
         *
         * This is called by architecture code whenever invalidating a TLB
         * entry. It is assumed that any secondary TLB has the same rules for
         * when invalidations are required. If this is not the case,
         * architecture code will need to call this explicitly when required
         * for secondary TLB invalidation.
         */
        void (*arch_invalidate_secondary_tlbs)(
                                        struct mmu_notifier *subscription,
                                        struct mm_struct *mm,
                                        unsigned long start,
                                        unsigned long end);

        /*
         * These callbacks are used with the get/put interface to manage the
         * lifetime of the mmu_notifier memory. alloc_notifier() returns a new
         * notifier for use with the mm.
         *
         * free_notifier() is only called after the mmu_notifier has been
         * fully put, calls to any ops callback are prevented and no ops
         * callbacks are currently running. It is called from a SRCU callback
         * and cannot sleep.
         */
        struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
        void (*free_notifier)(struct mmu_notifier *subscription);

        ANDROID_KABI_RESERVE(1);
        ANDROID_KABI_RESERVE(2);
        ANDROID_KABI_RESERVE(3);
        ANDROID_KABI_RESERVE(4);
};
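
/*
 * Example (illustrative sketch, not part of this header): a hypothetical
 * driver honouring the blockable contract of invalidate_range_start().
 * "struct my_ctx", its "lock" rwsem and "my_unmap_range()" are assumed
 * driver-side names.  When the range is not blockable the driver returns
 * -EAGAIN instead of sleeping; such a notifier must not implement
 * invalidate_range_end(), as explained above.
 *
 *	static int my_invalidate_range_start(struct mmu_notifier *sub,
 *				const struct mmu_notifier_range *range)
 *	{
 *		struct my_ctx *ctx = container_of(sub, struct my_ctx, notifier);
 *
 *		if (!mmu_notifier_range_blockable(range))
 *			return -EAGAIN;
 *		down_write(&ctx->lock);
 *		my_unmap_range(ctx, range->start, range->end);
 *		up_write(&ctx->lock);
 *		return 0;
 *	}
 */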

/*
 * The notifier chains are protected by the mmap_lock and/or the reverse map
 * semaphores. Notifier chains are only changed when all reverse map locks and
 * the mmap_lock are taken.
 *
 * Therefore notifier chains can only be traversed when either
 *
 * 1. mmap_lock is held.
 * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
 * 3. No other concurrent thread can access the list (release).
 */
struct mmu_notifier {
        struct hlist_node hlist;
        const struct mmu_notifier_ops *ops;
        struct mm_struct *mm;
        struct rcu_head rcu;
        unsigned int users;

        ANDROID_KABI_RESERVE(1);
        ANDROID_KABI_RESERVE(2);
};

/**
 * struct mmu_interval_notifier_ops
 * @invalidate: Upon return the caller must stop using any SPTEs within this
 *              range. This function can sleep. Return false only if sleeping
 *              was required but mmu_notifier_range_blockable(range) is false.
 */
struct mmu_interval_notifier_ops {
        bool (*invalidate)(struct mmu_interval_notifier *interval_sub,
                           const struct mmu_notifier_range *range,
                           unsigned long cur_seq);
};
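
/*
 * Example (illustrative sketch, not part of this header): the usual shape of
 * an invalidate() implementation.  "struct my_mirror", its "lock" mutex and
 * "my_zap_device_ptes()" are assumed driver-side names; "lock" is the same
 * lock the driver holds around mmu_interval_read_retry() on the read side.
 *
 *	static bool my_invalidate(struct mmu_interval_notifier *sub,
 *				  const struct mmu_notifier_range *range,
 *				  unsigned long cur_seq)
 *	{
 *		struct my_mirror *m = container_of(sub, struct my_mirror,
 *						   notifier);
 *
 *		if (mmu_notifier_range_blockable(range))
 *			mutex_lock(&m->lock);
 *		else if (!mutex_trylock(&m->lock))
 *			return false;
 *		mmu_interval_set_seq(sub, cur_seq);
 *		my_zap_device_ptes(m, range->start, range->end);
 *		mutex_unlock(&m->lock);
 *		return true;
 *	}
 */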

struct mmu_interval_notifier {
        struct interval_tree_node interval_tree;
        const struct mmu_interval_notifier_ops *ops;
        struct mm_struct *mm;
        struct hlist_node deferred_item;
        unsigned long invalidate_seq;
};

#ifdef CONFIG_MMU_NOTIFIER

#ifdef CONFIG_LOCKDEP
extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
#endif

struct mmu_notifier_range {
        struct mm_struct *mm;
        unsigned long start;
        unsigned long end;
        unsigned flags;
        enum mmu_notifier_event event;
        void *owner;
};

static inline int mm_has_notifiers(struct mm_struct *mm)
{
        return unlikely(mm->notifier_subscriptions);
}

struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
                                             struct mm_struct *mm);
static inline struct mmu_notifier *
mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
{
        struct mmu_notifier *ret;

        mmap_write_lock(mm);
        ret = mmu_notifier_get_locked(ops, mm);
        mmap_write_unlock(mm);
        return ret;
}
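
/*
 * Example (illustrative sketch, not part of this header): typical lifetime
 * handling with the get/put interface, assuming a driver-provided "my_ops"
 * that implements alloc_notifier() and free_notifier().  Repeated gets on
 * the same mm return the same reference-counted subscription.
 *
 *	struct mmu_notifier *sub = mmu_notifier_get(&my_ops, current->mm);
 *
 *	if (IS_ERR(sub))
 *		return PTR_ERR(sub);
 *	ctx = container_of(sub, struct my_ctx, notifier);
 *	(use ctx, then drop the reference)
 *	mmu_notifier_put(sub);
 *
 * Modular users of free_notifier() should also call mmu_notifier_synchronize()
 * on module unload, after all puts, so the ops memory is no longer referenced.
 */
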
void mmu_notifier_put(struct mmu_notifier *subscription);
void mmu_notifier_synchronize(void);

extern int mmu_notifier_register(struct mmu_notifier *subscription,
                                 struct mm_struct *mm);
extern int __mmu_notifier_register(struct mmu_notifier *subscription,
                                   struct mm_struct *mm);
extern void mmu_notifier_unregister(struct mmu_notifier *subscription,
                                    struct mm_struct *mm);

unsigned long
mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub);
int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
                                 struct mm_struct *mm, unsigned long start,
                                 unsigned long length,
                                 const struct mmu_interval_notifier_ops *ops);
int mmu_interval_notifier_insert_locked(
        struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
        unsigned long start, unsigned long length,
        const struct mmu_interval_notifier_ops *ops);
void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub);

/**
 * mmu_interval_set_seq - Save the invalidation sequence
 * @interval_sub: The subscription passed to invalidate
 * @cur_seq: The cur_seq passed to the invalidate() callback
 *
 * This must be called unconditionally from the invalidate callback of a
 * struct mmu_interval_notifier_ops under the same lock that is used to call
 * mmu_interval_read_retry(). It updates the sequence number for later use by
 * mmu_interval_read_retry(). The provided cur_seq will always be odd.
 *
 * If the caller does not call mmu_interval_read_begin() or
 * mmu_interval_read_retry() then this call is not required.
 */
static inline void
mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
                     unsigned long cur_seq)
{
        WRITE_ONCE(interval_sub->invalidate_seq, cur_seq);
}

/**
 * mmu_interval_read_retry - End a read side critical section against a VA range
 * @interval_sub: The subscription
 * @seq: The return of the paired mmu_interval_read_begin()
 *
 * This MUST be called under a user provided lock that is also held
 * unconditionally by op->invalidate() when it calls mmu_interval_set_seq().
 *
 * Each call should be paired with a single mmu_interval_read_begin() and
 * should be used to conclude the read side.
 *
 * Returns true if an invalidation collided with this critical section, and
 * the caller should retry.
 */
static inline bool
mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
                        unsigned long seq)
{
        return interval_sub->invalidate_seq != seq;
}
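
/*
 * Example (illustrative sketch, assumed driver names as above): the read side
 * pairs mmu_interval_read_begin() with mmu_interval_read_retry() under the
 * same driver lock that invalidate() takes before mmu_interval_set_seq():
 *
 *	again:
 *		seq = mmu_interval_read_begin(&m->notifier);
 *		(walk the CPU page tables, e.g. with hmm_range_fault())
 *		mutex_lock(&m->lock);
 *		if (mmu_interval_read_retry(&m->notifier, seq)) {
 *			mutex_unlock(&m->lock);
 *			goto again;
 *		}
 *		(establish or update the device mapping)
 *		mutex_unlock(&m->lock);
 */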

/**
 * mmu_interval_check_retry - Test if a collision has occurred
 * @interval_sub: The subscription
 * @seq: The return of the matching mmu_interval_read_begin()
 *
 * This can be used in the critical section between mmu_interval_read_begin()
 * and mmu_interval_read_retry(). A return of true indicates an invalidation
 * has collided with this critical region and a future
 * mmu_interval_read_retry() will return true.
 *
 * False is not reliable and only suggests a collision may not have
 * occurred. It can be called many times and does not have to hold the user
 * provided lock.
 *
 * This call can be used as part of loops and other expensive operations to
 * expedite a retry.
 */
static inline bool
mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
                         unsigned long seq)
{
        /* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
        return READ_ONCE(interval_sub->invalidate_seq) != seq;
}
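
/*
 * Example (illustrative sketch): a long-running population loop can poll
 * mmu_interval_check_retry() to abandon work early instead of only
 * discovering the collision at the final mmu_interval_read_retry():
 *
 *	for (addr = start; addr < end; addr += PAGE_SIZE) {
 *		if (mmu_interval_check_retry(&m->notifier, seq))
 *			break;
 *		(fault in or copy one page)
 *	}
 */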

extern void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
                                            unsigned long start,
                                            unsigned long end);
extern int __mmu_notifier_clear_young(struct mm_struct *mm,
                                      unsigned long start,
                                      unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
                                     unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
                                      unsigned long address, pte_t pte);
extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r);
extern void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
                                                          unsigned long start,
                                                          unsigned long end);
extern bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
        return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
        if (mm_has_notifiers(mm))
                __mmu_notifier_release(mm);
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
                                                 unsigned long start,
                                                 unsigned long end)
{
        if (mm_has_notifiers(mm))
                return __mmu_notifier_clear_flush_young(mm, start, end);
        return 0;
}

static inline int mmu_notifier_clear_young(struct mm_struct *mm,
                                           unsigned long start,
                                           unsigned long end)
{
        if (mm_has_notifiers(mm))
                return __mmu_notifier_clear_young(mm, start, end);
        return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
                                          unsigned long address)
{
        if (mm_has_notifiers(mm))
                return __mmu_notifier_test_young(mm, address);
        return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
                                           unsigned long address, pte_t pte)
{
        if (mm_has_notifiers(mm))
                __mmu_notifier_change_pte(mm, address, pte);
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
        might_sleep();

        lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
        if (mm_has_notifiers(range->mm)) {
                range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
                __mmu_notifier_invalidate_range_start(range);
        }
        lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
        int ret = 0;

        lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
        if (mm_has_notifiers(range->mm)) {
                range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
                ret = __mmu_notifier_invalidate_range_start(range);
        }
        lock_map_release(&__mmu_notifier_invalidate_range_start_map);
        return ret;
}
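
/*
 * Example (illustrative sketch): callers that must not sleep, such as the
 * oom reaper, use the nonblocking variant and simply give up on the range
 * when a notifier would have to block:
 *
 *	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, mm, start, end);
 *	if (mmu_notifier_invalidate_range_start_nonblock(&range))
 *		return false;
 *	(unmap the range without sleeping)
 *	mmu_notifier_invalidate_range_end(&range);
 */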

static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
        if (mmu_notifier_range_blockable(range))
                might_sleep();

        if (mm_has_notifiers(range->mm))
                __mmu_notifier_invalidate_range_end(range);
}

static inline void mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
                                        unsigned long start, unsigned long end)
{
        if (mm_has_notifiers(mm))
                __mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
}

static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
        mm->notifier_subscriptions = NULL;
}

static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
        if (mm_has_notifiers(mm))
                __mmu_notifier_subscriptions_destroy(mm);
}

static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
                                           enum mmu_notifier_event event,
                                           unsigned flags,
                                           struct mm_struct *mm,
                                           unsigned long start,
                                           unsigned long end)
{
        range->event = event;
        range->mm = mm;
        range->start = start;
        range->end = end;
        range->flags = flags;
}

static inline void mmu_notifier_range_init_owner(
                        struct mmu_notifier_range *range,
                        enum mmu_notifier_event event, unsigned int flags,
                        struct mm_struct *mm, unsigned long start,
                        unsigned long end, void *owner)
{
        mmu_notifier_range_init(range, event, flags, mm, start, end);
        range->owner = owner;
}
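
/*
 * Example (illustrative sketch): core mm code brackets a page table update
 * with a range notification so that drivers mirroring the address space see
 * start() before any PTE changes and end() once the update is complete:
 *
 *	struct mmu_notifier_range range;
 *
 *	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, start, end);
 *	mmu_notifier_invalidate_range_start(&range);
 *	(clear or rewrite the PTEs in [start, end))
 *	mmu_notifier_invalidate_range_end(&range);
 */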

#define ptep_clear_flush_young_notify(__vma, __address, __ptep)        \
({                                                                      \
        int __young;                                                    \
        struct vm_area_struct *___vma = __vma;                          \
        unsigned long ___address = __address;                           \
        __young = ptep_clear_flush_young(___vma, ___address, __ptep);  \
        __young |= mmu_notifier_clear_flush_young(___vma->vm_mm,       \
                                                  ___address,          \
                                                  ___address +         \
                                                        PAGE_SIZE);    \
        __young;                                                        \
})

#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)        \
({                                                                      \
        int __young;                                                    \
        struct vm_area_struct *___vma = __vma;                          \
        unsigned long ___address = __address;                           \
        __young = pmdp_clear_flush_young(___vma, ___address, __pmdp);  \
        __young |= mmu_notifier_clear_flush_young(___vma->vm_mm,       \
                                                  ___address,          \
                                                  ___address +         \
                                                        PMD_SIZE);     \
        __young;                                                        \
})

#define ptep_clear_young_notify(__vma, __address, __ptep)              \
({                                                                      \
        int __young;                                                    \
        struct vm_area_struct *___vma = __vma;                          \
        unsigned long ___address = __address;                           \
        __young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
        __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
                                            ___address + PAGE_SIZE);   \
        __young;                                                        \
})

#define pmdp_clear_young_notify(__vma, __address, __pmdp)              \
({                                                                      \
        int __young;                                                    \
        struct vm_area_struct *___vma = __vma;                          \
        unsigned long ___address = __address;                           \
        __young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
        __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
                                            ___address + PMD_SIZE);    \
        __young;                                                        \
})

/*
 * set_pte_at_notify() sets the pte _after_ running the notifier.
 * This is safe to start by updating the secondary MMUs, because the primary
 * MMU pte invalidate must have already happened with a ptep_clear_flush()
 * before set_pte_at_notify() has been invoked. Updating the secondary MMUs
 * first is required when we change both the protection of the mapping from
 * read-only to read-write and the pfn (like during copy on write page
 * faults). Otherwise the old page would remain mapped readonly in the
 * secondary MMUs after the new page is already writable by some CPU through
 * the primary MMU.
 */
#define set_pte_at_notify(__mm, __address, __ptep, __pte)              \
({                                                                      \
        struct mm_struct *___mm = __mm;                                 \
        unsigned long ___address = __address;                           \
        pte_t ___pte = __pte;                                           \
                                                                        \
        mmu_notifier_change_pte(___mm, ___address, ___pte);             \
        set_pte_at(___mm, ___address, __ptep, ___pte);                  \
})

#else /* CONFIG_MMU_NOTIFIER */

struct mmu_notifier_range {
        unsigned long start;
        unsigned long end;
};

static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
                                            unsigned long start,
                                            unsigned long end)
{
        range->start = start;
        range->end = end;
}

#define mmu_notifier_range_init(range, event, flags, mm, start, end)   \
        _mmu_notifier_range_init(range, start, end)
#define mmu_notifier_range_init_owner(range, event, flags, mm, start,  \
                                      end, owner)                      \
        _mmu_notifier_range_init(range, start, end)

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
        return true;
}

static inline int mm_has_notifiers(struct mm_struct *mm)
{
        return 0;
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
                                                 unsigned long start,
                                                 unsigned long end)
{
        return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
                                          unsigned long address)
{
        return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
                                           unsigned long address, pte_t pte)
{
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
        return 0;
}

static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
}

static inline void mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
                                        unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
}

static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
}

#define mmu_notifier_range_update_to_read_only(r) false

#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define set_pte_at_notify set_pte_at

static inline void mmu_notifier_synchronize(void)
{
}

#endif /* CONFIG_MMU_NOTIFIER */

#endif /* _LINUX_MMU_NOTIFIER_H */