
Lines Matching +full:can +full:- +full:secondary

1 /* SPDX-License-Identifier: GPL-2.0 */
18 * enum mmu_notifier_event - reason for the mmu notifier callback
62 * freed. This can run concurrently with other mmu notifier
64 * should tear down all secondary mmu mappings and freeze the
65 * secondary mmu. If this method isn't implemented you have to
67 * through the secondary mmu by the time the last thread with
68 * tsk->mm == mm exits.
70 * As a side note: the pages freed after ->release returns could
72 * address with a different cache model, so if ->release isn't
74 * through the secondary mmu are terminated by the time the
76 * speculative _hardware_ operations can't allocate dirty
87 * test-and-clearing the young/accessed bitflag in the
89 * accesses to the page through the secondary MMUs and not
91 * Start-end is necessary in case the secondary MMU is mapping the page
101 * latter, it is supposed to test-and-clear the young/accessed bitflag
102 * in the secondary pte, but it may omit flushing the secondary tlb.
111 * the secondary pte. This is used to know if the page is
113 * down the secondary mapping on the page.
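
The ->release and young-bit callbacks described above can be sketched as a minimal secondary-MMU driver. Everything named my_dev_* below is hypothetical (including the two helpers it calls); only struct mmu_notifier, struct mmu_notifier_ops and mmu_notifier_register() come from this header:

        #include <linux/mmu_notifier.h>
        #include <linux/mutex.h>

        struct my_dev {
                struct mmu_notifier notifier;
                struct mutex lock;              /* protects the device page tables */
        };

        static void my_dev_release(struct mmu_notifier *subscription,
                                   struct mm_struct *mm)
        {
                struct my_dev *dev = container_of(subscription, struct my_dev, notifier);

                /* Tear down every secondary mapping and freeze the device MMU. */
                my_dev_unmap_all(dev);                  /* hypothetical helper */
        }

        static int my_dev_clear_flush_young(struct mmu_notifier *subscription,
                                            struct mm_struct *mm,
                                            unsigned long start, unsigned long end)
        {
                struct my_dev *dev = container_of(subscription, struct my_dev, notifier);

                /*
                 * Test-and-clear the accessed bits in the device page tables for
                 * [start, end) and flush the device TLB; report whether any page
                 * in the range was referenced through the device.
                 */
                return my_dev_age_range(dev, start, end);   /* hypothetical helper */
        }

        static const struct mmu_notifier_ops my_dev_notifier_ops = {
                .release                = my_dev_release,
                .clear_flush_young      = my_dev_clear_flush_young,
        };

        static int my_dev_attach(struct my_dev *dev, struct mm_struct *mm)
        {
                mutex_init(&dev->lock);
                dev->notifier.ops = &my_dev_notifier_ops;
                return mmu_notifier_register(&dev->notifier, mm);
        }
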
132 * can't guarantee that no additional references are taken to
162 * invalidate_range_start() then the VM can free pages as page
166 * any secondary tlb before doing the final free on the
172 * sleep and has to return with -EAGAIN if sleeping would be required.
173 * 0 should be returned otherwise. Please note that notifiers that can
190 * If invalidate_range() is used to manage a non-CPU TLB with
191 * shared page-tables, it is not necessary to implement the
197 * Note that this function might be called with just a sub-range
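
A sketch of an ->invalidate_range_start() that honors the -EAGAIN contract above, continuing the hypothetical my_dev example: when the range is not blockable the callback must not sleep, so it only trylocks and otherwise asks the caller to retry:

        static int my_dev_invalidate_range_start(struct mmu_notifier *subscription,
                                                 const struct mmu_notifier_range *range)
        {
                struct my_dev *dev = container_of(subscription, struct my_dev, notifier);

                if (mmu_notifier_range_blockable(range)) {
                        mutex_lock(&dev->lock);
                } else if (!mutex_trylock(&dev->lock)) {
                        /* Not allowed to sleep: ask the caller to retry later. */
                        return -EAGAIN;
                }

                /* Shoot down the secondary TLB for [range->start, range->end). */
                my_dev_unmap_range(dev, range->start, range->end);  /* hypothetical */
                mutex_unlock(&dev->lock);
                return 0;
        }
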
225 * Therefore notifier chains can only be traversed when either
228 * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
229 * 3. No other concurrent thread can access the list (release)
242 * range. This function can sleep. Return false only if sleeping
277 return unlikely(mm->notifier_subscriptions); in mm_has_notifiers()
315 * mmu_interval_set_seq - Save the invalidation sequence
316 * @interval_sub - The subscription passed to invalidate
317 * @cur_seq - The cur_seq passed to the invalidate() callback
331 WRITE_ONCE(interval_sub->invalidate_seq, cur_seq); in mmu_interval_set_seq()
335 * mmu_interval_read_retry - End a read side critical section against a VA range
340 * unconditionally by op->invalidate() when it calls mmu_interval_set_seq().
352 return interval_sub->invalidate_seq != seq; in mmu_interval_read_retry()
356 * mmu_interval_check_retry - Test if a collision has occurred
360 * This can be used in the critical section between mmu_interval_read_begin()
366 * occurred. It can be called many times and does not have to hold the user
369 * This call can be used as part of loops and other expensive operations to
377 return READ_ONCE(interval_sub->invalidate_seq) != seq; in mmu_interval_check_retry()
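
The interval-notifier collision/retry interplay described above is typically used as below. This is a sketch; everything named my_mirror_* is hypothetical, while the mmu_interval_* and mmu_notifier_range_blockable() calls are the ones declared in this header:

        struct my_mirror {
                struct mmu_interval_notifier notifier;
                struct mutex lock;      /* serializes device page-table updates */
        };

        static bool my_mirror_invalidate(struct mmu_interval_notifier *interval_sub,
                                         const struct mmu_notifier_range *range,
                                         unsigned long cur_seq)
        {
                struct my_mirror *mirror =
                        container_of(interval_sub, struct my_mirror, notifier);

                if (mmu_notifier_range_blockable(range))
                        mutex_lock(&mirror->lock);
                else if (!mutex_trylock(&mirror->lock))
                        return false;   /* only legal for non-blockable ranges */

                /* Make concurrent mmu_interval_read_retry() callers start over. */
                mmu_interval_set_seq(interval_sub, cur_seq);
                my_mirror_unmap(mirror, range->start, range->end);  /* hypothetical */
                mutex_unlock(&mirror->lock);
                return true;
        }

        static const struct mmu_interval_notifier_ops my_mirror_ops = {
                .invalidate = my_mirror_invalidate,
        };

        static int my_mirror_fault(struct my_mirror *mirror,
                                   unsigned long start, unsigned long end)
        {
                unsigned long seq;

        again:
                seq = mmu_interval_read_begin(&mirror->notifier);

                /* ... fault/walk the CPU page tables for [start, end) here ... */

                mutex_lock(&mirror->lock);
                if (mmu_interval_read_retry(&mirror->notifier, seq)) {
                        /* An invalidation raced with us; throw the result away. */
                        mutex_unlock(&mirror->lock);
                        goto again;
                }
                /* Safe to program the device page tables while holding the lock. */
                mutex_unlock(&mirror->lock);
                return 0;
        }
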
403 return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE); in mmu_notifier_range_blockable()
451 if (mm_has_notifiers(range->mm)) { in mmu_notifier_invalidate_range_start()
452 range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE; in mmu_notifier_invalidate_range_start()
464 if (mm_has_notifiers(range->mm)) { in mmu_notifier_invalidate_range_start_nonblock()
465 range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE; in mmu_notifier_invalidate_range_start_nonblock()
478 if (mm_has_notifiers(range->mm)) in mmu_notifier_invalidate_range_end()
485 if (mm_has_notifiers(range->mm)) in mmu_notifier_invalidate_range_only_end()
498 mm->notifier_subscriptions = NULL; in mmu_notifier_subscriptions_init()
516 range->vma = vma; in mmu_notifier_range_init()
517 range->event = event; in mmu_notifier_range_init()
518 range->mm = mm; in mmu_notifier_range_init()
519 range->start = start; in mmu_notifier_range_init()
520 range->end = end; in mmu_notifier_range_init()
521 range->flags = flags; in mmu_notifier_range_init()
531 range->migrate_pgmap_owner = pgmap; in mmu_notifier_range_init_migrate()
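
On the caller side, the wrappers above are used by initializing a range and bracketing the page-table change with invalidate_range_start()/end(). A sketch with a hypothetical surrounding function; mmu_notifier_range_init(), the invalidate calls and MMU_NOTIFY_CLEAR are the ones declared in this header:

        static void my_zap_range(struct vm_area_struct *vma,
                                 unsigned long start, unsigned long end)
        {
                struct mmu_notifier_range range;

                mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma,
                                        vma->vm_mm, start, end);
                mmu_notifier_invalidate_range_start(&range);

                /* ... clear the CPU page tables for [start, end) here ... */

                mmu_notifier_invalidate_range_end(&range);
        }
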
540 __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
553 __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
566 __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
577 __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
585 struct mm_struct *___mm = (__vma)->vm_mm; \
598 struct mm_struct *___mm = (__vma)->vm_mm; \
611 struct mm_struct *___mm = (__vma)->vm_mm; \
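
The *_notify macros above fold the secondary-MMU notification into the corresponding primary-MMU helper so callers in the rmap/aging paths make a single call. A sketch of the intended usage; the surrounding function is hypothetical:

        static bool my_page_referenced_one(struct vm_area_struct *vma,
                                           unsigned long address, pte_t *ptep)
        {
                /*
                 * Test-and-clear the young bit in the primary pte (with a TLB
                 * flush) and OR in mmu_notifier_clear_flush_young() for the
                 * page-sized range, exactly as the macro expands to above.
                 */
                return ptep_clear_flush_young_notify(vma, address, ptep);
        }
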
623 * This is safe to start by updating the secondary MMUs, because the primary MMU
625 * set_pte_at_notify() has been invoked. Updating the secondary MMUs first is
626 * required when we change both the protection of the mapping from read-only to
627 * read-write and the pfn (like during copy on write page faults). Otherwise the
628 * old page would remain mapped readonly in the secondary MMUs after the new
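
A sketch of the copy-on-write ordering described above. The wrapper function and its arguments are hypothetical and the real write-protect fault path does more work (flushing the old pte, accounting); set_pte_at_notify() is the helper this header provides, and the pte_* helpers are the usual ones for kernels of this vintage:

        static void my_wp_replace_page(struct vm_area_struct *vma, unsigned long address,
                                       pte_t *ptep, struct page *new_page)
        {
                pte_t entry = mk_pte(new_page, vma->vm_page_prot);

                entry = pte_mkwrite(pte_mkdirty(entry));
                /*
                 * set_pte_at_notify() invokes ->change_pte() on the secondary MMUs
                 * first and only then installs the writable pte in the primary MMU,
                 * so the old read-only page can never be observed as writable
                 * through a secondary MMU.
                 */
                set_pte_at_notify(vma->vm_mm, address, ptep, entry);
        }
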
652 range->start = start; in _mmu_notifier_range_init()
653 range->end = end; in _mmu_notifier_range_init()