Lines in mm/mmu_notifier.c matching refs:range
101 const struct mmu_notifier_range *range, in mn_itree_inv_start_range() argument
109 node = interval_tree_iter_first(&subscriptions->itree, range->start, in mn_itree_inv_start_range()
110 range->end - 1); in mn_itree_inv_start_range()
124 const struct mmu_notifier_range *range) in mn_itree_inv_next() argument
129 range->start, range->end - 1); in mn_itree_inv_next()
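The two helpers above walk the interval tree of mmu_interval_notifier subscriptions. The tree stores closed intervals [start, last] while an mmu_notifier_range is half-open [start, end), which is why both lookups pass range->end - 1. A minimal sketch of that walk, using the generic interval-tree API from include/linux/interval_tree.h (tree and range stand in for the real subscription fields):

	struct interval_tree_node *node;

	for (node = interval_tree_iter_first(&tree, range->start,
					     range->end - 1);
	     node;
	     node = interval_tree_iter_next(node, range->start,
					    range->end - 1)) {
		/* node's closed interval overlaps [range->start, range->end) */
	}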
270 struct mmu_notifier_range range = { in mn_itree_release() local
282 mn_itree_inv_start_range(subscriptions, &range, &cur_seq); in mn_itree_release()
284 interval_sub = mn_itree_inv_next(interval_sub, &range)) { in mn_itree_release()
285 ret = interval_sub->ops->invalidate(interval_sub, &range, in mn_itree_release()
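mn_itree_release() runs when the address space is torn down: every overlapping interval subscription gets one final ->invalidate() call. From the surrounding source, the local built at line 270 covers the whole address space with a blockable MMU_NOTIFY_RELEASE event:

	struct mmu_notifier_range range = {
		.flags = MMU_NOTIFIER_RANGE_BLOCKABLE,
		.event = MMU_NOTIFY_RELEASE,
		.mm = mm,
		.start = 0,
		.end = ULONG_MAX,
	};

Because the range is blockable, subscribers are not allowed to refuse this final invalidation.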
451 const struct mmu_notifier_range *range) in mn_itree_invalidate() argument
457 mn_itree_inv_start_range(subscriptions, range, &cur_seq); in mn_itree_invalidate()
459 interval_sub = mn_itree_inv_next(interval_sub, range)) { in mn_itree_invalidate()
462 ret = interval_sub->ops->invalidate(interval_sub, range, in mn_itree_invalidate()
465 if (WARN_ON(mmu_notifier_range_blockable(range))) in mn_itree_invalidate()
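The WARN_ON at line 465 encodes the callback contract: an interval subscriber may refuse an invalidation (return false) only when the range is non-blockable, otherwise the core has no way to make progress. A hypothetical driver-side callback honoring that contract, where my_object, its lock, and the notifier member are invented for illustration:

	static bool my_invalidate(struct mmu_interval_notifier *interval_sub,
				  const struct mmu_notifier_range *range,
				  unsigned long cur_seq)
	{
		struct my_object *obj =
			container_of(interval_sub, struct my_object, notifier);

		if (mmu_notifier_range_blockable(range))
			mutex_lock(&obj->lock);
		else if (!mutex_trylock(&obj->lock))
			return false;	/* legal only for non-blockable ranges */

		mmu_interval_set_seq(interval_sub, cur_seq);
		/* ... tear down device mappings in [range->start, range->end) ... */
		mutex_unlock(&obj->lock);
		return true;
	}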
483 struct mmu_notifier_range *range) in mn_hlist_invalidate_range_start() argument
497 if (!mmu_notifier_range_blockable(range)) in mn_hlist_invalidate_range_start()
499 _ret = ops->invalidate_range_start(subscription, range); in mn_hlist_invalidate_range_start()
500 if (!mmu_notifier_range_blockable(range)) in mn_hlist_invalidate_range_start()
505 !mmu_notifier_range_blockable(range) ? in mn_hlist_invalidate_range_start()
508 WARN_ON(mmu_notifier_range_blockable(range) || in mn_hlist_invalidate_range_start()
534 range); in mn_hlist_invalidate_range_start()
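mn_hlist_invalidate_range_start() drives the older list-based subscribers: each ->invalidate_range_start() call is bracketed by non_block_start()/non_block_end() debugging when the range is non-blockable, a failure is reported at line 505 and sanity-checked at line 508 (only -EAGAIN, and only for non-blockable ranges), and the error path ending at line 534 delivers ->invalidate_range_end() so that starts and ends stay paired. A hypothetical subscriber following those rules (my_* names invented):

	static int my_range_start(struct mmu_notifier *subscription,
				  const struct mmu_notifier_range *range)
	{
		if (!mmu_notifier_range_blockable(range))
			return -EAGAIN;	/* the only failure the core accepts */

		/* blockable context: sleepable teardown of secondary mappings */
		return 0;
	}

	static const struct mmu_notifier_ops my_mn_ops = {
		.invalidate_range_start	= my_range_start,
		/* .invalidate_range_end, .release, ... as needed */
	};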
542 int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range) in __mmu_notifier_invalidate_range_start() argument
545 range->mm->notifier_subscriptions; in __mmu_notifier_invalidate_range_start()
549 ret = mn_itree_invalidate(subscriptions, range); in __mmu_notifier_invalidate_range_start()
554 return mn_hlist_invalidate_range_start(subscriptions, range); in __mmu_notifier_invalidate_range_start()
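__mmu_notifier_invalidate_range_start() is the common entry point: interval-tree subscribers are invalidated first (line 549) and, only if none of them objected, the list-based subscribers run (line 554). Core mm code reaches it through the usual start/end bracket, sketched below assuming the v5.x-era mmu_notifier_range_init() signature that still takes a vma (consistent with range->vma at line 1172):

	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
				start, end);
	mmu_notifier_invalidate_range_start(&range);
	/* ... clear the primary PTEs under the page-table locks ... */
	mmu_notifier_invalidate_range_end(&range);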
560 struct mmu_notifier_range *range, bool only_end) in mn_hlist_invalidate_end() argument
583 range->mm, in mn_hlist_invalidate_end()
584 range->start, in mn_hlist_invalidate_end()
585 range->end); in mn_hlist_invalidate_end()
587 if (!mmu_notifier_range_blockable(range)) in mn_hlist_invalidate_end()
590 range); in mn_hlist_invalidate_end()
591 if (!mmu_notifier_range_blockable(range)) in mn_hlist_invalidate_end()
598 void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range, in __mmu_notifier_invalidate_range_end() argument
602 range->mm->notifier_subscriptions; in __mmu_notifier_invalidate_range_end()
609 mn_hlist_invalidate_end(subscriptions, range, only_end); in __mmu_notifier_invalidate_range_end()
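On the end side, mn_hlist_invalidate_end() can deliver two callbacks per subscriber: ->invalidate_range() (the secondary-TLB flush at lines 583-585) and ->invalidate_range_end(). The only_end flag passed down from __mmu_notifier_invalidate_range_end() skips the former when the caller has already flushed secondary TLBs under the page-table lock, avoiding a double flush. A hypothetical flush hook matching the call at lines 583-585 (my_* names invented):

	static void my_invalidate_range(struct mmu_notifier *subscription,
					struct mm_struct *mm,
					unsigned long start, unsigned long end)
	{
		struct my_dev *dev = container_of(subscription, struct my_dev, mn);

		my_flush_iotlb(dev, start, end);	/* hypothetical device flush */
	}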
1170 mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range) in mmu_notifier_range_update_to_read_only() argument
1172 if (!range->vma || range->event != MMU_NOTIFY_PROTECTION_VMA) in mmu_notifier_range_update_to_read_only()
1175 return range->vma->vm_flags & VM_READ; in mmu_notifier_range_update_to_read_only()
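mmu_notifier_range_update_to_read_only() lets a subscriber distinguish a plain write-protect from a full unmap: it returns true only for an MMU_NOTIFY_PROTECTION_VMA event on a vma that remains readable, in which case secondary mappings can be downgraded to read-only rather than torn down. A sketch of that use inside a start callback (my_downgrade_to_read_only and my_unmap are hypothetical helpers):

	static int my_prot_range_start(struct mmu_notifier *subscription,
				       const struct mmu_notifier_range *range)
	{
		if (mmu_notifier_range_update_to_read_only(range))
			my_downgrade_to_read_only(range->start, range->end);
		else
			my_unmap(range->start, range->end);
		return 0;
	}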