Lines matching refs:subscriptions (identifier references in mm/mmu_notifier.c, Linux kernel). Each entry gives the source line number, the matching code, the enclosing function, and an "argument" or "local" tag where the line defines the identifier.
87 mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions) in mn_itree_is_invalidating() argument
89 lockdep_assert_held(&subscriptions->lock); in mn_itree_is_invalidating()
90 return subscriptions->invalidate_seq & 1; in mn_itree_is_invalidating()
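
The low bit of invalidate_seq doubles as an "invalidation in progress" flag: the counter stays even while the tree is idle and is forced odd for the duration of a colliding invalidation, seqlock-style. Reassembled from the fragments above (a sketch, not verbatim source):

    static bool
    mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions)
    {
            lockdep_assert_held(&subscriptions->lock);
            /* Odd: at least one overlapping invalidation is in flight. */
            return subscriptions->invalidate_seq & 1;
    }
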
94 mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions, in mn_itree_inv_start_range() argument
101 spin_lock(&subscriptions->lock); in mn_itree_inv_start_range()
102 subscriptions->active_invalidate_ranges++; in mn_itree_inv_start_range()
103 node = interval_tree_iter_first(&subscriptions->itree, range->start, in mn_itree_inv_start_range()
106 subscriptions->invalidate_seq |= 1; in mn_itree_inv_start_range()
111 *seq = subscriptions->invalidate_seq; in mn_itree_inv_start_range()
112 spin_unlock(&subscriptions->lock); in mn_itree_inv_start_range()
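
mn_itree_inv_start_range() opens an invalidation window: it bumps active_invalidate_ranges, probes the interval tree for any subscription overlapping the range, and only sets the odd bit when there is something to collide with; the caller gets back a snapshot of invalidate_seq. A sketch with the elided lines (node-to-subscription conversion, return value) filled in from the surrounding upstream code:

    static struct mmu_interval_notifier *
    mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions,
                             const struct mmu_notifier_range *range,
                             unsigned long *seq)
    {
            struct interval_tree_node *node;
            struct mmu_interval_notifier *res = NULL;

            spin_lock(&subscriptions->lock);
            subscriptions->active_invalidate_ranges++;
            node = interval_tree_iter_first(&subscriptions->itree, range->start,
                                            range->end - 1);
            if (node) {
                    /* Mark the seq odd: readers that sampled it must retry. */
                    subscriptions->invalidate_seq |= 1;
                    res = container_of(node, struct mmu_interval_notifier,
                                       interval_tree);
            }
            *seq = subscriptions->invalidate_seq;
            spin_unlock(&subscriptions->lock);
            return res;
    }
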
129 static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions) in mn_itree_inv_end() argument
134 spin_lock(&subscriptions->lock); in mn_itree_inv_end()
135 if (--subscriptions->active_invalidate_ranges || in mn_itree_inv_end()
136 !mn_itree_is_invalidating(subscriptions)) { in mn_itree_inv_end()
137 spin_unlock(&subscriptions->lock); in mn_itree_inv_end()
142 subscriptions->invalidate_seq++; in mn_itree_inv_end()
151 &subscriptions->deferred_list, in mn_itree_inv_end()
155 &subscriptions->itree); in mn_itree_inv_end()
158 &subscriptions->itree); in mn_itree_inv_end()
161 spin_unlock(&subscriptions->lock); in mn_itree_inv_end()
163 wake_up_all(&subscriptions->wq); in mn_itree_inv_end()
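
mn_itree_inv_end() closes the window. Only the last of the concurrently active invalidations does real work: it increments invalidate_seq back to an even value, replays the adds and removes that were deferred while the tree was hot (an empty rb node distinguishes a pending insert from a pending remove), and wakes any readers sleeping in mmu_interval_read_begin(). Sketch:

    static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
    {
            struct mmu_interval_notifier *interval_sub;
            struct hlist_node *next;

            spin_lock(&subscriptions->lock);
            if (--subscriptions->active_invalidate_ranges ||
                !mn_itree_is_invalidating(subscriptions)) {
                    spin_unlock(&subscriptions->lock);
                    return;
            }

            /* Back to even: no invalidation is in progress. */
            subscriptions->invalidate_seq++;

            /* Replay tree updates deferred while invalidation was running. */
            hlist_for_each_entry_safe(interval_sub, next,
                                      &subscriptions->deferred_list,
                                      deferred_item) {
                    if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb))
                            interval_tree_insert(&interval_sub->interval_tree,
                                                 &subscriptions->itree);
                    else
                            interval_tree_remove(&interval_sub->interval_tree,
                                                 &subscriptions->itree);
                    hlist_del(&interval_sub->deferred_item);
            }
            spin_unlock(&subscriptions->lock);

            wake_up_all(&subscriptions->wq);
    }
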
188 struct mmu_notifier_subscriptions *subscriptions = in mmu_interval_read_begin() local
232 spin_lock(&subscriptions->lock); in mmu_interval_read_begin()
235 is_invalidating = seq == subscriptions->invalidate_seq; in mmu_interval_read_begin()
236 spin_unlock(&subscriptions->lock); in mmu_interval_read_begin()
248 wait_event(subscriptions->wq, in mmu_interval_read_begin()
249 READ_ONCE(subscriptions->invalidate_seq) != seq); in mmu_interval_read_begin()
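
mmu_interval_read_begin() is the read side of this collision-retry scheme: it samples the subscription's stored sequence and, when that value matches a live (odd) invalidate_seq, sleeps on subscriptions->wq until mn_itree_inv_end() moves the counter on. The pattern a driver is expected to build on top of it, per the usage documented in this file (driver_lock()/driver_unlock() are placeholders for a driver-private lock that the invalidate callback also takes):

    struct mmu_interval_notifier *interval_sub = ...;  /* registered earlier */
    unsigned long seq;

    again:
    seq = mmu_interval_read_begin(interval_sub);
    /* Fetch pages for the range outside all locks, e.g. hmm_range_fault(). */

    driver_lock();
    if (mmu_interval_read_retry(interval_sub, seq)) {
            driver_unlock();
            goto again;        /* an invalidation raced; results are stale */
    }
    /* Establish the device mapping; invalidate callbacks are excluded by
     * driver_lock(), so the pages cannot go stale here. */
    driver_unlock();
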
261 static void mn_itree_release(struct mmu_notifier_subscriptions *subscriptions, in mn_itree_release() argument
276 mn_itree_inv_start_range(subscriptions, &range, &cur_seq); in mn_itree_release()
284 mn_itree_inv_end(subscriptions); in mn_itree_release()
299 static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions, in mn_hlist_release() argument
310 hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist, in mn_hlist_release()
321 spin_lock(&subscriptions->lock); in mn_hlist_release()
322 while (unlikely(!hlist_empty(&subscriptions->list))) { in mn_hlist_release()
323 subscription = hlist_entry(subscriptions->list.first, in mn_hlist_release()
333 spin_unlock(&subscriptions->lock); in mn_hlist_release()
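
mn_hlist_release() tears down in two phases: an SRCU-protected walk (line 310) that invokes each subscription's ->release() callback, then, under the spinlock, a loop that unhooks every entry so the list reads empty afterwards. The locked phase, reassembled:

    spin_lock(&subscriptions->lock);
    while (unlikely(!hlist_empty(&subscriptions->list))) {
            subscription = hlist_entry(subscriptions->list.first,
                                       struct mmu_notifier, hlist);
            /* A racing mmu_notifier_unregister() now has nothing left to
             * do except wait for ->release() to finish. */
            hlist_del_init_rcu(&subscription->hlist);
    }
    spin_unlock(&subscriptions->lock);
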
350 struct mmu_notifier_subscriptions *subscriptions = in __mmu_notifier_release() local
353 if (subscriptions->has_itree) in __mmu_notifier_release()
354 mn_itree_release(subscriptions, mm); in __mmu_notifier_release()
356 if (!hlist_empty(&subscriptions->list)) in __mmu_notifier_release()
357 mn_hlist_release(subscriptions, mm); in __mmu_notifier_release()
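
__mmu_notifier_release() is the exit_mmap() hook and simply fans out to both subscription flavors: interval-tree users get a final whole-address-space invalidation with a MMU_NOTIFY_RELEASE event via mn_itree_release(), list users get their ->release() callbacks via mn_hlist_release(). Reassembled:

    void __mmu_notifier_release(struct mm_struct *mm)
    {
            struct mmu_notifier_subscriptions *subscriptions =
                    mm->notifier_subscriptions;

            if (subscriptions->has_itree)
                    mn_itree_release(subscriptions, mm);

            if (!hlist_empty(&subscriptions->list))
                    mn_hlist_release(subscriptions, mm);
    }
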
444 static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions, in mn_itree_invalidate() argument
451 mn_itree_inv_start_range(subscriptions, range, &cur_seq); in mn_itree_invalidate()
471 mn_itree_inv_end(subscriptions); in mn_itree_invalidate()
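
mn_itree_invalidate() brackets a walk of every overlapping subscription between inv_start and inv_end; mn_itree_release() above is the same loop hard-wired to the range [0, ULONG_MAX] with a MMU_NOTIFY_RELEASE event. Condensed (mn_itree_inv_next() is the companion iterator in this file; the upstream code additionally warns when a blockable invalidation is refused):

    for (interval_sub = mn_itree_inv_start_range(subscriptions, range, &cur_seq);
         interval_sub;
         interval_sub = mn_itree_inv_next(interval_sub, range)) {
            if (!interval_sub->ops->invalidate(interval_sub, range, cur_seq)) {
                    /* A non-blockable invalidation was refused: unwind. */
                    mn_itree_inv_end(subscriptions);
                    return -EAGAIN;
            }
    }
    return 0;
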
476 struct mmu_notifier_subscriptions *subscriptions, in mn_hlist_invalidate_range_start() argument
484 hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist, in mn_hlist_invalidate_range_start()
522 hlist_for_each_entry_rcu(subscription, &subscriptions->list, in mn_hlist_invalidate_range_start()
538 struct mmu_notifier_subscriptions *subscriptions = in __mmu_notifier_invalidate_range_start() local
542 if (subscriptions->has_itree) { in __mmu_notifier_invalidate_range_start()
543 ret = mn_itree_invalidate(subscriptions, range); in __mmu_notifier_invalidate_range_start()
547 if (!hlist_empty(&subscriptions->list)) in __mmu_notifier_invalidate_range_start()
548 return mn_hlist_invalidate_range_start(subscriptions, range); in __mmu_notifier_invalidate_range_start()
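
__mmu_notifier_invalidate_range_start() orders the two paths: the interval tree runs first because it is the one that can fail (-EAGAIN in non-blockable contexts), and the hlist callbacks only run once the tree side has committed. Reassembled:

    int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
    {
            struct mmu_notifier_subscriptions *subscriptions =
                    range->mm->notifier_subscriptions;
            int ret;

            if (subscriptions->has_itree) {
                    ret = mn_itree_invalidate(subscriptions, range);
                    if (ret)
                            return ret;
            }
            if (!hlist_empty(&subscriptions->list))
                    return mn_hlist_invalidate_range_start(subscriptions, range);
            return 0;
    }
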
553 mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions, in mn_hlist_invalidate_end() argument
560 hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist, in mn_hlist_invalidate_end()
595 struct mmu_notifier_subscriptions *subscriptions = in __mmu_notifier_invalidate_range_end() local
599 if (subscriptions->has_itree) in __mmu_notifier_invalidate_range_end()
600 mn_itree_inv_end(subscriptions); in __mmu_notifier_invalidate_range_end()
602 if (!hlist_empty(&subscriptions->list)) in __mmu_notifier_invalidate_range_end()
603 mn_hlist_invalidate_end(subscriptions, range, only_end); in __mmu_notifier_invalidate_range_end()
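
__mmu_notifier_invalidate_range_end() is the mirror image: close the interval-tree window first (processing deferred tree updates and waking sleepers), then run the hlist end-side callbacks. In this version the only_end flag tells the hlist path to skip the extra ->invalidate_range() call it would otherwise make on behalf of subscribers. Condensed:

    if (subscriptions->has_itree)
            mn_itree_inv_end(subscriptions);

    if (!hlist_empty(&subscriptions->list))
            mn_hlist_invalidate_end(subscriptions, range, only_end);
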
632 struct mmu_notifier_subscriptions *subscriptions = NULL; in __mmu_notifier_register() local
651 subscriptions = kzalloc( in __mmu_notifier_register()
653 if (!subscriptions) in __mmu_notifier_register()
656 INIT_HLIST_HEAD(&subscriptions->list); in __mmu_notifier_register()
657 spin_lock_init(&subscriptions->lock); in __mmu_notifier_register()
658 subscriptions->invalidate_seq = 2; in __mmu_notifier_register()
659 subscriptions->itree = RB_ROOT_CACHED; in __mmu_notifier_register()
660 init_waitqueue_head(&subscriptions->wq); in __mmu_notifier_register()
661 INIT_HLIST_HEAD(&subscriptions->deferred_list); in __mmu_notifier_register()
684 if (subscriptions) in __mmu_notifier_register()
685 smp_store_release(&mm->notifier_subscriptions, subscriptions); in __mmu_notifier_register()
705 kfree(subscriptions); in __mmu_notifier_register()
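
__mmu_notifier_register() allocates the per-mm container the first time any notifier is registered (line 705's kfree is the error-unwind for that allocation). invalidate_seq starts at 2: even, i.e. idle, and safely distant from the odd seed values handed to new interval subscriptions. Publication uses smp_store_release() so that the lock-free smp_load_acquire() readers (see mmu_interval_notifier_insert() below) observe a fully initialized structure. Condensed from the fragments above:

    subscriptions = kzalloc(sizeof(struct mmu_notifier_subscriptions),
                            GFP_KERNEL);
    if (!subscriptions)
            return -ENOMEM;

    INIT_HLIST_HEAD(&subscriptions->list);
    spin_lock_init(&subscriptions->lock);
    subscriptions->invalidate_seq = 2;            /* even == idle */
    subscriptions->itree = RB_ROOT_CACHED;
    init_waitqueue_head(&subscriptions->wq);
    INIT_HLIST_HEAD(&subscriptions->deferred_list);
    ...
    /* Pairs with smp_load_acquire(&mm->notifier_subscriptions). */
    if (subscriptions)
            smp_store_release(&mm->notifier_subscriptions, subscriptions);
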
922 struct mmu_notifier_subscriptions *subscriptions, unsigned long start, in __mmu_interval_notifier_insert() argument
958 spin_lock(&subscriptions->lock); in __mmu_interval_notifier_insert()
959 if (subscriptions->active_invalidate_ranges) { in __mmu_interval_notifier_insert()
960 if (mn_itree_is_invalidating(subscriptions)) in __mmu_interval_notifier_insert()
962 &subscriptions->deferred_list); in __mmu_interval_notifier_insert()
964 subscriptions->invalidate_seq |= 1; in __mmu_interval_notifier_insert()
966 &subscriptions->itree); in __mmu_interval_notifier_insert()
968 interval_sub->invalidate_seq = subscriptions->invalidate_seq; in __mmu_interval_notifier_insert()
970 WARN_ON(mn_itree_is_invalidating(subscriptions)); in __mmu_interval_notifier_insert()
978 subscriptions->invalidate_seq - 1; in __mmu_interval_notifier_insert()
980 &subscriptions->itree); in __mmu_interval_notifier_insert()
982 spin_unlock(&subscriptions->lock); in __mmu_interval_notifier_insert()
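
__mmu_interval_notifier_insert() may not modify the tree while an invalidation is walking it, so under subscriptions->lock it splits three ways: defer the insert when a collision window is open; insert directly but force the counter odd when invalidations are active without a current collision; and in the idle case insert directly, seeding the subscription with invalidate_seq - 1, an odd value distinct from the live even counter (which also keeps the counter-wrap window safe). Reassembled:

    spin_lock(&subscriptions->lock);
    if (subscriptions->active_invalidate_ranges) {
            if (mn_itree_is_invalidating(subscriptions))
                    hlist_add_head(&interval_sub->deferred_item,
                                   &subscriptions->deferred_list);
            else {
                    subscriptions->invalidate_seq |= 1;
                    interval_tree_insert(&interval_sub->interval_tree,
                                         &subscriptions->itree);
            }
            interval_sub->invalidate_seq = subscriptions->invalidate_seq;
    } else {
            WARN_ON(mn_itree_is_invalidating(subscriptions));
            /* Odd, and not about to collide with the idle (even) counter. */
            interval_sub->invalidate_seq =
                    subscriptions->invalidate_seq - 1;
            interval_tree_insert(&interval_sub->interval_tree,
                                 &subscriptions->itree);
    }
    spin_unlock(&subscriptions->lock);
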
1007 struct mmu_notifier_subscriptions *subscriptions; in mmu_interval_notifier_insert() local
1012 subscriptions = smp_load_acquire(&mm->notifier_subscriptions); in mmu_interval_notifier_insert()
1013 if (!subscriptions || !subscriptions->has_itree) { in mmu_interval_notifier_insert()
1017 subscriptions = mm->notifier_subscriptions; in mmu_interval_notifier_insert()
1019 return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions, in mmu_interval_notifier_insert()
1029 struct mmu_notifier_subscriptions *subscriptions = in mmu_interval_notifier_insert_locked() local
1035 if (!subscriptions || !subscriptions->has_itree) { in mmu_interval_notifier_insert_locked()
1039 subscriptions = mm->notifier_subscriptions; in mmu_interval_notifier_insert_locked()
1041 return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions, in mmu_interval_notifier_insert_locked()
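
Both insert entry points lazily create the per-mm state: an smp_load_acquire() of mm->notifier_subscriptions, and if it is missing (or predates interval-tree support, hence !has_itree), registration of a NULL mmu_notifier builds it; mmu_interval_notifier_insert_locked() differs only in calling __mmu_notifier_register() because the caller already holds the mmap lock. The unlocked variant, condensed:

    subscriptions = smp_load_acquire(&mm->notifier_subscriptions);
    if (!subscriptions || !subscriptions->has_itree) {
            ret = mmu_notifier_register(NULL, mm);
            if (ret)
                    return ret;
            subscriptions = mm->notifier_subscriptions;
    }
    return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
                                          start, length, ops);
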
1047 mmu_interval_seq_released(struct mmu_notifier_subscriptions *subscriptions, in mmu_interval_seq_released() argument
1052 spin_lock(&subscriptions->lock); in mmu_interval_seq_released()
1053 ret = subscriptions->invalidate_seq != seq; in mmu_interval_seq_released()
1054 spin_unlock(&subscriptions->lock); in mmu_interval_seq_released()
1071 struct mmu_notifier_subscriptions *subscriptions = in mmu_interval_notifier_remove() local
1077 spin_lock(&subscriptions->lock); in mmu_interval_notifier_remove()
1078 if (mn_itree_is_invalidating(subscriptions)) { in mmu_interval_notifier_remove()
1087 &subscriptions->deferred_list); in mmu_interval_notifier_remove()
1088 seq = subscriptions->invalidate_seq; in mmu_interval_notifier_remove()
1093 &subscriptions->itree); in mmu_interval_notifier_remove()
1095 spin_unlock(&subscriptions->lock); in mmu_interval_notifier_remove()
1104 wait_event(subscriptions->wq, in mmu_interval_notifier_remove()
1105 mmu_interval_seq_released(subscriptions, seq)); in mmu_interval_notifier_remove()
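
mmu_interval_notifier_remove() follows the same discipline. If an invalidation is in flight the entry cannot be taken out of the tree directly: a still-pending deferred insert (its rb node never filled in) is simply dequeued, otherwise the removal is queued on deferred_list and the current seq recorded. A nonzero seq then makes the function sleep in wait_event() until mmu_interval_seq_released() (lines 1052-1054 above) observes invalidate_seq move on, guaranteeing that no invalidate callback can still be using the subscription. The decision under the lock, condensed:

    unsigned long seq = 0;

    spin_lock(&subscriptions->lock);
    if (mn_itree_is_invalidating(subscriptions)) {
            if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb))
                    hlist_del(&interval_sub->deferred_item);
            else {
                    hlist_add_head(&interval_sub->deferred_item,
                                   &subscriptions->deferred_list);
                    seq = subscriptions->invalidate_seq;
            }
    } else {
            interval_tree_remove(&interval_sub->interval_tree,
                                 &subscriptions->itree);
    }
    spin_unlock(&subscriptions->lock);

    /* Callers must not hold any lock an invalidate callback takes. */
    if (seq)
            wait_event(subscriptions->wq,
                       mmu_interval_seq_released(subscriptions, seq));
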