Lines matching refs: handler (drivers/infiniband/hw/hfi1/mmu_rb.c)
45 struct mmu_rb_handler **handler) in hfi1_mmu_rb_register() argument
71 *handler = h; in hfi1_mmu_rb_register()
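
Lines 45 and 71 bracket the registration path: the caller passes a struct mmu_rb_handler ** out-parameter and receives the newly built handler through it once the mmu notifier has been hooked up. A rough sketch of that shape, using only generic kernel APIs and illustrative reg_* names; the driver's real signature, ops table, and error handling are not shown in this listing, so treat every detail below as an assumption:

    #include <linux/errno.h>
    #include <linux/list.h>
    #include <linux/mmu_notifier.h>
    #include <linux/rbtree.h>
    #include <linux/sched.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    /* Illustrative handler layout; the driver's struct mmu_rb_handler differs. */
    struct reg_handler {
        struct mmu_notifier mn;         /* mn.mm records the owning mm */
        struct rb_root_cached root;     /* interval tree of cached ranges */
        struct list_head lru_list;
        struct list_head del_list;
        struct work_struct del_work;
        spinlock_t lock;
    };

    static int reg_range_start_stub(struct mmu_notifier *mn,
                                    const struct mmu_notifier_range *range)
    {
        return 0;       /* the real callback walks the interval tree here */
    }

    static const struct mmu_notifier_ops reg_ops = {
        .invalidate_range_start = reg_range_start_stub,
    };

    /* Build a handler, attach it to the current mm, and hand it back
     * through the out-parameter on success (compare line 71 above). */
    static int reg_register(void (*del_fn)(struct work_struct *),
                            struct reg_handler **handler)
    {
        struct reg_handler *h;
        int ret;

        h = kzalloc(sizeof(*h), GFP_KERNEL);
        if (!h)
            return -ENOMEM;

        h->root = RB_ROOT_CACHED;
        INIT_LIST_HEAD(&h->lru_list);
        INIT_LIST_HEAD(&h->del_list);
        INIT_WORK(&h->del_work, del_fn);
        spin_lock_init(&h->lock);

        h->mn.ops = &reg_ops;
        ret = mmu_notifier_register(&h->mn, current->mm);
        if (ret) {
            kfree(h);
            return ret;
        }

        *handler = h;
        return 0;
    }
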
75 void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler) in hfi1_mmu_rb_unregister() argument
83 mmgrab(handler->mn.mm); in hfi1_mmu_rb_unregister()
86 mmu_notifier_unregister(&handler->mn, handler->mn.mm); in hfi1_mmu_rb_unregister()
92 flush_work(&handler->del_work); in hfi1_mmu_rb_unregister()
96 spin_lock_irqsave(&handler->lock, flags); in hfi1_mmu_rb_unregister()
97 while ((node = rb_first_cached(&handler->root))) { in hfi1_mmu_rb_unregister()
99 rb_erase_cached(node, &handler->root); in hfi1_mmu_rb_unregister()
103 spin_unlock_irqrestore(&handler->lock, flags); in hfi1_mmu_rb_unregister()
112 mmdrop(handler->mn.mm); in hfi1_mmu_rb_unregister()
114 kfree(handler); in hfi1_mmu_rb_unregister()
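
The unregister path visible above follows a strict teardown order: take a reference on the mm (line 83) so it outlives mmu_notifier_unregister() (line 86), flush the deferred-delete work (line 92), empty the tree under the handler lock (lines 96-103), and only then drop the mm and free the handler (lines 112-114). The drain step boils down to the pattern below, sketched here with a plain cached rbtree and illustrative teardown_* types rather than the driver's interval-tree node:

    #include <linux/list.h>
    #include <linux/rbtree.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct teardown_node {
        struct rb_node node;
        struct list_head list;
    };

    struct teardown_handler {
        struct rb_root_cached root;
        spinlock_t lock;
    };

    /* Erase every node while holding the lock, then free the batch with
     * the lock dropped so per-node cleanup could sleep if it had to. */
    static void teardown_drain(struct teardown_handler *h)
    {
        struct rb_node *rbn;
        struct teardown_node *n, *tmp;
        unsigned long flags;
        LIST_HEAD(del_list);

        spin_lock_irqsave(&h->lock, flags);
        while ((rbn = rb_first_cached(&h->root))) {
            n = rb_entry(rbn, struct teardown_node, node);
            rb_erase_cached(rbn, &h->root);
            list_add_tail(&n->list, &del_list);
        }
        spin_unlock_irqrestore(&h->lock, flags);

        list_for_each_entry_safe(n, tmp, &del_list, list)
            kfree(n);       /* stand-in for the driver's per-node remove op */
    }
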
117 int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler, in hfi1_mmu_rb_insert() argument
126 if (current->mm != handler->mn.mm) in hfi1_mmu_rb_insert()
129 spin_lock_irqsave(&handler->lock, flags); in hfi1_mmu_rb_insert()
130 node = __mmu_rb_search(handler, mnode->addr, mnode->len); in hfi1_mmu_rb_insert()
135 __mmu_int_rb_insert(mnode, &handler->root); in hfi1_mmu_rb_insert()
136 list_add_tail(&mnode->list, &handler->lru_list); in hfi1_mmu_rb_insert()
137 mnode->handler = handler; in hfi1_mmu_rb_insert()
139 spin_unlock_irqrestore(&handler->lock, flags); in hfi1_mmu_rb_insert()
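
hfi1_mmu_rb_insert (lines 117-139) first rejects callers running under a different mm (line 126), then, under the handler lock, searches for an overlapping node and only if none exists inserts into the interval tree, appends to the LRU list, and ties the node back to its handler (line 137). A condensed sketch of the locked search-then-insert portion, written against the generic <linux/interval_tree.h> API instead of the driver's INTERVAL_TREE_DEFINE-generated __mmu_int_rb_* helpers, with illustrative demo_* types:

    #include <linux/errno.h>
    #include <linux/interval_tree.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct demo_node {
        struct interval_tree_node in;   /* in.start .. in.last, inclusive */
        struct list_head list;          /* LRU linkage */
    };

    struct demo_handler {
        struct rb_root_cached root;
        struct list_head lru_list;
        spinlock_t lock;
    };

    /* Refuse ranges that overlap an existing node; otherwise insert into
     * the interval tree and append to the LRU in one critical section. */
    static int demo_insert(struct demo_handler *h, struct demo_node *dn)
    {
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&h->lock, flags);
        if (interval_tree_iter_first(&h->root, dn->in.start, dn->in.last)) {
            ret = -EEXIST;      /* something already covers part of this range */
        } else {
            interval_tree_insert(&dn->in, &h->root);
            list_add_tail(&dn->list, &h->lru_list);
        }
        spin_unlock_irqrestore(&h->lock, flags);
        return ret;
    }
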
144 struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler, in hfi1_mmu_rb_get_first() argument
150 node = __mmu_int_rb_iter_first(&handler->root, addr, (addr + len) - 1); in hfi1_mmu_rb_get_first()
152 list_move_tail(&node->list, &handler->lru_list); in hfi1_mmu_rb_get_first()
157 static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler, in __mmu_rb_search() argument
164 if (!handler->ops->filter) { in __mmu_rb_search()
165 node = __mmu_int_rb_iter_first(&handler->root, addr, in __mmu_rb_search()
168 for (node = __mmu_int_rb_iter_first(&handler->root, addr, in __mmu_rb_search()
173 if (handler->ops->filter(node, addr, len)) in __mmu_rb_search()
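
__mmu_rb_search (lines 157-173) shows two lookup modes: with no filter registered it simply takes the first interval-tree node overlapping [addr, addr + len - 1] (lines 164-165); with a filter it iterates the overlapping nodes until the filter accepts one (lines 168-173). Both collapse to the loop below, again sketched with the generic interval-tree iterators and hypothetical lookup_* names; the caller is assumed to hold the lock that protects the tree:

    #include <linux/interval_tree.h>
    #include <linux/kernel.h>
    #include <linux/types.h>

    struct lookup_node {
        struct interval_tree_node in;
    };

    /* Return the first node overlapping [addr, addr + len - 1] that the
     * optional filter accepts, or NULL if nothing qualifies. */
    static struct lookup_node *
    lookup_search(struct rb_root_cached *root, unsigned long addr,
                  unsigned long len,
                  bool (*filter)(struct lookup_node *, unsigned long,
                                 unsigned long))
    {
        struct interval_tree_node *in;

        for (in = interval_tree_iter_first(root, addr, addr + len - 1);
             in;
             in = interval_tree_iter_next(in, addr, addr + len - 1)) {
            struct lookup_node *n = container_of(in, struct lookup_node, in);

            if (!filter || filter(n, addr, len))
                return n;
        }
        return NULL;
    }
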
189 mnode->handler->ops->remove(mnode->handler->ops_arg, mnode); in release_immediate()
197 list_move(&mnode->list, &mnode->handler->del_list); in release_nolock()
198 queue_work(mnode->handler->wq, &mnode->handler->del_work); in release_nolock()
213 struct mmu_rb_handler *handler = mnode->handler; in hfi1_mmu_rb_release() local
216 spin_lock_irqsave(&handler->lock, flags); in hfi1_mmu_rb_release()
217 list_move(&mnode->list, &mnode->handler->del_list); in hfi1_mmu_rb_release()
218 spin_unlock_irqrestore(&handler->lock, flags); in hfi1_mmu_rb_release()
219 queue_work(handler->wq, &handler->del_work); in hfi1_mmu_rb_release()
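
The three release paths differ only in how urgently the node can be torn down: release_immediate (line 189) calls the remove op directly, while release_nolock (lines 197-198) and hfi1_mmu_rb_release (lines 213-219) park the node on the handler's del_list and kick the workqueue, the latter taking the lock itself first. The deferred variant is the familiar "can't free here, hand it to process context" pattern, sketched with illustrative defer_* types:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct defer_node {
        struct list_head list;          /* assumed to sit on an LRU list */
    };

    struct defer_handler {
        spinlock_t lock;
        struct list_head del_list;      /* nodes waiting for teardown */
        struct work_struct del_work;    /* runs handle_remove-style work */
        struct workqueue_struct *wq;
    };

    /* Called from a context that must not tear the node down itself:
     * move it from whatever list it is on (e.g. the LRU) onto del_list
     * under the lock and let the work item finish the job later. */
    static void defer_release(struct defer_handler *h, struct defer_node *n)
    {
        unsigned long flags;

        spin_lock_irqsave(&h->lock, flags);
        list_move(&n->list, &h->del_list);
        spin_unlock_irqrestore(&h->lock, flags);
        queue_work(h->wq, &h->del_work);
    }
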
222 void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg) in hfi1_mmu_rb_evict() argument
229 if (current->mm != handler->mn.mm) in hfi1_mmu_rb_evict()
234 spin_lock_irqsave(&handler->lock, flags); in hfi1_mmu_rb_evict()
235 list_for_each_entry_safe(rbnode, ptr, &handler->lru_list, list) { in hfi1_mmu_rb_evict()
240 if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg, in hfi1_mmu_rb_evict()
242 __mmu_int_rb_remove(rbnode, &handler->root); in hfi1_mmu_rb_evict()
249 spin_unlock_irqrestore(&handler->lock, flags); in hfi1_mmu_rb_evict()
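
hfi1_mmu_rb_evict (lines 222-249) again bails out if called under a foreign mm (line 229), then walks the LRU list oldest-first, asking the registered evict op per node whether to evict it and whether enough has been reclaimed (lines 240-242); accepted nodes are unlinked from the interval tree and batched for removal outside the lock. A simplified sketch of that loop, with a plain function-pointer callback standing in for handler->ops->evict and illustrative evict_* types:

    #include <linux/interval_tree.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct evict_node {
        struct interval_tree_node in;
        struct list_head list;          /* LRU linkage, oldest at the head */
    };

    struct evict_handler {
        struct rb_root_cached root;
        struct list_head lru_list;
        spinlock_t lock;
    };

    /* Walk the LRU oldest-first; the callback decides per node whether to
     * evict it and sets *stop once enough has been reclaimed.  Evicted
     * nodes are unlinked under the lock and returned on del_list so the
     * caller can release them after the lock is dropped. */
    static void evict_walk(struct evict_handler *h, void *arg,
                           bool (*evict)(struct evict_node *, void *, bool *),
                           struct list_head *del_list)
    {
        struct evict_node *n, *tmp;
        unsigned long flags;
        bool stop = false;

        spin_lock_irqsave(&h->lock, flags);
        list_for_each_entry_safe(n, tmp, &h->lru_list, list) {
            if (evict(n, arg, &stop)) {
                interval_tree_remove(&n->in, &h->root);
                list_move(&n->list, del_list);
            }
            if (stop)
                break;
        }
        spin_unlock_irqrestore(&h->lock, flags);
    }
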
259 struct mmu_rb_handler *handler = in mmu_notifier_range_start() local
261 struct rb_root_cached *root = &handler->root; in mmu_notifier_range_start()
265 spin_lock_irqsave(&handler->lock, flags); in mmu_notifier_range_start()
277 spin_unlock_irqrestore(&handler->lock, flags); in mmu_notifier_range_start()
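
mmu_notifier_range_start (lines 259-277) is where the interval tree pays off: when the core MM invalidates a virtual address range, the callback recovers the handler via container_of on the embedded notifier, pulls every cached node overlapping that range out of the tree under the lock, and defers the actual teardown to the workqueue, since the callback itself must not do heavy cleanup. A sketch of that shape with the generic interval-tree API and illustrative inval_* types; in the real driver a per-node ops->invalidate callback decides whether each node really goes:

    #include <linux/interval_tree.h>
    #include <linux/list.h>
    #include <linux/mmu_notifier.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    struct inval_node {
        struct interval_tree_node in;
        struct list_head list;
    };

    struct inval_handler {
        struct mmu_notifier mn;
        struct rb_root_cached root;
        struct list_head del_list;
        struct work_struct del_work;
        struct workqueue_struct *wq;
        spinlock_t lock;
    };

    /* Pull every node overlapping the invalidated range out of the tree
     * and defer the actual teardown to the workqueue.  range->end is
     * exclusive, hence the "- 1" to get an inclusive last address. */
    static int inval_range_start(struct mmu_notifier *mn,
                                 const struct mmu_notifier_range *range)
    {
        struct inval_handler *h = container_of(mn, struct inval_handler, mn);
        struct interval_tree_node *in, *next;
        unsigned long flags;
        bool added = false;

        spin_lock_irqsave(&h->lock, flags);
        for (in = interval_tree_iter_first(&h->root, range->start,
                                           range->end - 1);
             in; in = next) {
            struct inval_node *n = container_of(in, struct inval_node, in);

            /* Fetch the successor before unlinking the current node. */
            next = interval_tree_iter_next(in, range->start, range->end - 1);
            interval_tree_remove(in, &h->root);
            list_move(&n->list, &h->del_list);
            added = true;
        }
        spin_unlock_irqrestore(&h->lock, flags);

        if (added)
            queue_work(h->wq, &h->del_work);
        return 0;
    }
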
289 struct mmu_rb_handler *handler = container_of(work, in handle_remove() local
297 spin_lock_irqsave(&handler->lock, flags); in handle_remove()
298 list_replace_init(&handler->del_list, &del_list); in handle_remove()
299 spin_unlock_irqrestore(&handler->lock, flags); in handle_remove()
304 handler->ops->remove(handler->ops_arg, node); in handle_remove()
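
handle_remove (lines 289-304) is the other half of the deferred-release pattern: it atomically takes ownership of everything parked on del_list with list_replace_init, drops the lock, and only then runs the remove op on each node so that callback is free to sleep. The snapshot-then-process idiom looks roughly like this, with kfree standing in for handler->ops->remove and illustrative wq_* types:

    #include <linux/kernel.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct wq_node {
        struct list_head list;
    };

    struct wq_handler {
        spinlock_t lock;
        struct list_head del_list;
        struct work_struct del_work;
    };

    /* Work handler: snapshot del_list under the lock, then tear the nodes
     * down with the lock dropped so the cleanup may sleep. */
    static void wq_handle_remove(struct work_struct *work)
    {
        struct wq_handler *h = container_of(work, struct wq_handler,
                                            del_work);
        struct wq_node *n, *tmp;
        unsigned long flags;
        struct list_head del_list;

        spin_lock_irqsave(&h->lock, flags);
        list_replace_init(&h->del_list, &del_list);
        spin_unlock_irqrestore(&h->lock, flags);

        list_for_each_entry_safe(n, tmp, &del_list, list) {
            list_del(&n->list);
            kfree(n);               /* stand-in for ops->remove() */
        }
    }
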