// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2020 Cornelis Networks, Inc.
 * Copyright(c) 2016 - 2017 Intel Corporation.
 */

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree_generic.h>
#include <linux/sched/mm.h>

#include "mmu_rb.h"
#include "trace.h"

static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
static int mmu_notifier_range_start(struct mmu_notifier *,
				    const struct mmu_notifier_range *);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
					   unsigned long, unsigned long);
static void release_immediate(struct kref *refcount);
static void handle_remove(struct work_struct *work);

static const struct mmu_notifier_ops mn_opts = {
	.invalidate_range_start = mmu_notifier_range_start,
};

INTERVAL_TREE_DEFINE(struct mmu_rb_node, node, unsigned long, __last,
		     mmu_node_start, mmu_node_last, static, __mmu_int_rb);

static unsigned long mmu_node_start(struct mmu_rb_node *node)
{
	return node->addr & PAGE_MASK;
}

static unsigned long mmu_node_last(struct mmu_rb_node *node)
{
	return PAGE_ALIGN(node->addr + node->len) - 1;
}
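
/*
 * Illustrative example (not part of the driver): with 4 KiB pages, a node
 * with addr = 0x1234 and len = 0x100 is indexed over the page-aligned,
 * inclusive interval [0x1000, 0x1fff]:
 *
 *	mmu_node_start() -> 0x1234 & PAGE_MASK             = 0x1000
 *	mmu_node_last()  -> PAGE_ALIGN(0x1234 + 0x100) - 1 = 0x1fff
 */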

int hfi1_mmu_rb_register(void *ops_arg,
			 struct mmu_rb_ops *ops,
			 struct workqueue_struct *wq,
			 struct mmu_rb_handler **handler)
{
	struct mmu_rb_handler *h;
	int ret;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	h->root = RB_ROOT_CACHED;
	h->ops = ops;
	h->ops_arg = ops_arg;
	INIT_HLIST_NODE(&h->mn.hlist);
	spin_lock_init(&h->lock);
	h->mn.ops = &mn_opts;
	INIT_WORK(&h->del_work, handle_remove);
	INIT_LIST_HEAD(&h->del_list);
	INIT_LIST_HEAD(&h->lru_list);
	h->wq = wq;

	ret = mmu_notifier_register(&h->mn, current->mm);
	if (ret) {
		kfree(h);
		return ret;
	}

	*handler = h;
	return 0;
}
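
/*
 * Example (hypothetical caller sketch, not taken from this driver): a
 * client supplies a struct mmu_rb_ops with its callbacks and registers
 * from process context, since the handler is tied to current->mm.  The
 * names my_ops, my_priv, my_wq and my_handler below are assumptions for
 * illustration only:
 *
 *	static struct mmu_rb_ops my_ops = {
 *		.filter = my_filter,
 *		.evict  = my_evict,
 *		.remove = my_remove,
 *	};
 *
 *	ret = hfi1_mmu_rb_register(my_priv, &my_ops, my_wq, &my_handler);
 *	if (ret)
 *		return ret;
 *	...
 *	hfi1_mmu_rb_unregister(my_handler);
 */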

void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
{
	struct mmu_rb_node *rbnode;
	struct rb_node *node;
	unsigned long flags;
	struct list_head del_list;

	/* Prevent freeing of mm until we are completely finished. */
	mmgrab(handler->mn.mm);

	/* Unregister first so we don't get any more notifications. */
	mmu_notifier_unregister(&handler->mn, handler->mn.mm);

	/*
	 * Make sure the wq delete handler is finished running.  It will not
	 * be triggered once the mmu notifiers are unregistered above.
	 */
	flush_work(&handler->del_work);

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	while ((node = rb_first_cached(&handler->root))) {
		rbnode = rb_entry(node, struct mmu_rb_node, node);
		rb_erase_cached(node, &handler->root);
		/* move from LRU list to delete list */
		list_move(&rbnode->list, &del_list);
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	while (!list_empty(&del_list)) {
		rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
		list_del(&rbnode->list);
		kref_put(&rbnode->refcount, release_immediate);
	}

	/* Now the mm may be freed. */
	mmdrop(handler->mn.mm);

	kfree(handler);
}

int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
		       struct mmu_rb_node *mnode)
{
	struct mmu_rb_node *node;
	unsigned long flags;
	int ret = 0;

	trace_hfi1_mmu_rb_insert(mnode->addr, mnode->len);

	if (current->mm != handler->mn.mm)
		return -EPERM;

	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, mnode->addr, mnode->len);
	if (node) {
		ret = -EEXIST;
		goto unlock;
	}
	__mmu_int_rb_insert(mnode, &handler->root);
	list_add_tail(&mnode->list, &handler->lru_list);
	mnode->handler = handler;
unlock:
	spin_unlock_irqrestore(&handler->lock, flags);
	return ret;
}

/* Caller must hold handler lock */
struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler,
					  unsigned long addr, unsigned long len)
{
	struct mmu_rb_node *node;

	trace_hfi1_mmu_rb_search(addr, len);
	node = __mmu_int_rb_iter_first(&handler->root, addr, (addr + len) - 1);
	if (node)
		list_move_tail(&node->list, &handler->lru_list);
	return node;
}
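
/*
 * Example (hypothetical caller sketch): hfi1_mmu_rb_get_first() requires
 * handler->lock, and any reference the caller wants to keep on the
 * returned node must be taken before the lock is dropped:
 *
 *	spin_lock_irqsave(&handler->lock, flags);
 *	node = hfi1_mmu_rb_get_first(handler, addr, len);
 *	if (node)
 *		kref_get(&node->refcount);
 *	spin_unlock_irqrestore(&handler->lock, flags);
 */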

/* Caller must hold handler lock */
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
					   unsigned long addr,
					   unsigned long len)
{
	struct mmu_rb_node *node = NULL;

	trace_hfi1_mmu_rb_search(addr, len);
	if (!handler->ops->filter) {
		node = __mmu_int_rb_iter_first(&handler->root, addr,
					       (addr + len) - 1);
	} else {
		for (node = __mmu_int_rb_iter_first(&handler->root, addr,
						    (addr + len) - 1);
		     node;
		     node = __mmu_int_rb_iter_next(node, addr,
						   (addr + len) - 1)) {
			if (handler->ops->filter(node, addr, len))
				return node;
		}
	}
	return node;
}

/*
 * Must NOT call while holding mnode->handler->lock.
 * mnode->handler->ops->remove() may sleep and mnode->handler->lock is a
 * spinlock.
 */
static void release_immediate(struct kref *refcount)
{
	struct mmu_rb_node *mnode =
		container_of(refcount, struct mmu_rb_node, refcount);
	mnode->handler->ops->remove(mnode->handler->ops_arg, mnode);
}

/* Caller must hold mnode->handler->lock */
static void release_nolock(struct kref *refcount)
{
	struct mmu_rb_node *mnode =
		container_of(refcount, struct mmu_rb_node, refcount);
	list_move(&mnode->list, &mnode->handler->del_list);
	queue_work(mnode->handler->wq, &mnode->handler->del_work);
}

/*
 * struct mmu_rb_node->refcount kref_put() callback.
 * Adds mmu_rb_node to mmu_rb_node->handler->del_list and queues
 * handler->del_work on handler->wq.
 * Does not remove mmu_rb_node from handler->lru_list or handler->rb_root.
 * Acquires mmu_rb_node->handler->lock; do not call while already holding
 * handler->lock.
 */
void hfi1_mmu_rb_release(struct kref *refcount)
{
	struct mmu_rb_node *mnode =
		container_of(refcount, struct mmu_rb_node, refcount);
	struct mmu_rb_handler *handler = mnode->handler;
	unsigned long flags;

	spin_lock_irqsave(&handler->lock, flags);
	list_move(&mnode->list, &mnode->handler->del_list);
	spin_unlock_irqrestore(&handler->lock, flags);
	queue_work(handler->wq, &handler->del_work);
}
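
/*
 * Example (hypothetical caller sketch): a caller that took its own
 * reference on a node drops it with this callback; the final put defers
 * the actual removal to handler->wq instead of running ops->remove()
 * in the caller's context:
 *
 *	kref_put(&node->refcount, hfi1_mmu_rb_release);
 */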

void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
{
	struct mmu_rb_node *rbnode, *ptr;
	struct list_head del_list;
	unsigned long flags;
	bool stop = false;

	if (current->mm != handler->mn.mm)
		return;

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	list_for_each_entry_safe(rbnode, ptr, &handler->lru_list, list) {
		/* refcount == 1 implies mmu_rb_handler has only rbnode ref */
		if (kref_read(&rbnode->refcount) > 1)
			continue;

		if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
					&stop)) {
			__mmu_int_rb_remove(rbnode, &handler->root);
			/* move from LRU list to delete list */
			list_move(&rbnode->list, &del_list);
		}
		if (stop)
			break;
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	list_for_each_entry_safe(rbnode, ptr, &del_list, list) {
		kref_put(&rbnode->refcount, release_immediate);
	}
}
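
/*
 * Example (hypothetical sketch of an evict callback; my_evict and
 * my_evict_data are assumed names, not part of this driver): the op
 * returns nonzero to have the node removed and sets *stop once enough
 * has been reclaimed:
 *
 *	static int my_evict(void *arg, struct mmu_rb_node *node,
 *			    void *evict_arg, bool *stop)
 *	{
 *		struct my_evict_data *data = evict_arg;
 *
 *		data->cleared += node->len;
 *		if (data->cleared >= data->target)
 *			*stop = true;
 *		return 1;
 *	}
 */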

static int mmu_notifier_range_start(struct mmu_notifier *mn,
				    const struct mmu_notifier_range *range)
{
	struct mmu_rb_handler *handler =
		container_of(mn, struct mmu_rb_handler, mn);
	struct rb_root_cached *root = &handler->root;
	struct mmu_rb_node *node, *ptr = NULL;
	unsigned long flags;

	spin_lock_irqsave(&handler->lock, flags);
	for (node = __mmu_int_rb_iter_first(root, range->start, range->end - 1);
	     node; node = ptr) {
		/* Guard against node removal. */
		ptr = __mmu_int_rb_iter_next(node, range->start,
					     range->end - 1);
		trace_hfi1_mmu_mem_invalidate(node->addr, node->len);
		/* Remove from rb tree and lru_list. */
		__mmu_int_rb_remove(node, root);
		list_del_init(&node->list);
		kref_put(&node->refcount, release_nolock);
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	return 0;
}

/*
 * Work queue function to remove all nodes that have been queued up to
 * be removed.  The key feature is that mm->mmap_lock is not being held
 * and the remove callback can sleep while taking it, if needed.
 */
static void handle_remove(struct work_struct *work)
{
	struct mmu_rb_handler *handler = container_of(work,
						      struct mmu_rb_handler,
						      del_work);
	struct list_head del_list;
	unsigned long flags;
	struct mmu_rb_node *node;

	/* remove anything that is queued to get removed */
	spin_lock_irqsave(&handler->lock, flags);
	list_replace_init(&handler->del_list, &del_list);
	spin_unlock_irqrestore(&handler->lock, flags);

	while (!list_empty(&del_list)) {
		node = list_first_entry(&del_list, struct mmu_rb_node, list);
		list_del(&node->list);
		handler->ops->remove(handler->ops_arg, node);
	}
}