/*
 * Copyright(c) 2020 Cornelis Networks, Inc.
 * Copyright(c) 2016 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree_generic.h>
#include <linux/sched/mm.h>

#include "mmu_rb.h"
#include "trace.h"

static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
static int mmu_notifier_range_start(struct mmu_notifier *,
		const struct mmu_notifier_range *);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
					   unsigned long, unsigned long);
static void release_immediate(struct kref *refcount);
static void handle_remove(struct work_struct *work);

static const struct mmu_notifier_ops mn_opts = {
	.invalidate_range_start = mmu_notifier_range_start,
};

INTERVAL_TREE_DEFINE(struct mmu_rb_node, node, unsigned long, __last,
		     mmu_node_start, mmu_node_last, static, __mmu_int_rb);
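
/*
 * INTERVAL_TREE_DEFINE() expands into the static helpers used below:
 * __mmu_int_rb_insert(), __mmu_int_rb_remove(), __mmu_int_rb_iter_first()
 * and __mmu_int_rb_iter_next(), all keyed on the page-aligned closed
 * interval [mmu_node_start(), mmu_node_last()].
 */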

static unsigned long mmu_node_start(struct mmu_rb_node *node)
{
	return node->addr & PAGE_MASK;
}

static unsigned long mmu_node_last(struct mmu_rb_node *node)
{
	return PAGE_ALIGN(node->addr + node->len) - 1;
}
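
/*
 * Rounding example (assuming 4 KiB pages): a node with addr = 0x1001
 * and len = 0x10 is indexed as [0x1000, 0x1fff]; the start is rounded
 * down to its page boundary and the last byte up to the end of its
 * page.
 */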

int hfi1_mmu_rb_register(void *ops_arg,
			 struct mmu_rb_ops *ops,
			 struct workqueue_struct *wq,
			 struct mmu_rb_handler **handler)
{
	struct mmu_rb_handler *h;
	int ret;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	h->root = RB_ROOT_CACHED;
	h->ops = ops;
	h->ops_arg = ops_arg;
	INIT_HLIST_NODE(&h->mn.hlist);
	spin_lock_init(&h->lock);
	h->mn.ops = &mn_opts;
	INIT_WORK(&h->del_work, handle_remove);
	INIT_LIST_HEAD(&h->del_list);
	INIT_LIST_HEAD(&h->lru_list);
	h->wq = wq;

	ret = mmu_notifier_register(&h->mn, current->mm);
	if (ret) {
		kfree(h);
		return ret;
	}

	*handler = h;
	return 0;
}
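
/*
 * A minimal registration sketch (hypothetical caller names; the real
 * ops tables live in the hfi1 users of this cache).  The callback
 * slots match the ones this file invokes: ->filter (optional lookup
 * filter), ->evict (consulted by hfi1_mmu_rb_evict()) and ->remove
 * (final teardown; may sleep):
 *
 *	static struct mmu_rb_ops my_ops = {
 *		.filter = my_filter,
 *		.evict  = my_evict,
 *		.remove = my_remove,
 *	};
 *
 *	struct mmu_rb_handler *h;
 *	int ret = hfi1_mmu_rb_register(my_arg, &my_ops, my_wq, &h);
 *
 * The handler is bound to current->mm at registration time; inserts
 * and evictions issued from any other mm are rejected.
 */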

void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
{
	struct mmu_rb_node *rbnode;
	struct rb_node *node;
	unsigned long flags;
	struct list_head del_list;

	/* Prevent freeing of mm until we are completely finished. */
	mmgrab(handler->mn.mm);

	/* Unregister first so we don't get any more notifications. */
	mmu_notifier_unregister(&handler->mn, handler->mn.mm);

	/*
	 * Make sure the wq delete handler is finished running.  It will not
	 * be triggered once the mmu notifiers are unregistered above.
	 */
	flush_work(&handler->del_work);

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	while ((node = rb_first_cached(&handler->root))) {
		rbnode = rb_entry(node, struct mmu_rb_node, node);
		rb_erase_cached(node, &handler->root);
		/* move from LRU list to delete list */
		list_move(&rbnode->list, &del_list);
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	while (!list_empty(&del_list)) {
		rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
		list_del(&rbnode->list);
		kref_put(&rbnode->refcount, release_immediate);
	}

	/* Now the mm may be freed. */
	mmdrop(handler->mn.mm);

	kfree(handler);
}

int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
		       struct mmu_rb_node *mnode)
{
	struct mmu_rb_node *node;
	unsigned long flags;
	int ret = 0;

	trace_hfi1_mmu_rb_insert(mnode->addr, mnode->len);

	if (current->mm != handler->mn.mm)
		return -EPERM;

	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, mnode->addr, mnode->len);
	if (node) {
		ret = -EEXIST;
		goto unlock;
	}
	__mmu_int_rb_insert(mnode, &handler->root);
	list_add_tail(&mnode->list, &handler->lru_list);
	mnode->handler = handler;
unlock:
	spin_unlock_irqrestore(&handler->lock, flags);
	return ret;
}
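
/*
 * Expected insert pattern (a sketch with a hypothetical caller, which
 * owns the node and is responsible for initializing its refcount):
 *
 *	node->addr = vaddr;
 *	node->len = length;
 *	kref_init(&node->refcount);
 *	ret = hfi1_mmu_rb_insert(handler, node);
 *
 * A range that intersects an existing node (subject to the optional
 * ->filter callback) fails with -EEXIST rather than being merged.
 */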

/* Caller must hold handler lock */
struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler,
					  unsigned long addr, unsigned long len)
{
	struct mmu_rb_node *node;

	trace_hfi1_mmu_rb_search(addr, len);
	node = __mmu_int_rb_iter_first(&handler->root, addr, (addr + len) - 1);
	if (node)
		list_move_tail(&node->list, &handler->lru_list);
	return node;
}

/* Caller must hold handler lock */
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
					   unsigned long addr,
					   unsigned long len)
{
	struct mmu_rb_node *node = NULL;

	trace_hfi1_mmu_rb_search(addr, len);
	if (!handler->ops->filter) {
		node = __mmu_int_rb_iter_first(&handler->root, addr,
					       (addr + len) - 1);
	} else {
		for (node = __mmu_int_rb_iter_first(&handler->root, addr,
						    (addr + len) - 1);
		     node;
		     node = __mmu_int_rb_iter_next(node, addr,
						   (addr + len) - 1)) {
			if (handler->ops->filter(node, addr, len))
				return node;
		}
	}
	return node;
}

/*
 * Must NOT call while holding mnode->handler->lock.
 * mnode->handler->ops->remove() may sleep and mnode->handler->lock is a
 * spinlock.
 */
static void release_immediate(struct kref *refcount)
{
	struct mmu_rb_node *mnode =
		container_of(refcount, struct mmu_rb_node, refcount);
	mnode->handler->ops->remove(mnode->handler->ops_arg, mnode);
}

/* Caller must hold mnode->handler->lock */
static void release_nolock(struct kref *refcount)
{
	struct mmu_rb_node *mnode =
		container_of(refcount, struct mmu_rb_node, refcount);
	list_move(&mnode->list, &mnode->handler->del_list);
	queue_work(mnode->handler->wq, &mnode->handler->del_work);
}

/*
 * struct mmu_rb_node->refcount kref_put() callback.
 * Adds mmu_rb_node to mmu_rb_node->handler->del_list and queues
 * handler->del_work on handler->wq.
 * Moves mmu_rb_node from handler->lru_list to handler->del_list but
 * does not remove it from handler->root.
 * Acquires mmu_rb_node->handler->lock; do not call while already holding
 * handler->lock.
 */
void hfi1_mmu_rb_release(struct kref *refcount)
{
	struct mmu_rb_node *mnode =
		container_of(refcount, struct mmu_rb_node, refcount);
	struct mmu_rb_handler *handler = mnode->handler;
	unsigned long flags;

	spin_lock_irqsave(&handler->lock, flags);
	list_move(&mnode->list, &mnode->handler->del_list);
	spin_unlock_irqrestore(&handler->lock, flags);
	queue_work(handler->wq, &handler->del_work);
}
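
/*
 * Typical final-reference drop from a caller that does not hold
 * handler->lock (sketch):
 *
 *	kref_put(&node->refcount, hfi1_mmu_rb_release);
 *
 * The ->remove() callback then runs later from handler->wq (see
 * handle_remove()), so the put itself never sleeps.
 */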

void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
{
	struct mmu_rb_node *rbnode, *ptr;
	struct list_head del_list;
	unsigned long flags;
	bool stop = false;

	if (current->mm != handler->mn.mm)
		return;

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	list_for_each_entry_safe(rbnode, ptr, &handler->lru_list, list) {
		/* refcount == 1 implies mmu_rb_handler has only rbnode ref */
		if (kref_read(&rbnode->refcount) > 1)
			continue;

		if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
					&stop)) {
			__mmu_int_rb_remove(rbnode, &handler->root);
			/* move from LRU list to delete list */
			list_move(&rbnode->list, &del_list);
		}
		if (stop)
			break;
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	list_for_each_entry_safe(rbnode, ptr, &del_list, list) {
		kref_put(&rbnode->refcount, release_immediate);
	}
}
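
/*
 * Ordering note: hfi1_mmu_rb_insert() adds nodes at the tail of
 * lru_list and hfi1_mmu_rb_get_first() moves each hit back to the
 * tail, so the eviction walk above starts with the least-recently-used
 * nodes.
 */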

static int mmu_notifier_range_start(struct mmu_notifier *mn,
		const struct mmu_notifier_range *range)
{
	struct mmu_rb_handler *handler =
		container_of(mn, struct mmu_rb_handler, mn);
	struct rb_root_cached *root = &handler->root;
	struct mmu_rb_node *node, *ptr = NULL;
	unsigned long flags;

	spin_lock_irqsave(&handler->lock, flags);
	for (node = __mmu_int_rb_iter_first(root, range->start, range->end - 1);
	     node; node = ptr) {
		/* Guard against node removal. */
		ptr = __mmu_int_rb_iter_next(node, range->start,
					     range->end - 1);
		trace_hfi1_mmu_mem_invalidate(node->addr, node->len);
		/* Remove from rb tree and lru_list. */
		__mmu_int_rb_remove(node, root);
		list_del_init(&node->list);
		kref_put(&node->refcount, release_nolock);
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	return 0;
}
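
/*
 * mmu_notifier ranges are half-open, [start, end), while the interval
 * tree operates on closed intervals; hence the "range->end - 1" bounds
 * in the iteration above.
 */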

/*
 * Work queue function to remove all nodes that have been queued up to
 * be removed.  The key feature is that mm->mmap_lock is not being held
 * and the remove callback can sleep while taking it, if needed.
 */
static void handle_remove(struct work_struct *work)
{
	struct mmu_rb_handler *handler = container_of(work,
						struct mmu_rb_handler,
						del_work);
	struct list_head del_list;
	unsigned long flags;
	struct mmu_rb_node *node;

	/* remove anything that is queued to get removed */
	spin_lock_irqsave(&handler->lock, flags);
	list_replace_init(&handler->del_list, &del_list);
	spin_unlock_irqrestore(&handler->lock, flags);

	while (!list_empty(&del_list)) {
		node = list_first_entry(&del_list, struct mmu_rb_node, list);
		list_del(&node->list);
		handler->ops->remove(handler->ops_arg, node);
	}
}
348