/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2013 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
#include <linux/iommu.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/pci.h>

#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_uiom_interval_tree.h"

static struct workqueue_struct *usnic_uiom_wq;

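/*
 * Number of scatterlist entries that fit in the tail of a page-sized
 * usnic_uiom_chunk after its fixed header fields.
 */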
#define USNIC_UIOM_PAGE_CHUNK						\
	((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list))	/\
	((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] -	\
	(void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))

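/*
 * Deferred work handler: drop the locked_vm accounting for a registration
 * whose mmap_sem could not be taken at release time, then release the mm.
 */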
static void usnic_uiom_reg_account(struct work_struct *work)
{
	struct usnic_uiom_reg *umem = container_of(work,
						struct usnic_uiom_reg, work);

	down_write(&umem->mm->mmap_sem);
	umem->mm->locked_vm -= umem->diff;
	up_write(&umem->mm->mmap_sem);
	mmput(umem->mm);
	kfree(umem);
}

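/* IOMMU fault handler: log the faulting device, domain, IOVA and flags. */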
static int usnic_uiom_dma_fault(struct iommu_domain *domain,
				struct device *dev,
				unsigned long iova, int flags,
				void *token)
{
	usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n",
		dev_name(dev),
		domain, iova, flags);
	return -ENOSYS;
}

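/*
 * Unpin every page referenced by the chunk list, optionally marking the
 * pages dirty, and free the chunks themselves.
 */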
static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
{
	struct usnic_uiom_chunk *chunk, *tmp;
	struct page *page;
	struct scatterlist *sg;
	int i;
	dma_addr_t pa;

	list_for_each_entry_safe(chunk, tmp, chunk_list, list) {
		for_each_sg(chunk->page_list, sg, chunk->nents, i) {
			page = sg_page(sg);
			pa = sg_phys(sg);
			if (dirty)
				set_page_dirty_lock(page);
			put_page(page);
			usnic_dbg("pa: %pa\n", &pa);
		}
		kfree(chunk);
	}
}

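/*
 * Pin the user pages backing [addr, addr + size), charge them against
 * RLIMIT_MEMLOCK, and collect them into a list of scatterlist chunks.
 */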
static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
				int dmasync, struct list_head *chunk_list)
{
	struct page **page_list;
	struct scatterlist *sg;
	struct usnic_uiom_chunk *chunk;
	unsigned long locked;
	unsigned long lock_limit;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
	int off;
	int i;
	int flags;
	dma_addr_t pa;
	unsigned int gup_flags;

	if (!can_do_mlock())
		return -EPERM;

	INIT_LIST_HEAD(chunk_list);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;

	down_write(&current->mm->mmap_sem);

	locked = npages + current->mm->locked_vm;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto out;
	}

	flags = IOMMU_READ | IOMMU_CACHE;
	flags |= (writable) ? IOMMU_WRITE : 0;
	gup_flags = FOLL_WRITE;
	gup_flags |= (writable) ? 0 : FOLL_FORCE;
	cur_base = addr & PAGE_MASK;
	ret = 0;

	while (npages) {
		ret = get_user_pages(cur_base,
					min_t(unsigned long, npages,
					PAGE_SIZE / sizeof(struct page *)),
					gup_flags, page_list, NULL);

		if (ret < 0)
			goto out;

		npages -= ret;
		off = 0;

		while (ret) {
			chunk = kmalloc(sizeof(*chunk) +
					sizeof(struct scatterlist) *
					min_t(int, ret, USNIC_UIOM_PAGE_CHUNK),
					GFP_KERNEL);
			if (!chunk) {
				ret = -ENOMEM;
				goto out;
			}

			chunk->nents = min_t(int, ret, USNIC_UIOM_PAGE_CHUNK);
			sg_init_table(chunk->page_list, chunk->nents);
			for_each_sg(chunk->page_list, sg, chunk->nents, i) {
				sg_set_page(sg, page_list[i + off],
						PAGE_SIZE, 0);
				pa = sg_phys(sg);
				usnic_dbg("va: 0x%lx pa: %pa\n",
						cur_base + i*PAGE_SIZE, &pa);
			}
			cur_base += chunk->nents * PAGE_SIZE;
			ret -= chunk->nents;
			off += chunk->nents;
			list_add_tail(&chunk->list, chunk_list);
		}

		ret = 0;
	}

out:
	if (ret < 0)
		usnic_uiom_put_pages(chunk_list, 0);
	else
		current->mm->locked_vm = locked;

	up_write(&current->mm->mmap_sem);
	free_page((unsigned long) page_list);
	return ret;
}

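/* Unmap each interval from the PD's IOMMU domain one page at a time. */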
static void usnic_uiom_unmap_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_interval_node *interval, *tmp;
	unsigned long va, size;

	list_for_each_entry_safe(interval, tmp, intervals, link) {
		va = interval->start << PAGE_SHIFT;
		size = ((interval->last - interval->start) + 1) << PAGE_SHIFT;
		while (size > 0) {
			/* Workaround for RH 970401 */
			usnic_dbg("va 0x%lx size 0x%lx", va, PAGE_SIZE);
			iommu_unmap(pd->domain, va, PAGE_SIZE);
			va += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
}

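/*
 * Remove the registration's intervals from the PD's interval tree, unmap
 * them from the IOMMU, and unpin the underlying pages.
 */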
static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd,
					struct usnic_uiom_reg *uiomr,
					int dirty)
{
	int npages;
	unsigned long vpn_start, vpn_last;
	struct usnic_uiom_interval_node *interval, *tmp;
	int writable = 0;
	LIST_HEAD(rm_intervals);

	npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
	vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	spin_lock(&pd->lock);
	usnic_uiom_remove_interval(&pd->rb_root, vpn_start,
					vpn_last, &rm_intervals);
	usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd);

	list_for_each_entry_safe(interval, tmp, &rm_intervals, link) {
		if (interval->flags & IOMMU_WRITE)
			writable = 1;
		list_del(&interval->link);
		kfree(interval);
	}

	usnic_uiom_put_pages(&uiomr->chunk_list, dirty & writable);
	spin_unlock(&pd->lock);
}

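/*
 * Walk the pinned chunks and IOMMU-map each interval, coalescing runs of
 * physically contiguous pages into single iommu_map() calls.
 */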
static int usnic_uiom_map_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_reg *uiomr)
{
	int i, err;
	size_t size;
	struct usnic_uiom_chunk *chunk;
	struct usnic_uiom_interval_node *interval_node;
	dma_addr_t pa;
	dma_addr_t pa_start = 0;
	dma_addr_t pa_end = 0;
	long int va_start = -EINVAL;
	struct usnic_uiom_pd *pd = uiomr->pd;
	long int va = uiomr->va & PAGE_MASK;
	int flags = IOMMU_READ | IOMMU_CACHE;

	flags |= (uiomr->writable) ? IOMMU_WRITE : 0;
	chunk = list_first_entry(&uiomr->chunk_list, struct usnic_uiom_chunk,
									list);
	list_for_each_entry(interval_node, intervals, link) {
iter_chunk:
		for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) {
			pa = sg_phys(&chunk->page_list[i]);
			if ((va >> PAGE_SHIFT) < interval_node->start)
				continue;

			if ((va >> PAGE_SHIFT) == interval_node->start) {
				/* First page of the interval */
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			WARN_ON(va_start == -EINVAL);

			if ((pa_end + PAGE_SIZE != pa) &&
					(pa != pa_start)) {
				/* PAs are not contiguous */
				size = pa_end - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
							size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			if ((va >> PAGE_SHIFT) == interval_node->last) {
				/* Last page of the interval */
				size = pa - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
						size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				break;
			}

			if (pa != pa_start)
				pa_end += PAGE_SIZE;
		}

		if (i == chunk->nents) {
			/*
			 * Hit last entry of the chunk,
			 * hence advance to next chunk
			 */
			chunk = list_first_entry(&chunk->list,
							struct usnic_uiom_chunk,
							list);
			goto iter_chunk;
		}
	}

	return 0;

err_out:
	usnic_uiom_unmap_sorted_intervals(intervals, pd);
	return err;
}

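/*
 * Register a user memory region with the PD: pin its pages, IOMMU-map the
 * portions not already present in the interval tree, and record the new
 * interval.
 */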
struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
						unsigned long addr, size_t size,
						int writable, int dmasync)
{
	struct usnic_uiom_reg *uiomr;
	unsigned long va_base, vpn_start, vpn_last;
	unsigned long npages;
	int offset, err;
	LIST_HEAD(sorted_diff_intervals);

	/*
	 * The Intel IOMMU map operation returns an error if a translation
	 * entry is changed from read to write.  This module cannot unmap
	 * and then remap the entry with the corrected permission, because
	 * that would open a small window in which hardware DMA could page
	 * fault.  Hence, make all entries writable.
	 */
	writable = 1;

	va_base = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT;
	vpn_start = (addr & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	uiomr = kmalloc(sizeof(*uiomr), GFP_KERNEL);
	if (!uiomr)
		return ERR_PTR(-ENOMEM);

	uiomr->va = va_base;
	uiomr->offset = offset;
	uiomr->length = size;
	uiomr->writable = writable;
	uiomr->pd = pd;

	err = usnic_uiom_get_pages(addr, size, writable, dmasync,
					&uiomr->chunk_list);
	if (err) {
		usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_free_uiomr;
	}

	spin_lock(&pd->lock);
	err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last,
						(writable) ? IOMMU_WRITE : 0,
						IOMMU_WRITE,
						&pd->rb_root,
						&sorted_diff_intervals);
	if (err) {
		usnic_err("Failed disjoint interval vpn [0x%lx,0x%lx] err %d\n",
						vpn_start, vpn_last, err);
		goto out_put_pages;
	}

	err = usnic_uiom_map_sorted_intervals(&sorted_diff_intervals, uiomr);
	if (err) {
		usnic_err("Failed map interval vpn [0x%lx,0x%lx] err %d\n",
						vpn_start, vpn_last, err);
		goto out_put_intervals;
	}

	err = usnic_uiom_insert_interval(&pd->rb_root, vpn_start, vpn_last,
					(writable) ? IOMMU_WRITE : 0);
	if (err) {
		usnic_err("Failed insert interval vpn [0x%lx,0x%lx] err %d\n",
						vpn_start, vpn_last, err);
		goto out_unmap_intervals;
	}

	usnic_uiom_put_interval_set(&sorted_diff_intervals);
	spin_unlock(&pd->lock);

	return uiomr;

out_unmap_intervals:
	usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd);
out_put_intervals:
	usnic_uiom_put_interval_set(&sorted_diff_intervals);
out_put_pages:
	usnic_uiom_put_pages(&uiomr->chunk_list, 0);
	spin_unlock(&pd->lock);
out_free_uiomr:
	kfree(uiomr);
	return ERR_PTR(err);
}

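/*
 * Release a registration: unmap and unpin its pages and return the
 * locked_vm accounting, deferring the accounting to a workqueue if
 * mmap_sem cannot be taken during teardown.
 */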
void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing)
{
	struct mm_struct *mm;
	unsigned long diff;

	__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);

	mm = get_task_mm(current);
	if (!mm) {
		kfree(uiomr);
		return;
	}

	diff = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;

	/*
	 * We may be called with the mm's mmap_sem already held.  This
	 * can happen when a userspace munmap() is the call that drops
	 * the last reference to our file and calls our release
	 * method.  If there are memory regions to destroy, we'll end
	 * up here and not be able to take the mmap_sem.  In that case
	 * we defer the locked_vm accounting to the driver's workqueue.
	 */
	if (closing) {
		if (!down_write_trylock(&mm->mmap_sem)) {
			INIT_WORK(&uiomr->work, usnic_uiom_reg_account);
			uiomr->mm = mm;
			uiomr->diff = diff;

			queue_work(usnic_uiom_wq, &uiomr->work);
			return;
		}
	} else
		down_write(&mm->mmap_sem);

	current->mm->locked_vm -= diff;
	up_write(&mm->mmap_sem);
	mmput(mm);
	kfree(uiomr);
}

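/* Allocate a protection domain backed by a new IOMMU domain on the PCI bus. */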
struct usnic_uiom_pd *usnic_uiom_alloc_pd(void)
{
	struct usnic_uiom_pd *pd;
	void *domain;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->domain = domain = iommu_domain_alloc(&pci_bus_type);
	if (!domain) {
		usnic_err("Failed to allocate IOMMU domain");
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL);

	spin_lock_init(&pd->lock);
	INIT_LIST_HEAD(&pd->devs);

	return pd;
}

void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd)
{
	iommu_domain_free(pd->domain);
	kfree(pd);
}

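/*
 * Attach a device to the PD's IOMMU domain; the device's IOMMU must
 * support cache coherency.
 */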
int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int err;

	uiom_dev = kzalloc(sizeof(*uiom_dev), GFP_ATOMIC);
	if (!uiom_dev)
		return -ENOMEM;
	uiom_dev->dev = dev;

	err = iommu_attach_device(pd->domain, dev);
	if (err)
		goto out_free_dev;

	if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
		usnic_err("IOMMU of %s does not support cache coherency\n",
				dev_name(dev));
		err = -EINVAL;
		goto out_detach_device;
	}

	spin_lock(&pd->lock);
	list_add_tail(&uiom_dev->link, &pd->devs);
	pd->dev_cnt++;
	spin_unlock(&pd->lock);

	return 0;

out_detach_device:
	iommu_detach_device(pd->domain, dev);
out_free_dev:
	kfree(uiom_dev);
	return err;
}

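/* Remove a previously attached device from the PD and its IOMMU domain. */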
void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int found = 0;

	spin_lock(&pd->lock);
	list_for_each_entry(uiom_dev, &pd->devs, link) {
		if (uiom_dev->dev == dev) {
			found = 1;
			break;
		}
	}

	if (!found) {
		usnic_err("Unable to free dev %s - not found\n",
				dev_name(dev));
		spin_unlock(&pd->lock);
		return;
	}

	list_del(&uiom_dev->link);
	pd->dev_cnt--;
	spin_unlock(&pd->lock);

	return iommu_detach_device(pd->domain, dev);
}

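/*
 * Return a NULL-terminated array of the devices currently attached to the
 * PD; free it with usnic_uiom_free_dev_list().
 */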
struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_dev *uiom_dev;
	struct device **devs;
	int i = 0;

	spin_lock(&pd->lock);
	devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC);
	if (!devs) {
		devs = ERR_PTR(-ENOMEM);
		goto out;
	}

	list_for_each_entry(uiom_dev, &pd->devs, link) {
		devs[i++] = uiom_dev->dev;
	}
out:
	spin_unlock(&pd->lock);
	return devs;
}

void usnic_uiom_free_dev_list(struct device **devs)
{
	kfree(devs);
}

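/* Module init: require an IOMMU on the PCI bus and create the accounting workqueue. */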
int usnic_uiom_init(char *drv_name)
{
	if (!iommu_present(&pci_bus_type)) {
		usnic_err("IOMMU required but not present or enabled.  USNIC QPs will not function w/o enabling IOMMU\n");
		return -EPERM;
	}

	usnic_uiom_wq = create_workqueue(drv_name);
	if (!usnic_uiom_wq) {
		usnic_err("Unable to alloc wq for drv %s\n", drv_name);
		return -ENOMEM;
	}

	return 0;
}

void usnic_uiom_fini(void)
{
	flush_workqueue(usnic_uiom_wq);
	destroy_workqueue(usnic_uiom_wq);
}