// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)    "iommu: " fmt

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <linux/module.h>
#include <trace/events/iommu.h>

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);

static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = true;
static u32 iommu_cmd_line __read_mostly;

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct mutex mutex;
	struct blocking_notifier_head notifier;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *domain;
	struct list_head entry;
};

struct group_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

static const char * const iommu_group_resv_type_string[] = {
	[IOMMU_RESV_DIRECT]			= "direct",
	[IOMMU_RESV_DIRECT_RELAXABLE]		= "direct-relaxable",
	[IOMMU_RESV_RESERVED]			= "reserved",
	[IOMMU_RESV_MSI]			= "msi",
	[IOMMU_RESV_SW_MSI]			= "msi",
};

#define IOMMU_CMD_LINE_DMA_API		BIT(0)

static void iommu_set_cmd_line_dma_api(void)
{
	iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
}

static bool iommu_cmd_line_dma_api(void)
{
	return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API);
}

static int iommu_alloc_default_domain(struct iommu_group *group,
				      struct device *dev);
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group);
static int iommu_create_device_direct_mappings(struct iommu_group *group,
					       struct device *dev);
static struct iommu_group *iommu_group_get_for_dev(struct device *dev);

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =		\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

/*
 * Use a function instead of an array here because the domain-type is a
 * bit-field, so an array would waste memory.
 */
static const char *iommu_domain_type_str(unsigned int t)
{
	switch (t) {
	case IOMMU_DOMAIN_BLOCKED:
		return "Blocked";
	case IOMMU_DOMAIN_IDENTITY:
		return "Passthrough";
	case IOMMU_DOMAIN_UNMANAGED:
		return "Unmanaged";
	case IOMMU_DOMAIN_DMA:
		return "Translated";
	default:
		return "Unknown";
	}
}

static int __init iommu_subsys_init(void)
{
	bool cmd_line = iommu_cmd_line_dma_api();

	if (!cmd_line) {
		if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
			iommu_set_default_passthrough(false);
		else
			iommu_set_default_translated(false);

		if (iommu_default_passthrough() && mem_encrypt_active()) {
			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
			iommu_set_default_translated(false);
		}
	}

	pr_info("Default domain type: %s %s\n",
		iommu_domain_type_str(iommu_def_domain_type),
		cmd_line ? "(set via kernel command line)" : "");

	return 0;
}
subsys_initcall(iommu_subsys_init);

int iommu_device_register(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_add_tail(&iommu->list, &iommu_device_list);
	spin_unlock(&iommu_device_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_device_register);

void iommu_device_unregister(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_del(&iommu->list);
	spin_unlock(&iommu_device_lock);
}
EXPORT_SYMBOL_GPL(iommu_device_unregister);

static struct dev_iommu *dev_iommu_get(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	if (param)
		return param;

	param = kzalloc(sizeof(*param), GFP_KERNEL);
	if (!param)
		return NULL;

	mutex_init(&param->lock);
	dev->iommu = param;
	return param;
}

static void dev_iommu_free(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	dev->iommu = NULL;
	if (param->fwspec) {
		fwnode_handle_put(param->fwspec->iommu_fwnode);
		kfree(param->fwspec);
	}
	kfree(param);
}

static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_device *iommu_dev;
	struct iommu_group *group;
	int ret;

	if (!ops)
		return -ENODEV;

	if (!dev_iommu_get(dev))
		return -ENOMEM;

	if (!try_module_get(ops->owner)) {
		ret = -EINVAL;
		goto err_free;
	}

	iommu_dev = ops->probe_device(dev);
	if (IS_ERR(iommu_dev)) {
		ret = PTR_ERR(iommu_dev);
		goto out_module_put;
	}

	dev->iommu->iommu_dev = iommu_dev;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_release;
	}
	iommu_group_put(group);

	if (group_list && !group->default_domain && list_empty(&group->entry))
		list_add_tail(&group->entry, group_list);

	iommu_device_link(iommu_dev, dev);

	return 0;

out_release:
	ops->release_device(dev);

out_module_put:
	module_put(ops->owner);

err_free:
	dev_iommu_free(dev);

	return ret;
}

int iommu_probe_device(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	int ret;

	ret = __iommu_probe_device(dev, NULL);
	if (ret)
		goto err_out;

	group = iommu_group_get(dev);
	if (!group) {
		ret = -ENODEV;
		goto err_release;
	}

	/*
	 * Try to allocate a default domain - needs support from the
	 * IOMMU driver. There are still some drivers which don't
	 * support default domains, so the return value is not yet
	 * checked.
	 */
	iommu_alloc_default_domain(group, dev);

	if (group->default_domain) {
		ret = __iommu_attach_device(group->default_domain, dev);
		if (ret) {
			iommu_group_put(group);
			goto err_release;
		}
	}

	iommu_create_device_direct_mappings(group, dev);

	iommu_group_put(group);

	if (ops->probe_finalize)
		ops->probe_finalize(dev);

	return 0;

err_release:
	iommu_release_device(dev);

err_out:
	return ret;
}

void iommu_release_device(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (!dev->iommu)
		return;

	iommu_device_unlink(dev->iommu->iommu_dev, dev);

	ops->release_device(dev);

	iommu_group_remove_device(dev);
	module_put(ops->owner);
	dev_iommu_free(dev);
}

static int __init iommu_set_def_domain_type(char *str)
{
	bool pt;
	int ret;

	ret = kstrtobool(str, &pt);
	if (ret)
		return ret;

	if (pt)
		iommu_set_default_passthrough(true);
	else
		iommu_set_default_translated(true);

	return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);

static int __init iommu_dma_setup(char *str)
{
	return kstrtobool(str, &iommu_dma_strict);
}
early_param("iommu.strict", iommu_dma_setup);
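
/*
 * Usage note (illustrative, not part of the original file): both knobs
 * above are kernel command-line parameters, e.g.
 *
 *	iommu.passthrough=1	(default to an identity/passthrough domain)
 *	iommu.strict=0		(allow deferred, batched IOTLB invalidation)
 */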

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}

/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * Elements are sorted by start address and overlapping segments
 * of the same type are merged.
 */
static int iommu_insert_resv_region(struct iommu_resv_region *new,
				    struct list_head *regions)
{
	struct iommu_resv_region *iter, *tmp, *nr, *top;
	LIST_HEAD(stack);

	nr = iommu_alloc_resv_region(new->start, new->length,
				     new->prot, new->type);
	if (!nr)
		return -ENOMEM;

	/* First add the new element based on start address sorting */
	list_for_each_entry(iter, regions, list) {
		if (nr->start < iter->start ||
		    (nr->start == iter->start && nr->type <= iter->type))
			break;
	}
	list_add_tail(&nr->list, &iter->list);

	/* Merge overlapping segments of type nr->type in @regions, if any */
	list_for_each_entry_safe(iter, tmp, regions, list) {
		phys_addr_t top_end, iter_end = iter->start + iter->length - 1;

		/* no merge needed on elements of different types than @new */
		if (iter->type != new->type) {
			list_move_tail(&iter->list, &stack);
			continue;
		}

		/* look for the last stack element of same type as @iter */
		list_for_each_entry_reverse(top, &stack, list)
			if (top->type == iter->type)
				goto check_overlap;

		list_move_tail(&iter->list, &stack);
		continue;

check_overlap:
		top_end = top->start + top->length - 1;

		if (iter->start > top_end + 1) {
			list_move_tail(&iter->list, &stack);
		} else {
			top->length = max(top_end, iter_end) - top->start + 1;
			list_del(&iter->list);
			kfree(iter);
		}
	}
	list_splice(&stack, regions);
	return 0;
}
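
/*
 * Worked example (illustrative): inserting [0x1000, 0x2fff] and then
 * [0x2000, 0x3fff], both IOMMU_RESV_DIRECT, into an empty list yields a
 * single merged region [0x1000, 0x3fff].  An overlapping region of a
 * different type, e.g. IOMMU_RESV_MSI, would remain a separate entry,
 * since only same-type overlaps are merged.
 */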

static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
				 struct list_head *group_resv_regions)
{
	struct iommu_resv_region *entry;
	int ret = 0;

	list_for_each_entry(entry, dev_resv_regions, list) {
		ret = iommu_insert_resv_region(entry, group_resv_regions);
		if (ret)
			break;
	}
	return ret;
}

int iommu_get_group_resv_regions(struct iommu_group *group,
				 struct list_head *head)
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	list_for_each_entry(device, &group->devices, list) {
		struct list_head dev_resv_regions;

		INIT_LIST_HEAD(&dev_resv_regions);
		iommu_get_resv_regions(device->dev, &dev_resv_regions);
		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
		iommu_put_resv_regions(device->dev, &dev_resv_regions);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
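
/*
 * Caller sketch (illustrative, mirroring iommu_group_show_resv_regions()
 * below): the caller owns the returned entries and must free them.
 *
 *	LIST_HEAD(resv);
 *	struct iommu_resv_region *region, *next;
 *
 *	iommu_get_group_resv_regions(group, &resv);
 *	list_for_each_entry_safe(region, next, &resv, list) {
 *		pr_info("resv at %pa, %zu bytes\n", &region->start,
 *			region->length);
 *		kfree(region);
 *	}
 */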

static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
					     char *buf)
{
	struct iommu_resv_region *region, *next;
	struct list_head group_resv_regions;
	char *str = buf;

	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);

	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
		str += sprintf(str, "0x%016llx 0x%016llx %s\n",
			       (long long int)region->start,
			       (long long int)(region->start +
						region->length - 1),
			       iommu_group_resv_type_string[region->type]);
		kfree(region);
	}

	return (str - buf);
}

static ssize_t iommu_group_show_type(struct iommu_group *group,
				     char *buf)
{
	char *type = "unknown\n";

	if (group->default_domain) {
		switch (group->default_domain->type) {
		case IOMMU_DOMAIN_BLOCKED:
			type = "blocked\n";
			break;
		case IOMMU_DOMAIN_IDENTITY:
			type = "identity\n";
			break;
		case IOMMU_DOMAIN_UNMANAGED:
			type = "unmanaged\n";
			break;
		case IOMMU_DOMAIN_DMA:
			type = "DMA\n";
			break;
		}
	}
	strcpy(buf, type);

	return strlen(type);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
			iommu_group_show_resv_regions, NULL);

static IOMMU_GROUP_ATTR(type, 0444, iommu_group_show_type, NULL);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_simple_remove(&iommu_group_ida, group->id);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);

	kfree(group->name);
	kfree(group);
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	INIT_LIST_HEAD(&group->entry);
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		ida_simple_remove(&iommu_group_ida, group->id);
		kobject_put(&group->kobj);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group.  We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	ret = iommu_group_create_file(group,
				      &iommu_group_attr_reserved_regions);
	if (ret)
		return ERR_PTR(ret);

	ret = iommu_group_create_file(group, &iommu_group_attr_type);
	if (ret)
		return ERR_PTR(ret);

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
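
/*
 * Illustrative sketch (hypothetical driver code): an IOMMU driver with no
 * topology-based aliasing could implement its device_group callback by
 * allocating one group per device, which is essentially what
 * generic_device_group() further below does:
 *
 *	static struct iommu_group *my_device_group(struct device *dev)
 *	{
 *		return iommu_group_alloc();
 *	}
 */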

struct iommu_group *iommu_group_get_by_id(int id)
{
	struct kobject *group_kobj;
	struct iommu_group *group;
	const char *name;

	if (!iommu_group_kset)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%d", id);
	if (!name)
		return NULL;

	group_kobj = kset_find_obj(iommu_group_kset, name);
	kfree(name);

	if (!group_kobj)
		return NULL;

	group = container_of(group_kobj, struct iommu_group, kobj);
	BUG_ON(group->id != id);

	kobject_get(group->devices_kobj);
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
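
/*
 * Illustrative sketch (hypothetical names): a driver can attach per-group
 * private data that is released automatically from iommu_group_release():
 *
 *	struct my_group_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
 *
 *	if (data)
 *		iommu_group_set_iommudata(group, data, my_group_data_free);
 *
 * where my_group_data_free() has the void (*)(void *) signature expected
 * by the @release argument.
 */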

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group.  When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_create_device_direct_mappings(struct iommu_group *group,
					       struct device *dev)
{
	struct iommu_domain *domain = group->default_domain;
	struct iommu_resv_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
		return 0;

	BUG_ON(!domain->pgsize_bitmap);

	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
	INIT_LIST_HEAD(&mappings);

	iommu_get_resv_regions(dev, &mappings);

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;

		if (domain->ops->apply_resv_region)
			domain->ops->apply_resv_region(dev, domain, entry);

		start = ALIGN(entry->start, pg_size);
		end   = ALIGN(entry->start + entry->length, pg_size);

		if (entry->type != IOMMU_RESV_DIRECT &&
		    entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
			continue;

		for (addr = start; addr < end; addr += pg_size) {
			phys_addr_t phys_addr;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (phys_addr)
				continue;

			ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
			if (ret)
				goto out;
		}
	}

	iommu_flush_iotlb_all(domain);

out:
	iommu_put_resv_regions(dev, &mappings);

	return ret;
}

static bool iommu_is_attach_deferred(struct iommu_domain *domain,
				     struct device *dev)
{
	if (domain->ops->is_attach_deferred)
		return domain->ops->is_attach_deferred(domain, dev);

	return false;
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct group_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret)
		goto err_free_device;

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		ret = -ENOMEM;
		goto err_remove_link;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	if (group->domain && !iommu_is_attach_deferred(group->domain, dev))
		ret = __iommu_attach_device(group->domain, dev);
	mutex_unlock(&group->mutex);
	if (ret)
		goto err_put_group;

	/* Notify any listeners about change to group. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

	trace_add_device_to_group(group->id, dev);

	dev_info(dev, "Adding to iommu group %d\n", group->id);

	return 0;

err_put_group:
	mutex_lock(&group->mutex);
	list_del(&device->list);
	mutex_unlock(&group->mutex);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
	sysfs_remove_link(group->devices_kobj, device->name);
err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *tmp_device, *device = NULL;

	if (!group)
		return;

	dev_info(dev, "Removing from iommu group %d\n", group->id);

	/* Pre-notify listeners that a device is being removed. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int iommu_group_device_count(struct iommu_group *group)
{
	struct group_device *entry;
	int ret = 0;

	list_for_each_entry(entry, &group->devices, list)
		ret++;

	return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
				      int (*fn)(struct device *, void *))
{
	struct group_device *device;
	int ret = 0;

	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	return ret;
}

int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_group_for_each_dev(group, data, fn);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
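
/*
 * Illustrative sketch (hypothetical callback): counting the devices in a
 * group from outside this file could look like
 *
 *	static int count_dev(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *	iommu_group_for_each_dev(group, &count, count_dev);
 *
 * A non-zero return from the callback stops the iteration.  The callback
 * must not add or remove group devices, since group->mutex is held
 * across it.
 */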

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the group
 * reference is incremented; otherwise NULL is returned.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group.  Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_ref_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);
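
/*
 * Typical reference pattern (illustrative):
 *
 *	struct iommu_group *group = iommu_group_get(dev);
 *
 *	if (group) {
 *		dev_info(dev, "in iommu group %d\n", iommu_group_id(group));
 *		iommu_group_put(group);
 *	}
 */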

/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
				  struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
				    struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
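
/*
 * Illustrative sketch (hypothetical listener): a group user can watch
 * device add/remove events with a standard notifier block:
 *
 *	static int my_group_notify(struct notifier_block *nb,
 *				   unsigned long action, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		if (action == IOMMU_GROUP_NOTIFY_ADD_DEVICE)
 *			dev_info(dev, "joined iommu group\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_group_notify,
 *	};
 *
 *	iommu_group_register_notifier(group, &my_nb);
 */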

/**
 * iommu_register_device_fault_handler() - Register a device fault handler
 * @dev: the device
 * @handler: the fault handler
 * @data: private data passed as argument to the handler
 *
 * When an IOMMU fault event is received, this handler gets called with the
 * fault event and data as argument. The handler should return 0 on success. If
 * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
 * complete the fault by calling iommu_page_response() with one of the following
 * response codes:
 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
 *   page faults if possible.
 *
 * Return 0 if the fault handler was installed successfully, or an error.
 */
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);
	/* Only allow one fault handler registered for each device */
	if (param->fault_param) {
		ret = -EBUSY;
		goto done_unlock;
	}

	get_device(dev);
	param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
	if (!param->fault_param) {
		put_device(dev);
		ret = -ENOMEM;
		goto done_unlock;
	}
	param->fault_param->handler = handler;
	param->fault_param->data = data;
	mutex_init(&param->fault_param->lock);
	INIT_LIST_HEAD(&param->fault_param->faults);

done_unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
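
/*
 * Illustrative sketch (hypothetical consumer): a driver handling
 * recoverable page requests might register a handler like this, queueing
 * each fault and completing it later via iommu_page_response():
 *
 *	static int my_fault_handler(struct iommu_fault *fault, void *data)
 *	{
 *		if (fault->type != IOMMU_FAULT_PAGE_REQ)
 *			return -EOPNOTSUPP;
 *		return my_queue_fault(data, fault);
 *	}
 *
 *	iommu_register_device_fault_handler(dev, my_fault_handler, dev);
 *
 * my_queue_fault() is a placeholder for driver-specific bookkeeping.
 */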

/**
 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
 * @dev: the device
 *
 * Remove the device fault handler installed with
 * iommu_register_device_fault_handler().
 *
 * Return 0 on success, or an error.
 */
int iommu_unregister_device_fault_handler(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);

	if (!param->fault_param)
		goto unlock;

	/* we cannot unregister handler if there are pending faults */
	if (!list_empty(&param->fault_param->faults)) {
		ret = -EBUSY;
		goto unlock;
	}

	kfree(param->fault_param);
	param->fault_param = NULL;
	put_device(dev);
unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);

/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
 * handler. When this function fails and the fault is recoverable, it is the
 * caller's responsibility to complete the fault.
 *
 * Return 0 on success, or an error.
 */
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	struct dev_iommu *param = dev->iommu;
	struct iommu_fault_event *evt_pending = NULL;
	struct iommu_fault_param *fparam;
	int ret = 0;

	if (!param || !evt)
		return -EINVAL;

	/* we only report device fault if there is a handler registered */
	mutex_lock(&param->lock);
	fparam = param->fault_param;
	if (!fparam || !fparam->handler) {
		ret = -EINVAL;
		goto done_unlock;
	}

	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
		evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
				      GFP_KERNEL);
		if (!evt_pending) {
			ret = -ENOMEM;
			goto done_unlock;
		}
		mutex_lock(&fparam->lock);
		list_add_tail(&evt_pending->list, &fparam->faults);
		mutex_unlock(&fparam->lock);
	}

	ret = fparam->handler(&evt->fault, fparam->data);
	if (ret && evt_pending) {
		mutex_lock(&fparam->lock);
		list_del(&evt_pending->list);
		mutex_unlock(&fparam->lock);
		kfree(evt_pending);
	}
done_unlock:
	mutex_unlock(&param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);

int iommu_page_response(struct device *dev,
			struct iommu_page_response *msg)
{
	bool needs_pasid;
	int ret = -EINVAL;
	struct iommu_fault_event *evt;
	struct iommu_fault_page_request *prm;
	struct dev_iommu *param = dev->iommu;
	bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain || !domain->ops->page_response)
		return -ENODEV;

	if (!param || !param->fault_param)
		return -EINVAL;

	if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
	    msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
		return -EINVAL;

	/* Only send response if there is a fault report pending */
	mutex_lock(&param->fault_param->lock);
	if (list_empty(&param->fault_param->faults)) {
		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
		goto done_unlock;
	}
	/*
	 * Check if we have a matching page request pending to respond,
	 * otherwise return -EINVAL
	 */
	list_for_each_entry(evt, &param->fault_param->faults, list) {
		prm = &evt->fault.prm;
		if (prm->grpid != msg->grpid)
			continue;

		/*
		 * If the PASID is required, the corresponding request is
		 * matched using the group ID, the PASID valid bit and the PASID
		 * value. Otherwise only the group ID matches request and
		 * response.
		 */
		needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
		if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
			continue;

		if (!needs_pasid && has_pasid) {
			/* No big deal, just clear it. */
			msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
			msg->pasid = 0;
		}

		ret = domain->ops->page_response(dev, evt, msg);
		list_del(&evt->list);
		kfree(evt);
		break;
	}

done_unlock:
	mutex_unlock(&param->fault_param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_page_response);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding.  This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as they pass through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups.  For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups. DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports).  It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop.  To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device.  Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(generic_device_group);

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device.  A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases.  If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any.  No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(pci_device_group);
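
/*
 * Illustrative sketch (hypothetical driver): an IOMMU driver that covers
 * both PCI and platform devices commonly dispatches its device_group
 * callback like
 *
 *	static struct iommu_group *my_device_group(struct device *dev)
 *	{
 *		if (dev_is_pci(dev))
 *			return pci_device_group(dev);
 *		return generic_device_group(dev);
 *	}
 */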

/* Get the IOMMU group for device on fsl-mc bus */
struct iommu_group *fsl_mc_device_group(struct device *dev)
{
	struct device *cont_dev = fsl_mc_cont_dev(dev);
	struct iommu_group *group;

	group = iommu_group_get(cont_dev);
	if (!group)
		group = iommu_group_alloc();
	return group;
}
EXPORT_SYMBOL_GPL(fsl_mc_device_group);

static int iommu_get_def_domain_type(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	unsigned int type = 0;

	if (ops->def_domain_type)
		type = ops->def_domain_type(dev);

	return (type == 0) ? iommu_def_domain_type : type;
}

static int iommu_group_alloc_default_domain(struct bus_type *bus,
					    struct iommu_group *group,
					    unsigned int type)
{
	struct iommu_domain *dom;

	dom = __iommu_domain_alloc(bus, type);
	if (!dom && type != IOMMU_DOMAIN_DMA) {
		dom = __iommu_domain_alloc(bus, IOMMU_DOMAIN_DMA);
		if (dom)
			pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA\n",
				type, group->name);
	}

	if (!dom)
		return -ENOMEM;

	group->default_domain = dom;
	if (!group->domain)
		group->domain = dom;

	if (!iommu_dma_strict) {
		int attr = 1;

		iommu_domain_set_attr(dom,
				      DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
				      &attr);
	}

	return 0;
}

static int iommu_alloc_default_domain(struct iommu_group *group,
				      struct device *dev)
{
	unsigned int type;

	if (group->default_domain)
		return 0;

	type = iommu_get_def_domain_type(dev);

	return iommu_group_alloc_default_domain(dev->bus, group, type);
}

/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device.  On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device.  The reference should be released with iommu_group_put().
 */
static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

	if (!ops)
		return ERR_PTR(-EINVAL);

	group = ops->device_group(dev);
	if (WARN_ON_ONCE(group == NULL))
		return ERR_PTR(-EINVAL);

	if (IS_ERR(group))
		return group;

	ret = iommu_group_add_device(group, dev);
	if (ret)
		goto out_put_group;

	return group;

out_put_group:
	iommu_group_put(group);

	return ERR_PTR(ret);
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
	return group->default_domain;
}

static int probe_iommu_group(struct device *dev, void *data)
{
	struct list_head *group_list = data;
	struct iommu_group *group;
	int ret;

	/* Device is probed already if in a group */
	group = iommu_group_get(dev);
	if (group) {
		iommu_group_put(group);
		return 0;
	}

	ret = __iommu_probe_device(dev, group_list);
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int remove_iommu_group(struct device *dev, void *data)
{
	iommu_release_device(dev);

	return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	unsigned long group_action = 0;
	struct device *dev = data;
	struct iommu_group *group;

	/*
	 * ADD/DEL call into iommu driver ops if provided, which may
	 * result in ADD/DEL notifiers to group->notifier
	 */
	if (action == BUS_NOTIFY_ADD_DEVICE) {
		int ret;

		ret = iommu_probe_device(dev);
		return (ret) ? NOTIFY_DONE : NOTIFY_OK;
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		iommu_release_device(dev);
		return NOTIFY_OK;
	}

	/*
	 * Remaining BUS_NOTIFYs get filtered and republished to the
	 * group, if anyone is listening
	 */
	group = iommu_group_get(dev);
	if (!group)
		return 0;

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
		break;
	case BUS_NOTIFY_BOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
		break;
	case BUS_NOTIFY_UNBIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
		break;
	case BUS_NOTIFY_UNBOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
		break;
	}

	if (group_action)
		blocking_notifier_call_chain(&group->notifier,
					     group_action, dev);

	iommu_group_put(group);
	return 0;
}

struct __group_domain_type {
	struct device *dev;
	unsigned int type;
};

static int probe_get_default_domain_type(struct device *dev, void *data)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct __group_domain_type *gtype = data;
	unsigned int type = 0;

	if (ops->def_domain_type)
		type = ops->def_domain_type(dev);

	if (type) {
		if (gtype->type && gtype->type != type) {
			dev_warn(dev, "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
				 iommu_domain_type_str(type),
				 dev_name(gtype->dev),
				 iommu_domain_type_str(gtype->type));
			gtype->type = 0;
		}

		if (!gtype->dev) {
			gtype->dev  = dev;
			gtype->type = type;
		}
	}

	return 0;
}

static void probe_alloc_default_domain(struct bus_type *bus,
				       struct iommu_group *group)
{
	struct __group_domain_type gtype;

	memset(&gtype, 0, sizeof(gtype));

	/* Ask for default domain requirements of all devices in the group */
	__iommu_group_for_each_dev(group, &gtype,
				   probe_get_default_domain_type);

	if (!gtype.type)
		gtype.type = iommu_def_domain_type;

	iommu_group_alloc_default_domain(bus, group, gtype.type);
}

static int iommu_group_do_dma_attach(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;
	int ret = 0;

	if (!iommu_is_attach_deferred(domain, dev))
		ret = __iommu_attach_device(domain, dev);

	return ret;
}

static int __iommu_group_dma_attach(struct iommu_group *group)
{
	return __iommu_group_for_each_dev(group, group->default_domain,
					  iommu_group_do_dma_attach);
}

static int iommu_group_do_probe_finalize(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	if (domain->ops->probe_finalize)
		domain->ops->probe_finalize(dev);

	return 0;
}

static void __iommu_group_dma_finalize(struct iommu_group *group)
{
	__iommu_group_for_each_dev(group, group->default_domain,
				   iommu_group_do_probe_finalize);
}

static int iommu_do_create_direct_mappings(struct device *dev, void *data)
{
	struct iommu_group *group = data;

	iommu_create_device_direct_mappings(group, dev);

	return 0;
}

static int iommu_group_create_direct_mappings(struct iommu_group *group)
{
	return __iommu_group_for_each_dev(group, group,
					  iommu_do_create_direct_mappings);
}

int bus_iommu_probe(struct bus_type *bus)
{
	struct iommu_group *group, *next;
	LIST_HEAD(group_list);
	int ret;

	/*
	 * This code-path does not allocate the default domain when
	 * creating the iommu group, so do it after the groups are
	 * created.
	 */
	ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
	if (ret)
		return ret;

	list_for_each_entry_safe(group, next, &group_list, entry) {
		/* Remove item from the list */
		list_del_init(&group->entry);

		mutex_lock(&group->mutex);

		/* Try to allocate default domain */
		probe_alloc_default_domain(bus, group);

		if (!group->default_domain) {
			mutex_unlock(&group->mutex);
			continue;
		}

		iommu_group_create_direct_mappings(group);

		ret = __iommu_group_dma_attach(group);

		mutex_unlock(&group->mutex);

		if (ret)
			break;

		__iommu_group_dma_finalize(group);
	}

	return ret;
}

static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
	struct notifier_block *nb;
	int err;

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	nb->notifier_call = iommu_bus_notifier;

	err = bus_register_notifier(bus, nb);
	if (err)
		goto out_free;

	err = bus_iommu_probe(bus);
	if (err)
		goto out_err;

	return 0;

out_err:
	/* Clean up */
	bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
	bus_unregister_notifier(bus, nb);

out_free:
	kfree(nb);

	return err;
}
1825 
1826 /**
1827  * bus_set_iommu - set iommu-callbacks for the bus
1828  * @bus: bus.
1829  * @ops: the callbacks provided by the iommu-driver
1830  *
1831  * This function is called by an iommu driver to set the iommu methods
1832  * used for a particular bus. Drivers for devices on that bus can use
1833  * the iommu-api after these ops are registered.
1834  * This special function is needed because IOMMUs are usually devices on
1835  * the bus itself, so the iommu drivers are not initialized when the bus
1836  * is set up. With this function the iommu-driver can set the iommu-ops
1837  * afterwards.
1838  */
1839 int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
1840 {
1841 	int err;
1842 
1843 	if (ops == NULL) {
1844 		bus->iommu_ops = NULL;
1845 		return 0;
1846 	}
1847 
1848 	if (bus->iommu_ops != NULL)
1849 		return -EBUSY;
1850 
1851 	bus->iommu_ops = ops;
1852 
1853 	/* Do IOMMU specific setup for this bus-type */
1854 	err = iommu_bus_init(bus, ops);
1855 	if (err)
1856 		bus->iommu_ops = NULL;
1857 
1858 	return err;
1859 }
1860 EXPORT_SYMBOL_GPL(bus_set_iommu);
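/*
 * Illustrative sketch (not part of the original file): how an IOMMU
 * driver might register its ops with bus_set_iommu() at init time.
 * "my_iommu_ops" and my_iommu_driver_init() are hypothetical names.
 */
static const struct iommu_ops my_iommu_ops = {
	/* .domain_alloc = ..., .attach_dev = ..., .map = ..., .unmap = ... */
};

static int __init my_iommu_driver_init(void)
{
	/* Returns -EBUSY if another driver already claimed the bus */
	return bus_set_iommu(&pci_bus_type, &my_iommu_ops);
}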
1861 
1862 bool iommu_present(struct bus_type *bus)
1863 {
1864 	return bus->iommu_ops != NULL;
1865 }
1866 EXPORT_SYMBOL_GPL(iommu_present);
1867 
1868 bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
1869 {
1870 	if (!bus->iommu_ops || !bus->iommu_ops->capable)
1871 		return false;
1872 
1873 	return bus->iommu_ops->capable(cap);
1874 }
1875 EXPORT_SYMBOL_GPL(iommu_capable);
1876 
1877 /**
1878  * iommu_set_fault_handler() - set a fault handler for an iommu domain
1879  * @domain: iommu domain
1880  * @handler: fault handler
1881  * @token: user data, will be passed back to the fault handler
1882  *
1883  * This function should be used by IOMMU users which want to be notified
1884  * whenever an IOMMU fault happens.
1885  *
1886  * The fault handler itself should return 0 on success, and an appropriate
1887  * error code otherwise.
1888  */
1889 void iommu_set_fault_handler(struct iommu_domain *domain,
1890 					iommu_fault_handler_t handler,
1891 					void *token)
1892 {
1893 	BUG_ON(!domain);
1894 
1895 	domain->handler = handler;
1896 	domain->handler_token = token;
1897 }
1898 EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
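/*
 * Illustrative sketch: an IOMMU user installing a fault handler on a
 * domain it owns. The handler name is hypothetical; returning -ENOSYS
 * asks the IOMMU driver to apply its default behavior.
 */
static int my_fault_handler(struct iommu_domain *dom, struct device *dev,
			    unsigned long iova, int flags, void *token)
{
	dev_err(dev, "iommu %s fault at iova %#lx\n",
		(flags & IOMMU_FAULT_WRITE) ? "write" : "read", iova);
	return -ENOSYS;
}

/* Usage, once a domain exists:
 *	iommu_set_fault_handler(domain, my_fault_handler, NULL);
 */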
1899 
1900 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
1901 						 unsigned type)
1902 {
1903 	struct iommu_domain *domain;
1904 
1905 	if (bus == NULL || bus->iommu_ops == NULL)
1906 		return NULL;
1907 
1908 	domain = bus->iommu_ops->domain_alloc(type);
1909 	if (!domain)
1910 		return NULL;
1911 
1912 	domain->ops  = bus->iommu_ops;
1913 	domain->type = type;
1914 	/* Assume all sizes by default; the driver may override this later */
1915 	domain->pgsize_bitmap  = bus->iommu_ops->pgsize_bitmap;
1916 
1917 	return domain;
1918 }
1919 
1920 struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
1921 {
1922 	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
1923 }
1924 EXPORT_SYMBOL_GPL(iommu_domain_alloc);
1925 
1926 void iommu_domain_free(struct iommu_domain *domain)
1927 {
1928 	domain->ops->domain_free(domain);
1929 }
1930 EXPORT_SYMBOL_GPL(iommu_domain_free);
1931 
1932 static int __iommu_attach_device(struct iommu_domain *domain,
1933 				 struct device *dev)
1934 {
1935 	int ret;
1936 
1937 	if (unlikely(domain->ops->attach_dev == NULL))
1938 		return -ENODEV;
1939 
1940 	ret = domain->ops->attach_dev(domain, dev);
1941 	if (!ret)
1942 		trace_attach_device_to_domain(dev);
1943 	return ret;
1944 }
1945 
1946 int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
1947 {
1948 	struct iommu_group *group;
1949 	int ret;
1950 
1951 	group = iommu_group_get(dev);
1952 	if (!group)
1953 		return -ENODEV;
1954 
1955 	/*
1956 	 * Lock the group to make sure the device-count doesn't
1957 	 * change while we are attaching
1958 	 */
1959 	mutex_lock(&group->mutex);
1960 	ret = -EINVAL;
1961 	if (iommu_group_device_count(group) != 1)
1962 		goto out_unlock;
1963 
1964 	ret = __iommu_attach_group(domain, group);
1965 
1966 out_unlock:
1967 	mutex_unlock(&group->mutex);
1968 	iommu_group_put(group);
1969 
1970 	return ret;
1971 }
1972 EXPORT_SYMBOL_GPL(iommu_attach_device);
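/*
 * Illustrative sketch of the unmanaged-domain flow built from the calls
 * above: allocate a domain on the device's bus, attach the device (its
 * group must contain exactly one device), then tear everything down.
 * The function name is hypothetical.
 */
static int my_use_unmanaged_domain(struct device *dev)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(dev->bus);
	if (!domain)
		return -ENODEV;

	ret = iommu_attach_device(domain, dev); /* -EINVAL on multi-device groups */
	if (ret)
		goto out_free;

	/* ... iommu_map()/iommu_unmap() against @domain ... */

	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}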
1973 
1974 /*
1975  * Check flags and other user provided data for valid combinations. We also
1976  * make sure no reserved fields or unused flags are set. This ensures we do
1977  * not break userspace in the future when these fields or flags are used.
1978  */
1979 static int iommu_check_cache_invl_data(struct iommu_cache_invalidate_info *info)
1980 {
1981 	u32 mask;
1982 	int i;
1983 
1984 	if (info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
1985 		return -EINVAL;
1986 
1987 	mask = (1 << IOMMU_CACHE_INV_TYPE_NR) - 1;
1988 	if (info->cache & ~mask)
1989 		return -EINVAL;
1990 
1991 	if (info->granularity >= IOMMU_INV_GRANU_NR)
1992 		return -EINVAL;
1993 
1994 	switch (info->granularity) {
1995 	case IOMMU_INV_GRANU_ADDR:
1996 		if (info->cache & IOMMU_CACHE_INV_TYPE_PASID)
1997 			return -EINVAL;
1998 
1999 		mask = IOMMU_INV_ADDR_FLAGS_PASID |
2000 			IOMMU_INV_ADDR_FLAGS_ARCHID |
2001 			IOMMU_INV_ADDR_FLAGS_LEAF;
2002 
2003 		if (info->granu.addr_info.flags & ~mask)
2004 			return -EINVAL;
2005 		break;
2006 	case IOMMU_INV_GRANU_PASID:
2007 		mask = IOMMU_INV_PASID_FLAGS_PASID |
2008 			IOMMU_INV_PASID_FLAGS_ARCHID;
2009 		if (info->granu.pasid_info.flags & ~mask)
2010 			return -EINVAL;
2011 
2012 		break;
2013 	case IOMMU_INV_GRANU_DOMAIN:
2014 		if (info->cache & IOMMU_CACHE_INV_TYPE_DEV_IOTLB)
2015 			return -EINVAL;
2016 		break;
2017 	default:
2018 		return -EINVAL;
2019 	}
2020 
2021 	/* Check reserved padding fields */
2022 	for (i = 0; i < sizeof(info->padding); i++) {
2023 		if (info->padding[i])
2024 			return -EINVAL;
2025 	}
2026 
2027 	return 0;
2028 }
2029 
2030 int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev,
2031 				void __user *uinfo)
2032 {
2033 	struct iommu_cache_invalidate_info inv_info = { 0 };
2034 	u32 minsz;
2035 	int ret;
2036 
2037 	if (unlikely(!domain->ops->cache_invalidate))
2038 		return -ENODEV;
2039 
2040 	/*
2041 	 * No new fields can be added before the variable sized union; the
2042 	 * minimum size is the offset to the union.
2043 	 */
2044 	minsz = offsetof(struct iommu_cache_invalidate_info, granu);
2045 
2046 	/* Copy minsz from user to get flags and argsz */
2047 	if (copy_from_user(&inv_info, uinfo, minsz))
2048 		return -EFAULT;
2049 
2050 	/* Fields before the variable size union are mandatory */
2051 	if (inv_info.argsz < minsz)
2052 		return -EINVAL;
2053 
2054 	/* PASID and address granu require additional info beyond minsz */
2055 	if (inv_info.granularity == IOMMU_INV_GRANU_PASID &&
2056 	    inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.pasid_info))
2057 		return -EINVAL;
2058 
2059 	if (inv_info.granularity == IOMMU_INV_GRANU_ADDR &&
2060 	    inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.addr_info))
2061 		return -EINVAL;
2062 
2063 	/*
2064 	 * The user might be using a newer UAPI header which has a larger data
2065 	 * size; we shall support the existing flags within the current
2066 	 * size. Copy the remaining user data _after_ minsz but not more
2067 	 * than the current kernel supported size.
2068 	 */
2069 	if (copy_from_user((void *)&inv_info + minsz, uinfo + minsz,
2070 			   min_t(u32, inv_info.argsz, sizeof(inv_info)) - minsz))
2071 		return -EFAULT;
2072 
2073 	/* Now the argsz is validated, check the content */
2074 	ret = iommu_check_cache_invl_data(&inv_info);
2075 	if (ret)
2076 		return ret;
2077 
2078 	return domain->ops->cache_invalidate(domain, dev, &inv_info);
2079 }
2080 EXPORT_SYMBOL_GPL(iommu_uapi_cache_invalidate);
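/*
 * Illustrative sketch of the argsz handshake from the caller's side (a
 * guess at typical usage, not taken from this file): userspace fills the
 * fixed header, sets argsz to the size of the structure it was compiled
 * against, and the code above copies no more than the kernel understands:
 *
 *	struct iommu_cache_invalidate_info inv_info = {
 *		.argsz       = sizeof(inv_info),
 *		.version     = IOMMU_CACHE_INVALIDATE_INFO_VERSION_1,
 *		.cache       = IOMMU_CACHE_INV_TYPE_IOTLB,
 *		.granularity = IOMMU_INV_GRANU_DOMAIN,
 *	};
 *
 * A newer header with a bigger struct simply yields a larger argsz; the
 * extra tail is ignored by an older kernel.
 */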
2081 
2082 static int iommu_check_bind_data(struct iommu_gpasid_bind_data *data)
2083 {
2084 	u64 mask;
2085 	int i;
2086 
2087 	if (data->version != IOMMU_GPASID_BIND_VERSION_1)
2088 		return -EINVAL;
2089 
2090 	/* Check the range of supported formats */
2091 	if (data->format >= IOMMU_PASID_FORMAT_LAST)
2092 		return -EINVAL;
2093 
2094 	/* Check all flags */
2095 	mask = IOMMU_SVA_GPASID_VAL;
2096 	if (data->flags & ~mask)
2097 		return -EINVAL;
2098 
2099 	/* Check reserved padding fields */
2100 	for (i = 0; i < sizeof(data->padding); i++) {
2101 		if (data->padding[i])
2102 			return -EINVAL;
2103 	}
2104 
2105 	return 0;
2106 }
2107 
2108 static int iommu_sva_prepare_bind_data(void __user *udata,
2109 				       struct iommu_gpasid_bind_data *data)
2110 {
2111 	u32 minsz;
2112 
2113 	/*
2114 	 * No new fields can be added before the variable sized union; the
2115 	 * minimum size is the offset to the union.
2116 	 */
2117 	minsz = offsetof(struct iommu_gpasid_bind_data, vendor);
2118 
2119 	/* Copy minsz from user to get flags and argsz */
2120 	if (copy_from_user(data, udata, minsz))
2121 		return -EFAULT;
2122 
2123 	/* Fields before the variable size union are mandatory */
2124 	if (data->argsz < minsz)
2125 		return -EINVAL;
2126 	/*
2127 	 * The user might be using a newer UAPI header; we shall let the IOMMU
2128 	 * vendor driver decide on what size it needs. Since the guest PASID
2129 	 * bind data can be vendor specific, a larger argsz could be the result
2130 	 * of an extension for one vendor but it should not affect another.
2131 	 * Copy the remaining user data _after_ minsz.
2132 	 */
2133 	if (copy_from_user((void *)data + minsz, udata + minsz,
2134 			   min_t(u32, data->argsz, sizeof(*data)) - minsz))
2135 		return -EFAULT;
2136 
2137 	return iommu_check_bind_data(data);
2138 }
2139 
2140 int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev,
2141 			       void __user *udata)
2142 {
2143 	struct iommu_gpasid_bind_data data = { 0 };
2144 	int ret;
2145 
2146 	if (unlikely(!domain->ops->sva_bind_gpasid))
2147 		return -ENODEV;
2148 
2149 	ret = iommu_sva_prepare_bind_data(udata, &data);
2150 	if (ret)
2151 		return ret;
2152 
2153 	return domain->ops->sva_bind_gpasid(domain, dev, &data);
2154 }
2155 EXPORT_SYMBOL_GPL(iommu_uapi_sva_bind_gpasid);
2156 
2157 int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
2158 			     ioasid_t pasid)
2159 {
2160 	if (unlikely(!domain->ops->sva_unbind_gpasid))
2161 		return -ENODEV;
2162 
2163 	return domain->ops->sva_unbind_gpasid(dev, pasid);
2164 }
2165 EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);
2166 
2167 int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
2168 				 void __user *udata)
2169 {
2170 	struct iommu_gpasid_bind_data data = { 0 };
2171 	int ret;
2172 
2173 	if (unlikely(!domain->ops->sva_bind_gpasid))
2174 		return -ENODEV;
2175 
2176 	ret = iommu_sva_prepare_bind_data(udata, &data);
2177 	if (ret)
2178 		return ret;
2179 
2180 	return iommu_sva_unbind_gpasid(domain, dev, data.hpasid);
2181 }
2182 EXPORT_SYMBOL_GPL(iommu_uapi_sva_unbind_gpasid);
2183 
2184 static void __iommu_detach_device(struct iommu_domain *domain,
2185 				  struct device *dev)
2186 {
2187 	if (iommu_is_attach_deferred(domain, dev))
2188 		return;
2189 
2190 	if (unlikely(domain->ops->detach_dev == NULL))
2191 		return;
2192 
2193 	domain->ops->detach_dev(domain, dev);
2194 	trace_detach_device_from_domain(dev);
2195 }
2196 
2197 void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
2198 {
2199 	struct iommu_group *group;
2200 
2201 	group = iommu_group_get(dev);
2202 	if (!group)
2203 		return;
2204 
2205 	mutex_lock(&group->mutex);
2206 	if (iommu_group_device_count(group) != 1) {
2207 		WARN_ON(1);
2208 		goto out_unlock;
2209 	}
2210 
2211 	__iommu_detach_group(domain, group);
2212 
2213 out_unlock:
2214 	mutex_unlock(&group->mutex);
2215 	iommu_group_put(group);
2216 }
2217 EXPORT_SYMBOL_GPL(iommu_detach_device);
2218 
2219 struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
2220 {
2221 	struct iommu_domain *domain;
2222 	struct iommu_group *group;
2223 
2224 	group = iommu_group_get(dev);
2225 	if (!group)
2226 		return NULL;
2227 
2228 	domain = group->domain;
2229 
2230 	iommu_group_put(group);
2231 
2232 	return domain;
2233 }
2234 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
2235 
2236 /*
2237  * For IOMMU_DOMAIN_DMA implementations which already provide their own
2238  * guarantees that the group and its default domain are valid and correct.
2239  */
2240 struct iommu_domain *iommu_get_dma_domain(struct device *dev)
2241 {
2242 	return dev->iommu_group->default_domain;
2243 }
2244 
2245 /*
2246  * IOMMU groups are really the natural working unit of the IOMMU, but
2247  * the IOMMU API works on domains and devices.  Bridge that gap by
2248  * iterating over the devices in a group.  Ideally we'd have a single
2249  * device which represents the requestor ID of the group, but we also
2250  * allow IOMMU drivers to create policy defined minimum sets, where
2251  * the physical hardware may be able to distinguish members, but we
2252  * wish to group them at a higher level (ex. untrusted multi-function
2253  * PCI devices).  Thus we attach each device.
2254  */
2255 static int iommu_group_do_attach_device(struct device *dev, void *data)
2256 {
2257 	struct iommu_domain *domain = data;
2258 
2259 	return __iommu_attach_device(domain, dev);
2260 }
2261 
2262 static int __iommu_attach_group(struct iommu_domain *domain,
2263 				struct iommu_group *group)
2264 {
2265 	int ret;
2266 
2267 	if (group->default_domain && group->domain != group->default_domain)
2268 		return -EBUSY;
2269 
2270 	ret = __iommu_group_for_each_dev(group, domain,
2271 					 iommu_group_do_attach_device);
2272 	if (ret == 0)
2273 		group->domain = domain;
2274 
2275 	return ret;
2276 }
2277 
2278 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
2279 {
2280 	int ret;
2281 
2282 	mutex_lock(&group->mutex);
2283 	ret = __iommu_attach_group(domain, group);
2284 	mutex_unlock(&group->mutex);
2285 
2286 	return ret;
2287 }
2288 EXPORT_SYMBOL_GPL(iommu_attach_group);
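/*
 * Illustrative sketch: taking over a whole group, the VFIO-style
 * alternative to iommu_attach_device(). The function name is
 * hypothetical.
 */
static int my_attach_whole_group(struct iommu_domain *domain,
				 struct device *dev)
{
	struct iommu_group *group = iommu_group_get(dev);
	int ret;

	if (!group)
		return -ENODEV;

	/* -EBUSY while the group is attached to a non-default domain */
	ret = iommu_attach_group(domain, group);
	iommu_group_put(group);
	return ret;
}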
2289 
2290 static int iommu_group_do_detach_device(struct device *dev, void *data)
2291 {
2292 	struct iommu_domain *domain = data;
2293 
2294 	__iommu_detach_device(domain, dev);
2295 
2296 	return 0;
2297 }
2298 
2299 static void __iommu_detach_group(struct iommu_domain *domain,
2300 				 struct iommu_group *group)
2301 {
2302 	int ret;
2303 
2304 	if (!group->default_domain) {
2305 		__iommu_group_for_each_dev(group, domain,
2306 					   iommu_group_do_detach_device);
2307 		group->domain = NULL;
2308 		return;
2309 	}
2310 
2311 	if (group->domain == group->default_domain)
2312 		return;
2313 
2314 	/* Detach by re-attaching to the default domain */
2315 	ret = __iommu_group_for_each_dev(group, group->default_domain,
2316 					 iommu_group_do_attach_device);
2317 	if (ret != 0)
2318 		WARN_ON(1);
2319 	else
2320 		group->domain = group->default_domain;
2321 }
2322 
2323 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
2324 {
2325 	mutex_lock(&group->mutex);
2326 	__iommu_detach_group(domain, group);
2327 	mutex_unlock(&group->mutex);
2328 }
2329 EXPORT_SYMBOL_GPL(iommu_detach_group);
2330 
2331 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
2332 {
2333 	if (unlikely(domain->ops->iova_to_phys == NULL))
2334 		return 0;
2335 
2336 	return domain->ops->iova_to_phys(domain, iova);
2337 }
2338 EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
2339 
2340 static size_t iommu_pgsize(struct iommu_domain *domain,
2341 			   unsigned long addr_merge, size_t size)
2342 {
2343 	unsigned int pgsize_idx;
2344 	size_t pgsize;
2345 
2346 	/* Max page size that still fits into 'size' */
2347 	pgsize_idx = __fls(size);
2348 
2349 	/* need to consider alignment requirements? */
2350 	if (likely(addr_merge)) {
2351 		/* Max page size allowed by address */
2352 		unsigned int align_pgsize_idx = __ffs(addr_merge);
2353 		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
2354 	}
2355 
2356 	/* build a mask of acceptable page sizes */
2357 	pgsize = (1UL << (pgsize_idx + 1)) - 1;
2358 
2359 	/* throw away page sizes not supported by the hardware */
2360 	pgsize &= domain->pgsize_bitmap;
2361 
2362 	/* make sure we're still sane */
2363 	BUG_ON(!pgsize);
2364 
2365 	/* pick the biggest page */
2366 	pgsize_idx = __fls(pgsize);
2367 	pgsize = 1UL << pgsize_idx;
2368 
2369 	return pgsize;
2370 }
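/*
 * Worked example for iommu_pgsize() above, assuming pgsize_bitmap =
 * 4K | 2M | 1G: with addr_merge = (iova | paddr) = 0x200000 and
 * size = 0x400000, __fls(size) = 22 and __ffs(addr_merge) = 21, so the
 * acceptable-size mask is (1 << 22) - 1. Masking with the bitmap leaves
 * the 4K and 2M bits, and the biggest surviving page, 2M, is what one
 * loop iteration of __iommu_map() below will use.
 */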
2371 
2372 static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
2373 		       phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
2374 {
2375 	const struct iommu_ops *ops = domain->ops;
2376 	unsigned long orig_iova = iova;
2377 	unsigned int min_pagesz;
2378 	size_t orig_size = size;
2379 	phys_addr_t orig_paddr = paddr;
2380 	int ret = 0;
2381 
2382 	if (unlikely(ops->map == NULL ||
2383 		     domain->pgsize_bitmap == 0UL))
2384 		return -ENODEV;
2385 
2386 	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
2387 		return -EINVAL;
2388 
2389 	/* find out the minimum page size supported */
2390 	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
2391 
2392 	/*
2393 	 * both the virtual address and the physical one, as well as
2394 	 * the size of the mapping, must be aligned (at least) to the
2395 	 * size of the smallest page supported by the hardware
2396 	 */
2397 	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
2398 		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
2399 		       iova, &paddr, size, min_pagesz);
2400 		return -EINVAL;
2401 	}
2402 
2403 	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
2404 
2405 	while (size) {
2406 		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
2407 
2408 		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
2409 			 iova, &paddr, pgsize);
2410 		ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
2411 
2412 		if (ret)
2413 			break;
2414 
2415 		iova += pgsize;
2416 		paddr += pgsize;
2417 		size -= pgsize;
2418 	}
2419 
2420 	/* unroll mapping in case something went wrong */
2421 	if (ret)
2422 		iommu_unmap(domain, orig_iova, orig_size - size);
2423 	else
2424 		trace_map(orig_iova, orig_paddr, orig_size);
2425 
2426 	return ret;
2427 }
2428 
2429 static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
2430 		      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
2431 {
2432 	const struct iommu_ops *ops = domain->ops;
2433 	int ret;
2434 
2435 	ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
2436 	if (ret == 0 && ops->iotlb_sync_map)
2437 		ops->iotlb_sync_map(domain);
2438 
2439 	return ret;
2440 }
2441 
2442 int iommu_map(struct iommu_domain *domain, unsigned long iova,
2443 	      phys_addr_t paddr, size_t size, int prot)
2444 {
2445 	might_sleep();
2446 	return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
2447 }
2448 EXPORT_SYMBOL_GPL(iommu_map);
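/*
 * Illustrative sketch: mapping one physically contiguous buffer and
 * unmapping it again. The IOVA is caller-managed (normally handed out
 * by an IOVA allocator); the constant here is purely for illustration.
 */
static int my_map_buffer(struct iommu_domain *domain, phys_addr_t paddr,
			 size_t size)
{
	unsigned long iova = 0x100000;	/* hypothetical, caller-chosen */
	int ret;

	ret = iommu_map(domain, iova, paddr, size, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	/* ... device DMA through @iova ... */

	if (iommu_unmap(domain, iova, size) != size)
		return -EIO;
	return 0;
}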
2449 
2450 int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
2451 	      phys_addr_t paddr, size_t size, int prot)
2452 {
2453 	return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
2454 }
2455 EXPORT_SYMBOL_GPL(iommu_map_atomic);
2456 
2457 static size_t __iommu_unmap(struct iommu_domain *domain,
2458 			    unsigned long iova, size_t size,
2459 			    struct iommu_iotlb_gather *iotlb_gather)
2460 {
2461 	const struct iommu_ops *ops = domain->ops;
2462 	size_t unmapped_page, unmapped = 0;
2463 	unsigned long orig_iova = iova;
2464 	unsigned int min_pagesz;
2465 
2466 	if (unlikely(ops->unmap == NULL ||
2467 		     domain->pgsize_bitmap == 0UL))
2468 		return 0;
2469 
2470 	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
2471 		return 0;
2472 
2473 	/* find out the minimum page size supported */
2474 	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
2475 
2476 	/*
2477 	 * The virtual address, as well as the size of the mapping, must be
2478 	 * aligned (at least) to the size of the smallest page supported
2479 	 * by the hardware
2480 	 */
2481 	if (!IS_ALIGNED(iova | size, min_pagesz)) {
2482 		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
2483 		       iova, size, min_pagesz);
2484 		return 0;
2485 	}
2486 
2487 	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
2488 
2489 	/*
2490 	 * Keep iterating until we either unmap 'size' bytes (or more)
2491 	 * or we hit an area that isn't mapped.
2492 	 */
2493 	while (unmapped < size) {
2494 		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
2495 
2496 		unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather);
2497 		if (!unmapped_page)
2498 			break;
2499 
2500 		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
2501 			 iova, unmapped_page);
2502 
2503 		iova += unmapped_page;
2504 		unmapped += unmapped_page;
2505 	}
2506 
2507 	trace_unmap(orig_iova, size, unmapped);
2508 	return unmapped;
2509 }
2510 
2511 size_t iommu_unmap(struct iommu_domain *domain,
2512 		   unsigned long iova, size_t size)
2513 {
2514 	struct iommu_iotlb_gather iotlb_gather;
2515 	size_t ret;
2516 
2517 	iommu_iotlb_gather_init(&iotlb_gather);
2518 	ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
2519 	iommu_iotlb_sync(domain, &iotlb_gather);
2520 
2521 	return ret;
2522 }
2523 EXPORT_SYMBOL_GPL(iommu_unmap);
2524 
2525 size_t iommu_unmap_fast(struct iommu_domain *domain,
2526 			unsigned long iova, size_t size,
2527 			struct iommu_iotlb_gather *iotlb_gather)
2528 {
2529 	return __iommu_unmap(domain, iova, size, iotlb_gather);
2530 }
2531 EXPORT_SYMBOL_GPL(iommu_unmap_fast);
2532 
2533 static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
2534 			     struct scatterlist *sg, unsigned int nents, int prot,
2535 			     gfp_t gfp)
2536 {
2537 	const struct iommu_ops *ops = domain->ops;
2538 	size_t len = 0, mapped = 0;
2539 	phys_addr_t start;
2540 	unsigned int i = 0;
2541 	int ret;
2542 
2543 	while (i <= nents) {
2544 		phys_addr_t s_phys = sg_phys(sg);
2545 
2546 		if (len && s_phys != start + len) {
2547 			ret = __iommu_map(domain, iova + mapped, start,
2548 					len, prot, gfp);
2549 
2550 			if (ret)
2551 				goto out_err;
2552 
2553 			mapped += len;
2554 			len = 0;
2555 		}
2556 
2557 		if (len) {
2558 			len += sg->length;
2559 		} else {
2560 			len = sg->length;
2561 			start = s_phys;
2562 		}
2563 
2564 		if (++i < nents)
2565 			sg = sg_next(sg);
2566 	}
2567 
2568 	if (ops->iotlb_sync_map)
2569 		ops->iotlb_sync_map(domain);
2570 	return mapped;
2571 
2572 out_err:
2573 	/* undo mappings already done */
2574 	iommu_unmap(domain, iova, mapped);
2575 
2576 	return 0;
2577 
2578 }
2579 
2580 size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
2581 		    struct scatterlist *sg, unsigned int nents, int prot)
2582 {
2583 	might_sleep();
2584 	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
2585 }
2586 EXPORT_SYMBOL_GPL(iommu_map_sg);
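/*
 * Illustrative sketch: mapping a scatter-gather table. Note the return
 * convention differs from iommu_map(): zero means failure, otherwise the
 * number of bytes mapped contiguously starting at @iova. The wrapper
 * name is hypothetical.
 */
static int my_map_sgtable(struct iommu_domain *domain, unsigned long iova,
			  struct sg_table *sgt)
{
	size_t mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents,
				     IOMMU_READ | IOMMU_WRITE);

	return mapped ? 0 : -ENOMEM;
}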
2587 
2588 size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
2589 		    struct scatterlist *sg, unsigned int nents, int prot)
2590 {
2591 	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
2592 }
2593 EXPORT_SYMBOL_GPL(iommu_map_sg_atomic);
2594 
2595 int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
2596 			       phys_addr_t paddr, u64 size, int prot)
2597 {
2598 	if (unlikely(domain->ops->domain_window_enable == NULL))
2599 		return -ENODEV;
2600 
2601 	return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
2602 						 prot);
2603 }
2604 EXPORT_SYMBOL_GPL(iommu_domain_window_enable);
2605 
2606 void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
2607 {
2608 	if (unlikely(domain->ops->domain_window_disable == NULL))
2609 		return;
2610 
2611 	return domain->ops->domain_window_disable(domain, wnd_nr);
2612 }
2613 EXPORT_SYMBOL_GPL(iommu_domain_window_disable);
2614 
2615 /**
2616  * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
2617  * @domain: the iommu domain where the fault has happened
2618  * @dev: the device where the fault has happened
2619  * @iova: the faulting address
2620  * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
2621  *
2622  * This function should be called by the low-level IOMMU implementations
2623  * whenever IOMMU faults happen, to allow high-level users, that are
2624  * interested in such events, to know about them.
2625  *
2626  * This event may be useful for several possible use cases:
2627  * - mere logging of the event
2628  * - dynamic TLB/PTE loading
2629  * - if restarting of the faulting device is required
2630  *
2631  * Returns 0 on success and an appropriate error code otherwise (if dynamic
2632  * PTE/TLB loading will one day be supported, implementations will be able
2633  * to tell whether it succeeded or not according to this return value).
2634  *
2635  * Specifically, -ENOSYS is returned if a fault handler isn't installed
2636  * (though fault handlers can also return -ENOSYS, in case they want to
2637  * elicit the default behavior of the IOMMU drivers).
2638  */
2639 int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
2640 		       unsigned long iova, int flags)
2641 {
2642 	int ret = -ENOSYS;
2643 
2644 	/*
2645 	 * if upper layers showed interest and installed a fault handler,
2646 	 * invoke it.
2647 	 */
2648 	if (domain->handler)
2649 		ret = domain->handler(domain, dev, iova, flags,
2650 						domain->handler_token);
2651 
2652 	trace_io_page_fault(dev, iova, flags);
2653 	return ret;
2654 }
2655 EXPORT_SYMBOL_GPL(report_iommu_fault);
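/*
 * Illustrative sketch: a low-level IOMMU driver feeding its fault
 * interrupt into report_iommu_fault(). The driver state structure, the
 * register offset, and the flag decoding are all hypothetical; a real
 * driver would also need <linux/interrupt.h> and <linux/io.h>.
 */
struct my_iommu {			/* hypothetical driver state */
	struct device *dev;
	struct iommu_domain *domain;
	void __iomem *regs;
};

static irqreturn_t my_iommu_fault_irq(int irq, void *data)
{
	struct my_iommu *iommu = data;
	unsigned long iova = readl(iommu->regs + 0x20);	/* hypothetical reg */
	int flags = IOMMU_FAULT_READ;	/* decode from a status register */

	if (report_iommu_fault(iommu->domain, iommu->dev, iova, flags))
		dev_err_ratelimited(iommu->dev, "unhandled fault at %#lx\n",
				    iova);
	return IRQ_HANDLED;
}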
2656 
2657 static int __init iommu_init(void)
2658 {
2659 	iommu_group_kset = kset_create_and_add("iommu_groups",
2660 					       NULL, kernel_kobj);
2661 	BUG_ON(!iommu_group_kset);
2662 
2663 	iommu_debugfs_setup();
2664 
2665 	return 0;
2666 }
2667 core_initcall(iommu_init);
2668 
2669 int iommu_domain_get_attr(struct iommu_domain *domain,
2670 			  enum iommu_attr attr, void *data)
2671 {
2672 	struct iommu_domain_geometry *geometry;
2673 	bool *paging;
2674 	int ret = 0;
2675 
2676 	switch (attr) {
2677 	case DOMAIN_ATTR_GEOMETRY:
2678 		geometry  = data;
2679 		*geometry = domain->geometry;
2680 
2681 		break;
2682 	case DOMAIN_ATTR_PAGING:
2683 		paging  = data;
2684 		*paging = (domain->pgsize_bitmap != 0UL);
2685 		break;
2686 	default:
2687 		if (!domain->ops->domain_get_attr)
2688 			return -EINVAL;
2689 
2690 		ret = domain->ops->domain_get_attr(domain, attr, data);
2691 	}
2692 
2693 	return ret;
2694 }
2695 EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
2696 
2697 int iommu_domain_set_attr(struct iommu_domain *domain,
2698 			  enum iommu_attr attr, void *data)
2699 {
2700 	int ret = 0;
2701 
2702 	switch (attr) {
2703 	default:
2704 		if (domain->ops->domain_set_attr == NULL)
2705 			return -EINVAL;
2706 
2707 		ret = domain->ops->domain_set_attr(domain, attr, data);
2708 	}
2709 
2710 	return ret;
2711 }
2712 EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
2713 
2714 void iommu_get_resv_regions(struct device *dev, struct list_head *list)
2715 {
2716 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2717 
2718 	if (ops && ops->get_resv_regions)
2719 		ops->get_resv_regions(dev, list);
2720 }
2721 
2722 void iommu_put_resv_regions(struct device *dev, struct list_head *list)
2723 {
2724 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2725 
2726 	if (ops && ops->put_resv_regions)
2727 		ops->put_resv_regions(dev, list);
2728 }
2729 
2730 /**
2731  * generic_iommu_put_resv_regions - Reserved region driver helper
2732  * @dev: device for which to free reserved regions
2733  * @list: reserved region list for device
2734  *
2735  * IOMMU drivers can use this to implement their .put_resv_regions() callback
2736  * for simple reservations. Memory allocated for each reserved region will be
2737  * freed. If an IOMMU driver allocates additional resources per region, it is
2738  * going to have to implement a custom callback.
2739  */
2740 void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list)
2741 {
2742 	struct iommu_resv_region *entry, *next;
2743 
2744 	list_for_each_entry_safe(entry, next, list, list)
2745 		kfree(entry);
2746 }
2747 EXPORT_SYMBOL(generic_iommu_put_resv_regions);
2748 
2749 struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
2750 						  size_t length, int prot,
2751 						  enum iommu_resv_type type)
2752 {
2753 	struct iommu_resv_region *region;
2754 
2755 	region = kzalloc(sizeof(*region), GFP_KERNEL);
2756 	if (!region)
2757 		return NULL;
2758 
2759 	INIT_LIST_HEAD(&region->list);
2760 	region->start = start;
2761 	region->length = length;
2762 	region->prot = prot;
2763 	region->type = type;
2764 	return region;
2765 }
2766 EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
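/*
 * Illustrative sketch: a driver's ->get_resv_regions() callback built on
 * iommu_alloc_resv_region(), here reserving a hypothetical software MSI
 * window (the address, size and prot flags are made up for the example).
 * generic_iommu_put_resv_regions() above can then serve as the matching
 * ->put_resv_regions().
 */
static void my_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_resv_region *region;

	region = iommu_alloc_resv_region(0x8000000, 0x100000,
					 IOMMU_WRITE, IOMMU_RESV_SW_MSI);
	if (region)
		list_add_tail(&region->list, head);
}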
2767 
2768 void iommu_set_default_passthrough(bool cmd_line)
2769 {
2770 	if (cmd_line)
2771 		iommu_set_cmd_line_dma_api();
2772 
2773 	iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
2774 }
2775 
2776 void iommu_set_default_translated(bool cmd_line)
2777 {
2778 	if (cmd_line)
2779 		iommu_set_cmd_line_dma_api();
2780 
2781 	iommu_def_domain_type = IOMMU_DOMAIN_DMA;
2782 }
2783 
2784 bool iommu_default_passthrough(void)
2785 {
2786 	return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
2787 }
2788 EXPORT_SYMBOL_GPL(iommu_default_passthrough);
2789 
2790 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
2791 {
2792 	const struct iommu_ops *ops = NULL;
2793 	struct iommu_device *iommu;
2794 
2795 	spin_lock(&iommu_device_lock);
2796 	list_for_each_entry(iommu, &iommu_device_list, list)
2797 		if (iommu->fwnode == fwnode) {
2798 			ops = iommu->ops;
2799 			break;
2800 		}
2801 	spin_unlock(&iommu_device_lock);
2802 	return ops;
2803 }
2804 
2805 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
2806 		      const struct iommu_ops *ops)
2807 {
2808 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2809 
2810 	if (fwspec)
2811 		return ops == fwspec->ops ? 0 : -EINVAL;
2812 
2813 	if (!dev_iommu_get(dev))
2814 		return -ENOMEM;
2815 
2816 	/* Preallocate for the overwhelmingly common case of 1 ID */
2817 	fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
2818 	if (!fwspec)
2819 		return -ENOMEM;
2820 
2821 	of_node_get(to_of_node(iommu_fwnode));
2822 	fwspec->iommu_fwnode = iommu_fwnode;
2823 	fwspec->ops = ops;
2824 	dev_iommu_fwspec_set(dev, fwspec);
2825 	return 0;
2826 }
2827 EXPORT_SYMBOL_GPL(iommu_fwspec_init);
2828 
2829 void iommu_fwspec_free(struct device *dev)
2830 {
2831 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2832 
2833 	if (fwspec) {
2834 		fwnode_handle_put(fwspec->iommu_fwnode);
2835 		kfree(fwspec);
2836 		dev_iommu_fwspec_set(dev, NULL);
2837 	}
2838 }
2839 EXPORT_SYMBOL_GPL(iommu_fwspec_free);
2840 
2841 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
2842 {
2843 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2844 	int i, new_num;
2845 
2846 	if (!fwspec)
2847 		return -EINVAL;
2848 
2849 	new_num = fwspec->num_ids + num_ids;
2850 	if (new_num > 1) {
2851 		fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
2852 				  GFP_KERNEL);
2853 		if (!fwspec)
2854 			return -ENOMEM;
2855 
2856 		dev_iommu_fwspec_set(dev, fwspec);
2857 	}
2858 
2859 	for (i = 0; i < num_ids; i++)
2860 		fwspec->ids[fwspec->num_ids + i] = ids[i];
2861 
2862 	fwspec->num_ids = new_num;
2863 	return 0;
2864 }
2865 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
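/*
 * Illustrative sketch of the firmware-description path: a bus/OF layer
 * initializes the fwspec once and then appends one ID per firmware
 * entry. The function shape below is hypothetical and simplified from
 * what the OF code actually passes around; it assumes a single-cell
 * binding and <linux/of.h> for struct of_phandle_args.
 */
static int my_of_xlate(struct device *dev, struct of_phandle_args *args,
		       const struct iommu_ops *ops)
{
	u32 sid = args->args[0];	/* assumed single-cell stream ID */
	int ret;

	ret = iommu_fwspec_init(dev, &args->np->fwnode, ops);
	if (ret)
		return ret;

	return iommu_fwspec_add_ids(dev, &sid, 1);
}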
2866 
2867 /*
2868  * Per device IOMMU features.
2869  */
2870 bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
2871 {
2872 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2873 
2874 	if (ops && ops->dev_has_feat)
2875 		return ops->dev_has_feat(dev, feat);
2876 
2877 	return false;
2878 }
2879 EXPORT_SYMBOL_GPL(iommu_dev_has_feature);
2880 
2881 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
2882 {
2883 	if (dev->iommu && dev->iommu->iommu_dev) {
2884 		const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
2885 
2886 		if (ops->dev_enable_feat)
2887 			return ops->dev_enable_feat(dev, feat);
2888 	}
2889 
2890 	return -ENODEV;
2891 }
2892 EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
2893 
2894 /*
2895  * The device drivers should do the necessary cleanups before calling this.
2896  * For example, before disabling the aux-domain feature, the device driver
2897  * should detach all aux-domains. Otherwise, this will return -EBUSY.
2898  */
2899 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
2900 {
2901 	if (dev->iommu && dev->iommu->iommu_dev) {
2902 		const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
2903 
2904 		if (ops->dev_disable_feat)
2905 			return ops->dev_disable_feat(dev, feat);
2906 	}
2907 
2908 	return -EBUSY;
2909 }
2910 EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
2911 
2912 bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
2913 {
2914 	if (dev->iommu && dev->iommu->iommu_dev) {
2915 		const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
2916 
2917 		if (ops->dev_feat_enabled)
2918 			return ops->dev_feat_enabled(dev, feat);
2919 	}
2920 
2921 	return false;
2922 }
2923 EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
2924 
2925 /*
2926  * Aux-domain specific attach/detach.
2927  *
2928  * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
2929  * true. Also, as long as domains are attached to a device through this
2930  * interface, any attempt to call iommu_attach_device() should fail
2931  * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
2932  * This should make us safe against a device being attached to a guest as a
2933  * whole while there are still pasid users on it (aux and sva).
2934  */
2935 int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
2936 {
2937 	int ret = -ENODEV;
2938 
2939 	if (domain->ops->aux_attach_dev)
2940 		ret = domain->ops->aux_attach_dev(domain, dev);
2941 
2942 	if (!ret)
2943 		trace_attach_device_to_domain(dev);
2944 
2945 	return ret;
2946 }
2947 EXPORT_SYMBOL_GPL(iommu_aux_attach_device);
2948 
2949 void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
2950 {
2951 	if (domain->ops->aux_detach_dev) {
2952 		domain->ops->aux_detach_dev(domain, dev);
2953 		trace_detach_device_from_domain(dev);
2954 	}
2955 }
2956 EXPORT_SYMBOL_GPL(iommu_aux_detach_device);
2957 
2958 int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
2959 {
2960 	int ret = -ENODEV;
2961 
2962 	if (domain->ops->aux_get_pasid)
2963 		ret = domain->ops->aux_get_pasid(domain, dev);
2964 
2965 	return ret;
2966 }
2967 EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
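/*
 * Illustrative sketch of the aux-domain lifecycle described above:
 * enable the feature, attach a second domain to the device, fetch the
 * PASID that identifies it, and unwind in reverse order. The function
 * name is hypothetical.
 */
static int my_use_aux_domain(struct device *dev, struct iommu_domain *domain)
{
	int pasid, ret;

	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_AUX);
	if (ret)
		return ret;

	ret = iommu_aux_attach_device(domain, dev);
	if (ret)
		goto out_disable;

	pasid = iommu_aux_get_pasid(domain, dev);
	if (pasid < 0) {
		ret = pasid;
	} else {
		/* ... program @pasid into the device, do work ... */
	}

	iommu_aux_detach_device(domain, dev);
out_disable:
	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_AUX);
	return ret;
}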
2968 
2969 /**
2970  * iommu_sva_bind_device() - Bind a process address space to a device
2971  * @dev: the device
2972  * @mm: the mm to bind, caller must hold a reference to it
2973  *
2974  * Create a bond between device and address space, allowing the device to access
2975  * the mm using the returned PASID. If a bond already exists between @dev and
2976  * @mm, it is returned and an additional reference is taken. Caller must call
2977  * iommu_sva_unbind_device() to release each reference.
2978  *
2979  * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
2980  * initialize the required SVA features.
2981  *
2982  * On error, returns an ERR_PTR value.
2983  */
2984 struct iommu_sva *
2985 iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
2986 {
2987 	struct iommu_group *group;
2988 	struct iommu_sva *handle = ERR_PTR(-EINVAL);
2989 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2990 
2991 	if (!ops || !ops->sva_bind)
2992 		return ERR_PTR(-ENODEV);
2993 
2994 	group = iommu_group_get(dev);
2995 	if (!group)
2996 		return ERR_PTR(-ENODEV);
2997 
2998 	/* Ensure device count and domain don't change while we're binding */
2999 	mutex_lock(&group->mutex);
3000 
3001 	/*
3002 	 * To keep things simple, SVA currently doesn't support IOMMU groups
3003 	 * with more than one device. Existing SVA-capable systems are not
3004 	 * affected by the problems that required IOMMU groups (lack of ACS
3005 	 * isolation, device ID aliasing and other hardware issues).
3006 	 */
3007 	if (iommu_group_device_count(group) != 1)
3008 		goto out_unlock;
3009 
3010 	handle = ops->sva_bind(dev, mm, drvdata);
3011 
3012 out_unlock:
3013 	mutex_unlock(&group->mutex);
3014 	iommu_group_put(group);
3015 
3016 	return handle;
3017 }
3018 EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
3019 
3020 /**
3021  * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
3022  * @handle: the handle returned by iommu_sva_bind_device()
3023  *
3024  * Put a reference to a bond between device and address space. The device should
3025  * not be issuing any more transactions for this PASID. All outstanding page
3026  * requests for this PASID must have been flushed to the IOMMU.
3027  *
3028  * The caller must not use @handle after releasing its reference.
3029  */
3030 void iommu_sva_unbind_device(struct iommu_sva *handle)
3031 {
3032 	struct iommu_group *group;
3033 	struct device *dev = handle->dev;
3034 	const struct iommu_ops *ops = dev->bus->iommu_ops;
3035 
3036 	if (!ops || !ops->sva_unbind)
3037 		return;
3038 
3039 	group = iommu_group_get(dev);
3040 	if (!group)
3041 		return;
3042 
3043 	mutex_lock(&group->mutex);
3044 	ops->sva_unbind(handle);
3045 	mutex_unlock(&group->mutex);
3046 
3047 	iommu_group_put(group);
3048 }
3049 EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
3050 
3051 u32 iommu_sva_get_pasid(struct iommu_sva *handle)
3052 {
3053 	const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
3054 
3055 	if (!ops || !ops->sva_get_pasid)
3056 		return IOMMU_PASID_INVALID;
3057 
3058 	return ops->sva_get_pasid(handle);
3059 }
3060 EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
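/*
 * Illustrative sketch tying the SVA entry points together: enable the
 * feature, bind the current process's mm, program the PASID into the
 * device (hypothetical step), and unbind once DMA has drained. The
 * function name is made up; "current" assumes <linux/sched.h>.
 */
static int my_bind_current_mm(struct device *dev)
{
	struct iommu_sva *handle;
	u32 pasid;
	int ret;

	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
	if (ret)
		return ret;

	handle = iommu_sva_bind_device(dev, current->mm, NULL);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_disable;
	}

	pasid = iommu_sva_get_pasid(handle);
	if (pasid == IOMMU_PASID_INVALID) {
		ret = -ENODEV;
	} else {
		/* ... issue DMA tagged with @pasid ... */
	}

	iommu_sva_unbind_device(handle);
out_disable:
	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
	return ret;
}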
3061