// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)    "iommu: " fmt

#include <linux/amba/bus.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/host1x_context_bus.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <linux/module.h>
#include <linux/cc_platform.h>
#include <trace/events/iommu.h>
#include <linux/sched/mm.h>
#include <trace/hooks/iommu.h>

#include "dma-iommu.h"

#include "iommu-sva.h"

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);

static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);
static u32 iommu_cmd_line __read_mostly;

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct xarray pasid_array;
	struct mutex mutex;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *blocking_domain;
	struct iommu_domain *domain;
	struct list_head entry;
	unsigned int owner_cnt;
	void *owner;
};

struct group_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

static const char * const iommu_group_resv_type_string[] = {
	[IOMMU_RESV_DIRECT]			= "direct",
	[IOMMU_RESV_DIRECT_RELAXABLE]		= "direct-relaxable",
	[IOMMU_RESV_RESERVED]			= "reserved",
	[IOMMU_RESV_MSI]			= "msi",
	[IOMMU_RESV_SW_MSI]			= "msi",
};

#define IOMMU_CMD_LINE_DMA_API		BIT(0)
#define IOMMU_CMD_LINE_STRICT		BIT(1)

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data);
static int iommu_alloc_default_domain(struct iommu_group *group,
				      struct device *dev);
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);
static int __iommu_group_set_domain(struct iommu_group *group,
				    struct iommu_domain *new_domain);
static int iommu_create_device_direct_mappings(struct iommu_group *group,
					       struct device *dev);
static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
static ssize_t iommu_group_store_type(struct iommu_group *group,
				      const char *buf, size_t count);

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =		\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

static struct bus_type * const iommu_buses[] = {
	&platform_bus_type,
#ifdef CONFIG_PCI
	&pci_bus_type,
#endif
#ifdef CONFIG_ARM_AMBA
	&amba_bustype,
#endif
#ifdef CONFIG_FSL_MC_BUS
	&fsl_mc_bus_type,
#endif
#ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS
	&host1x_context_device_bus_type,
#endif
};

/*
 * Use a function instead of an array here because the domain-type is a
 * bit-field, so an array would waste memory.
 */
static const char *iommu_domain_type_str(unsigned int t)
{
	switch (t) {
	case IOMMU_DOMAIN_BLOCKED:
		return "Blocked";
	case IOMMU_DOMAIN_IDENTITY:
		return "Passthrough";
	case IOMMU_DOMAIN_UNMANAGED:
		return "Unmanaged";
	case IOMMU_DOMAIN_DMA:
	case IOMMU_DOMAIN_DMA_FQ:
		return "Translated";
	default:
		return "Unknown";
	}
}

static int __init iommu_subsys_init(void)
{
	struct notifier_block *nb;

	if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) {
		if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
			iommu_set_default_passthrough(false);
		else
			iommu_set_default_translated(false);

		if (iommu_default_passthrough() && cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
			iommu_set_default_translated(false);
		}
	}

	if (!iommu_default_passthrough() && !iommu_dma_strict)
		iommu_def_domain_type = IOMMU_DOMAIN_DMA_FQ;

	pr_info("Default domain type: %s %s\n",
		iommu_domain_type_str(iommu_def_domain_type),
		(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ?
			"(set via kernel command line)" : "");

	if (!iommu_default_passthrough())
		pr_info("DMA domain TLB invalidation policy: %s mode %s\n",
			iommu_dma_strict ? "strict" : "lazy",
			(iommu_cmd_line & IOMMU_CMD_LINE_STRICT) ?
				"(set via kernel command line)" : "");

	nb = kcalloc(ARRAY_SIZE(iommu_buses), sizeof(*nb), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) {
		nb[i].notifier_call = iommu_bus_notifier;
		bus_register_notifier(iommu_buses[i], &nb[i]);
	}

	return 0;
}
subsys_initcall(iommu_subsys_init);

static int remove_iommu_group(struct device *dev, void *data)
{
	if (dev->iommu && dev->iommu->iommu_dev == data)
		iommu_release_device(dev);

	return 0;
}

/**
 * iommu_device_register() - Register an IOMMU hardware instance
 * @iommu: IOMMU handle for the instance
 * @ops: IOMMU ops to associate with the instance
 * @hwdev: (optional) actual instance device, used for fwnode lookup
 *
 * Return: 0 on success, or an error.
 */
int iommu_device_register(struct iommu_device *iommu,
			  const struct iommu_ops *ops, struct device *hwdev)
{
	int err = 0;

	/* We need to be able to take module references appropriately */
	if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner))
		return -EINVAL;
	/*
	 * Temporarily enforce global restriction to a single driver. This was
	 * already the de-facto behaviour, since any possible combination of
	 * existing drivers would compete for at least the PCI or platform bus.
	 */
	if (iommu_buses[0]->iommu_ops && iommu_buses[0]->iommu_ops != ops
	    && !trace_android_vh_bus_iommu_probe_enabled())
		return -EBUSY;

	iommu->ops = ops;
	if (hwdev)
		iommu->fwnode = dev_fwnode(hwdev);

	spin_lock(&iommu_device_lock);
	list_add_tail(&iommu->list, &iommu_device_list);
	spin_unlock(&iommu_device_lock);

	for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) {
		bool skip = false;

		trace_android_vh_bus_iommu_probe(iommu, iommu_buses[i], &skip);
		if (skip)
			continue;
		iommu_buses[i]->iommu_ops = ops;
		err = bus_iommu_probe(iommu_buses[i]);
	}
	if (err)
		iommu_device_unregister(iommu);
	return err;
}
EXPORT_SYMBOL_GPL(iommu_device_register);
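
/*
 * Illustrative sketch (not part of the original file): a typical driver
 * probe path pairs iommu_device_sysfs_add() with iommu_device_register().
 * "struct my_iommu" and "my_iommu_ops" are hypothetical driver objects.
 *
 *	static int my_iommu_probe(struct platform_device *pdev)
 *	{
 *		struct my_iommu *smmu;
 *		int ret;
 *
 *		smmu = devm_kzalloc(&pdev->dev, sizeof(*smmu), GFP_KERNEL);
 *		if (!smmu)
 *			return -ENOMEM;
 *
 *		ret = iommu_device_sysfs_add(&smmu->iommu, &pdev->dev,
 *					     NULL, "my-iommu");
 *		if (ret)
 *			return ret;
 *
 *		ret = iommu_device_register(&smmu->iommu, &my_iommu_ops,
 *					    &pdev->dev);
 *		if (ret)
 *			iommu_device_sysfs_remove(&smmu->iommu);
 *		return ret;
 *	}
 */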

void iommu_device_unregister(struct iommu_device *iommu)
{
	for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++)
		bus_for_each_dev(iommu_buses[i], NULL, iommu, remove_iommu_group);

	spin_lock(&iommu_device_lock);
	list_del(&iommu->list);
	spin_unlock(&iommu_device_lock);
}
EXPORT_SYMBOL_GPL(iommu_device_unregister);

static struct dev_iommu *dev_iommu_get(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	if (param)
		return param;

	param = kzalloc(sizeof(*param), GFP_KERNEL);
	if (!param)
		return NULL;

	mutex_init(&param->lock);
	dev->iommu = param;
	return param;
}

static void dev_iommu_free(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	dev->iommu = NULL;
	if (param->fwspec) {
		fwnode_handle_put(param->fwspec->iommu_fwnode);
		kfree(param->fwspec);
	}
	kfree(param);
}

static u32 dev_iommu_get_max_pasids(struct device *dev)
{
	u32 max_pasids = 0, bits = 0;
	int ret;

	if (dev_is_pci(dev)) {
		ret = pci_max_pasids(to_pci_dev(dev));
		if (ret > 0)
			max_pasids = ret;
	} else {
		ret = device_property_read_u32(dev, "pasid-num-bits", &bits);
		if (!ret)
			max_pasids = 1UL << bits;
	}

	return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids);
}

DEFINE_MUTEX(iommu_probe_device_lock);

static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_device *iommu_dev;
	struct iommu_group *group;
	int ret;

	if (!ops)
		return -ENODEV;
	/*
	 * Serialise to avoid races between IOMMU drivers registering in
	 * parallel and/or the "replay" calls from ACPI/OF code via client
	 * driver probe. Once the latter have been cleaned up we should
	 * probably be able to use device_lock() here to minimise the scope,
	 * but for now enforcing a simple global ordering is fine.
	 */
	lockdep_assert_held(&iommu_probe_device_lock);
	if (!dev_iommu_get(dev)) {
		ret = -ENOMEM;
		goto err_out;
	}

	if (!try_module_get(ops->owner)) {
		ret = -EINVAL;
		goto err_free;
	}

	iommu_dev = ops->probe_device(dev);
	if (IS_ERR(iommu_dev)) {
		ret = PTR_ERR(iommu_dev);
		goto out_module_put;
	}

	dev->iommu->iommu_dev = iommu_dev;
	dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev);

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_release;
	}

	mutex_lock(&group->mutex);
	if (group_list && !group->default_domain && list_empty(&group->entry))
		list_add_tail(&group->entry, group_list);
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	iommu_device_link(iommu_dev, dev);

	return 0;

out_release:
	if (ops->release_device)
		ops->release_device(dev);

out_module_put:
	module_put(ops->owner);

err_free:
	dev_iommu_free(dev);

err_out:
	return ret;
}

int iommu_probe_device(struct device *dev)
{
	const struct iommu_ops *ops;
	struct iommu_group *group;
	int ret;

	mutex_lock(&iommu_probe_device_lock);
	ret = __iommu_probe_device(dev, NULL);
	mutex_unlock(&iommu_probe_device_lock);
	if (ret)
		goto err_out;

	group = iommu_group_get(dev);
	if (!group) {
		ret = -ENODEV;
		goto err_release;
	}

	/*
	 * Try to allocate a default domain - needs support from the
	 * IOMMU driver. There are still some drivers which don't
	 * support default domains, so the return value is not yet
	 * checked.
	 */
	mutex_lock(&group->mutex);
	iommu_alloc_default_domain(group, dev);

	/*
	 * If device joined an existing group which has been claimed, don't
	 * attach the default domain.
	 */
	if (group->default_domain && !group->owner) {
		ret = __iommu_attach_device(group->default_domain, dev);
		if (ret) {
			mutex_unlock(&group->mutex);
			iommu_group_put(group);
			goto err_release;
		}
	}

	iommu_create_device_direct_mappings(group, dev);

	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	ops = dev_iommu_ops(dev);
	if (ops->probe_finalize)
		ops->probe_finalize(dev);

	return 0;

err_release:
	iommu_release_device(dev);

err_out:
	return ret;
}

void iommu_release_device(struct device *dev)
{
	const struct iommu_ops *ops;

	if (!dev->iommu)
		return;

	iommu_device_unlink(dev->iommu->iommu_dev, dev);

	ops = dev_iommu_ops(dev);
	if (ops->release_device)
		ops->release_device(dev);

	iommu_group_remove_device(dev);
	module_put(ops->owner);
	dev_iommu_free(dev);
}

static int __init iommu_set_def_domain_type(char *str)
{
	bool pt;
	int ret;

	ret = kstrtobool(str, &pt);
	if (ret)
		return ret;

	if (pt)
		iommu_set_default_passthrough(true);
	else
		iommu_set_default_translated(true);

	return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);

static int __init iommu_dma_setup(char *str)
{
	int ret = kstrtobool(str, &iommu_dma_strict);

	if (!ret)
		iommu_cmd_line |= IOMMU_CMD_LINE_STRICT;
	return ret;
}
early_param("iommu.strict", iommu_dma_setup);
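
/*
 * For reference, an illustrative command line (not taken from this file)
 * that exercises both parameters above:
 *
 *	iommu.passthrough=0 iommu.strict=0
 *
 * selects a translated default domain, and since strict invalidation is
 * disabled, iommu_subsys_init() upgrades it to IOMMU_DOMAIN_DMA_FQ;
 * iommu.strict=1 would instead force synchronous TLB invalidation on
 * every unmap (IOMMU_DOMAIN_DMA).
 */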

void iommu_set_dma_strict(void)
{
	iommu_dma_strict = true;
	if (iommu_def_domain_type == IOMMU_DOMAIN_DMA_FQ)
		iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}

/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * Elements are sorted by start address and overlapping segments
 * of the same type are merged.
 */
static int iommu_insert_resv_region(struct iommu_resv_region *new,
				    struct list_head *regions)
{
	struct iommu_resv_region *iter, *tmp, *nr, *top;
	LIST_HEAD(stack);

	nr = iommu_alloc_resv_region(new->start, new->length,
				     new->prot, new->type, GFP_KERNEL);
	if (!nr)
		return -ENOMEM;

	/* First add the new element based on start address sorting */
	list_for_each_entry(iter, regions, list) {
		if (nr->start < iter->start ||
		    (nr->start == iter->start && nr->type <= iter->type))
			break;
	}
	list_add_tail(&nr->list, &iter->list);

	/* Merge overlapping segments of type nr->type in @regions, if any */
	list_for_each_entry_safe(iter, tmp, regions, list) {
		phys_addr_t top_end, iter_end = iter->start + iter->length - 1;

		/* no merge needed on elements of different types than @new */
		if (iter->type != new->type) {
			list_move_tail(&iter->list, &stack);
			continue;
		}

		/* look for the last stack element of same type as @iter */
		list_for_each_entry_reverse(top, &stack, list)
			if (top->type == iter->type)
				goto check_overlap;

		list_move_tail(&iter->list, &stack);
		continue;

check_overlap:
		top_end = top->start + top->length - 1;

		if (iter->start > top_end + 1) {
			list_move_tail(&iter->list, &stack);
		} else {
			top->length = max(top_end, iter_end) - top->start + 1;
			list_del(&iter->list);
			kfree(iter);
		}
	}
	list_splice(&stack, regions);
	return 0;
}
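
/*
 * A small worked example of the insert/merge behaviour above, with
 * hypothetical addresses: inserting a "direct" region [0x1000, 0x2fff]
 * into a list already holding a "direct" region [0x2000, 0x3fff]
 * collapses them into a single "direct" region [0x1000, 0x3fff], while a
 * "reserved" region covering the same range would remain a separate
 * element, since only overlapping segments of the same type are merged.
 */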

static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
				 struct list_head *group_resv_regions)
{
	struct iommu_resv_region *entry;
	int ret = 0;

	list_for_each_entry(entry, dev_resv_regions, list) {
		ret = iommu_insert_resv_region(entry, group_resv_regions);
		if (ret)
			break;
	}
	return ret;
}

int iommu_get_group_resv_regions(struct iommu_group *group,
				 struct list_head *head)
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	list_for_each_entry(device, &group->devices, list) {
		struct list_head dev_resv_regions;

		/*
		 * Non-API groups still expose reserved_regions in sysfs,
		 * so filter out calls that get here that way.
		 */
		if (!device->dev->iommu)
			break;

		INIT_LIST_HEAD(&dev_resv_regions);
		iommu_get_resv_regions(device->dev, &dev_resv_regions);
		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
		iommu_put_resv_regions(device->dev, &dev_resv_regions);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);

static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
					     char *buf)
{
	struct iommu_resv_region *region, *next;
	struct list_head group_resv_regions;
	char *str = buf;

	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);

	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
		str += sprintf(str, "0x%016llx 0x%016llx %s\n",
			       (long long int)region->start,
			       (long long int)(region->start +
						region->length - 1),
			       iommu_group_resv_type_string[region->type]);
		kfree(region);
	}

	return (str - buf);
}

static ssize_t iommu_group_show_type(struct iommu_group *group,
				     char *buf)
{
	char *type = "unknown\n";

	mutex_lock(&group->mutex);
	if (group->default_domain) {
		switch (group->default_domain->type) {
		case IOMMU_DOMAIN_BLOCKED:
			type = "blocked\n";
			break;
		case IOMMU_DOMAIN_IDENTITY:
			type = "identity\n";
			break;
		case IOMMU_DOMAIN_UNMANAGED:
			type = "unmanaged\n";
			break;
		case IOMMU_DOMAIN_DMA:
			type = "DMA\n";
			break;
		case IOMMU_DOMAIN_DMA_FQ:
			type = "DMA-FQ\n";
			break;
		}
	}
	mutex_unlock(&group->mutex);
	strcpy(buf, type);

	return strlen(type);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
			iommu_group_show_resv_regions, NULL);

static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type,
			iommu_group_store_type);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_free(&iommu_group_ida, group->id);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);
	if (group->blocking_domain)
		iommu_domain_free(group->blocking_domain);

	kfree(group->name);
	kfree(group);
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group. The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added. Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	INIT_LIST_HEAD(&group->entry);
	xa_init(&group->pasid_array);

	ret = ida_alloc(&iommu_group_ida, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		kobject_put(&group->kobj);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group. We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	ret = iommu_group_create_file(group,
				      &iommu_group_attr_reserved_regions);
	if (ret) {
		kobject_put(group->devices_kobj);
		return ERR_PTR(ret);
	}

	ret = iommu_group_create_file(group, &iommu_group_attr_type);
	if (ret) {
		kobject_put(group->devices_kobj);
		return ERR_PTR(ret);
	}

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
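
/*
 * Illustrative sketch of a driver-specific ->device_group() callback
 * built on iommu_group_alloc() ("my_device_group" and the group name are
 * hypothetical; the core adds the device and drops the extra reference):
 *
 *	static struct iommu_group *my_device_group(struct device *dev)
 *	{
 *		struct iommu_group *group = iommu_group_alloc();
 *
 *		if (IS_ERR(group))
 *			return group;
 *
 *		iommu_group_set_name(group, "my-iommu-group");
 *		return group;
 *	}
 */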

struct iommu_group *iommu_group_get_by_id(int id)
{
	struct kobject *group_kobj;
	struct iommu_group *group;
	const char *name;

	if (!iommu_group_kset)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%d", id);
	if (!name)
		return NULL;

	group_kobj = kset_find_obj(iommu_group_kset, name);
	kfree(name);

	if (!group_kobj)
		return NULL;

	group = container_of(group_kobj, struct iommu_group, kobj);
	BUG_ON(group->id != id);

	kobject_get(group->devices_kobj);
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to retrieve it. Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to set the data after
 * the group has been allocated. Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group. When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_create_device_direct_mappings(struct iommu_group *group,
					       struct device *dev)
{
	struct iommu_domain *domain = group->default_domain;
	struct iommu_resv_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	if (!domain || !iommu_is_dma_domain(domain))
		return 0;

	BUG_ON(!domain->pgsize_bitmap);

	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
	INIT_LIST_HEAD(&mappings);

	iommu_get_resv_regions(dev, &mappings);

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;
		size_t map_size = 0;

		start = ALIGN(entry->start, pg_size);
		end   = ALIGN(entry->start + entry->length, pg_size);

		if (entry->type != IOMMU_RESV_DIRECT &&
		    entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
			continue;

		for (addr = start; addr <= end; addr += pg_size) {
			phys_addr_t phys_addr;

			if (addr == end)
				goto map_end;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (!phys_addr) {
				map_size += pg_size;
				continue;
			}

map_end:
			if (map_size) {
				ret = iommu_map(domain, addr - map_size,
						addr - map_size, map_size,
						entry->prot);
				if (ret)
					goto out;
				map_size = 0;
			}
		}
	}

	iommu_flush_iotlb_all(domain);

out:
	iommu_put_resv_regions(dev, &mappings);

	return ret;
}

static bool iommu_is_attach_deferred(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (ops->is_attach_deferred)
		return ops->is_attach_deferred(dev);

	return false;
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group. Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct group_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret)
		goto err_free_device;

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		ret = -ENOMEM;
		goto err_remove_link;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	if (group->domain && !iommu_is_attach_deferred(dev))
		ret = __iommu_attach_device(group->domain, dev);
	mutex_unlock(&group->mutex);
	if (ret)
		goto err_put_group;

	trace_add_device_to_group(group->id, dev);

	dev_info(dev, "Adding to iommu group %d\n", group->id);

	return 0;

err_put_group:
	mutex_lock(&group->mutex);
	list_del(&device->list);
	mutex_unlock(&group->mutex);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
	sysfs_remove_link(group->devices_kobj, device->name);
err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group. This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *tmp_device, *device = NULL;

	if (!group)
		return;

	dev_info(dev, "Removing from iommu group %d\n", group->id);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int iommu_group_device_count(struct iommu_group *group)
{
	struct group_device *entry;
	int ret = 0;

	list_for_each_entry(entry, &group->devices, list)
		ret++;

	return ret;
}

static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
				      int (*fn)(struct device *, void *))
{
	struct group_device *device;
	int ret = 0;

	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_group_for_each_dev(group, data, fn);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
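
/*
 * Illustrative use of the iterator above (the callback is hypothetical):
 * count how many devices in a group sit on the PCI bus.
 *
 *	static int count_pci_devs(struct device *dev, void *data)
 *	{
 *		int *count = data;
 *
 *		if (dev_is_pci(dev))
 *			(*count)++;
 *		return 0; // a non-zero return would stop the walk
 *	}
 *
 *	int count = 0;
 *
 *	iommu_group_for_each_dev(group, &count, count_pci_devs);
 */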

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device. If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group. Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_ref_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group. Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_register_device_fault_handler() - Register a device fault handler
 * @dev: the device
 * @handler: the fault handler
 * @data: private data passed as argument to the handler
 *
 * When an IOMMU fault event is received, this handler gets called with the
 * fault event and data as argument. The handler should return 0 on success. If
 * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
 * complete the fault by calling iommu_page_response() with one of the following
 * response codes:
 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
 *   page faults if possible.
 *
 * Return 0 if the fault handler was installed successfully, or an error.
 */
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);
	/* Only allow one fault handler registered for each device */
	if (param->fault_param) {
		ret = -EBUSY;
		goto done_unlock;
	}

	get_device(dev);
	param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
	if (!param->fault_param) {
		put_device(dev);
		ret = -ENOMEM;
		goto done_unlock;
	}
	param->fault_param->handler = handler;
	param->fault_param->data = data;
	mutex_init(&param->fault_param->lock);
	INIT_LIST_HEAD(&param->fault_param->faults);

done_unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
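
/*
 * Illustrative sketch of a consumer installing a fault handler
 * ("my_iopf_handler" and its queueing policy are hypothetical). A
 * recoverable page request would later be completed from the consumer's
 * worker via iommu_page_response():
 *
 *	static int my_iopf_handler(struct iommu_fault *fault, void *data)
 *	{
 *		if (fault->type != IOMMU_FAULT_PAGE_REQ)
 *			return -EOPNOTSUPP;
 *		// hand the request off to a workqueue for page-in
 *		return 0;
 *	}
 *
 *	ret = iommu_register_device_fault_handler(dev, my_iopf_handler,
 *						  drvdata);
 */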

/**
 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
 * @dev: the device
 *
 * Remove the device fault handler installed with
 * iommu_register_device_fault_handler().
 *
 * Return 0 on success, or an error.
 */
int iommu_unregister_device_fault_handler(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);

	if (!param->fault_param)
		goto unlock;

	/* we cannot unregister handler if there are pending faults */
	if (!list_empty(&param->fault_param->faults)) {
		ret = -EBUSY;
		goto unlock;
	}

	kfree(param->fault_param);
	param->fault_param = NULL;
	put_device(dev);
unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);

/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
 * handler. When this function fails and the fault is recoverable, it is the
 * caller's responsibility to complete the fault.
 *
 * Return 0 on success, or an error.
 */
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	struct dev_iommu *param = dev->iommu;
	struct iommu_fault_event *evt_pending = NULL;
	struct iommu_fault_param *fparam;
	int ret = 0;

	if (!param || !evt)
		return -EINVAL;

	/* we only report device fault if there is a handler registered */
	mutex_lock(&param->lock);
	fparam = param->fault_param;
	if (!fparam || !fparam->handler) {
		ret = -EINVAL;
		goto done_unlock;
	}

	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
		evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
				      GFP_KERNEL);
		if (!evt_pending) {
			ret = -ENOMEM;
			goto done_unlock;
		}
		mutex_lock(&fparam->lock);
		list_add_tail(&evt_pending->list, &fparam->faults);
		mutex_unlock(&fparam->lock);
	}

	ret = fparam->handler(&evt->fault, fparam->data);
	if (ret && evt_pending) {
		mutex_lock(&fparam->lock);
		list_del(&evt_pending->list);
		mutex_unlock(&fparam->lock);
		kfree(evt_pending);
	}
done_unlock:
	mutex_unlock(&param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);

int iommu_page_response(struct device *dev,
			struct iommu_page_response *msg)
{
	bool needs_pasid;
	int ret = -EINVAL;
	struct iommu_fault_event *evt;
	struct iommu_fault_page_request *prm;
	struct dev_iommu *param = dev->iommu;
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;

	if (!ops->page_response)
		return -ENODEV;

	if (!param || !param->fault_param)
		return -EINVAL;

	if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
	    msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
		return -EINVAL;

	/* Only send response if there is a fault report pending */
	mutex_lock(&param->fault_param->lock);
	if (list_empty(&param->fault_param->faults)) {
		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
		goto done_unlock;
	}
	/*
	 * Check if we have a matching page request pending to respond,
	 * otherwise return -EINVAL
	 */
	list_for_each_entry(evt, &param->fault_param->faults, list) {
		prm = &evt->fault.prm;
		if (prm->grpid != msg->grpid)
			continue;

		/*
		 * If the PASID is required, the corresponding request is
		 * matched using the group ID, the PASID valid bit and the PASID
		 * value. Otherwise only the group ID matches request and
		 * response.
		 */
		needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
		if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
			continue;

		if (!needs_pasid && has_pasid) {
			/* No big deal, just clear it. */
			msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
			msg->pasid = 0;
		}

		ret = ops->page_response(dev, evt, msg);
		list_del(&evt->list);
		kfree(evt);
		break;
	}

done_unlock:
	mutex_unlock(&param->fault_param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_page_response);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding. This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as they pass through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups. For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups. DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports). It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop. To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device. Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(generic_device_group);

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device. A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS. Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases. If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any. No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(pci_device_group);
1599
1600 /* Get the IOMMU group for device on fsl-mc bus */
fsl_mc_device_group(struct device * dev)1601 struct iommu_group *fsl_mc_device_group(struct device *dev)
1602 {
1603 struct device *cont_dev = fsl_mc_cont_dev(dev);
1604 struct iommu_group *group;
1605
1606 group = iommu_group_get(cont_dev);
1607 if (!group)
1608 group = iommu_group_alloc();
1609 return group;
1610 }
1611 EXPORT_SYMBOL_GPL(fsl_mc_device_group);
1612
iommu_get_def_domain_type(struct device * dev)1613 static int iommu_get_def_domain_type(struct device *dev)
1614 {
1615 const struct iommu_ops *ops = dev_iommu_ops(dev);
1616
1617 if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
1618 return IOMMU_DOMAIN_DMA;
1619
1620 if (ops->def_domain_type)
1621 return ops->def_domain_type(dev);
1622
1623 return 0;
1624 }
1625
iommu_group_alloc_default_domain(struct bus_type * bus,struct iommu_group * group,unsigned int type)1626 static int iommu_group_alloc_default_domain(struct bus_type *bus,
1627 struct iommu_group *group,
1628 unsigned int type)
1629 {
1630 struct iommu_domain *dom;
1631
1632 dom = __iommu_domain_alloc(bus, type);
1633 if (!dom && type != IOMMU_DOMAIN_DMA) {
1634 dom = __iommu_domain_alloc(bus, IOMMU_DOMAIN_DMA);
1635 if (dom)
1636 pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA",
1637 type, group->name);
1638 }
1639
1640 if (!dom)
1641 return -ENOMEM;
1642
1643 group->default_domain = dom;
1644 if (!group->domain)
1645 group->domain = dom;
1646 return 0;
1647 }
1648
iommu_alloc_default_domain(struct iommu_group * group,struct device * dev)1649 static int iommu_alloc_default_domain(struct iommu_group *group,
1650 struct device *dev)
1651 {
1652 unsigned int type;
1653
1654 if (group->default_domain)
1655 return 0;
1656
1657 type = iommu_get_def_domain_type(dev) ? : iommu_def_domain_type;
1658
1659 return iommu_group_alloc_default_domain(dev->bus, group, type);
1660 }
1661
1662 /**
1663 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
1664 * @dev: target device
1665 *
1666 * This function is intended to be called by IOMMU drivers and extended to
1667 * support common, bus-defined algorithms when determining or creating the
1668 * IOMMU group for a device. On success, the caller will hold a reference
1669 * to the returned IOMMU group, which will already include the provided
1670 * device. The reference should be released with iommu_group_put().
1671 */
iommu_group_get_for_dev(struct device * dev)1672 static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
1673 {
1674 const struct iommu_ops *ops = dev_iommu_ops(dev);
1675 struct iommu_group *group;
1676 int ret;
1677
1678 group = iommu_group_get(dev);
1679 if (group)
1680 return group;
1681
1682 group = ops->device_group(dev);
1683 if (WARN_ON_ONCE(group == NULL))
1684 return ERR_PTR(-EINVAL);
1685
1686 if (IS_ERR(group))
1687 return group;
1688
1689 ret = iommu_group_add_device(group, dev);
1690 if (ret)
1691 goto out_put_group;
1692
1693 return group;
1694
1695 out_put_group:
1696 iommu_group_put(group);
1697
1698 return ERR_PTR(ret);
1699 }
1700
iommu_group_default_domain(struct iommu_group * group)1701 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
1702 {
1703 return group->default_domain;
1704 }
1705
probe_iommu_group(struct device * dev,void * data)1706 static int probe_iommu_group(struct device *dev, void *data)
1707 {
1708 struct list_head *group_list = data;
1709 struct iommu_group *group;
1710 int ret;
1711
1712 /* Device is probed already if in a group */
1713 group = iommu_group_get(dev);
1714 if (group) {
1715 iommu_group_put(group);
1716 return 0;
1717 }
1718
1719 mutex_lock(&iommu_probe_device_lock);
1720 ret = __iommu_probe_device(dev, group_list);
1721 mutex_unlock(&iommu_probe_device_lock);
1722 if (ret == -ENODEV)
1723 ret = 0;
1724
1725 return ret;
1726 }
1727
iommu_bus_notifier(struct notifier_block * nb,unsigned long action,void * data)1728 static int iommu_bus_notifier(struct notifier_block *nb,
1729 unsigned long action, void *data)
1730 {
1731 struct device *dev = data;
1732
1733 if (action == BUS_NOTIFY_ADD_DEVICE) {
1734 int ret;
1735
1736 ret = iommu_probe_device(dev);
1737 return (ret) ? NOTIFY_DONE : NOTIFY_OK;
1738 } else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
1739 iommu_release_device(dev);
1740 return NOTIFY_OK;
1741 }
1742
1743 return 0;
1744 }
1745
1746 struct __group_domain_type {
1747 struct device *dev;
1748 unsigned int type;
1749 };
1750
probe_get_default_domain_type(struct device * dev,void * data)1751 static int probe_get_default_domain_type(struct device *dev, void *data)
1752 {
1753 struct __group_domain_type *gtype = data;
1754 unsigned int type = iommu_get_def_domain_type(dev);
1755
1756 if (type) {
1757 if (gtype->type && gtype->type != type) {
1758 dev_warn(dev, "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
1759 iommu_domain_type_str(type),
1760 dev_name(gtype->dev),
1761 iommu_domain_type_str(gtype->type));
1762 gtype->type = 0;
1763 }
1764
1765 if (!gtype->dev) {
1766 gtype->dev = dev;
1767 gtype->type = type;
1768 }
1769 }
1770
1771 return 0;
1772 }
1773
probe_alloc_default_domain(struct bus_type * bus,struct iommu_group * group)1774 static void probe_alloc_default_domain(struct bus_type *bus,
1775 struct iommu_group *group)
1776 {
1777 struct __group_domain_type gtype;
1778
1779 if (group->default_domain)
1780 return;
1781
1782 memset(>ype, 0, sizeof(gtype));
1783
1784 /* Ask for default domain requirements of all devices in the group */
1785 __iommu_group_for_each_dev(group, >ype,
1786 probe_get_default_domain_type);
1787
1788 if (!gtype.type)
1789 gtype.type = iommu_def_domain_type;
1790
1791 iommu_group_alloc_default_domain(bus, group, gtype.type);
1792
1793 }
1794
iommu_group_do_dma_attach(struct device * dev,void * data)1795 static int iommu_group_do_dma_attach(struct device *dev, void *data)
1796 {
1797 struct iommu_domain *domain = data;
1798 int ret = 0;
1799
1800 if (!iommu_is_attach_deferred(dev))
1801 ret = __iommu_attach_device(domain, dev);
1802
1803 return ret;
1804 }
1805
__iommu_group_dma_attach(struct iommu_group * group)1806 static int __iommu_group_dma_attach(struct iommu_group *group)
1807 {
1808 return __iommu_group_for_each_dev(group, group->default_domain,
1809 iommu_group_do_dma_attach);
1810 }
1811
iommu_group_do_probe_finalize(struct device * dev,void * data)1812 static int iommu_group_do_probe_finalize(struct device *dev, void *data)
1813 {
1814 const struct iommu_ops *ops = dev_iommu_ops(dev);
1815
1816 if (ops->probe_finalize)
1817 ops->probe_finalize(dev);
1818
1819 return 0;
1820 }
1821
__iommu_group_dma_finalize(struct iommu_group * group)1822 static void __iommu_group_dma_finalize(struct iommu_group *group)
1823 {
1824 __iommu_group_for_each_dev(group, group->default_domain,
1825 iommu_group_do_probe_finalize);
1826 }
1827
iommu_do_create_direct_mappings(struct device * dev,void * data)1828 static int iommu_do_create_direct_mappings(struct device *dev, void *data)
1829 {
1830 struct iommu_group *group = data;
1831
1832 iommu_create_device_direct_mappings(group, dev);
1833
1834 return 0;
1835 }
1836
iommu_group_create_direct_mappings(struct iommu_group * group)1837 static int iommu_group_create_direct_mappings(struct iommu_group *group)
1838 {
1839 return __iommu_group_for_each_dev(group, group,
1840 iommu_do_create_direct_mappings);
1841 }
1842
bus_iommu_probe(struct bus_type * bus)1843 int bus_iommu_probe(struct bus_type *bus)
1844 {
1845 struct iommu_group *group, *next;
1846 LIST_HEAD(group_list);
1847 int ret;
1848
1849 /*
1850 * This code-path does not allocate the default domain when
1851 * creating the iommu group, so do it after the groups are
1852 * created.
1853 */
1854 ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
1855 if (ret)
1856 return ret;
1857
1858 list_for_each_entry_safe(group, next, &group_list, entry) {
1859 mutex_lock(&group->mutex);
1860
1861 /* Remove item from the list */
1862 list_del_init(&group->entry);
1863
1864 /* Try to allocate default domain */
1865 probe_alloc_default_domain(bus, group);
1866
1867 if (!group->default_domain) {
1868 mutex_unlock(&group->mutex);
1869 continue;
1870 }
1871
1872 iommu_group_create_direct_mappings(group);
1873
1874 ret = __iommu_group_dma_attach(group);
1875
1876 mutex_unlock(&group->mutex);
1877
1878 if (ret)
1879 break;
1880
1881 __iommu_group_dma_finalize(group);
1882 }
1883
1884 return ret;
1885 }
1886
1887 bool iommu_present(struct bus_type *bus)
1888 {
1889 return bus->iommu_ops != NULL;
1890 }
1891 EXPORT_SYMBOL_GPL(iommu_present);
1892
1893 /**
1894 * device_iommu_capable() - check for a general IOMMU capability
1895 * @dev: device to which the capability would be relevant, if available
1896 * @cap: IOMMU capability
1897 *
1898 * Return: true if an IOMMU is present and supports the given capability
1899 * for the given device, otherwise false.
1900 */
1901 bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
1902 {
1903 const struct iommu_ops *ops;
1904
1905 if (!dev->iommu || !dev->iommu->iommu_dev)
1906 return false;
1907
1908 ops = dev_iommu_ops(dev);
1909 if (!ops->capable)
1910 return false;
1911
1912 return ops->capable(dev, cap);
1913 }
1914 EXPORT_SYMBOL_GPL(device_iommu_capable);
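
/*
 * Illustrative sketch (an editorial addition, not part of the original
 * file): a driver's probe path might use device_iommu_capable() to decide
 * whether explicit cache maintenance is needed. The helper name below is
 * hypothetical; IOMMU_CAP_CACHE_COHERENCY is the real capability flag.
 */
static bool example_dev_needs_cache_sync(struct device *dev)
{
	/* A cache-coherent IOMMU snoops CPU caches, so no manual flushing. */
	return !device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY);
}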
1915
1916 /**
1917 * iommu_set_fault_handler() - set a fault handler for an iommu domain
1918 * @domain: iommu domain
1919 * @handler: fault handler
1920 * @token: user data, will be passed back to the fault handler
1921 *
1922 * This function should be used by IOMMU users which want to be notified
1923 * whenever an IOMMU fault happens.
1924 *
1925 * The fault handler itself should return 0 on success, and an appropriate
1926 * error code otherwise.
1927 */
1928 void iommu_set_fault_handler(struct iommu_domain *domain,
1929 iommu_fault_handler_t handler,
1930 void *token)
1931 {
1932 BUG_ON(!domain);
1933
1934 domain->handler = handler;
1935 domain->handler_token = token;
1936 }
1937 EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
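
/*
 * Minimal sketch of a handler matching iommu_fault_handler_t, registered
 * as iommu_set_fault_handler(domain, example_fault_handler, NULL). The
 * handler name and logging policy are illustrative; returning -ENOSYS
 * elicits the IOMMU driver's default behaviour, as documented for
 * report_iommu_fault() below.
 */
static int example_fault_handler(struct iommu_domain *domain,
				 struct device *dev, unsigned long iova,
				 int flags, void *token)
{
	dev_err(dev, "unexpected %s fault at iova %#lx\n",
		(flags & IOMMU_FAULT_WRITE) ? "write" : "read", iova);
	return -ENOSYS;	/* fall back to default fault handling */
}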
1938
1939 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
1940 unsigned type)
1941 {
1942 struct iommu_domain *domain;
1943
1944 if (bus == NULL || bus->iommu_ops == NULL)
1945 return NULL;
1946
1947 domain = bus->iommu_ops->domain_alloc(type);
1948 if (!domain)
1949 return NULL;
1950
1951 domain->type = type;
1952 /*
1953 * If not already set, assume all sizes by default; the driver
1954 * may override this later
1955 */
1956 if (!domain->pgsize_bitmap)
1957 domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
1958
1959 if (!domain->ops)
1960 domain->ops = bus->iommu_ops->default_domain_ops;
1961
1962 if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) {
1963 iommu_domain_free(domain);
1964 domain = NULL;
1965 }
1966 return domain;
1967 }
1968
1969 struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
1970 {
1971 return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
1972 }
1973 EXPORT_SYMBOL_GPL(iommu_domain_alloc);
1974
1975 void iommu_domain_free(struct iommu_domain *domain)
1976 {
1977 if (domain->type == IOMMU_DOMAIN_SVA)
1978 mmdrop(domain->mm);
1979 iommu_put_dma_cookie(domain);
1980 domain->ops->free(domain);
1981 }
1982 EXPORT_SYMBOL_GPL(iommu_domain_free);
1983
1984 /*
1985 * Put the group's domain back to the appropriate core-owned domain - either the
1986 * standard kernel-mode DMA configuration or an all-DMA-blocked domain.
1987 */
1988 static void __iommu_group_set_core_domain(struct iommu_group *group)
1989 {
1990 struct iommu_domain *new_domain;
1991 int ret;
1992
1993 if (group->owner)
1994 new_domain = group->blocking_domain;
1995 else
1996 new_domain = group->default_domain;
1997
1998 ret = __iommu_group_set_domain(group, new_domain);
1999 WARN(ret, "iommu driver failed to attach the default/blocking domain");
2000 }
2001
2002 static int __iommu_attach_device(struct iommu_domain *domain,
2003 struct device *dev)
2004 {
2005 int ret;
2006
2007 if (unlikely(domain->ops->attach_dev == NULL))
2008 return -ENODEV;
2009
2010 ret = domain->ops->attach_dev(domain, dev);
2011 if (!ret)
2012 trace_attach_device_to_domain(dev);
2013 return ret;
2014 }
2015
2016 int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
2017 {
2018 struct iommu_group *group;
2019 int ret;
2020
2021 group = iommu_group_get(dev);
2022 if (!group)
2023 return -ENODEV;
2024
2025 /*
2026 * Lock the group to make sure the device-count doesn't
2027 * change while we are attaching
2028 */
2029 mutex_lock(&group->mutex);
2030 ret = -EINVAL;
2031 if (iommu_group_device_count(group) != 1)
2032 goto out_unlock;
2033
2034 ret = __iommu_attach_group(domain, group);
2035
2036 out_unlock:
2037 mutex_unlock(&group->mutex);
2038 iommu_group_put(group);
2039
2040 return ret;
2041 }
2042 EXPORT_SYMBOL_GPL(iommu_attach_device);
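
/*
 * Illustrative lifecycle of an unmanaged domain around the attach API
 * above, assuming a singleton group (which iommu_attach_device()
 * enforces). This is a sketch only: SZ_4K comes from <linux/sizes.h>,
 * and the iova/pa values are caller-chosen.
 */
static int example_unmanaged_domain_lifecycle(struct device *dev,
					      unsigned long iova,
					      phys_addr_t pa)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(dev->bus);
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto out_free;

	ret = iommu_map(domain, iova, pa, SZ_4K, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	/* ... device-initiated DMA through @iova happens here ... */

	iommu_unmap(domain, iova, SZ_4K);
out_detach:
	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}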
2043
2044 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
2045 {
2046 if (iommu_is_attach_deferred(dev))
2047 return __iommu_attach_device(domain, dev);
2048
2049 return 0;
2050 }
2051
2052 static void __iommu_detach_device(struct iommu_domain *domain,
2053 struct device *dev)
2054 {
2055 if (iommu_is_attach_deferred(dev))
2056 return;
2057
2058 domain->ops->detach_dev(domain, dev);
2059 trace_detach_device_from_domain(dev);
2060 }
2061
2062 void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
2063 {
2064 struct iommu_group *group;
2065
2066 group = iommu_group_get(dev);
2067 if (!group)
2068 return;
2069
2070 mutex_lock(&group->mutex);
2071 if (WARN_ON(domain != group->domain) ||
2072 WARN_ON(iommu_group_device_count(group) != 1))
2073 goto out_unlock;
2074 __iommu_group_set_core_domain(group);
2075
2076 out_unlock:
2077 mutex_unlock(&group->mutex);
2078 iommu_group_put(group);
2079 }
2080 EXPORT_SYMBOL_GPL(iommu_detach_device);
2081
2082 struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
2083 {
2084 struct iommu_domain *domain;
2085 struct iommu_group *group;
2086
2087 group = iommu_group_get(dev);
2088 if (!group)
2089 return NULL;
2090
2091 domain = group->domain;
2092
2093 iommu_group_put(group);
2094
2095 return domain;
2096 }
2097 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
2098
2099 /*
2100 * For IOMMU_DOMAIN_DMA implementations, which already provide their own
2101 * guarantees that the group and its default domain are valid and correct.
2102 */
2103 struct iommu_domain *iommu_get_dma_domain(struct device *dev)
2104 {
2105 return dev->iommu_group->default_domain;
2106 }
2107
2108 /*
2109 * IOMMU groups are really the natural working unit of the IOMMU, but
2110 * the IOMMU API works on domains and devices. Bridge that gap by
2111 * iterating over the devices in a group. Ideally we'd have a single
2112 * device which represents the requestor ID of the group, but we also
2113 * allow IOMMU drivers to create policy defined minimum sets, where
2114 * the physical hardware may be able to distinguish members, but we
2115 * wish to group them at a higher level (ex. untrusted multi-function
2116 * PCI devices). Thus we attach each device.
2117 */
2118 static int iommu_group_do_attach_device(struct device *dev, void *data)
2119 {
2120 struct iommu_domain *domain = data;
2121
2122 return __iommu_attach_device(domain, dev);
2123 }
2124
2125 static int __iommu_attach_group(struct iommu_domain *domain,
2126 struct iommu_group *group)
2127 {
2128 int ret;
2129
2130 if (group->domain && group->domain != group->default_domain &&
2131 group->domain != group->blocking_domain)
2132 return -EBUSY;
2133
2134 ret = __iommu_group_for_each_dev(group, domain,
2135 iommu_group_do_attach_device);
2136 if (ret == 0) {
2137 group->domain = domain;
2138 } else {
2139 /*
2140 * To recover from the case when certain device within the
2141 * group fails to attach to the new domain, we need to force-attach
2142 * all devices back to the old domain. The old
2143 * domain is compatible for all devices in the group,
2144 * hence the iommu driver should always return success.
2145 */
2146 struct iommu_domain *old_domain = group->domain;
2147
2148 group->domain = NULL;
2149 WARN(__iommu_group_set_domain(group, old_domain),
2150 "iommu driver failed to attach a compatible domain");
2151 }
2152
2153 return ret;
2154 }
2155
2156 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
2157 {
2158 int ret;
2159
2160 mutex_lock(&group->mutex);
2161 ret = __iommu_attach_group(domain, group);
2162 mutex_unlock(&group->mutex);
2163
2164 return ret;
2165 }
2166 EXPORT_SYMBOL_GPL(iommu_attach_group);
2167
2168 static int iommu_group_do_detach_device(struct device *dev, void *data)
2169 {
2170 struct iommu_domain *domain = data;
2171
2172 __iommu_detach_device(domain, dev);
2173
2174 return 0;
2175 }
2176
2177 static int __iommu_group_set_domain(struct iommu_group *group,
2178 struct iommu_domain *new_domain)
2179 {
2180 int ret;
2181
2182 if (group->domain == new_domain)
2183 return 0;
2184
2185 /*
2186 * New drivers should support default domains and so the detach_dev() op
2187 * will never be called. Otherwise the NULL domain represents some
2188 * platform specific behavior.
2189 */
2190 if (!new_domain) {
2191 if (WARN_ON(!group->domain->ops->detach_dev))
2192 return -EINVAL;
2193 __iommu_group_for_each_dev(group, group->domain,
2194 iommu_group_do_detach_device);
2195 group->domain = NULL;
2196 return 0;
2197 }
2198
2199 /*
2200 * Changing the domain is done by calling attach_dev() on the new
2201 * domain. This switch does not have to be atomic and DMA can be
2202 * discarded during the transition. DMA must only be able to access
2203 * either new_domain or group->domain, never something else.
2204 *
2205 * Note that this is called in error unwind paths, so attaching to a
2206 * domain that has already been attached cannot fail.
2207 */
2208 ret = __iommu_group_for_each_dev(group, new_domain,
2209 iommu_group_do_attach_device);
2210 if (ret)
2211 return ret;
2212 group->domain = new_domain;
2213 return 0;
2214 }
2215
2216 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
2217 {
2218 mutex_lock(&group->mutex);
2219 __iommu_group_set_core_domain(group);
2220 mutex_unlock(&group->mutex);
2221 }
2222 EXPORT_SYMBOL_GPL(iommu_detach_group);
2223
2224 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
2225 {
2226 if (domain->type == IOMMU_DOMAIN_IDENTITY)
2227 return iova;
2228
2229 if (domain->type == IOMMU_DOMAIN_BLOCKED)
2230 return 0;
2231
2232 return domain->ops->iova_to_phys(domain, iova);
2233 }
2234 EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
2235
2236 static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
2237 phys_addr_t paddr, size_t size, size_t *count)
2238 {
2239 unsigned int pgsize_idx, pgsize_idx_next;
2240 unsigned long pgsizes;
2241 size_t offset, pgsize, pgsize_next;
2242 unsigned long addr_merge = paddr | iova;
2243
2244 /* Page sizes supported by the hardware and small enough for @size */
2245 pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0);
2246
2247 /* Constrain the page sizes further based on the maximum alignment */
2248 if (likely(addr_merge))
2249 pgsizes &= GENMASK(__ffs(addr_merge), 0);
2250
2251 /* Make sure we have at least one suitable page size */
2252 BUG_ON(!pgsizes);
2253
2254 /* Pick the biggest page size remaining */
2255 pgsize_idx = __fls(pgsizes);
2256 pgsize = BIT(pgsize_idx);
2257 if (!count)
2258 return pgsize;
2259
2260 	/* Find the next biggest supported page size, if it exists */
2261 pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
2262 if (!pgsizes)
2263 goto out_set_count;
2264
2265 pgsize_idx_next = __ffs(pgsizes);
2266 pgsize_next = BIT(pgsize_idx_next);
2267
2268 /*
2269 * There's no point trying a bigger page size unless the virtual
2270 * and physical addresses are similarly offset within the larger page.
2271 */
2272 if ((iova ^ paddr) & (pgsize_next - 1))
2273 goto out_set_count;
2274
2275 /* Calculate the offset to the next page size alignment boundary */
2276 offset = pgsize_next - (addr_merge & (pgsize_next - 1));
2277
2278 /*
2279 * If size is big enough to accommodate the larger page, reduce
2280 * the number of smaller pages.
2281 */
2282 if (offset + pgsize_next <= size)
2283 size = offset;
2284
2285 out_set_count:
2286 *count = size >> pgsize_idx;
2287 return pgsize;
2288 }
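
/*
 * Worked example with hypothetical values: pgsize_bitmap = 0x40201000
 * (4K | 2M | 1G), iova = paddr = 0x200000, size = 0x400000. The masks
 * above leave 4K and 2M as candidates, so pgsize = 2M; the look-ahead to
 * 1G fails the size check, and *count = 0x400000 >> 21 = 2, letting the
 * caller map 4M with a single map_pages() call.
 */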
2289
2290 static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
2291 phys_addr_t paddr, size_t size, int prot,
2292 gfp_t gfp, size_t *mapped)
2293 {
2294 const struct iommu_domain_ops *ops = domain->ops;
2295 size_t pgsize, count;
2296 int ret;
2297
2298 pgsize = iommu_pgsize(domain, iova, paddr, size, &count);
2299
2300 pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n",
2301 iova, &paddr, pgsize, count);
2302
2303 if (ops->map_pages) {
2304 ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot,
2305 gfp, mapped);
2306 } else {
2307 ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
2308 *mapped = ret ? 0 : pgsize;
2309 }
2310
2311 return ret;
2312 }
2313
2314 static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
2315 phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
2316 {
2317 const struct iommu_domain_ops *ops = domain->ops;
2318 unsigned long orig_iova = iova;
2319 unsigned int min_pagesz;
2320 size_t orig_size = size;
2321 phys_addr_t orig_paddr = paddr;
2322 int ret = 0;
2323
2324 if (unlikely(!(ops->map || ops->map_pages) ||
2325 domain->pgsize_bitmap == 0UL))
2326 return -ENODEV;
2327
2328 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
2329 return -EINVAL;
2330
2331 /* find out the minimum page size supported */
2332 min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
2333
2334 /*
2335 * both the virtual address and the physical one, as well as
2336 * the size of the mapping, must be aligned (at least) to the
2337 * size of the smallest page supported by the hardware
2338 */
2339 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
2340 pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
2341 iova, &paddr, size, min_pagesz);
2342 return -EINVAL;
2343 }
2344
2345 pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
2346
2347 while (size) {
2348 size_t mapped = 0;
2349
2350 ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp,
2351 &mapped);
2352 /*
2353 * Some pages may have been mapped, even if an error occurred,
2354 * so we should account for those so they can be unmapped.
2355 */
2356 size -= mapped;
2357
2358 if (ret)
2359 break;
2360
2361 iova += mapped;
2362 paddr += mapped;
2363 }
2364
2365 /* unroll mapping in case something went wrong */
2366 if (ret)
2367 iommu_unmap(domain, orig_iova, orig_size - size);
2368 else
2369 trace_map(orig_iova, orig_paddr, orig_size);
2370
2371 return ret;
2372 }
2373
2374 static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
2375 phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
2376 {
2377 const struct iommu_domain_ops *ops = domain->ops;
2378 int ret;
2379
2380 ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
2381 if (ret == 0 && ops->iotlb_sync_map)
2382 ops->iotlb_sync_map(domain, iova, size);
2383
2384 return ret;
2385 }
2386
2387 int iommu_map(struct iommu_domain *domain, unsigned long iova,
2388 phys_addr_t paddr, size_t size, int prot)
2389 {
2390 might_sleep();
2391 return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
2392 }
2393 EXPORT_SYMBOL_GPL(iommu_map);
2394
2395 int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
2396 phys_addr_t paddr, size_t size, int prot)
2397 {
2398 return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
2399 }
2400 EXPORT_SYMBOL_GPL(iommu_map_atomic);
2401
2402 static size_t __iommu_unmap_pages(struct iommu_domain *domain,
2403 unsigned long iova, size_t size,
2404 struct iommu_iotlb_gather *iotlb_gather)
2405 {
2406 const struct iommu_domain_ops *ops = domain->ops;
2407 size_t pgsize, count;
2408
2409 pgsize = iommu_pgsize(domain, iova, iova, size, &count);
2410 return ops->unmap_pages ?
2411 ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) :
2412 ops->unmap(domain, iova, pgsize, iotlb_gather);
2413 }
2414
2415 static size_t __iommu_unmap(struct iommu_domain *domain,
2416 unsigned long iova, size_t size,
2417 struct iommu_iotlb_gather *iotlb_gather)
2418 {
2419 const struct iommu_domain_ops *ops = domain->ops;
2420 size_t unmapped_page, unmapped = 0;
2421 unsigned long orig_iova = iova;
2422 unsigned int min_pagesz;
2423
2424 if (unlikely(!(ops->unmap || ops->unmap_pages) ||
2425 domain->pgsize_bitmap == 0UL))
2426 return 0;
2427
2428 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
2429 return 0;
2430
2431 /* find out the minimum page size supported */
2432 min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
2433
2434 /*
2435 * The virtual address, as well as the size of the mapping, must be
2436 * aligned (at least) to the size of the smallest page supported
2437 * by the hardware
2438 */
2439 if (!IS_ALIGNED(iova | size, min_pagesz)) {
2440 pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
2441 iova, size, min_pagesz);
2442 return 0;
2443 }
2444
2445 pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
2446
2447 /*
2448 * Keep iterating until we either unmap 'size' bytes (or more)
2449 * or we hit an area that isn't mapped.
2450 */
2451 while (unmapped < size) {
2452 unmapped_page = __iommu_unmap_pages(domain, iova,
2453 size - unmapped,
2454 iotlb_gather);
2455 if (!unmapped_page)
2456 break;
2457
2458 pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
2459 iova, unmapped_page);
2460
2461 iova += unmapped_page;
2462 unmapped += unmapped_page;
2463 }
2464
2465 trace_unmap(orig_iova, size, unmapped);
2466 return unmapped;
2467 }
2468
2469 size_t iommu_unmap(struct iommu_domain *domain,
2470 unsigned long iova, size_t size)
2471 {
2472 struct iommu_iotlb_gather iotlb_gather;
2473 size_t ret;
2474
2475 iommu_iotlb_gather_init(&iotlb_gather);
2476 ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
2477 iommu_iotlb_sync(domain, &iotlb_gather);
2478
2479 return ret;
2480 }
2481 EXPORT_SYMBOL_GPL(iommu_unmap);
2482
2483 size_t iommu_unmap_fast(struct iommu_domain *domain,
2484 unsigned long iova, size_t size,
2485 struct iommu_iotlb_gather *iotlb_gather)
2486 {
2487 return __iommu_unmap(domain, iova, size, iotlb_gather);
2488 }
2489 EXPORT_SYMBOL_GPL(iommu_unmap_fast);
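
/*
 * Sketch of the deferred-flush pattern iommu_unmap_fast() enables:
 * batch several unmaps under a single IOTLB sync. Illustrative only;
 * the two ranges and their common size are arbitrary.
 */
static void example_batched_unmap(struct iommu_domain *domain,
				  unsigned long iova_a, unsigned long iova_b,
				  size_t size)
{
	struct iommu_iotlb_gather gather;

	iommu_iotlb_gather_init(&gather);
	iommu_unmap_fast(domain, iova_a, size, &gather);
	iommu_unmap_fast(domain, iova_b, size, &gather);
	iommu_iotlb_sync(domain, &gather);	/* one flush for both ranges */
}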
2490
2491 static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
2492 struct scatterlist *sg, unsigned int nents, int prot,
2493 gfp_t gfp)
2494 {
2495 const struct iommu_domain_ops *ops = domain->ops;
2496 size_t len = 0, mapped = 0;
2497 phys_addr_t start;
2498 unsigned int i = 0;
2499 int ret;
2500
2501 while (i <= nents) {
2502 phys_addr_t s_phys = sg_phys(sg);
2503
2504 if (len && s_phys != start + len) {
2505 ret = __iommu_map(domain, iova + mapped, start,
2506 len, prot, gfp);
2507
2508 if (ret)
2509 goto out_err;
2510
2511 mapped += len;
2512 len = 0;
2513 }
2514
2515 if (sg_is_dma_bus_address(sg))
2516 goto next;
2517
2518 if (len) {
2519 len += sg->length;
2520 } else {
2521 len = sg->length;
2522 start = s_phys;
2523 }
2524
2525 next:
2526 if (++i < nents)
2527 sg = sg_next(sg);
2528 }
2529
2530 if (ops->iotlb_sync_map)
2531 ops->iotlb_sync_map(domain, iova, mapped);
2532 return mapped;
2533
2534 out_err:
2535 /* undo mappings already done */
2536 iommu_unmap(domain, iova, mapped);
2537
2538 return ret;
2539 }
2540
2541 ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
2542 struct scatterlist *sg, unsigned int nents, int prot)
2543 {
2544 might_sleep();
2545 return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
2546 }
2547 EXPORT_SYMBOL_GPL(iommu_map_sg);
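
/*
 * Sketch: mapping an sg_table through iommu_map_sg(). The table is
 * assumed to have been built by the caller (e.g. via sg_alloc_table());
 * on success the return value is the number of bytes mapped.
 */
static ssize_t example_map_sgtable(struct iommu_domain *domain,
				   unsigned long iova, struct sg_table *sgt)
{
	ssize_t mapped;

	/* Returns bytes mapped on success or a negative errno. */
	mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents,
			      IOMMU_READ | IOMMU_WRITE);
	return mapped;
}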
2548
2549 ssize_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
2550 struct scatterlist *sg, unsigned int nents, int prot)
2551 {
2552 return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
2553 }
2554
2555 /**
2556 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
2557 * @domain: the iommu domain where the fault has happened
2558 * @dev: the device where the fault has happened
2559 * @iova: the faulting address
2560 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
2561 *
2562 * This function should be called by the low-level IOMMU implementations
2563 * whenever IOMMU faults happen, to allow high-level users, that are
2564 * interested in such events, to know about them.
2565 *
2566 * This event may be useful for several possible use cases:
2567 * - mere logging of the event
2568 * - dynamic TLB/PTE loading
2569 * - if restarting of the faulting device is required
2570 *
2571 * Returns 0 on success and an appropriate error code otherwise (if dynamic
2572 * PTE/TLB loading will one day be supported, implementations will be able
2573 * to tell whether it succeeded or not according to this return value).
2574 *
2575 * Specifically, -ENOSYS is returned if a fault handler isn't installed
2576 * (though fault handlers can also return -ENOSYS, in case they want to
2577 * elicit the default behavior of the IOMMU drivers).
2578 */
2579 int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
2580 unsigned long iova, int flags)
2581 {
2582 int ret = -ENOSYS;
2583
2584 /*
2585 * if upper layers showed interest and installed a fault handler,
2586 * invoke it.
2587 */
2588 if (domain->handler)
2589 ret = domain->handler(domain, dev, iova, flags,
2590 domain->handler_token);
2591
2592 trace_io_page_fault(dev, iova, flags);
2593 return ret;
2594 }
2595 EXPORT_SYMBOL_GPL(report_iommu_fault);
2596
2597 static int __init iommu_init(void)
2598 {
2599 iommu_group_kset = kset_create_and_add("iommu_groups",
2600 NULL, kernel_kobj);
2601 BUG_ON(!iommu_group_kset);
2602
2603 iommu_debugfs_setup();
2604
2605 return 0;
2606 }
2607 core_initcall(iommu_init);
2608
2609 int iommu_enable_nesting(struct iommu_domain *domain)
2610 {
2611 if (domain->type != IOMMU_DOMAIN_UNMANAGED)
2612 return -EINVAL;
2613 if (!domain->ops->enable_nesting)
2614 return -EINVAL;
2615 return domain->ops->enable_nesting(domain);
2616 }
2617 EXPORT_SYMBOL_GPL(iommu_enable_nesting);
2618
2619 int iommu_set_pgtable_quirks(struct iommu_domain *domain,
2620 unsigned long quirk)
2621 {
2622 if (domain->type != IOMMU_DOMAIN_UNMANAGED)
2623 return -EINVAL;
2624 if (!domain->ops->set_pgtable_quirks)
2625 return -EINVAL;
2626 return domain->ops->set_pgtable_quirks(domain, quirk);
2627 }
2628 EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks);
2629
2630 void iommu_get_resv_regions(struct device *dev, struct list_head *list)
2631 {
2632 const struct iommu_ops *ops = dev_iommu_ops(dev);
2633
2634 if (ops->get_resv_regions)
2635 ops->get_resv_regions(dev, list);
2636 }
2637
2638 /**
2639 * iommu_put_resv_regions - release reserved regions
2640 * @dev: device for which to free reserved regions
2641 * @list: reserved region list for device
2642 *
2643 * This releases a reserved region list acquired by iommu_get_resv_regions().
2644 */
2645 void iommu_put_resv_regions(struct device *dev, struct list_head *list)
2646 {
2647 struct iommu_resv_region *entry, *next;
2648
2649 list_for_each_entry_safe(entry, next, list, list) {
2650 if (entry->free)
2651 entry->free(dev, entry);
2652 else
2653 kfree(entry);
2654 }
2655 }
2656 EXPORT_SYMBOL(iommu_put_resv_regions);
2657
2658 struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
2659 size_t length, int prot,
2660 enum iommu_resv_type type,
2661 gfp_t gfp)
2662 {
2663 struct iommu_resv_region *region;
2664
2665 region = kzalloc(sizeof(*region), gfp);
2666 if (!region)
2667 return NULL;
2668
2669 	INIT_LIST_HEAD(&region->list);
2670 region->start = start;
2671 region->length = length;
2672 region->prot = prot;
2673 region->type = type;
2674 return region;
2675 }
2676 EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
2677
2678 void iommu_set_default_passthrough(bool cmd_line)
2679 {
2680 if (cmd_line)
2681 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
2682 iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
2683 }
2684
2685 void iommu_set_default_translated(bool cmd_line)
2686 {
2687 if (cmd_line)
2688 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
2689 iommu_def_domain_type = IOMMU_DOMAIN_DMA;
2690 }
2691
2692 bool iommu_default_passthrough(void)
2693 {
2694 return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
2695 }
2696 EXPORT_SYMBOL_GPL(iommu_default_passthrough);
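
/*
 * For reference, these defaults are normally driven from the kernel
 * command line: "iommu.passthrough=1" selects IOMMU_DOMAIN_IDENTITY via
 * iommu_set_default_passthrough(true), while "iommu.passthrough=0"
 * selects a translated default via iommu_set_default_translated(true).
 */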
2697
2698 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
2699 {
2700 const struct iommu_ops *ops = NULL;
2701 struct iommu_device *iommu;
2702
2703 spin_lock(&iommu_device_lock);
2704 list_for_each_entry(iommu, &iommu_device_list, list)
2705 if (iommu->fwnode == fwnode) {
2706 ops = iommu->ops;
2707 break;
2708 }
2709 spin_unlock(&iommu_device_lock);
2710 return ops;
2711 }
2712
2713 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
2714 const struct iommu_ops *ops)
2715 {
2716 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2717
2718 if (fwspec)
2719 return ops == fwspec->ops ? 0 : -EINVAL;
2720
2721 if (!dev_iommu_get(dev))
2722 return -ENOMEM;
2723
2724 /* Preallocate for the overwhelmingly common case of 1 ID */
2725 fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
2726 if (!fwspec)
2727 return -ENOMEM;
2728
2729 of_node_get(to_of_node(iommu_fwnode));
2730 fwspec->iommu_fwnode = iommu_fwnode;
2731 fwspec->ops = ops;
2732 dev_iommu_fwspec_set(dev, fwspec);
2733 return 0;
2734 }
2735 EXPORT_SYMBOL_GPL(iommu_fwspec_init);
2736
2737 void iommu_fwspec_free(struct device *dev)
2738 {
2739 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2740
2741 if (fwspec) {
2742 fwnode_handle_put(fwspec->iommu_fwnode);
2743 kfree(fwspec);
2744 dev_iommu_fwspec_set(dev, NULL);
2745 }
2746 }
2747 EXPORT_SYMBOL_GPL(iommu_fwspec_free);
2748
2749 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
2750 {
2751 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2752 int i, new_num;
2753
2754 if (!fwspec)
2755 return -EINVAL;
2756
2757 new_num = fwspec->num_ids + num_ids;
2758 if (new_num > 1) {
2759 fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
2760 GFP_KERNEL);
2761 if (!fwspec)
2762 return -ENOMEM;
2763
2764 dev_iommu_fwspec_set(dev, fwspec);
2765 }
2766
2767 for (i = 0; i < num_ids; i++)
2768 fwspec->ids[fwspec->num_ids + i] = ids[i];
2769
2770 fwspec->num_ids = new_num;
2771 return 0;
2772 }
2773 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
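
/*
 * Sketch of how firmware glue typically drives the fwspec API: initialise
 * the per-device fwspec against the IOMMU's fwnode, then record the
 * device's stream/requester ID. The function and variable names are
 * illustrative; the two calls are the real API above.
 */
static int example_fwspec_setup(struct device *dev,
				struct fwnode_handle *iommu_fwnode,
				const struct iommu_ops *ops, u32 sid)
{
	int ret;

	ret = iommu_fwspec_init(dev, iommu_fwnode, ops);
	if (ret)
		return ret;

	return iommu_fwspec_add_ids(dev, &sid, 1);
}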
2774
2775 /*
2776 * Per device IOMMU features.
2777 */
2778 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
2779 {
2780 if (dev->iommu && dev->iommu->iommu_dev) {
2781 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
2782
2783 if (ops->dev_enable_feat)
2784 return ops->dev_enable_feat(dev, feat);
2785 }
2786
2787 return -ENODEV;
2788 }
2789 EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
2790
2791 /*
2792 * The device drivers should do the necessary cleanups before calling this.
2793 */
2794 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
2795 {
2796 if (dev->iommu && dev->iommu->iommu_dev) {
2797 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
2798
2799 if (ops->dev_disable_feat)
2800 return ops->dev_disable_feat(dev, feat);
2801 }
2802
2803 return -EBUSY;
2804 }
2805 EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
2806
2807 /*
2808 * Changes the default domain of an iommu group that has *only* one device
2809 *
2810 * @group: The group for which the default domain should be changed
2811 * @prev_dev: The device in the group (this is used to make sure that the device
2812 * hasn't changed after the caller has called this function)
2813 * @type: The type of the new default domain that gets associated with the group
2814 *
2815 * Returns 0 on success and error code on failure
2816 *
2817 * Note:
2818 * 1. Presently, this function is called only when user requests to change the
2819 * group's default domain type through /sys/kernel/iommu_groups/<grp_id>/type
2820 * Please take a closer look if intended to use for other purposes.
2821 */
2822 static int iommu_change_dev_def_domain(struct iommu_group *group,
2823 struct device *prev_dev, int type)
2824 {
2825 struct iommu_domain *prev_dom;
2826 struct group_device *grp_dev;
2827 int ret, dev_def_dom;
2828 struct device *dev;
2829
2830 mutex_lock(&group->mutex);
2831
2832 if (group->default_domain != group->domain) {
2833 dev_err_ratelimited(prev_dev, "Group not assigned to default domain\n");
2834 ret = -EBUSY;
2835 goto out;
2836 }
2837
2838 /*
2839 * iommu group wasn't locked while acquiring device lock in
2840 * iommu_group_store_type(). So, make sure that the device count hasn't
2841 * changed while acquiring device lock.
2842 *
2843 * Changing default domain of an iommu group with two or more devices
2844 * isn't supported because there could be a potential deadlock. Consider
2845 * the following scenario. T1 is trying to acquire device locks of all
2846 * the devices in the group and before it could acquire all of them,
2847 * there could be another thread T2 (from different sub-system and use
2848 * case) that has already acquired some of the device locks and might be
2849 * waiting for T1 to release other device locks.
2850 */
2851 if (iommu_group_device_count(group) != 1) {
2852 dev_err_ratelimited(prev_dev, "Cannot change default domain: Group has more than one device\n");
2853 ret = -EINVAL;
2854 goto out;
2855 }
2856
2857 /* Since group has only one device */
2858 grp_dev = list_first_entry(&group->devices, struct group_device, list);
2859 dev = grp_dev->dev;
2860
2861 if (prev_dev != dev) {
2862 dev_err_ratelimited(prev_dev, "Cannot change default domain: Device has been changed\n");
2863 ret = -EBUSY;
2864 goto out;
2865 }
2866
2867 prev_dom = group->default_domain;
2868 if (!prev_dom) {
2869 ret = -EINVAL;
2870 goto out;
2871 }
2872
2873 dev_def_dom = iommu_get_def_domain_type(dev);
2874 if (!type) {
2875 /*
2876 * If the user hasn't requested any specific type of domain and
2877 * if the device supports both the domains, then default to the
2878 * domain the device was booted with
2879 */
2880 type = dev_def_dom ? : iommu_def_domain_type;
2881 } else if (dev_def_dom && type != dev_def_dom) {
2882 dev_err_ratelimited(prev_dev, "Device cannot be in %s domain\n",
2883 iommu_domain_type_str(type));
2884 ret = -EINVAL;
2885 goto out;
2886 }
2887
2888 /*
2889 * Switch to a new domain only if the requested domain type is different
2890 * from the existing default domain type
2891 */
2892 if (prev_dom->type == type) {
2893 ret = 0;
2894 goto out;
2895 }
2896
2897 /* We can bring up a flush queue without tearing down the domain */
2898 if (type == IOMMU_DOMAIN_DMA_FQ && prev_dom->type == IOMMU_DOMAIN_DMA) {
2899 ret = iommu_dma_init_fq(prev_dom);
2900 if (!ret)
2901 prev_dom->type = IOMMU_DOMAIN_DMA_FQ;
2902 goto out;
2903 }
2904
2905 /* Sets group->default_domain to the newly allocated domain */
2906 ret = iommu_group_alloc_default_domain(dev->bus, group, type);
2907 if (ret)
2908 goto out;
2909
2910 ret = iommu_create_device_direct_mappings(group, dev);
2911 if (ret)
2912 goto free_new_domain;
2913
2914 ret = __iommu_attach_device(group->default_domain, dev);
2915 if (ret)
2916 goto free_new_domain;
2917
2918 group->domain = group->default_domain;
2919
2920 /*
2921 * Release the mutex here because ops->probe_finalize() call-back of
2922 * some vendor IOMMU drivers calls arm_iommu_attach_device() which
2923 * in-turn might call back into IOMMU core code, where it tries to take
2924 * group->mutex, resulting in a deadlock.
2925 */
2926 mutex_unlock(&group->mutex);
2927
2928 	/* Make sure dma_ops is appropriately set */
2929 iommu_group_do_probe_finalize(dev, group->default_domain);
2930 iommu_domain_free(prev_dom);
2931 return 0;
2932
2933 free_new_domain:
2934 iommu_domain_free(group->default_domain);
2935 group->default_domain = prev_dom;
2936 group->domain = prev_dom;
2937
2938 out:
2939 mutex_unlock(&group->mutex);
2940
2941 return ret;
2942 }
2943
2944 /*
2945 * Changing the default domain through sysfs requires the users to unbind the
2946 * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ
2947 * transition. Return failure if this isn't met.
2948 *
2949 * We need to consider the race between this and the device release path.
2950 * device_lock(dev) is used here to guarantee that the device release path
2951 * will not be entered at the same time.
2952 */
2953 static ssize_t iommu_group_store_type(struct iommu_group *group,
2954 const char *buf, size_t count)
2955 {
2956 struct group_device *grp_dev;
2957 struct device *dev;
2958 int ret, req_type;
2959
2960 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2961 return -EACCES;
2962
2963 if (WARN_ON(!group) || !group->default_domain)
2964 return -EINVAL;
2965
2966 if (sysfs_streq(buf, "identity"))
2967 req_type = IOMMU_DOMAIN_IDENTITY;
2968 else if (sysfs_streq(buf, "DMA"))
2969 req_type = IOMMU_DOMAIN_DMA;
2970 else if (sysfs_streq(buf, "DMA-FQ"))
2971 req_type = IOMMU_DOMAIN_DMA_FQ;
2972 else if (sysfs_streq(buf, "auto"))
2973 req_type = 0;
2974 else
2975 return -EINVAL;
2976
2977 /*
2978 * Lock/Unlock the group mutex here before device lock to
2979 * 1. Make sure that the iommu group has only one device (this is a
2980 * prerequisite for step 2)
2981 * 2. Get struct *dev which is needed to lock device
2982 */
2983 mutex_lock(&group->mutex);
2984 if (iommu_group_device_count(group) != 1) {
2985 mutex_unlock(&group->mutex);
2986 pr_err_ratelimited("Cannot change default domain: Group has more than one device\n");
2987 return -EINVAL;
2988 }
2989
2990 /* Since group has only one device */
2991 grp_dev = list_first_entry(&group->devices, struct group_device, list);
2992 dev = grp_dev->dev;
2993 get_device(dev);
2994
2995 /*
2996 * Don't hold the group mutex because taking group mutex first and then
2997 * the device lock could potentially cause a deadlock as below. Assume
2998 * two threads T1 and T2. T1 is trying to change default domain of an
2999 * iommu group and T2 is trying to hot unplug a device or release [1] VF
3000 * of a PCIe device which is in the same iommu group. T1 takes group
3001 * mutex and before it could take device lock assume T2 has taken device
3002 * lock and is yet to take group mutex. Now, both the threads will be
3003 * waiting for the other thread to release lock. Below, lock order was
3004 * suggested.
3005 * device_lock(dev);
3006 * mutex_lock(&group->mutex);
3007 * iommu_change_dev_def_domain();
3008 * mutex_unlock(&group->mutex);
3009 * device_unlock(dev);
3010 *
3011 * [1] Typical device release path
3012 * device_lock() from device/driver core code
3013 * -> bus_notifier()
3014 * -> iommu_bus_notifier()
3015 * -> iommu_release_device()
3016 * -> ops->release_device() vendor driver calls back iommu core code
3017 * -> mutex_lock() from iommu core code
3018 */
3019 mutex_unlock(&group->mutex);
3020
3021 /* Check if the device in the group still has a driver bound to it */
3022 device_lock(dev);
3023 if (device_is_bound(dev) && !(req_type == IOMMU_DOMAIN_DMA_FQ &&
3024 group->default_domain->type == IOMMU_DOMAIN_DMA)) {
3025 pr_err_ratelimited("Device is still bound to driver\n");
3026 ret = -EBUSY;
3027 goto out;
3028 }
3029
3030 ret = iommu_change_dev_def_domain(group, dev, req_type);
3031 ret = ret ?: count;
3032
3033 out:
3034 device_unlock(dev);
3035 put_device(dev);
3036
3037 return ret;
3038 }
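
/*
 * Usage reference for the sysfs knob handled above (see
 * Documentation/ABI/testing/sysfs-kernel-iommu_groups), e.g.:
 *
 *   # echo DMA-FQ > /sys/kernel/iommu_groups/<grp_id>/type
 *
 * Accepted values are "identity", "DMA", "DMA-FQ" and "auto"; the write
 * fails while a driver is bound, except for the DMA -> DMA-FQ transition.
 */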
3039
3040 static bool iommu_is_default_domain(struct iommu_group *group)
3041 {
3042 if (group->domain == group->default_domain)
3043 return true;
3044
3045 /*
3046 * If the default domain was set to identity and it is still an identity
3047 * domain then we consider this a pass. This happens because of
3048 * amd_iommu_init_device() replacing the default identity domain with an
3049 * identity domain that has a different configuration for AMDGPU.
3050 */
3051 if (group->default_domain &&
3052 group->default_domain->type == IOMMU_DOMAIN_IDENTITY &&
3053 group->domain && group->domain->type == IOMMU_DOMAIN_IDENTITY)
3054 return true;
3055 return false;
3056 }
3057
3058 /**
3059 * iommu_device_use_default_domain() - Device driver wants to handle device
3060 * DMA through the kernel DMA API.
3061 * @dev: The device.
3062 *
3063 * The device driver about to bind @dev wants to do DMA through the kernel
3064 * DMA API. Return 0 if it is allowed, otherwise an error.
3065 */
3066 int iommu_device_use_default_domain(struct device *dev)
3067 {
3068 struct iommu_group *group = iommu_group_get(dev);
3069 int ret = 0;
3070
3071 if (!group)
3072 return 0;
3073
3074 mutex_lock(&group->mutex);
3075 if (group->owner_cnt) {
3076 if (group->owner || !iommu_is_default_domain(group) ||
3077 !xa_empty(&group->pasid_array)) {
3078 ret = -EBUSY;
3079 goto unlock_out;
3080 }
3081 }
3082
3083 group->owner_cnt++;
3084
3085 unlock_out:
3086 mutex_unlock(&group->mutex);
3087 iommu_group_put(group);
3088
3089 return ret;
3090 }
3091
3092 /**
3093 * iommu_device_unuse_default_domain() - Device driver stops handling device
3094 * DMA through the kernel DMA API.
3095 * @dev: The device.
3096 *
3097 * The device driver doesn't want to do DMA through kernel DMA API anymore.
3098 * It must be called after iommu_device_use_default_domain().
3099 */
3100 void iommu_device_unuse_default_domain(struct device *dev)
3101 {
3102 struct iommu_group *group = iommu_group_get(dev);
3103
3104 if (!group)
3105 return;
3106
3107 mutex_lock(&group->mutex);
3108 if (!WARN_ON(!group->owner_cnt || !xa_empty(&group->pasid_array)))
3109 group->owner_cnt--;
3110
3111 mutex_unlock(&group->mutex);
3112 iommu_group_put(group);
3113 }
3114
3115 static int __iommu_group_alloc_blocking_domain(struct iommu_group *group)
3116 {
3117 struct group_device *dev =
3118 list_first_entry(&group->devices, struct group_device, list);
3119
3120 if (group->blocking_domain)
3121 return 0;
3122
3123 group->blocking_domain =
3124 __iommu_domain_alloc(dev->dev->bus, IOMMU_DOMAIN_BLOCKED);
3125 if (!group->blocking_domain) {
3126 /*
3127 * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED
3128 * create an empty domain instead.
3129 */
3130 group->blocking_domain = __iommu_domain_alloc(
3131 dev->dev->bus, IOMMU_DOMAIN_UNMANAGED);
3132 if (!group->blocking_domain)
3133 return -EINVAL;
3134 }
3135 return 0;
3136 }
3137
3138 /**
3139 * iommu_group_claim_dma_owner() - Set DMA ownership of a group
3140 * @group: The group.
3141 * @owner: Caller specified pointer. Used for exclusive ownership.
3142 *
3143 * This is to support backward compatibility for vfio which manages
3144 * the DMA ownership at the iommu_group level. New invocations of this
3145 * interface should be prohibited.
3146 */
3147 int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
3148 {
3149 int ret = 0;
3150
3151 mutex_lock(&group->mutex);
3152 if (group->owner_cnt) {
3153 ret = -EPERM;
3154 goto unlock_out;
3155 } else {
3156 if ((group->domain && group->domain != group->default_domain) ||
3157 !xa_empty(&group->pasid_array)) {
3158 ret = -EBUSY;
3159 goto unlock_out;
3160 }
3161
3162 ret = __iommu_group_alloc_blocking_domain(group);
3163 if (ret)
3164 goto unlock_out;
3165
3166 ret = __iommu_group_set_domain(group, group->blocking_domain);
3167 if (ret)
3168 goto unlock_out;
3169 group->owner = owner;
3170 }
3171
3172 group->owner_cnt++;
3173 unlock_out:
3174 mutex_unlock(&group->mutex);
3175
3176 return ret;
3177 }
3178 EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner);
3179
3180 /**
3181 * iommu_group_release_dma_owner() - Release DMA ownership of a group
3182 * @group: The group.
3183 *
3184 * Release the DMA ownership claimed by iommu_group_claim_dma_owner().
3185 */
3186 void iommu_group_release_dma_owner(struct iommu_group *group)
3187 {
3188 int ret;
3189
3190 mutex_lock(&group->mutex);
3191 if (WARN_ON(!group->owner_cnt || !group->owner ||
3192 !xa_empty(&group->pasid_array)))
3193 goto unlock_out;
3194
3195 group->owner_cnt = 0;
3196 group->owner = NULL;
3197 ret = __iommu_group_set_domain(group, group->default_domain);
3198 WARN(ret, "iommu driver failed to attach the default domain");
3199
3200 unlock_out:
3201 mutex_unlock(&group->mutex);
3202 }
3203 EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner);
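
/*
 * Sketch of the claim/release pairing these helpers provide (modelled on
 * VFIO-style users): claim the group with a stable cookie, run user DMA
 * behind an unmanaged domain, then release. Illustrative only.
 */
static int example_exclusive_group_use(struct iommu_group *group,
				       void *cookie)
{
	int ret;

	ret = iommu_group_claim_dma_owner(group, cookie);
	if (ret)
		return ret;

	/* ... attach an unmanaged domain and run user-controlled DMA ... */

	iommu_group_release_dma_owner(group);
	return 0;
}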
3204
3205 /**
3206 * iommu_group_dma_owner_claimed() - Query group dma ownership status
3207 * @group: The group.
3208 *
3209 * This provides status query on a given group. It is racy and only for
3210 * non-binding status reporting.
3211 */
3212 bool iommu_group_dma_owner_claimed(struct iommu_group *group)
3213 {
3214 unsigned int user;
3215
3216 mutex_lock(&group->mutex);
3217 user = group->owner_cnt;
3218 mutex_unlock(&group->mutex);
3219
3220 return user;
3221 }
3222 EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed);
3223
3224 static int __iommu_set_group_pasid(struct iommu_domain *domain,
3225 struct iommu_group *group, ioasid_t pasid)
3226 {
3227 struct group_device *device;
3228 int ret = 0;
3229
3230 list_for_each_entry(device, &group->devices, list) {
3231 ret = domain->ops->set_dev_pasid(domain, device->dev, pasid);
3232 if (ret)
3233 break;
3234 }
3235
3236 return ret;
3237 }
3238
3239 static void __iommu_remove_group_pasid(struct iommu_group *group,
3240 ioasid_t pasid)
3241 {
3242 struct group_device *device;
3243 const struct iommu_ops *ops;
3244
3245 list_for_each_entry(device, &group->devices, list) {
3246 ops = dev_iommu_ops(device->dev);
3247 ops->remove_dev_pasid(device->dev, pasid);
3248 }
3249 }
3250
3251 /*
3252 * iommu_attach_device_pasid() - Attach a domain to pasid of device
3253 * @domain: the iommu domain.
3254 * @dev: the attached device.
3255 * @pasid: the pasid of the device.
3256 *
3257 * Return: 0 on success, or an error.
3258 */
3259 int iommu_attach_device_pasid(struct iommu_domain *domain,
3260 struct device *dev, ioasid_t pasid)
3261 {
3262 struct iommu_group *group;
3263 void *curr;
3264 int ret;
3265
3266 if (!domain->ops->set_dev_pasid)
3267 return -EOPNOTSUPP;
3268
3269 group = iommu_group_get(dev);
3270 if (!group)
3271 return -ENODEV;
3272
3273 mutex_lock(&group->mutex);
3274 curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL);
3275 if (curr) {
3276 ret = xa_err(curr) ? : -EBUSY;
3277 goto out_unlock;
3278 }
3279
3280 ret = __iommu_set_group_pasid(domain, group, pasid);
3281 if (ret) {
3282 __iommu_remove_group_pasid(group, pasid);
3283 xa_erase(&group->pasid_array, pasid);
3284 }
3285 out_unlock:
3286 mutex_unlock(&group->mutex);
3287 iommu_group_put(group);
3288
3289 return ret;
3290 }
3291 EXPORT_SYMBOL_GPL(iommu_attach_device_pasid);
3292
3293 /*
3294 * iommu_detach_device_pasid() - Detach the domain from pasid of device
3295 * @domain: the iommu domain.
3296 * @dev: the attached device.
3297 * @pasid: the pasid of the device.
3298 *
3299 * The @domain must have been attached to @pasid of the @dev with
3300 * iommu_attach_device_pasid().
3301 */
3302 void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev,
3303 ioasid_t pasid)
3304 {
3305 struct iommu_group *group = iommu_group_get(dev);
3306
3307 mutex_lock(&group->mutex);
3308 __iommu_remove_group_pasid(group, pasid);
3309 WARN_ON(xa_erase(&group->pasid_array, pasid) != domain);
3310 mutex_unlock(&group->mutex);
3311
3312 iommu_group_put(group);
3313 }
3314 EXPORT_SYMBOL_GPL(iommu_detach_device_pasid);
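
/*
 * Sketch of the PASID attach/detach pairing above. The domain is assumed
 * to come from a driver implementing set_dev_pasid(); the PASID value is
 * caller-allocated and illustrative.
 */
static int example_pasid_roundtrip(struct iommu_domain *domain,
				   struct device *dev, ioasid_t pasid)
{
	int ret;

	ret = iommu_attach_device_pasid(domain, dev, pasid);
	if (ret)
		return ret;

	/* ... DMA tagged with @pasid is now translated by @domain ... */

	iommu_detach_device_pasid(domain, dev, pasid);
	return 0;
}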
3315
3316 /*
3317 * iommu_get_domain_for_dev_pasid() - Retrieve domain for @pasid of @dev
3318 * @dev: the queried device
3319 * @pasid: the pasid of the device
3320 * @type: matched domain type, 0 for any match
3321 *
3322 * This is a variant of iommu_get_domain_for_dev(). It returns the existing
3323 * domain attached to pasid of a device. Callers must hold a lock around this
3324 * function, and both iommu_attach/detach_dev_pasid() whenever a domain of
3325 * type is being manipulated. This API does not internally resolve races with
3326 * attach/detach.
3327 *
3328 * Return: attached domain on success, NULL otherwise.
3329 */
3330 struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev,
3331 ioasid_t pasid,
3332 unsigned int type)
3333 {
3334 struct iommu_domain *domain;
3335 struct iommu_group *group;
3336
3337 group = iommu_group_get(dev);
3338 if (!group)
3339 return NULL;
3340
3341 xa_lock(&group->pasid_array);
3342 domain = xa_load(&group->pasid_array, pasid);
3343 if (type && domain && domain->type != type)
3344 domain = ERR_PTR(-EBUSY);
3345 xa_unlock(&group->pasid_array);
3346 iommu_group_put(group);
3347
3348 return domain;
3349 }
3350 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev_pasid);
3351
3352 struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
3353 struct mm_struct *mm)
3354 {
3355 const struct iommu_ops *ops = dev_iommu_ops(dev);
3356 struct iommu_domain *domain;
3357
3358 domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
3359 if (!domain)
3360 return NULL;
3361
3362 domain->type = IOMMU_DOMAIN_SVA;
3363 mmgrab(mm);
3364 domain->mm = mm;
3365 domain->iopf_handler = iommu_sva_handle_iopf;
3366 domain->fault_data = mm;
3367
3368 return domain;
3369 }
3370
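/*
 * Sketch of how an SVA domain could be wired up with the PASID API above;
 * this mirrors what the iommu-sva layer does internally, and real users
 * should go through iommu_sva_bind_device() instead. Names are
 * illustrative.
 */
static struct iommu_domain *example_sva_attach(struct device *dev,
					       struct mm_struct *mm,
					       ioasid_t pasid)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_sva_domain_alloc(dev, mm);
	if (!domain)
		return ERR_PTR(-ENOMEM);

	ret = iommu_attach_device_pasid(domain, dev, pasid);
	if (ret) {
		iommu_domain_free(domain);	/* drops the mm reference */
		return ERR_PTR(ret);
	}
	return domain;
}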