/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#define pr_fmt(fmt)    "iommu: " fmt

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/property.h>
#include <trace/events/iommu.h>

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);

struct iommu_callback_data {
	const struct iommu_ops *ops;
};

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct mutex mutex;
	struct blocking_notifier_head notifier;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *domain;
};

struct iommu_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =		\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group);
static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_simple_remove(&iommu_group_ida, group->id);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);

	kfree(group->name);
	kfree(group);
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};
/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		ida_simple_remove(&iommu_group_ida, group->id);
		kfree(group);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group.  We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
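
/*
 * Illustrative sketch (editor's addition, not part of the upstream file):
 * a minimal add_device path in an iommu driver might allocate a group,
 * name it, and add the device.  The "dev" pointer and the error handling
 * are assumptions for the example.
 *
 *	struct iommu_group *group = iommu_group_alloc();
 *	int ret;
 *
 *	if (IS_ERR(group))
 *		return PTR_ERR(group);
 *
 *	iommu_group_set_name(group, "example");
 *	ret = iommu_group_add_device(group, dev);
 *	iommu_group_put(group);	(drop the allocation reference)
 *
 *	return ret;
 */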

struct iommu_group *iommu_group_get_by_id(int id)
{
	struct kobject *group_kobj;
	struct iommu_group *group;
	const char *name;

	if (!iommu_group_kset)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%d", id);
	if (!name)
		return NULL;

	group_kobj = kset_find_obj(iommu_group_kset, name);
	kfree(name);

	if (!group_kobj)
		return NULL;

	group = container_of(group_kobj, struct iommu_group, kobj);
	BUG_ON(group->id != id);

	kobject_get(group->devices_kobj);
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
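
/*
 * Illustrative sketch (editor's addition): a driver can hang its own
 * per-group state off the group and have it freed automatically when the
 * group is released.  The "my_group_data" type and "my_release" helper
 * are assumptions for the example.
 *
 *	static void my_release(void *data)
 *	{
 *		kfree(data);
 *	}
 *
 *	struct my_group_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
 *
 *	if (!data)
 *		return -ENOMEM;
 *	iommu_group_set_iommudata(group, data, my_release);
 *	...
 *	data = iommu_group_get_iommudata(group);
 */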

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group.  When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_group_create_direct_mappings(struct iommu_group *group,
					      struct device *dev)
{
	struct iommu_domain *domain = group->default_domain;
	struct iommu_dm_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
		return 0;

	BUG_ON(!domain->pgsize_bitmap);

	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
	INIT_LIST_HEAD(&mappings);

	iommu_get_dm_regions(dev, &mappings);

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;

		if (domain->ops->apply_dm_region)
			domain->ops->apply_dm_region(dev, domain, entry);

		start = ALIGN(entry->start, pg_size);
		end   = ALIGN(entry->start + entry->length, pg_size);

		for (addr = start; addr < end; addr += pg_size) {
			phys_addr_t phys_addr;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (phys_addr)
				continue;

			ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
			if (ret)
				goto out;
		}
	}

out:
	iommu_put_dm_regions(dev, &mappings);

	return ret;
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct iommu_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret)
		goto err_free_device;

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		ret = -ENOMEM;
		goto err_remove_link;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	iommu_group_create_direct_mappings(group, dev);

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	if (group->domain)
		ret = __iommu_attach_device(group->domain, dev);
	mutex_unlock(&group->mutex);
	if (ret)
		goto err_put_group;

	/* Notify any listeners about change to group. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

	trace_add_device_to_group(group->id, dev);

	pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);

	return 0;

err_put_group:
	mutex_lock(&group->mutex);
	list_del(&device->list);
	mutex_unlock(&group->mutex);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct iommu_device *tmp_device, *device = NULL;

	pr_info("Removing device %s from group %d\n", dev_name(dev), group->id);

	/* Pre-notify listeners that a device is being removed. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int iommu_group_device_count(struct iommu_group *group)
{
	struct iommu_device *entry;
	int ret = 0;

	list_for_each_entry(entry, &group->devices, list)
		ret++;

	return ret;
}

static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
				      int (*fn)(struct device *, void *))
{
	struct iommu_device *device;
	int ret = 0;

	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_group_for_each_dev(group, data, fn);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
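
/*
 * Illustrative sketch (editor's addition): counting group members with a
 * caller-supplied callback.  The "count_dev" helper and the int counter
 * are assumptions for the example.
 *
 *	static int count_dev(struct device *dev, void *data)
 *	{
 *		int *count = data;
 *
 *		(*count)++;
 *		return 0;	(a non-zero return would stop the walk)
 *	}
 *
 *	int count = 0;
 *
 *	iommu_group_for_each_dev(group, &count, count_dev);
 */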

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the group
 * reference is incremented, otherwise NULL is returned.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
				  struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
				    struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
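
/*
 * Illustrative sketch (editor's addition): a group user tracking device
 * additions.  The handler name and its body are assumptions; the action
 * values come from include/linux/iommu.h.
 *
 *	static int my_group_notify(struct notifier_block *nb,
 *				   unsigned long action, void *data)
 *	{
 *		if (action == IOMMU_GROUP_NOTIFY_ADD_DEVICE)
 *			pr_info("device added to group\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_group_notify,
 *	};
 *
 *	iommu_group_register_notifier(group, &my_nb);
 *	...
 *	iommu_group_unregister_notifier(group, &my_nb);
 */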

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding.  This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as they pass through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups.  For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups. DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports).  It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop.  To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device.  Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_alloc();
	if (IS_ERR(group))
		return NULL;

	return group;
}

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device.  A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases.  If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any.  No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	group = iommu_group_alloc();
	if (IS_ERR(group))
		return NULL;

	return group;
}

/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device.  On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device.  The reference should be released with iommu_group_put().
 */
struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

	group = ERR_PTR(-EINVAL);

	if (ops && ops->device_group)
		group = ops->device_group(dev);

	if (IS_ERR(group))
		return group;

	/*
	 * Try to allocate a default domain - needs support from the
	 * IOMMU driver.
	 */
	if (!group->default_domain) {
		group->default_domain = __iommu_domain_alloc(dev->bus,
							     IOMMU_DOMAIN_DMA);
		if (!group->domain)
			group->domain = group->default_domain;
	}

	ret = iommu_group_add_device(group, dev);
	if (ret) {
		iommu_group_put(group);
		return ERR_PTR(ret);
	}

	return group;
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
	return group->default_domain;
}

static int add_iommu_group(struct device *dev, void *data)
{
	struct iommu_callback_data *cb = data;
	const struct iommu_ops *ops = cb->ops;
	int ret;

	if (!ops->add_device)
		return 0;

	WARN_ON(dev->iommu_group);

	ret = ops->add_device(dev);

	/*
	 * We ignore -ENODEV errors for now, as they just mean that the
	 * device is not translated by an IOMMU. We still care about
	 * other errors and fail to initialize when they happen.
	 */
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int remove_iommu_group(struct device *dev, void *data)
{
	struct iommu_callback_data *cb = data;
	const struct iommu_ops *ops = cb->ops;

	if (ops->remove_device && dev->iommu_group)
		ops->remove_device(dev);

	return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	unsigned long group_action = 0;

	/*
	 * ADD/DEL call into iommu driver ops if provided, which may
	 * result in ADD/DEL notifiers to group->notifier
	 */
	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (ops->add_device)
			return ops->add_device(dev);
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		if (ops->remove_device && dev->iommu_group) {
			ops->remove_device(dev);
			return 0;
		}
	}

	/*
	 * Remaining BUS_NOTIFYs get filtered and republished to the
	 * group, if anyone is listening
	 */
	group = iommu_group_get(dev);
	if (!group)
		return 0;

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
		break;
	case BUS_NOTIFY_BOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
		break;
	case BUS_NOTIFY_UNBIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
		break;
	case BUS_NOTIFY_UNBOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
		break;
	}

	if (group_action)
		blocking_notifier_call_chain(&group->notifier,
					     group_action, dev);

	iommu_group_put(group);
	return 0;
}

static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;
	struct notifier_block *nb;
	struct iommu_callback_data cb = {
		.ops = ops,
	};

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	nb->notifier_call = iommu_bus_notifier;

	err = bus_register_notifier(bus, nb);
	if (err)
		goto out_free;

	err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
	if (err)
		goto out_err;

	return 0;

out_err:
	/* Clean up */
	bus_for_each_dev(bus, NULL, &cb, remove_iommu_group);
	bus_unregister_notifier(bus, nb);

out_free:
	kfree(nb);

	return err;
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;

	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;

	/* Do IOMMU specific setup for this bus-type */
	err = iommu_bus_init(bus, ops);
	if (err)
		bus->iommu_ops = NULL;

	return err;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
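
/*
 * Illustrative sketch (editor's addition): an iommu driver registers its
 * ops for a bus once at init time, e.g. for &pci_bus_type or
 * &platform_bus_type.  "my_iommu_ops" and its callbacks are assumptions.
 *
 *	static const struct iommu_ops my_iommu_ops = {
 *		.domain_alloc	= my_domain_alloc,
 *		.domain_free	= my_domain_free,
 *		.attach_dev	= my_attach_dev,
 *		.map		= my_map,
 *		.unmap		= my_unmap,
 *		.pgsize_bitmap	= SZ_4K | SZ_2M,
 *	};
 *
 *	if (!iommu_present(&pci_bus_type))
 *		bus_set_iommu(&pci_bus_type, &my_iommu_ops);
 */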

bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	if (!bus->iommu_ops || !bus->iommu_ops->capable)
		return false;

	return bus->iommu_ops->capable(cap);
}
EXPORT_SYMBOL_GPL(iommu_capable);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
					iommu_fault_handler_t handler,
					void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
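
/*
 * Illustrative sketch (editor's addition): an IOMMU user installing a
 * fault handler.  The handler name and its body are assumptions; by the
 * convention above, 0 means the fault was handled and an error code
 * means it was not.
 *
 *	static int my_fault_handler(struct iommu_domain *domain,
 *				    struct device *dev, unsigned long iova,
 *				    int flags, void *token)
 *	{
 *		dev_err(dev, "iommu fault at iova 0x%lx\n", iova);
 *		return -ENOSYS;	(not handled here)
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault_handler, NULL);
 */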

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type)
{
	struct iommu_domain *domain;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = bus->iommu_ops->domain_alloc(type);
	if (!domain)
		return NULL;

	domain->ops  = bus->iommu_ops;
	domain->type = type;
	/* Assume all sizes by default; the driver may override this later */
	domain->pgsize_bitmap  = bus->iommu_ops->pgsize_bitmap;

	return domain;
}

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	domain->ops->domain_free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev)
{
	int ret;

	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	ret = domain->ops->attach_dev(domain, dev);
	if (!ret)
		trace_attach_device_to_domain(dev);
	return ret;
}

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	/* FIXME: Remove this when groups are mandatory for iommu drivers */
	if (group == NULL)
		return __iommu_attach_device(domain, dev);

	/*
	 * We have a group - lock it to make sure the device-count doesn't
	 * change while we are attaching
	 */
	mutex_lock(&group->mutex);
	ret = -EINVAL;
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	ret = __iommu_attach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);
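
/*
 * Illustrative sketch (editor's addition): the typical unmanaged-domain
 * flow for a single device, as used by passthrough-style consumers.  The
 * label and the trimmed error handling are assumptions.
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *
 *	if (!domain)
 *		return -ENOMEM;
 *	if (iommu_attach_device(domain, dev))
 *		goto err_free;
 *	...use iommu_map()/iommu_unmap() on the domain...
 *	iommu_detach_device(domain, dev);
 * err_free:
 *	iommu_domain_free(domain);
 */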

static void __iommu_detach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	if (unlikely(domain->ops->detach_dev == NULL))
		return;

	domain->ops->detach_dev(domain, dev);
	trace_detach_device_from_domain(dev);
}

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	/* FIXME: Remove this when groups are mandatory for iommu drivers */
	if (group == NULL)
		return __iommu_detach_device(domain, dev);

	mutex_lock(&group->mutex);
	if (iommu_group_device_count(group) != 1) {
		WARN_ON(1);
		goto out_unlock;
	}

	__iommu_detach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	struct iommu_domain *domain;
	struct iommu_group *group;

	group = iommu_group_get(dev);
	/* FIXME: Remove this when groups are mandatory for iommu drivers */
	if (group == NULL)
		return NULL;

	domain = group->domain;

	iommu_group_put(group);

	return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);

/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices.  Bridge that gap by
 * iterating over the devices in a group.  Ideally we'd have a single
 * device which represents the requestor ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices).  Thus we attach each device.
 */
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	return __iommu_attach_device(domain, dev);
}

static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group)
{
	int ret;

	if (group->default_domain && group->domain != group->default_domain)
		return -EBUSY;

	ret = __iommu_group_for_each_dev(group, domain,
					 iommu_group_do_attach_device);
	if (ret == 0)
		group->domain = domain;

	return ret;
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_attach_group(domain, group);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	__iommu_detach_device(domain, dev);

	return 0;
}

static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group)
{
	int ret;

	if (!group->default_domain) {
		__iommu_group_for_each_dev(group, domain,
					   iommu_group_do_detach_device);
		group->domain = NULL;
		return;
	}

	if (group->domain == group->default_domain)
		return;

	/* Detach by re-attaching to the default domain */
	ret = __iommu_group_for_each_dev(group, group->default_domain,
					 iommu_group_do_attach_device);
	if (ret != 0)
		WARN_ON(1);
	else
		group->domain = group->default_domain;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	mutex_lock(&group->mutex);
	__iommu_detach_group(domain, group);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	if (unlikely(domain->ops->iova_to_phys == NULL))
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

static size_t iommu_pgsize(struct iommu_domain *domain,
			   unsigned long addr_merge, size_t size)
{
	unsigned int pgsize_idx;
	size_t pgsize;

	/* Max page size that still fits into 'size' */
	pgsize_idx = __fls(size);

	/* need to consider alignment requirements ? */
	if (likely(addr_merge)) {
		/* Max page size allowed by address */
		unsigned int align_pgsize_idx = __ffs(addr_merge);
		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
	}

	/* build a mask of acceptable page sizes */
	pgsize = (1UL << (pgsize_idx + 1)) - 1;

	/* throw away page sizes not supported by the hardware */
	pgsize &= domain->pgsize_bitmap;

	/* make sure we're still sane */
	BUG_ON(!pgsize);

	/* pick the biggest page */
	pgsize_idx = __fls(pgsize);
	pgsize = 1UL << pgsize_idx;

	return pgsize;
}
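
/*
 * Worked example (editor's addition, numbers are assumptions): with
 * iova = 0x201000, paddr = 0x5000 and size = 0x10000, addr_merge is
 * iova | paddr = 0x205000.  __ffs() caps the page size at 4K (bit 12)
 * even though 0x10000 bytes remain, so the caller maps this chunk with
 * a 4K entry and advances.  Once both addresses become 2M-aligned and
 * at least 2M remains, a 2M entry would be picked instead, provided
 * domain->pgsize_bitmap advertises it.
 */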

int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	phys_addr_t orig_paddr = paddr;
	int ret = 0;

	if (unlikely(domain->ops->map == NULL ||
		     domain->pgsize_bitmap == 0UL))
		return -ENODEV;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
		       iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

	while (size) {
		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);

		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
			 iova, &paddr, pgsize);

		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);
	else
		trace_map(orig_iova, orig_paddr, orig_size);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
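
/*
 * Illustrative sketch (editor's addition): mapping a physically
 * contiguous buffer at a fixed IOVA and tearing it down again.  The
 * addresses and size are assumptions; iova, paddr and size must all be
 * aligned to the smallest page size in pgsize_bitmap.
 *
 *	unsigned long iova = 0x100000;
 *	phys_addr_t paddr = page_to_phys(page);
 *	size_t size = SZ_64K;
 *
 *	if (iommu_map(domain, iova, paddr, size, IOMMU_READ | IOMMU_WRITE))
 *		return -EIO;
 *	...
 *	if (iommu_unmap(domain, iova, size) != size)
 *		pr_warn("partial unmap\n");
 */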

size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	unsigned int min_pagesz;
	unsigned long orig_iova = iova;

	if (unlikely(domain->ops->unmap == NULL ||
		     domain->pgsize_bitmap == 0UL))
		return -ENODEV;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
		       iova, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);

		unmapped_page = domain->ops->unmap(domain, iova, pgsize);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
			 iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	trace_unmap(orig_iova, size, unmapped);
	return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);

size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			 struct scatterlist *sg, unsigned int nents, int prot)
{
	struct scatterlist *s;
	size_t mapped = 0;
	unsigned int i, min_pagesz;
	int ret;

	if (unlikely(domain->pgsize_bitmap == 0UL))
		return 0;

	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	for_each_sg(sg, s, nents, i) {
		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;

		/*
		 * We are mapping on IOMMU page boundaries, so offset within
		 * the page must be 0. However, the IOMMU may support pages
		 * smaller than PAGE_SIZE, so s->offset may still represent
		 * an offset of that boundary within the CPU page.
		 */
		if (!IS_ALIGNED(s->offset, min_pagesz))
			goto out_err;

		ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
		if (ret)
			goto out_err;

		mapped += s->length;
	}

	return mapped;

out_err:
	/* undo mappings already done */
	iommu_unmap(domain, iova, mapped);

	return 0;
}
EXPORT_SYMBOL_GPL(default_iommu_map_sg);
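
/*
 * Illustrative sketch (editor's addition): mapping a scatterlist
 * contiguously in IOVA space through the iommu_map_sg() wrapper, which
 * drivers typically wire to default_iommu_map_sg() via ->map_sg.  The
 * single-entry table and the page-aligned "buf" are assumptions.
 *
 *	struct scatterlist sg[1];
 *
 *	sg_init_one(sg, buf, SZ_16K);
 *	if (iommu_map_sg(domain, iova, sg, 1, IOMMU_READ) != SZ_16K)
 *		return -EIO;
 */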

int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
			       phys_addr_t paddr, u64 size, int prot)
{
	if (unlikely(domain->ops->domain_window_enable == NULL))
		return -ENODEV;

	return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
						 prot);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_enable);

void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
	if (unlikely(domain->ops->domain_window_disable == NULL))
		return;

	return domain->ops->domain_window_disable(domain, wnd_nr);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);

static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
	BUG_ON(!iommu_group_kset);

	return 0;
}
core_initcall(iommu_init);

int iommu_domain_get_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	struct iommu_domain_geometry *geometry;
	bool *paging;
	int ret = 0;
	u32 *count;

	switch (attr) {
	case DOMAIN_ATTR_GEOMETRY:
		geometry  = data;
		*geometry = domain->geometry;

		break;
	case DOMAIN_ATTR_PAGING:
		paging  = data;
		*paging = (domain->pgsize_bitmap != 0UL);
		break;
	case DOMAIN_ATTR_WINDOWS:
		count = data;

		if (domain->ops->domain_get_windows != NULL)
			*count = domain->ops->domain_get_windows(domain);
		else
			ret = -ENODEV;

		break;
	default:
		if (!domain->ops->domain_get_attr)
			return -EINVAL;

		ret = domain->ops->domain_get_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
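
/*
 * Illustrative sketch (editor's addition): querying the addressing
 * limits of a domain before choosing IOVAs.
 *
 *	struct iommu_domain_geometry geo;
 *
 *	if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo) &&
 *	    geo.force_aperture)
 *		pr_info("iova range: %pad..%pad\n",
 *			&geo.aperture_start, &geo.aperture_end);
 */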

int iommu_domain_set_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	int ret = 0;
	u32 *count;

	switch (attr) {
	case DOMAIN_ATTR_WINDOWS:
		count = data;

		if (domain->ops->domain_set_windows != NULL)
			ret = domain->ops->domain_set_windows(domain, *count);
		else
			ret = -ENODEV;

		break;
	default:
		if (domain->ops->domain_set_attr == NULL)
			return -EINVAL;

		ret = domain->ops->domain_set_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);

void iommu_get_dm_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->get_dm_regions)
		ops->get_dm_regions(dev, list);
}

void iommu_put_dm_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->put_dm_regions)
		ops->put_dm_regions(dev, list);
}

/* Request that a device is direct mapped by the IOMMU */
int iommu_request_dm_for_dev(struct device *dev)
{
	struct iommu_domain *dm_domain;
	struct iommu_group *group;
	int ret;

	/* Device must already be in a group before calling this function */
	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	mutex_lock(&group->mutex);

	/* Check if the default domain is already direct mapped */
	ret = 0;
	if (group->default_domain &&
	    group->default_domain->type == IOMMU_DOMAIN_IDENTITY)
		goto out;

	/* Don't change mappings of existing devices */
	ret = -EBUSY;
	if (iommu_group_device_count(group) != 1)
		goto out;

	/* Allocate a direct mapped domain */
	ret = -ENOMEM;
	dm_domain = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_IDENTITY);
	if (!dm_domain)
		goto out;

	/* Attach the device to the domain */
	ret = __iommu_attach_group(dm_domain, group);
	if (ret) {
		iommu_domain_free(dm_domain);
		goto out;
	}

	/* Make the direct mapped domain the default for this group */
	if (group->default_domain)
		iommu_domain_free(group->default_domain);
	group->default_domain = dm_domain;

	pr_info("Using direct mapping for device %s\n", dev_name(dev));

	ret = 0;
out:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;

	if (fwspec)
		return ops == fwspec->ops ? 0 : -EINVAL;

	fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
	if (!fwspec)
		return -ENOMEM;

	of_node_get(to_of_node(iommu_fwnode));
	fwspec->iommu_fwnode = iommu_fwnode;
	fwspec->ops = ops;
	dev->iommu_fwspec = fwspec;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_init);

void iommu_fwspec_free(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;

	if (fwspec) {
		fwnode_handle_put(fwspec->iommu_fwnode);
		kfree(fwspec);
		dev->iommu_fwspec = NULL;
	}
}
EXPORT_SYMBOL_GPL(iommu_fwspec_free);

int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	size_t size;
	int i;

	if (!fwspec)
		return -EINVAL;

	size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]);
	if (size > sizeof(*fwspec)) {
		fwspec = krealloc(dev->iommu_fwspec, size, GFP_KERNEL);
		if (!fwspec)
			return -ENOMEM;
	}

	for (i = 0; i < num_ids; i++)
		fwspec->ids[fwspec->num_ids + i] = ids[i];

	fwspec->num_ids += num_ids;
	dev->iommu_fwspec = fwspec;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
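
/*
 * Illustrative sketch (editor's addition): firmware glue code (e.g. OF)
 * describing a master to its IOMMU.  The fwnode, ops pointer and the
 * single stream ID value are assumptions.
 *
 *	u32 sid = 0x42;
 *	int ret;
 *
 *	ret = iommu_fwspec_init(dev, iommu_fwnode, ops);
 *	if (!ret)
 *		ret = iommu_fwspec_add_ids(dev, &sid, 1);
 *	if (ret)
 *		iommu_fwspec_free(dev);
 */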