/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#define pr_fmt(fmt)    "iommu: " fmt

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <trace/events/iommu.h>

static struct kset *iommu_group_kset;
static struct ida iommu_group_ida;
static struct mutex iommu_group_mutex;

struct iommu_callback_data {
	const struct iommu_ops *ops;
};

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct mutex mutex;
	struct blocking_notifier_head notifier;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *domain;
};

struct iommu_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =		\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group);

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	mutex_lock(&iommu_group_mutex);
	ida_remove(&iommu_group_ida, group->id);
	mutex_unlock(&iommu_group_mutex);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);

	kfree(group->name);
	kfree(group);
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	mutex_lock(&iommu_group_mutex);

again:
	if (unlikely(0 == ida_pre_get(&iommu_group_ida, GFP_KERNEL))) {
		kfree(group);
		mutex_unlock(&iommu_group_mutex);
		return ERR_PTR(-ENOMEM);
	}

	if (-EAGAIN == ida_get_new(&iommu_group_ida, &group->id))
		goto again;

	mutex_unlock(&iommu_group_mutex);

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		mutex_lock(&iommu_group_mutex);
		ida_remove(&iommu_group_ida, group->id);
		mutex_unlock(&iommu_group_mutex);
		kobject_put(&group->kobj);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group.  We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
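
/*
 * Usage sketch (hypothetical driver context; the name "my-unit" is
 * made up): allocate and name a group.  The group then appears in
 * sysfs as /sys/kernel/iommu_groups/<id>, and the allocation
 * reference is later dropped with iommu_group_put().
 *
 *	struct iommu_group *group;
 *
 *	group = iommu_group_alloc();
 *	if (IS_ERR(group))
 *		return PTR_ERR(group);
 *	iommu_group_set_name(group, "my-unit");
 */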

struct iommu_group *iommu_group_get_by_id(int id)
{
	struct kobject *group_kobj;
	struct iommu_group *group;
	const char *name;

	if (!iommu_group_kset)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%d", id);
	if (!name)
		return NULL;

	group_kobj = kset_find_obj(iommu_group_kset, name);
	kfree(name);

	if (!group_kobj)
		return NULL;

	group = container_of(group_kobj, struct iommu_group, kobj);
	BUG_ON(group->id != id);

	kobject_get(group->devices_kobj);
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
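
/*
 * Usage sketch (struct my_unit and my_unit_release are hypothetical):
 * pair the data with a release function so it is freed automatically
 * when the last group reference goes away.
 *
 *	static void my_unit_release(void *iommu_data)
 *	{
 *		kfree(iommu_data);
 *	}
 *
 *	unit = kzalloc(sizeof(*unit), GFP_KERNEL);
 *	if (!unit)
 *		return -ENOMEM;
 *	iommu_group_set_iommudata(group, unit, my_unit_release);
 */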

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group.  When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_group_create_direct_mappings(struct iommu_group *group,
					      struct device *dev)
{
	struct iommu_domain *domain = group->default_domain;
	struct iommu_dm_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
		return 0;

	BUG_ON(!domain->ops->pgsize_bitmap);

	pg_size = 1UL << __ffs(domain->ops->pgsize_bitmap);
	INIT_LIST_HEAD(&mappings);

	iommu_get_dm_regions(dev, &mappings);

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;

		start = ALIGN(entry->start, pg_size);
		end   = ALIGN(entry->start + entry->length, pg_size);

		for (addr = start; addr < end; addr += pg_size) {
			phys_addr_t phys_addr;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (phys_addr)
				continue;

			ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
			if (ret)
				goto out;
		}
	}

out:
	iommu_put_dm_regions(dev, &mappings);

	return ret;
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct iommu_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret)
		goto err_free_device;

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		ret = -ENOMEM;
		goto err_remove_link;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	iommu_group_create_direct_mappings(group, dev);

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	if (group->domain)
		ret = __iommu_attach_device(group->domain, dev);
	mutex_unlock(&group->mutex);
	if (ret)
		goto err_put_group;

	/* Notify any listeners about change to group. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

	trace_add_device_to_group(group->id, dev);

	pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);

	return 0;

err_put_group:
	mutex_lock(&group->mutex);
	list_del(&device->list);
	mutex_unlock(&group->mutex);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
	sysfs_remove_link(group->devices_kobj, device->name);
err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);
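
/*
 * Usage sketch: iommu_group_add_device() takes its own reference on
 * the group, so a caller that just allocated the group can drop the
 * allocation reference once the device is in (group and dev come from
 * a hypothetical surrounding driver).
 *
 *	ret = iommu_group_add_device(group, dev);
 *	iommu_group_put(group);
 *	if (ret)
 *		return ret;
 */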

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct iommu_device *tmp_device, *device = NULL;

	pr_info("Removing device %s from group %d\n", dev_name(dev), group->id);

	/* Pre-notify listeners that a device is being removed. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int iommu_group_device_count(struct iommu_group *group)
{
	struct iommu_device *entry;
	int ret = 0;

	list_for_each_entry(entry, &group->devices, list)
		ret++;

	return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
				      int (*fn)(struct device *, void *))
{
	struct iommu_device *device;
	int ret = 0;

	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	return ret;
}

int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_group_for_each_dev(group, data, fn);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
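
/*
 * Usage sketch (my_count_dev is hypothetical): count the devices in a
 * group.  Returning nonzero from the callback stops the iteration.
 *
 *	static int my_count_dev(struct device *dev, void *data)
 *	{
 *		int *count = data;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *
 *	iommu_group_for_each_dev(group, &count, my_count_dev);
 */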

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the group
 * reference is incremented, else NULL is returned.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
				  struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
				    struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
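
/*
 * Usage sketch (my_group_notify and my_nb are hypothetical): watch a
 * group for device arrival; the action values are defined in
 * include/linux/iommu.h.
 *
 *	static int my_group_notify(struct notifier_block *nb,
 *				   unsigned long action, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		if (action == IOMMU_GROUP_NOTIFY_ADD_DEVICE)
 *			dev_info(dev, "added to iommu group\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_group_notify,
 *	};
 *
 *	iommu_group_register_notifier(group, &my_nb);
 *	...
 *	iommu_group_unregister_notifier(group, &my_nb);
 */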

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding.  This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even when they pass through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups.  For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups.  The
 * dma_alias_devfn only supports aliases on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports).  It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop.  To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (((pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) &&
		     pdev->dma_alias_devfn == tmp->devfn) ||
		    ((tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) &&
		     tmp->dma_alias_devfn == pdev->devfn)) {

			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device.  Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_alloc();
	if (IS_ERR(group))
		return NULL;

	return group;
}

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device.  A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases.  If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any.  No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	group = iommu_group_alloc();
	if (IS_ERR(group))
		return NULL;

	return group;
}

/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device.  On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device.  The reference should be released with iommu_group_put().
 */
struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

	group = ERR_PTR(-EINVAL);

	if (ops && ops->device_group)
		group = ops->device_group(dev);

	if (IS_ERR(group))
		return group;

	/*
	 * Try to allocate a default domain - needs support from the
	 * IOMMU driver.
	 */
	if (!group->default_domain) {
		group->default_domain = __iommu_domain_alloc(dev->bus,
							     IOMMU_DOMAIN_DMA);
		if (!group->domain)
			group->domain = group->default_domain;
	}

	ret = iommu_group_add_device(group, dev);
	if (ret) {
		iommu_group_put(group);
		return ERR_PTR(ret);
	}

	return group;
}
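
/*
 * Usage sketch: a hypothetical driver ->add_device() callback built on
 * iommu_group_get_for_dev(), which applies the bus's grouping policy
 * (e.g. pci_device_group) and already adds the device to the group.
 *
 *	static int my_iommu_add_device(struct device *dev)
 *	{
 *		struct iommu_group *group;
 *
 *		group = iommu_group_get_for_dev(dev);
 *		if (IS_ERR(group))
 *			return PTR_ERR(group);
 *
 *		iommu_group_put(group);
 *		return 0;
 *	}
 */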

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
	return group->default_domain;
}

static int add_iommu_group(struct device *dev, void *data)
{
	struct iommu_callback_data *cb = data;
	const struct iommu_ops *ops = cb->ops;
	int ret;

	if (!ops->add_device)
		return 0;

	WARN_ON(dev->iommu_group);

	ret = ops->add_device(dev);

	/*
	 * We ignore -ENODEV errors for now, as they just mean that the
	 * device is not translated by an IOMMU. We still care about
	 * other errors and fail to initialize when they happen.
	 */
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int remove_iommu_group(struct device *dev, void *data)
{
	struct iommu_callback_data *cb = data;
	const struct iommu_ops *ops = cb->ops;

	if (ops->remove_device && dev->iommu_group)
		ops->remove_device(dev);

	return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	unsigned long group_action = 0;

	/*
	 * ADD/DEL call into iommu driver ops if provided, which may
	 * result in ADD/DEL notifiers to group->notifier
	 */
	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (ops->add_device)
			return ops->add_device(dev);
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		if (ops->remove_device && dev->iommu_group) {
			ops->remove_device(dev);
			return 0;
		}
	}

	/*
	 * Remaining BUS_NOTIFYs get filtered and republished to the
	 * group, if anyone is listening
	 */
	group = iommu_group_get(dev);
	if (!group)
		return 0;

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
		break;
	case BUS_NOTIFY_BOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
		break;
	case BUS_NOTIFY_UNBIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
		break;
	case BUS_NOTIFY_UNBOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
		break;
	}

	if (group_action)
		blocking_notifier_call_chain(&group->notifier,
					     group_action, dev);

	iommu_group_put(group);
	return 0;
}

static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;
	struct notifier_block *nb;
	struct iommu_callback_data cb = {
		.ops = ops,
	};

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	nb->notifier_call = iommu_bus_notifier;

	err = bus_register_notifier(bus, nb);
	if (err)
		goto out_free;

	err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
	if (err)
		goto out_err;

	return 0;

out_err:
	/* Clean up */
	bus_for_each_dev(bus, NULL, &cb, remove_iommu_group);
	bus_unregister_notifier(bus, nb);

out_free:
	kfree(nb);

	return err;
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;

	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;

	/* Do IOMMU specific setup for this bus-type */
	err = iommu_bus_init(bus, ops);
	if (err)
		bus->iommu_ops = NULL;

	return err;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
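
/*
 * Usage sketch (hypothetical driver; only a subset of callbacks is
 * shown and all my_-prefixed names are made up): an IOMMU driver
 * typically registers its ops for a bus once its hardware is probed.
 *
 *	static const struct iommu_ops my_iommu_ops = {
 *		.domain_alloc	= my_domain_alloc,
 *		.domain_free	= my_domain_free,
 *		.attach_dev	= my_attach_dev,
 *		.detach_dev	= my_detach_dev,
 *		.map		= my_map,
 *		.unmap		= my_unmap,
 *		.iova_to_phys	= my_iova_to_phys,
 *		.add_device	= my_add_device,
 *		.remove_device	= my_remove_device,
 *		.device_group	= generic_device_group,
 *		.pgsize_bitmap	= SZ_4K | SZ_2M,
 *	};
 *
 *	bus_set_iommu(&platform_bus_type, &my_iommu_ops);
 */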

bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	if (!bus->iommu_ops || !bus->iommu_ops->capable)
		return false;

	return bus->iommu_ops->capable(cap);
}
EXPORT_SYMBOL_GPL(iommu_capable);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
					iommu_fault_handler_t handler,
					void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
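
/*
 * Usage sketch (my_fault_handler is hypothetical): log faults and
 * report them as unhandled by returning an error code, per the
 * convention documented above.
 *
 *	static int my_fault_handler(struct iommu_domain *domain,
 *				    struct device *dev, unsigned long iova,
 *				    int flags, void *token)
 *	{
 *		dev_err(dev, "iommu fault at iova 0x%lx\n", iova);
 *		return -ENOSYS;
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault_handler, NULL);
 */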

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type)
{
	struct iommu_domain *domain;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = bus->iommu_ops->domain_alloc(type);
	if (!domain)
		return NULL;

	domain->ops  = bus->iommu_ops;
	domain->type = type;

	return domain;
}

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	domain->ops->domain_free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev)
{
	int ret;

	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	ret = domain->ops->attach_dev(domain, dev);
	if (!ret)
		trace_attach_device_to_domain(dev);
	return ret;
}
int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	/* FIXME: Remove this when groups are mandatory for iommu drivers */
	if (group == NULL)
		return __iommu_attach_device(domain, dev);

	/*
	 * We have a group - lock it to make sure the device-count doesn't
	 * change while we are attaching
	 */
	mutex_lock(&group->mutex);
	ret = -EINVAL;
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	ret = __iommu_attach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);
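
/*
 * Usage sketch: a VFIO-style user allocates an unmanaged domain and
 * attaches a device to it (error handling abbreviated; dev is assumed
 * to sit on an IOMMU-capable bus).
 *
 *	struct iommu_domain *domain;
 *
 *	domain = iommu_domain_alloc(dev->bus);
 *	if (!domain)
 *		return -ENODEV;
 *
 *	ret = iommu_attach_device(domain, dev);
 *	if (ret) {
 *		iommu_domain_free(domain);
 *		return ret;
 *	}
 */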

static void __iommu_detach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	if (unlikely(domain->ops->detach_dev == NULL))
		return;

	domain->ops->detach_dev(domain, dev);
	trace_detach_device_from_domain(dev);
}

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	/* FIXME: Remove this when groups are mandatory for iommu drivers */
	if (group == NULL)
		return __iommu_detach_device(domain, dev);

	mutex_lock(&group->mutex);
	if (iommu_group_device_count(group) != 1) {
		WARN_ON(1);
		goto out_unlock;
	}

	__iommu_detach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	struct iommu_domain *domain;
	struct iommu_group *group;

	group = iommu_group_get(dev);
	/* FIXME: Remove this when groups are mandatory for iommu drivers */
	if (group == NULL)
		return NULL;

	domain = group->domain;

	iommu_group_put(group);

	return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);

/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices.  Bridge that gap by
 * iterating over the devices in a group.  Ideally we'd have a single
 * device which represents the requestor ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (e.g. untrusted multi-function
 * PCI devices).  Thus we attach each device.
 */
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	return __iommu_attach_device(domain, dev);
}

static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group)
{
	int ret;

	if (group->default_domain && group->domain != group->default_domain)
		return -EBUSY;

	ret = __iommu_group_for_each_dev(group, domain,
					 iommu_group_do_attach_device);
	if (ret == 0)
		group->domain = domain;

	return ret;
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_attach_group(domain, group);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	__iommu_detach_device(domain, dev);

	return 0;
}

static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group)
{
	int ret;

	if (!group->default_domain) {
		__iommu_group_for_each_dev(group, domain,
					   iommu_group_do_detach_device);
		group->domain = NULL;
		return;
	}

	if (group->domain == group->default_domain)
		return;

	/* Detach by re-attaching to the default domain */
	ret = __iommu_group_for_each_dev(group, group->default_domain,
					 iommu_group_do_attach_device);
	if (ret != 0)
		WARN_ON(1);
	else
		group->domain = group->default_domain;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	mutex_lock(&group->mutex);
	__iommu_detach_group(domain, group);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	if (unlikely(domain->ops->iova_to_phys == NULL))
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

static size_t iommu_pgsize(struct iommu_domain *domain,
			   unsigned long addr_merge, size_t size)
{
	unsigned int pgsize_idx;
	size_t pgsize;

	/* Max page size that still fits into 'size' */
	pgsize_idx = __fls(size);

	/* Need to consider alignment requirements? */
	if (likely(addr_merge)) {
		/* Max page size allowed by address */
		unsigned int align_pgsize_idx = __ffs(addr_merge);

		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
	}

	/* build a mask of acceptable page sizes */
	pgsize = (1UL << (pgsize_idx + 1)) - 1;

	/* throw away page sizes not supported by the hardware */
	pgsize &= domain->ops->pgsize_bitmap;

	/* make sure we're still sane */
	BUG_ON(!pgsize);

	/* pick the biggest page */
	pgsize_idx = __fls(pgsize);
	pgsize = 1UL << pgsize_idx;

	return pgsize;
}
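
/*
 * Worked example for iommu_pgsize(), assuming a hardware
 * pgsize_bitmap of SZ_4K | SZ_2M | SZ_1G (0x40201000):
 *
 *	addr_merge = iova | paddr = 0x40200000  ->  __ffs = 21
 *	size       = 0x400000 (4M)              ->  __fls = 22
 *	pgsize_idx = min(22, 21) = 21
 *	mask       = (1UL << 22) - 1 = 0x3fffff
 *	mask & pgsize_bitmap = 0x201000         ->  __fls = 21
 *
 * so this chunk is mapped with one 2M page (0x200000), and iommu_map()
 * below loops again for the remaining 2M.
 */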

int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	int ret = 0;

	if (unlikely(domain->ops->map == NULL ||
		     domain->ops->pgsize_bitmap == 0UL))
		return -ENODEV;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
		       iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

	while (size) {
		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);

		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
			 iova, &paddr, pgsize);

		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);
	else
		trace_map(orig_iova, paddr, orig_size);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);

size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	unsigned int min_pagesz;
	unsigned long orig_iova = iova;

	if (unlikely(domain->ops->unmap == NULL ||
		     domain->ops->pgsize_bitmap == 0UL))
		return -ENODEV;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
		       iova, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);

		unmapped_page = domain->ops->unmap(domain, iova, pgsize);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
			 iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	trace_unmap(orig_iova, size, unmapped);
	return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);
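
/*
 * Usage sketch: map a 2M region for DMA and tear it down again (the
 * iova/paddr values are assumed to satisfy the alignment checks above).
 *
 *	ret = iommu_map(domain, iova, paddr, SZ_2M,
 *			IOMMU_READ | IOMMU_WRITE);
 *	if (ret)
 *		return ret;
 *	...
 *	unmapped = iommu_unmap(domain, iova, SZ_2M);
 *	WARN_ON(unmapped != SZ_2M);
 */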

size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			    struct scatterlist *sg, unsigned int nents, int prot)
{
	struct scatterlist *s;
	size_t mapped = 0;
	unsigned int i, min_pagesz;
	int ret;

	if (unlikely(domain->ops->pgsize_bitmap == 0UL))
		return 0;

	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	for_each_sg(sg, s, nents, i) {
		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;

		/*
		 * We are mapping on IOMMU page boundaries, so offset within
		 * the page must be 0. However, the IOMMU may support pages
		 * smaller than PAGE_SIZE, so s->offset may still represent
		 * an offset of that boundary within the CPU page.
		 */
		if (!IS_ALIGNED(s->offset, min_pagesz))
			goto out_err;

		ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
		if (ret)
			goto out_err;

		mapped += s->length;
	}

	return mapped;

out_err:
	/* undo mappings already done */
	iommu_unmap(domain, iova, mapped);

	return 0;
}
EXPORT_SYMBOL_GPL(default_iommu_map_sg);
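
/*
 * Usage sketch: callers normally reach this through the iommu_map_sg()
 * wrapper in include/linux/iommu.h; a zero return means nothing was
 * mapped (sgt is a hypothetical, already-built sg_table).
 *
 *	size_t mapped;
 *
 *	mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->nents,
 *			      IOMMU_READ | IOMMU_WRITE);
 *	if (!mapped)
 *		return -ENOMEM;
 */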

int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
			       phys_addr_t paddr, u64 size, int prot)
{
	if (unlikely(domain->ops->domain_window_enable == NULL))
		return -ENODEV;

	return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
						 prot);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_enable);

void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
	if (unlikely(domain->ops->domain_window_disable == NULL))
		return;

	return domain->ops->domain_window_disable(domain, wnd_nr);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);

static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
	ida_init(&iommu_group_ida);
	mutex_init(&iommu_group_mutex);

	BUG_ON(!iommu_group_kset);

	return 0;
}
core_initcall(iommu_init);

int iommu_domain_get_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	struct iommu_domain_geometry *geometry;
	bool *paging;
	int ret = 0;
	u32 *count;

	switch (attr) {
	case DOMAIN_ATTR_GEOMETRY:
		geometry  = data;
		*geometry = domain->geometry;

		break;
	case DOMAIN_ATTR_PAGING:
		paging  = data;
		*paging = (domain->ops->pgsize_bitmap != 0UL);
		break;
	case DOMAIN_ATTR_WINDOWS:
		count = data;

		if (domain->ops->domain_get_windows != NULL)
			*count = domain->ops->domain_get_windows(domain);
		else
			ret = -ENODEV;

		break;
	default:
		if (!domain->ops->domain_get_attr)
			return -EINVAL;

		ret = domain->ops->domain_get_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
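
/*
 * Usage sketch: query the addressable IOVA range of a domain through
 * the generic attribute interface.
 *
 *	struct iommu_domain_geometry geo;
 *
 *	if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo))
 *		pr_info("aperture: %pad - %pad\n",
 *			&geo.aperture_start, &geo.aperture_end);
 */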

int iommu_domain_set_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	int ret = 0;
	u32 *count;

	switch (attr) {
	case DOMAIN_ATTR_WINDOWS:
		count = data;

		if (domain->ops->domain_set_windows != NULL)
			ret = domain->ops->domain_set_windows(domain, *count);
		else
			ret = -ENODEV;

		break;
	default:
		if (domain->ops->domain_set_attr == NULL)
			return -EINVAL;

		ret = domain->ops->domain_set_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);

void iommu_get_dm_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->get_dm_regions)
		ops->get_dm_regions(dev, list);
}

void iommu_put_dm_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->put_dm_regions)
		ops->put_dm_regions(dev, list);
}

/* Request that a device is direct mapped by the IOMMU */
int iommu_request_dm_for_dev(struct device *dev)
{
	struct iommu_domain *dm_domain;
	struct iommu_group *group;
	int ret;

	/* Device must already be in a group before calling this function */
	group = iommu_group_get(dev);
	if (!group)
		return -EINVAL;

	mutex_lock(&group->mutex);

	/* Check if the default domain is already direct mapped */
	ret = 0;
	if (group->default_domain &&
	    group->default_domain->type == IOMMU_DOMAIN_IDENTITY)
		goto out;

	/* Don't change mappings of existing devices */
	ret = -EBUSY;
	if (iommu_group_device_count(group) != 1)
		goto out;

	/* Allocate a direct mapped domain */
	ret = -ENOMEM;
	dm_domain = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_IDENTITY);
	if (!dm_domain)
		goto out;

	/* Attach the device to the domain */
	ret = __iommu_attach_group(dm_domain, group);
	if (ret) {
		iommu_domain_free(dm_domain);
		goto out;
	}

	/* Make the direct mapped domain the default for this group */
	if (group->default_domain)
		iommu_domain_free(group->default_domain);
	group->default_domain = dm_domain;

	pr_info("Using direct mapping for device %s\n", dev_name(dev));

	ret = 0;
out:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}