Lines matching refs: dev_data

Cross-reference listing of every line that uses dev_data (a struct iommu_dev_data pointer); the surrounding identifiers (amd_iommu_rlookup_table, set_dte_entry, amd_iommu_alias_table) place this in the AMD IOMMU driver, evidently drivers/iommu/amd_iommu.c. Each entry shows the file line number, the source line, and the enclosing function; "local" marks a local variable, "argument" a function parameter.
111 struct iommu_dev_data *dev_data; in alloc_dev_data() local
114 dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL); in alloc_dev_data()
115 if (!dev_data) in alloc_dev_data()
118 dev_data->devid = devid; in alloc_dev_data()
119 atomic_set(&dev_data->bind, 0); in alloc_dev_data()
122 list_add_tail(&dev_data->dev_data_list, &dev_data_list); in alloc_dev_data()
125 return dev_data; in alloc_dev_data()
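The matches at lines 111-125 give almost the whole allocator. A sketch of its likely shape, assuming the global list is serialized by a spinlock (the lock, here called dev_data_list_lock, is not visible in the matches):

static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
    struct iommu_dev_data *dev_data;
    unsigned long flags;

    dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
    if (!dev_data)
        return NULL;

    dev_data->devid = devid;
    atomic_set(&dev_data->bind, 0);

    /* Assumed: a driver-global spinlock serializing dev_data_list. */
    spin_lock_irqsave(&dev_data_list_lock, flags);
    list_add_tail(&dev_data->dev_data_list, &dev_data_list);
    spin_unlock_irqrestore(&dev_data_list_lock, flags);

    return dev_data;
}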
128 static void free_dev_data(struct iommu_dev_data *dev_data) in free_dev_data() argument
133 list_del(&dev_data->dev_data_list); in free_dev_data()
136 if (dev_data->group) in free_dev_data()
137 iommu_group_put(dev_data->group); in free_dev_data()
139 kfree(dev_data); in free_dev_data()
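free_dev_data() mirrors the allocator: unlink under (presumably) the same lock, drop the group reference if one was taken, then free:

static void free_dev_data(struct iommu_dev_data *dev_data)
{
    unsigned long flags;

    /* Assumed: the same spinlock alloc_dev_data() takes. */
    spin_lock_irqsave(&dev_data_list_lock, flags);
    list_del(&dev_data->dev_data_list);
    spin_unlock_irqrestore(&dev_data_list_lock, flags);

    if (dev_data->group)
        iommu_group_put(dev_data->group);

    kfree(dev_data);
}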
144 struct iommu_dev_data *dev_data; in search_dev_data() local
148 list_for_each_entry(dev_data, &dev_data_list, dev_data_list) { in search_dev_data()
149 if (dev_data->devid == devid) in search_dev_data()
153 dev_data = NULL; in search_dev_data()
158 return dev_data; in search_dev_data()
163 struct iommu_dev_data *dev_data; in find_dev_data() local
165 dev_data = search_dev_data(devid); in find_dev_data()
167 if (dev_data == NULL) in find_dev_data()
168 dev_data = alloc_dev_data(devid); in find_dev_data()
170 return dev_data; in find_dev_data()
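search_dev_data() is a plain linear scan of dev_data_list; the reset to NULL at line 153 only runs when the loop falls through without a match, which implies a goto out of the loop body. find_dev_data() then implements find-or-create on top of it. A sketch under those assumptions:

static struct iommu_dev_data *search_dev_data(u16 devid)
{
    struct iommu_dev_data *dev_data;
    unsigned long flags;

    spin_lock_irqsave(&dev_data_list_lock, flags);
    list_for_each_entry(dev_data, &dev_data_list, dev_data_list) {
        if (dev_data->devid == devid)
            goto out_unlock;
    }

    dev_data = NULL;    /* fell off the list: no match */

out_unlock:
    spin_unlock_irqrestore(&dev_data_list_lock, flags);

    return dev_data;
}

static struct iommu_dev_data *find_dev_data(u16 devid)
{
    struct iommu_dev_data *dev_data;

    dev_data = search_dev_data(devid);
    if (dev_data == NULL)
        dev_data = alloc_dev_data(devid);

    return dev_data;
}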
205 struct iommu_dev_data *dev_data; in pdev_pri_erratum() local
207 dev_data = get_dev_data(&pdev->dev); in pdev_pri_erratum()
209 return dev_data->errata & (1 << erratum) ? true : false; in pdev_pri_erratum()
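Per-device errata are tracked as a bitmask in dev_data->errata; pdev_pri_erratum() tests one bit (the matching setter, amd_iommu_enable_device_erratum(), appears at lines 3802-3808 near the end of this listing). Roughly, with the signature assumed:

bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
{
    struct iommu_dev_data *dev_data = get_dev_data(&pdev->dev);

    /* One bit per erratum number in the per-device mask. */
    return dev_data->errata & (1 << erratum) ? true : false;
}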
336 static int use_dev_data_iommu_group(struct iommu_dev_data *dev_data, in use_dev_data_iommu_group() argument
339 if (!dev_data->group) { in use_dev_data_iommu_group()
344 dev_data->group = group; in use_dev_data_iommu_group()
347 return iommu_group_add_device(dev_data->group, dev); in use_dev_data_iommu_group()
352 struct iommu_dev_data *dev_data; in init_iommu_group() local
363 dev_data = find_dev_data(get_device_id(dev)); in init_iommu_group()
364 if (!dev_data) in init_iommu_group()
367 if (dev_data->alias_data) { in init_iommu_group()
371 if (dev_data->alias_data->group) in init_iommu_group()
378 alias = amd_iommu_alias_table[dev_data->devid]; in init_iommu_group()
414 return use_dev_data_iommu_group(dev_data->alias_data, dev); in init_iommu_group()
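use_dev_data_iommu_group() lazily creates an iommu_group the first time a dev_data needs one, then adds the device to it; line 414 shows init_iommu_group() routing aliased devices through their alias's dev_data, so every function behind one requester ID lands in the same group. A sketch, assuming the elided allocation between lines 339 and 344 uses iommu_group_alloc():

static int use_dev_data_iommu_group(struct iommu_dev_data *dev_data,
                                    struct device *dev)
{
    if (!dev_data->group) {
        struct iommu_group *group = iommu_group_alloc();    /* assumed */

        if (IS_ERR(group))
            return PTR_ERR(group);

        dev_data->group = group;
    }

    return iommu_group_add_device(dev_data->group, dev);
}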
420 struct iommu_dev_data *dev_data; in iommu_init_device() local
427 dev_data = find_dev_data(get_device_id(dev)); in iommu_init_device()
428 if (!dev_data) in iommu_init_device()
431 alias = amd_iommu_alias_table[dev_data->devid]; in iommu_init_device()
432 if (alias != dev_data->devid) { in iommu_init_device()
439 free_dev_data(dev_data); in iommu_init_device()
442 dev_data->alias_data = alias_data; in iommu_init_device()
452 iommu = amd_iommu_rlookup_table[dev_data->devid]; in iommu_init_device()
453 dev_data->iommu_v2 = iommu->is_iommu_v2; in iommu_init_device()
456 dev->archdata.iommu = dev_data; in iommu_init_device()
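iommu_init_device() wires everything together: look up (or create) the dev_data, resolve the requester-ID alias through amd_iommu_alias_table, record whether the underlying IOMMU supports v2 features, and stash the result in dev->archdata.iommu. A condensed reconstruction; the error codes and the elided group setup are assumptions:

static int iommu_init_device(struct device *dev)
{
    struct iommu_dev_data *dev_data;
    struct amd_iommu *iommu;
    u16 alias;

    dev_data = find_dev_data(get_device_id(dev));
    if (!dev_data)
        return -ENOMEM;

    alias = amd_iommu_alias_table[dev_data->devid];
    if (alias != dev_data->devid) {
        /* Aliased function: share state through alias_data. */
        struct iommu_dev_data *alias_data = find_dev_data(alias);

        if (!alias_data) {
            free_dev_data(dev_data);
            return -ENOMEM;    /* assumed error code */
        }
        dev_data->alias_data = alias_data;
    }

    iommu = amd_iommu_rlookup_table[dev_data->devid];
    dev_data->iommu_v2 = iommu->is_iommu_v2;

    dev->archdata.iommu = dev_data;
    return 0;
}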
488 struct iommu_dev_data *dev_data, *n; in amd_iommu_uninit_devices() local
500 list_for_each_entry_safe(dev_data, n, &dev_data_list, dev_data_list) in amd_iommu_uninit_devices()
501 free_dev_data(dev_data); in amd_iommu_uninit_devices()
1165 static int device_flush_iotlb(struct iommu_dev_data *dev_data, in device_flush_iotlb() argument
1172 qdep = dev_data->ats.qdep; in device_flush_iotlb()
1173 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_iotlb()
1175 build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size); in device_flush_iotlb()
1183 static int device_flush_dte(struct iommu_dev_data *dev_data) in device_flush_dte() argument
1188 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_dte()
1190 ret = iommu_flush_dte(iommu, dev_data->devid); in device_flush_dte()
1194 if (dev_data->ats.enabled) in device_flush_dte()
1195 ret = device_flush_iotlb(dev_data, 0, ~0UL); in device_flush_dte()
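The two single-device flush helpers: device_flush_iotlb() builds an IOTLB-pages invalidation command sized by the device's ATS queue depth, and device_flush_dte() invalidates the device table entry, chasing it with a full-range IOTLB flush (address 0, size ~0UL) when ATS is on. A sketch, assuming the command is submitted via iommu_queue_command() (not visible in the matches):

static int device_flush_iotlb(struct iommu_dev_data *dev_data,
                              u64 address, size_t size)
{
    struct amd_iommu *iommu;
    struct iommu_cmd cmd;
    int qdep;

    qdep  = dev_data->ats.qdep;
    iommu = amd_iommu_rlookup_table[dev_data->devid];

    build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);

    return iommu_queue_command(iommu, &cmd);    /* submit: assumed */
}

static int device_flush_dte(struct iommu_dev_data *dev_data)
{
    struct amd_iommu *iommu;
    int ret;

    iommu = amd_iommu_rlookup_table[dev_data->devid];

    ret = iommu_flush_dte(iommu, dev_data->devid);
    if (ret)
        return ret;

    /* With ATS on, the device's own IOTLB must be invalidated too. */
    if (dev_data->ats.enabled)
        ret = device_flush_iotlb(dev_data, 0, ~0UL);

    return ret;
}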
1208 struct iommu_dev_data *dev_data; in __domain_flush_pages() local
1225 list_for_each_entry(dev_data, &domain->dev_list, list) { in __domain_flush_pages()
1227 if (!dev_data->ats.enabled) in __domain_flush_pages()
1230 ret |= device_flush_iotlb(dev_data, address, size); in __domain_flush_pages()
1276 struct iommu_dev_data *dev_data; in domain_flush_devices() local
1278 list_for_each_entry(dev_data, &domain->dev_list, list) in domain_flush_devices()
1279 device_flush_dte(dev_data); in domain_flush_devices()
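Domain-wide flushes just walk domain->dev_list: __domain_flush_pages() skips devices without ATS (their translations live only in the IOMMU's TLB, flushed separately), while domain_flush_devices() flushes every attached device's DTE. The loop bodies as visible in the matches:

    /* __domain_flush_pages(): device-side IOTLBs, ATS devices only */
    list_for_each_entry(dev_data, &domain->dev_list, list) {
        if (!dev_data->ats.enabled)
            continue;

        ret |= device_flush_iotlb(dev_data, address, size);
    }

    /* domain_flush_devices(): device table entries for everyone */
    list_for_each_entry(dev_data, &domain->dev_list, list)
        device_flush_dte(dev_data);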
2111 static void do_attach(struct iommu_dev_data *dev_data, in do_attach() argument
2117 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_attach()
2118 ats = dev_data->ats.enabled; in do_attach()
2121 dev_data->domain = domain; in do_attach()
2122 list_add(&dev_data->list, &domain->dev_list); in do_attach()
2123 set_dte_entry(dev_data->devid, domain, ats); in do_attach()
2130 device_flush_dte(dev_data); in do_attach()
2133 static void do_detach(struct iommu_dev_data *dev_data) in do_detach() argument
2137 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_detach()
2140 dev_data->domain->dev_iommu[iommu->index] -= 1; in do_detach()
2141 dev_data->domain->dev_cnt -= 1; in do_detach()
2144 dev_data->domain = NULL; in do_detach()
2145 list_del(&dev_data->list); in do_detach()
2146 clear_dte_entry(dev_data->devid); in do_detach()
2149 device_flush_dte(dev_data); in do_detach()
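do_attach() and do_detach() are the unsynchronized core: link or unlink the dev_data on domain->dev_list, write or clear the DTE, and flush it. The per-IOMMU reference counts decremented at lines 2140-2141 imply matching increments in the elided part of do_attach() (lines 2124-2129); the sketch below assumes exactly that:

static void do_attach(struct iommu_dev_data *dev_data,
                      struct protection_domain *domain)
{
    struct amd_iommu *iommu;
    bool ats;

    iommu = amd_iommu_rlookup_table[dev_data->devid];
    ats   = dev_data->ats.enabled;

    /* Link the device into the domain and publish its DTE. */
    dev_data->domain = domain;
    list_add(&dev_data->list, &domain->dev_list);
    set_dte_entry(dev_data->devid, domain, ats);

    /* Assumed: increments mirroring the decrements in do_detach(). */
    domain->dev_iommu[iommu->index] += 1;
    domain->dev_cnt                 += 1;

    device_flush_dte(dev_data);
}

static void do_detach(struct iommu_dev_data *dev_data)
{
    struct amd_iommu *iommu;

    iommu = amd_iommu_rlookup_table[dev_data->devid];

    /* Drop the accounting, then unlink and clear the DTE. */
    dev_data->domain->dev_iommu[iommu->index] -= 1;
    dev_data->domain->dev_cnt                 -= 1;

    dev_data->domain = NULL;
    list_del(&dev_data->list);
    clear_dte_entry(dev_data->devid);

    device_flush_dte(dev_data);
}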
2156 static int __attach_device(struct iommu_dev_data *dev_data, in __attach_device() argument
2164 if (dev_data->alias_data != NULL) { in __attach_device()
2165 struct iommu_dev_data *alias_data = dev_data->alias_data; in __attach_device()
2173 if (dev_data->domain != NULL && in __attach_device()
2174 dev_data->domain != domain) in __attach_device()
2184 if (dev_data->domain == NULL) in __attach_device()
2185 do_attach(dev_data, domain); in __attach_device()
2187 atomic_inc(&dev_data->bind); in __attach_device()
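__attach_device() layers alias handling and a bind refcount on top of do_attach(): an aliased device first attaches its alias, the attach fails if either dev_data is already bound to a different domain, and atomic_inc(&...->bind) counts how many attach requests are stacked on the shared state (so the alias is only detached when the last user goes away). A sketch; the locking, error code, and the alias-side inc are assumptions, only the checks and calls mirror visible matches:

static int __attach_device(struct iommu_dev_data *dev_data,
                           struct protection_domain *domain)
{
    int ret = -EBUSY;    /* assumed error code */

    spin_lock(&domain->lock);    /* assumed */

    if (dev_data->alias_data != NULL) {
        struct iommu_dev_data *alias_data = dev_data->alias_data;

        /* Neither the alias nor the device may be bound elsewhere. */
        if (alias_data->domain != NULL &&
            alias_data->domain != domain)
            goto out_unlock;

        if (dev_data->domain != NULL &&
            dev_data->domain != domain)
            goto out_unlock;

        if (alias_data->domain == NULL)
            do_attach(alias_data, domain);

        atomic_inc(&alias_data->bind);    /* assumed, mirrors below */
    }

    if (dev_data->domain == NULL)
        do_attach(dev_data, domain);

    atomic_inc(&dev_data->bind);

    ret = 0;

out_unlock:
    spin_unlock(&domain->lock);
    return ret;
}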
2294 struct iommu_dev_data *dev_data; in attach_device() local
2298 dev_data = get_dev_data(dev); in attach_device()
2301 if (!dev_data->iommu_v2 || !dev_data->passthrough) in attach_device()
2307 dev_data->ats.enabled = true; in attach_device()
2308 dev_data->ats.qdep = pci_ats_queue_depth(pdev); in attach_device()
2309 dev_data->pri_tlp = pci_pri_tlp_required(pdev); in attach_device()
2312 dev_data->ats.enabled = true; in attach_device()
2313 dev_data->ats.qdep = pci_ats_queue_depth(pdev); in attach_device()
2317 ret = __attach_device(dev_data, domain); in attach_device()
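attach_device() is the locked wrapper that also turns on PCI features before calling __attach_device(). The duplicated ats.enabled/ats.qdep assignments at lines 2307-2308 and 2312-2313 are two branches: an IOMMUv2 path that additionally needs PRI (hence pri_tlp at line 2309) and a plain-ATS path. A guess at the shape of that block; the branch condition, the PRI request budget, and the enable sequence are all elided in the listing and assumed here:

static void attach_device_enable_features(struct pci_dev *pdev,
                                          struct iommu_dev_data *dev_data,
                                          bool domain_is_v2)
{
    if (domain_is_v2) {
        /* IOMMUv2 path: PRI as well as ATS; 32 outstanding
         * page requests is an assumed budget. */
        if (pci_enable_pri(pdev, 32) == 0 &&
            pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
            dev_data->ats.enabled = true;
            dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
            dev_data->pri_tlp     = pci_pri_tlp_required(pdev);
        }
    } else if (pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
        dev_data->ats.enabled = true;
        dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
    }
}

The iommu_v2/passthrough test at line 2301 gates the v2 branch: a device may only join an IOMMUv2 domain if it advertises v2 support and currently runs in passthrough mode.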
2333 static void __detach_device(struct iommu_dev_data *dev_data) in __detach_device() argument
2338 BUG_ON(!dev_data->domain); in __detach_device()
2340 domain = dev_data->domain; in __detach_device()
2344 if (dev_data->alias_data != NULL) { in __detach_device()
2345 struct iommu_dev_data *alias_data = dev_data->alias_data; in __detach_device()
2351 if (atomic_dec_and_test(&dev_data->bind)) in __detach_device()
2352 do_detach(dev_data); in __detach_device()
2361 if (dev_data->passthrough && in __detach_device()
2362 (dev_data->domain == NULL && domain != pt_domain)) in __detach_device()
2363 __attach_device(dev_data, pt_domain); in __detach_device()
2372 struct iommu_dev_data *dev_data; in detach_device() local
2375 dev_data = get_dev_data(dev); in detach_device()
2376 domain = dev_data->domain; in detach_device()
2380 __detach_device(dev_data); in detach_device()
2385 else if (dev_data->ats.enabled) in detach_device()
2388 dev_data->ats.enabled = false; in detach_device()
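Teardown is the mirror image: __detach_device() drops a bind reference (on the alias first, when there is one) and only calls do_detach() when the count reaches zero; lines 2361-2363 then re-attach passthrough-capable devices to pt_domain, so a device is never left behind a cleared DTE. detach_device() additionally disables ATS (lines 2385-2388). The visible core, with locking and the alias-side teardown assumed:

static void __detach_device(struct iommu_dev_data *dev_data)
{
    struct protection_domain *domain;

    BUG_ON(!dev_data->domain);

    domain = dev_data->domain;

    if (dev_data->alias_data != NULL) {
        struct iommu_dev_data *alias_data = dev_data->alias_data;

        /* Assumed: same dec-and-test applied to the alias. */
        if (atomic_dec_and_test(&alias_data->bind))
            do_detach(alias_data);
    }

    if (atomic_dec_and_test(&dev_data->bind))
        do_detach(dev_data);

    /* A passthrough device must not be left without a domain:
     * fall back to pt_domain. */
    if (dev_data->passthrough &&
        (dev_data->domain == NULL && domain != pt_domain))
        __attach_device(dev_data, pt_domain);
}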
2397 struct iommu_dev_data *dev_data; in domain_for_device() local
2401 dev_data = get_dev_data(dev); in domain_for_device()
2403 if (dev_data->domain) in domain_for_device()
2404 return dev_data->domain; in domain_for_device()
2406 if (dev_data->alias_data != NULL) { in domain_for_device()
2407 struct iommu_dev_data *alias_data = dev_data->alias_data; in domain_for_device()
2411 __attach_device(dev_data, alias_data->domain); in domain_for_device()
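domain_for_device() answers "which protection domain does this device use?": the device's own binding wins; otherwise, if its alias is already attached somewhere, the device is pulled into the alias's domain on the spot. A sketch with the locking elided:

static struct protection_domain *domain_for_device(struct device *dev)
{
    struct iommu_dev_data *dev_data = get_dev_data(dev);
    struct protection_domain *dom = NULL;

    if (dev_data->domain)
        return dev_data->domain;

    /* Not attached, but the alias might be: inherit its domain. */
    if (dev_data->alias_data != NULL) {
        struct iommu_dev_data *alias_data = dev_data->alias_data;

        if (alias_data->domain != NULL) {
            __attach_device(dev_data, alias_data->domain);
            dom = alias_data->domain;
        }
    }

    return dom;
}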
2425 struct iommu_dev_data *dev_data; in device_change_notifier() local
2436 dev_data = get_dev_data(dev); in device_change_notifier()
2445 if (dev_data->passthrough) in device_change_notifier()
2457 dev_data = get_dev_data(dev); in device_change_notifier()
2459 if (iommu_pass_through || dev_data->iommu_v2) { in device_change_notifier()
2460 dev_data->passthrough = true; in device_change_notifier()
2548 struct iommu_dev_data *dev_data; in update_device_table() local
2550 list_for_each_entry(dev_data, &domain->dev_list, list) in update_device_table()
2551 set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled); in update_device_table()
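update_device_table() runs after the domain's page tables change shape: every attached device's DTE is rewritten to point at the new table, preserving each device's ATS setting. The matches show the whole function:

static void update_device_table(struct protection_domain *domain)
{
    struct iommu_dev_data *dev_data;

    list_for_each_entry(dev_data, &domain->dev_list, list)
        set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled);
}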
3037 struct iommu_dev_data *dev_data; in prealloc_protection_domains() local
3048 dev_data = get_dev_data(&dev->dev); in prealloc_protection_domains()
3049 if (!amd_iommu_force_isolation && dev_data->iommu_v2) { in prealloc_protection_domains()
3052 dev_data->passthrough = true; in prealloc_protection_domains()
3088 struct iommu_dev_data *dev_data; in device_dma_ops_init() local
3101 dev_data = get_dev_data(&pdev->dev); in device_dma_ops_init()
3103 if (!dev_data->passthrough) in device_dma_ops_init()
3186 struct iommu_dev_data *dev_data, *next; in cleanup_domain() local
3191 list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) { in cleanup_domain()
3192 __detach_device(dev_data); in cleanup_domain()
3193 atomic_set(&dev_data->bind, 0); in cleanup_domain()
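cleanup_domain() force-detaches everything when a domain is torn down; resetting bind to 0 discards any stacked attach references, since the domain is going away regardless. Sketch, with the lock assumed:

static void cleanup_domain(struct protection_domain *domain)
{
    struct iommu_dev_data *dev_data, *next;
    unsigned long flags;

    write_lock_irqsave(&amd_iommu_devtable_lock, flags);    /* assumed */

    list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
        __detach_device(dev_data);
        atomic_set(&dev_data->bind, 0);    /* forget stacked binds */
    }

    write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}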
3306 struct iommu_dev_data *dev_data = dev->archdata.iommu; in amd_iommu_detach_device() local
3315 if (dev_data->domain != NULL) in amd_iommu_detach_device()
3329 struct iommu_dev_data *dev_data; in amd_iommu_attach_device() local
3336 dev_data = dev->archdata.iommu; in amd_iommu_attach_device()
3338 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_attach_device()
3342 if (dev_data->domain) in amd_iommu_attach_device()
3456 struct iommu_dev_data *dev_data; in amd_iommu_init_passthrough() local
3470 dev_data = get_dev_data(&dev->dev); in amd_iommu_init_passthrough()
3471 dev_data->passthrough = true; in amd_iommu_init_passthrough()
3573 struct iommu_dev_data *dev_data; in __flush_pasid() local
3599 list_for_each_entry(dev_data, &domain->dev_list, list) { in __flush_pasid()
3603 BUG_ON(!dev_data->ats.enabled); in __flush_pasid()
3605 qdep = dev_data->ats.qdep; in __flush_pasid()
3606 iommu = amd_iommu_rlookup_table[dev_data->devid]; in __flush_pasid()
3608 build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid, in __flush_pasid()
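In __flush_pasid(), the per-device loop only runs for devices with a device-side IOTLB, which is why the BUG_ON insists on ats.enabled; each device gets a PASID-tagged IOTLB invalidation sized by its queue depth. The likely loop body; the trailing arguments to build_inv_iotlb_pasid() and the submit call are assumptions:

    /* inside __flush_pasid(), after the IOMMU-side flush: */
    list_for_each_entry(dev_data, &domain->dev_list, list) {
        struct amd_iommu *iommu;
        struct iommu_cmd cmd;
        int qdep;

        /* A listed device without ATS has no IOTLB to flush;
         * the driver treats that as a bug. */
        BUG_ON(!dev_data->ats.enabled);

        qdep  = dev_data->ats.qdep;
        iommu = amd_iommu_rlookup_table[dev_data->devid];

        build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
                              qdep, address, size);    /* tail args assumed */

        ret = iommu_queue_command(iommu, &cmd);    /* submit: assumed */
        if (ret != 0)
            break;
    }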
3768 struct iommu_dev_data *dev_data; in amd_iommu_complete_ppr() local
3774 dev_data = get_dev_data(&pdev->dev); in amd_iommu_complete_ppr()
3775 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_complete_ppr()
3777 build_complete_ppr(&cmd, dev_data->devid, pasid, status, in amd_iommu_complete_ppr()
3778 tag, dev_data->pri_tlp); in amd_iommu_complete_ppr()
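amd_iommu_complete_ppr() routes a peripheral-page-request completion back through the right IOMMU; dev_data->pri_tlp, captured at attach time (line 2309), tells the hardware whether the completion must be sent as a PRI TLP. A sketch with assumed parameter types and submit call:

int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
                           int status, int tag)
{
    struct iommu_dev_data *dev_data;
    struct amd_iommu *iommu;
    struct iommu_cmd cmd;

    dev_data = get_dev_data(&pdev->dev);
    iommu    = amd_iommu_rlookup_table[dev_data->devid];

    build_complete_ppr(&cmd, dev_data->devid, pasid, status,
                       tag, dev_data->pri_tlp);

    return iommu_queue_command(iommu, &cmd);    /* submit: assumed */
}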
3802 struct iommu_dev_data *dev_data; in amd_iommu_enable_device_erratum() local
3807 dev_data = get_dev_data(&pdev->dev); in amd_iommu_enable_device_erratum()
3808 dev_data->errata |= (1 << erratum); in amd_iommu_enable_device_erratum()
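And the setter that pdev_pri_erratum() tests against, visibly just an OR into the same per-device bitmask (signature assumed):

void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
{
    struct iommu_dev_data *dev_data;

    dev_data = get_dev_data(&pdev->dev);
    dev_data->errata |= (1 << erratum);
}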