Lines Matching refs:dev_data
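
All hits below are in the AMD IOMMU driver and trace the life cycle of struct iommu_dev_data: allocation and registration on a global llist, lookup by devid, the per-device state it caches (ATS, IOMMUv2, PRI, vAPIC, deferred attach), attach/detach to a protection domain, and the flush paths that walk a domain's device list. The sketches interleaved below reconstruct the surrounding functions from the matched lines only; elided lines are filled in by assumption and marked as such, so treat them as plausible reconstructions, not the authoritative source.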

143 struct iommu_dev_data *dev_data; in alloc_dev_data() local
145 dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL); in alloc_dev_data()
146 if (!dev_data) in alloc_dev_data()
149 spin_lock_init(&dev_data->lock); in alloc_dev_data()
150 dev_data->devid = devid; in alloc_dev_data()
151 ratelimit_default_init(&dev_data->rs); in alloc_dev_data()
153 llist_add(&dev_data->dev_data_list, &dev_data_list); in alloc_dev_data()
154 return dev_data; in alloc_dev_data()
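
Reading the matched lines 143-154 in order reconstructs alloc_dev_data() almost completely. A sketch, with the elided error-return line filled in by assumption from standard kernel patterns:

static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
        struct iommu_dev_data *dev_data;

        dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
        if (!dev_data)
                return NULL;    /* assumed body of the elided error path */

        spin_lock_init(&dev_data->lock);
        dev_data->devid = devid;
        ratelimit_default_init(&dev_data->rs);

        /* publish on the global lock-free list of per-device data */
        llist_add(&dev_data->dev_data_list, &dev_data_list);
        return dev_data;
}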
159 struct iommu_dev_data *dev_data; in search_dev_data() local
166 llist_for_each_entry(dev_data, node, dev_data_list) { in search_dev_data()
167 if (dev_data->devid == devid) in search_dev_data()
168 return dev_data; in search_dev_data()
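
Lines 159-168 are the lookup side of the same llist: a linear walk keyed by devid. A sketch, assuming an llist_empty() fast path on the lines the match elides:

static struct iommu_dev_data *search_dev_data(u16 devid)
{
        struct iommu_dev_data *dev_data;
        struct llist_node *node;

        if (llist_empty(&dev_data_list))        /* assumed fast path */
                return NULL;

        node = dev_data_list.first;
        llist_for_each_entry(dev_data, node, dev_data_list) {
                if (dev_data->devid == devid)
                        return dev_data;
        }

        return NULL;
}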
230 struct iommu_dev_data *dev_data; in find_dev_data() local
233 dev_data = search_dev_data(devid); in find_dev_data()
235 if (dev_data == NULL) { in find_dev_data()
236 dev_data = alloc_dev_data(devid); in find_dev_data()
237 if (!dev_data) in find_dev_data()
241 dev_data->defer_attach = true; in find_dev_data()
244 return dev_data; in find_dev_data()
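
find_dev_data() (lines 230-244) composes the two: look up, allocate on miss, and mark the device for deferred attach. Line 241 sits inside a condition the match does not show; a translation_pre_enabled() guard (kdump handover with the IOMMU already translating) is assumed here:

static struct iommu_dev_data *find_dev_data(u16 devid)
{
        struct iommu_dev_data *dev_data;
        struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

        dev_data = search_dev_data(devid);

        if (dev_data == NULL) {
                dev_data = alloc_dev_data(devid);
                if (!dev_data)
                        return NULL;

                /* assumed guard: translation pre-enabled (e.g. kdump) */
                if (translation_pre_enabled(iommu))
                        dev_data->defer_attach = true;
        }

        return dev_data;
}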
319 struct iommu_dev_data *dev_data; in iommu_init_device() local
329 dev_data = find_dev_data(devid); in iommu_init_device()
330 if (!dev_data) in iommu_init_device()
333 dev_data->pdev = setup_aliases(dev); in iommu_init_device()
345 iommu = amd_iommu_rlookup_table[dev_data->devid]; in iommu_init_device()
346 dev_data->iommu_v2 = iommu->is_iommu_v2; in iommu_init_device()
349 dev_iommu_priv_set(dev, dev_data); in iommu_init_device()
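
iommu_init_device() (lines 319-349) wires dev_data into the generic IOMMU core via dev_iommu_priv_set(). The gap between lines 333 and 345 hides the IOMMUv2 capability check; a sketch with that check and the devid lookup assumed:

static int iommu_init_device(struct device *dev)
{
        struct iommu_dev_data *dev_data;
        int devid;

        devid = get_device_id(dev);     /* assumed helper */
        if (devid < 0)
                return devid;

        dev_data = find_dev_data(devid);
        if (!dev_data)
                return -ENOMEM;

        dev_data->pdev = setup_aliases(dev);

        /* assumed guard: only IOMMUv2-capable PCI devices reach here */
        if (dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
                struct amd_iommu *iommu;

                iommu = amd_iommu_rlookup_table[dev_data->devid];
                dev_data->iommu_v2 = iommu->is_iommu_v2;
        }

        dev_iommu_priv_set(dev, dev_data);
        return 0;
}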
370 struct iommu_dev_data *dev_data; in amd_iommu_uninit_device() local
372 dev_data = dev_iommu_priv_get(dev); in amd_iommu_uninit_device()
373 if (!dev_data) in amd_iommu_uninit_device()
376 if (dev_data->domain) in amd_iommu_uninit_device()
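
The uninit path (lines 370-376) only detaches; dev_data itself is never freed, which is consistent with the lock-free llist publication above (entries cannot be removed safely and are reused on re-plug). Sketch, with a detach_device() call assumed as the body of the line 376 condition:

static void amd_iommu_uninit_device(struct device *dev)
{
        struct iommu_dev_data *dev_data;

        dev_data = dev_iommu_priv_get(dev);
        if (!dev_data)
                return;

        if (dev_data->domain)
                detach_device(dev);     /* assumed */

        /* dev_data is kept for reuse if the device is re-plugged */
}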
413 struct iommu_dev_data *dev_data = NULL; in amd_iommu_report_rmp_hw_error() local
426 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_rmp_hw_error()
428 if (dev_data) { in amd_iommu_report_rmp_hw_error()
429 if (__ratelimit(&dev_data->rs)) { in amd_iommu_report_rmp_hw_error()
445 struct iommu_dev_data *dev_data = NULL; in amd_iommu_report_rmp_fault() local
459 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_rmp_fault()
461 if (dev_data) { in amd_iommu_report_rmp_fault()
462 if (__ratelimit(&dev_data->rs)) { in amd_iommu_report_rmp_fault()
479 struct iommu_dev_data *dev_data = NULL; in amd_iommu_report_page_fault() local
485 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_page_fault()
487 if (dev_data) { in amd_iommu_report_page_fault()
488 if (__ratelimit(&dev_data->rs)) { in amd_iommu_report_page_fault()
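
Lines 413-488 show the same pattern three times in the event-log reporters: resolve the struct pci_dev from the event's devid, fetch its dev_data, and use the per-device ratelimit state (dev_data->rs, initialized in alloc_dev_data()) so one noisy device cannot drown out reports from others. A condensed sketch of the shared shape; the function name report_ratelimited is hypothetical, and the devid-to-pdev resolution and message format are assumptions:

/* hypothetical condensation of the three reporters' common shape */
static void report_ratelimited(u16 devid, u64 address, int flags)
{
        struct iommu_dev_data *dev_data = NULL;
        struct pci_dev *pdev;

        /* assumed resolution: segment 0, bus/devfn packed in devid */
        pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
                                           devid & 0xff);
        if (pdev)
                dev_data = dev_iommu_priv_get(&pdev->dev);

        if (dev_data) {
                if (__ratelimit(&dev_data->rs))
                        pci_err(pdev, "Event logged [address=0x%llx flags=0x%04x]\n",
                                address, flags);
        } else {
                /* no per-device state: fall back to a global ratelimit */
                pr_err_ratelimited("Event logged [device=%04x address=0x%llx]\n",
                                   devid, address);
        }

        pci_dev_put(pdev);      /* safe on NULL */
}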
1186 static int device_flush_iotlb(struct iommu_dev_data *dev_data, in device_flush_iotlb() argument
1193 qdep = dev_data->ats.qdep; in device_flush_iotlb()
1194 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_iotlb()
1196 build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size); in device_flush_iotlb()
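
device_flush_iotlb() (lines 1186-1196) is the remote end of ATS: it builds an INVALIDATE_IOTLB_PAGES command against the device's cached invalidation queue depth. Sketch; the final iommu_queue_command() submission is assumed:

static int device_flush_iotlb(struct iommu_dev_data *dev_data,
                              u64 address, size_t size)
{
        struct amd_iommu *iommu;
        struct iommu_cmd cmd;
        int qdep;

        qdep  = dev_data->ats.qdep;
        iommu = amd_iommu_rlookup_table[dev_data->devid];

        build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);

        return iommu_queue_command(iommu, &cmd);        /* assumed */
}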
1211 static int device_flush_dte(struct iommu_dev_data *dev_data) in device_flush_dte() argument
1217 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_dte()
1219 if (dev_data->pdev) in device_flush_dte()
1220 ret = pci_for_each_dma_alias(dev_data->pdev, in device_flush_dte()
1223 ret = iommu_flush_dte(iommu, dev_data->devid); in device_flush_dte()
1227 alias = amd_iommu_alias_table[dev_data->devid]; in device_flush_dte()
1228 if (alias != dev_data->devid) { in device_flush_dte()
1234 if (dev_data->ats.enabled) in device_flush_dte()
1235 ret = device_flush_iotlb(dev_data, 0, ~0UL); in device_flush_dte()
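
device_flush_dte() (lines 1211-1235) must invalidate the device table entry for the device itself, for every PCI DMA alias, and for the legacy alias-table entry; if ATS is enabled, the device-side IOTLB is flushed wholesale (address 0, size ~0UL). Sketch; the alias callback name and the error checks between matched lines are assumptions:

static int device_flush_dte(struct iommu_dev_data *dev_data)
{
        struct amd_iommu *iommu;
        u16 alias;
        int ret;

        iommu = amd_iommu_rlookup_table[dev_data->devid];

        if (dev_data->pdev)
                ret = pci_for_each_dma_alias(dev_data->pdev,
                                             device_flush_dte_alias, iommu);
        else
                ret = iommu_flush_dte(iommu, dev_data->devid);
        if (ret)
                return ret;     /* assumed */

        alias = amd_iommu_alias_table[dev_data->devid];
        if (alias != dev_data->devid) {
                ret = iommu_flush_dte(iommu, alias);    /* assumed */
                if (ret)
                        return ret;
        }

        if (dev_data->ats.enabled)
                /* invalidate the whole IOTLB on the device side */
                ret = device_flush_iotlb(dev_data, 0, ~0UL);

        return ret;
}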
1248 struct iommu_dev_data *dev_data; in __domain_flush_pages() local
1265 list_for_each_entry(dev_data, &domain->dev_list, list) { in __domain_flush_pages()
1267 if (!dev_data->ats.enabled) in __domain_flush_pages()
1270 ret |= device_flush_iotlb(dev_data, address, size); in __domain_flush_pages()
1361 struct iommu_dev_data *dev_data; in domain_flush_devices() local
1363 list_for_each_entry(dev_data, &domain->dev_list, list) in domain_flush_devices()
1364 device_flush_dte(dev_data); in domain_flush_devices()
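
The two domain-wide walkers (lines 1248-1270 and 1361-1364) iterate domain->dev_list, which do_attach()/do_detach() below keep in sync. Devices without ATS are skipped in the page flush because only ATS devices cache translations locally. Condensed sketch; the per-IOMMU flush before the loop and the WARN_ON error policy are assumptions:

static void __domain_flush_pages(struct protection_domain *domain,
                                 u64 address, size_t size, int pde)
{
        struct iommu_dev_data *dev_data;
        int ret = 0;

        /* ... assumed: INVALIDATE_IOMMU_PAGES queued per IOMMU first ... */

        list_for_each_entry(dev_data, &domain->dev_list, list) {
                if (!dev_data->ats.enabled)
                        continue;       /* no device-side IOTLB to flush */

                ret |= device_flush_iotlb(dev_data, address, size);
        }

        WARN_ON(ret);   /* assumed: errors warned, not propagated */
}

static void domain_flush_devices(struct protection_domain *domain)
{
        struct iommu_dev_data *dev_data;

        list_for_each_entry(dev_data, &domain->dev_list, list)
                device_flush_dte(dev_data);
}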
1523 static void do_attach(struct iommu_dev_data *dev_data, in do_attach() argument
1529 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_attach()
1530 ats = dev_data->ats.enabled; in do_attach()
1533 dev_data->domain = domain; in do_attach()
1534 list_add(&dev_data->list, &domain->dev_list); in do_attach()
1541 set_dte_entry(dev_data->devid, domain, in do_attach()
1542 ats, dev_data->iommu_v2); in do_attach()
1543 clone_aliases(dev_data->pdev); in do_attach()
1545 device_flush_dte(dev_data); in do_attach()
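
do_attach() (lines 1523-1545) is the low-level attach: link dev_data into the domain's list, write the device table entry with the cached ATS/IOMMUv2 flags, propagate it to aliases, then flush the stale DTE out of the IOMMU's cache. The reference counting between lines 1534 and 1541 is assumed from context:

static void do_attach(struct iommu_dev_data *dev_data,
                      struct protection_domain *domain)
{
        struct amd_iommu *iommu;
        bool ats;

        iommu = amd_iommu_rlookup_table[dev_data->devid];
        ats   = dev_data->ats.enabled;

        /* update data structures */
        dev_data->domain = domain;
        list_add(&dev_data->list, &domain->dev_list);

        /* assumed: per-IOMMU and per-domain refcounts bumped here */
        domain->dev_iommu[iommu->index] += 1;
        domain->dev_cnt                 += 1;

        /* update the device table and make the hardware see it */
        set_dte_entry(dev_data->devid, domain,
                      ats, dev_data->iommu_v2);
        clone_aliases(dev_data->pdev);

        device_flush_dte(dev_data);
}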
1548 static void do_detach(struct iommu_dev_data *dev_data) in do_detach() argument
1550 struct protection_domain *domain = dev_data->domain; in do_detach()
1553 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_detach()
1556 dev_data->domain = NULL; in do_detach()
1557 list_del(&dev_data->list); in do_detach()
1558 clear_dte_entry(dev_data->devid); in do_detach()
1559 clone_aliases(dev_data->pdev); in do_detach()
1562 device_flush_dte(dev_data); in do_detach()
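
do_detach() (lines 1548-1562) mirrors it exactly, which is why clone_aliases() appears in both paths: alias DTEs must track the real device whether a domain is being installed or cleared. Sketch; the trailing TLB flush and refcount drops are assumptions:

static void do_detach(struct iommu_dev_data *dev_data)
{
        struct protection_domain *domain = dev_data->domain;
        struct amd_iommu *iommu;

        iommu = amd_iommu_rlookup_table[dev_data->devid];

        /* unlink and clear the device table entry */
        dev_data->domain = NULL;
        list_del(&dev_data->list);
        clear_dte_entry(dev_data->devid);
        clone_aliases(dev_data->pdev);

        /* flush the stale DTE out of the IOMMU */
        device_flush_dte(dev_data);

        /* assumed: domain TLB flush and refcount drops follow */
}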
1624 struct iommu_dev_data *dev_data; in attach_device() local
1631 dev_data = dev_iommu_priv_get(dev); in attach_device()
1633 spin_lock(&dev_data->lock); in attach_device()
1636 if (dev_data->domain != NULL) in attach_device()
1650 if (dev_data->iommu_v2) { in attach_device()
1654 dev_data->ats.enabled = true; in attach_device()
1655 dev_data->ats.qdep = pci_ats_queue_depth(pdev); in attach_device()
1656 dev_data->pri_tlp = pci_prg_resp_pasid_required(pdev); in attach_device()
1660 dev_data->ats.enabled = true; in attach_device()
1661 dev_data->ats.qdep = pci_ats_queue_depth(pdev); in attach_device()
1667 do_attach(dev_data, domain); in attach_device()
1679 spin_unlock(&dev_data->lock); in attach_device()
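
attach_device() (lines 1624-1679) is where dev_data->lock earns its keep: it serializes the busy check on dev_data->domain against concurrent attach/detach, and it is also where ATS and, for IOMMUv2-capable devices, PRI/PASID state is enabled and cached in dev_data before do_attach(). A trimmed sketch; the outer domain lock, the -EBUSY convention, and the IOMMUv2 enable call are assumptions inferred from the visible structure:

static int attach_device(struct device *dev,
                         struct protection_domain *domain)
{
        struct iommu_dev_data *dev_data;
        struct pci_dev *pdev;
        int ret;

        dev_data = dev_iommu_priv_get(dev);

        spin_lock(&dev_data->lock);

        ret = -EBUSY;   /* assumed */
        if (dev_data->domain != NULL)
                goto out;       /* already attached elsewhere */

        if (!dev_is_pci(dev))
                goto skip_ats;

        pdev = to_pci_dev(dev);
        if (domain->flags & PD_IOMMUV2_MASK) {
                if (dev_data->iommu_v2) {
                        /* assumed: pdev_iommuv2_enable() turns on ATS+PRI+PASID */
                        dev_data->ats.enabled = true;
                        dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
                        dev_data->pri_tlp     = pci_prg_resp_pasid_required(pdev);
                }
        } else if (pci_enable_ats(pdev, PAGE_SHIFT) == 0) {    /* assumed */
                dev_data->ats.enabled = true;
                dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
        }

skip_ats:
        ret = 0;
        do_attach(dev_data, domain);

out:
        spin_unlock(&dev_data->lock);
        return ret;
}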
1692 struct iommu_dev_data *dev_data; in detach_device() local
1695 dev_data = dev_iommu_priv_get(dev); in detach_device()
1696 domain = dev_data->domain; in detach_device()
1700 spin_lock(&dev_data->lock); in detach_device()
1708 if (WARN_ON(!dev_data->domain)) in detach_device()
1711 do_detach(dev_data); in detach_device()
1716 if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2) in detach_device()
1718 else if (dev_data->ats.enabled) in detach_device()
1721 dev_data->ats.enabled = false; in detach_device()
1724 spin_unlock(&dev_data->lock); in detach_device()
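
detach_device() (lines 1692-1724) tears the same state down in reverse, with a WARN_ON guard because the generic IOMMU code can race a second detach through alias handling; ats.enabled is cleared only after the hardware-side disable. Sketch; the disable calls on the elided lines 1717-1719 are assumed to be pdev_iommuv2_disable()/pci_disable_ats():

static void detach_device(struct device *dev)
{
        struct protection_domain *domain;
        struct iommu_dev_data *dev_data;

        dev_data = dev_iommu_priv_get(dev);
        domain   = dev_data->domain;

        spin_lock(&dev_data->lock);

        /* may already be detached via generic alias handling */
        if (WARN_ON(!dev_data->domain))
                goto out;

        do_detach(dev_data);

        if (!dev_is_pci(dev))
                goto out;       /* assumed */

        if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
                pdev_iommuv2_disable(to_pci_dev(dev));  /* assumed name */
        else if (dev_data->ats.enabled)
                pci_disable_ats(to_pci_dev(dev));       /* assumed */

        dev_data->ats.enabled = false;

out:
        spin_unlock(&dev_data->lock);
}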
1797 struct iommu_dev_data *dev_data; in update_device_table() local
1799 list_for_each_entry(dev_data, &domain->dev_list, list) { in update_device_table()
1800 set_dte_entry(dev_data->devid, domain, in update_device_table()
1801 dev_data->ats.enabled, dev_data->iommu_v2); in update_device_table()
1802 clone_aliases(dev_data->pdev); in update_device_table()
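
update_device_table() (lines 1797-1802) rewrites every DTE in a domain, reusing the ATS/IOMMUv2 flags cached in each dev_data, so a domain-wide change (for example a new page-table root) does not need to requery PCI capabilities. The matched lines give the whole body:

static void update_device_table(struct protection_domain *domain)
{
        struct iommu_dev_data *dev_data;

        list_for_each_entry(dev_data, &domain->dev_list, list) {
                set_dte_entry(dev_data->devid, domain,
                              dev_data->ats.enabled, dev_data->iommu_v2);
                clone_aliases(dev_data->pdev);
        }
}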
1987 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); in amd_iommu_detach_device() local
1994 if (dev_data->domain != NULL) in amd_iommu_detach_device()
2004 dev_data->use_vapic = 0; in amd_iommu_detach_device()
2014 struct iommu_dev_data *dev_data; in amd_iommu_attach_device() local
2021 dev_data = dev_iommu_priv_get(dev); in amd_iommu_attach_device()
2022 dev_data->defer_attach = false; in amd_iommu_attach_device()
2024 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_attach_device()
2028 if (dev_data->domain) in amd_iommu_attach_device()
2036 dev_data->use_vapic = 1; in amd_iommu_attach_device()
2038 dev_data->use_vapic = 0; in amd_iommu_attach_device()
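
In the iommu_ops callbacks (lines 1987-2038), dev_data carries two more flags: defer_attach is cleared once the core performs a real attach, and use_vapic records whether guest-mode interrupt remapping applies, which amd_ir_set_vcpu_affinity() checks at the bottom of this listing. A sketch of the attach callback's flag handling; to_pdomain() and the AMD_IOMMU_GUEST_IR_VAPIC() mode test are assumptions:

static int amd_iommu_attach_device(struct iommu_domain *dom,
                                   struct device *dev)
{
        struct protection_domain *domain = to_pdomain(dom);     /* assumed */
        struct iommu_dev_data *dev_data;
        struct amd_iommu *iommu;
        int ret;

        dev_data = dev_iommu_priv_get(dev);
        dev_data->defer_attach = false;         /* real attach from now on */

        iommu = amd_iommu_rlookup_table[dev_data->devid];
        if (!iommu)
                return -EINVAL;         /* assumed */

        if (dev_data->domain)
                detach_device(dev);

        ret = attach_device(dev, domain);

        /* assumed guard: only with guest-mode interrupt remapping */
        if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
                if (dom->type == IOMMU_DOMAIN_UNMANAGED)
                        dev_data->use_vapic = 1;
                else
                        dev_data->use_vapic = 0;
        }

        return ret;
}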
2201 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); in amd_iommu_is_attach_deferred() local
2203 return dev_data->defer_attach; in amd_iommu_is_attach_deferred()
2232 struct iommu_dev_data *dev_data; in amd_iommu_def_domain_type() local
2234 dev_data = dev_iommu_priv_get(dev); in amd_iommu_def_domain_type()
2235 if (!dev_data) in amd_iommu_def_domain_type()
2243 if (!mem_encrypt_active() && dev_data->iommu_v2) in amd_iommu_def_domain_type()
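
amd_iommu_def_domain_type() (lines 2232-2243) is the policy hook behind the iommu_v2 flag cached at init time: IOMMUv2-capable devices default to an identity domain so PASID-based paging can work, except when memory encryption is active, since such devices may lack the encryption bit in their DMA mask. Sketch; the 0 return meaning "let the core decide" is an assumption:

static int amd_iommu_def_domain_type(struct device *dev)
{
        struct iommu_dev_data *dev_data;

        dev_data = dev_iommu_priv_get(dev);
        if (!dev_data)
                return 0;       /* assumed: defer to the core's default */

        if (!mem_encrypt_active() && dev_data->iommu_v2)
                return IOMMU_DOMAIN_IDENTITY;

        return 0;
}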
2355 struct iommu_dev_data *dev_data; in __flush_pasid() local
2381 list_for_each_entry(dev_data, &domain->dev_list, list) { in __flush_pasid()
2389 if (!dev_data->ats.enabled) in __flush_pasid()
2392 qdep = dev_data->ats.qdep; in __flush_pasid()
2393 iommu = amd_iommu_rlookup_table[dev_data->devid]; in __flush_pasid()
2395 build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid, in __flush_pasid()
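
__flush_pasid() (lines 2355-2395) repeats the ATS walk from __domain_flush_pages(), but with a PASID-qualified invalidation; non-ATS devices in the domain are skipped because they have no device-side IOTLB. Loop-body excerpt (cmd, pasid, address, size, and ret come from the enclosing function); the queueing call is assumed:

        list_for_each_entry(dev_data, &domain->dev_list, list) {
                struct amd_iommu *iommu;
                int qdep;

                /* non-IOMMUv2 devices can share the domain; skip them */
                if (!dev_data->ats.enabled)
                        continue;

                qdep  = dev_data->ats.qdep;
                iommu = amd_iommu_rlookup_table[dev_data->devid];

                build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
                                      qdep, address, size);

                ret = iommu_queue_command(iommu, &cmd); /* assumed */
                if (ret != 0)
                        goto out;
        }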
2551 struct iommu_dev_data *dev_data; in amd_iommu_complete_ppr() local
2555 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_complete_ppr()
2556 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_complete_ppr()
2558 build_complete_ppr(&cmd, dev_data->devid, pasid, status, in amd_iommu_complete_ppr()
2559 tag, dev_data->pri_tlp); in amd_iommu_complete_ppr()
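
amd_iommu_complete_ppr() (lines 2551-2559) shows why pri_tlp was captured in attach_device(): the COMPLETE_PPR response must echo whether the device requires a PASID in page-request responses. Sketch; the return via iommu_queue_command() is assumed:

int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
                           int status, int tag)
{
        struct iommu_dev_data *dev_data;
        struct amd_iommu *iommu;
        struct iommu_cmd cmd;

        dev_data = dev_iommu_priv_get(&pdev->dev);
        iommu    = amd_iommu_rlookup_table[dev_data->devid];

        build_complete_ppr(&cmd, dev_data->devid, pasid, status,
                           tag, dev_data->pri_tlp);

        return iommu_queue_command(iommu, &cmd);        /* assumed */
}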
3377 struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid); in amd_ir_set_vcpu_affinity() local
3383 if (!dev_data || !dev_data->use_vapic) in amd_ir_set_vcpu_affinity()