Lines Matching refs:domain

138 	return container_of(dom, struct protection_domain, domain);  in to_pdomain()
376 if (dev_data->domain) in amd_iommu_uninit_device()
1245 static void __domain_flush_pages(struct protection_domain *domain, in __domain_flush_pages() argument
1252 build_inv_iommu_pages(&cmd, address, size, domain->id, pde); in __domain_flush_pages()
1255 if (!domain->dev_iommu[i]) in __domain_flush_pages()
1265 list_for_each_entry(dev_data, &domain->dev_list, list) { in __domain_flush_pages()
1276 static void domain_flush_pages(struct protection_domain *domain, in domain_flush_pages() argument
1280 __domain_flush_pages(domain, address, size, pde); in domain_flush_pages()
1313 __domain_flush_pages(domain, address, flush_size, pde); in domain_flush_pages()
1320 void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain) in amd_iommu_domain_flush_tlb_pde() argument
1322 domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1); in amd_iommu_domain_flush_tlb_pde()
1325 void amd_iommu_domain_flush_complete(struct protection_domain *domain) in amd_iommu_domain_flush_complete() argument
1330 if (domain && !domain->dev_iommu[i]) in amd_iommu_domain_flush_complete()
1342 static void domain_flush_np_cache(struct protection_domain *domain, in domain_flush_np_cache() argument
1348 spin_lock_irqsave(&domain->lock, flags); in domain_flush_np_cache()
1349 domain_flush_pages(domain, iova, size, 1); in domain_flush_np_cache()
1350 amd_iommu_domain_flush_complete(domain); in domain_flush_np_cache()
1351 spin_unlock_irqrestore(&domain->lock, flags); in domain_flush_np_cache()
1359 static void domain_flush_devices(struct protection_domain *domain) in domain_flush_devices() argument
1363 list_for_each_entry(dev_data, &domain->dev_list, list) in domain_flush_devices()
1431 static void free_gcr3_table(struct protection_domain *domain) in free_gcr3_table() argument
1433 if (domain->glx == 2) in free_gcr3_table()
1434 free_gcr3_tbl_level2(domain->gcr3_tbl); in free_gcr3_table()
1435 else if (domain->glx == 1) in free_gcr3_table()
1436 free_gcr3_tbl_level1(domain->gcr3_tbl); in free_gcr3_table()
1438 BUG_ON(domain->glx != 0); in free_gcr3_table()
1440 free_page((unsigned long)domain->gcr3_tbl); in free_gcr3_table()
1443 static void set_dte_entry(u16 devid, struct protection_domain *domain, in set_dte_entry() argument
1450 if (domain->iop.mode != PAGE_MODE_NONE) in set_dte_entry()
1451 pte_root = iommu_virt_to_phys(domain->iop.root); in set_dte_entry()
1453 pte_root |= (domain->iop.mode & DEV_ENTRY_MODE_MASK) in set_dte_entry()
1469 if (domain->flags & PD_IOMMUV2_MASK) { in set_dte_entry()
1470 u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl); in set_dte_entry()
1471 u64 glx = domain->glx; in set_dte_entry()
1496 flags |= domain->id; in set_dte_entry()
1524 struct protection_domain *domain) in do_attach() argument
1533 dev_data->domain = domain; in do_attach()
1534 list_add(&dev_data->list, &domain->dev_list); in do_attach()
1537 domain->dev_iommu[iommu->index] += 1; in do_attach()
1538 domain->dev_cnt += 1; in do_attach()
1541 set_dte_entry(dev_data->devid, domain, in do_attach()
1550 struct protection_domain *domain = dev_data->domain; in do_detach() local
1556 dev_data->domain = NULL; in do_detach()
1565 amd_iommu_domain_flush_tlb_pde(domain); in do_detach()
1568 amd_iommu_domain_flush_complete(domain); in do_detach()
1571 domain->dev_iommu[iommu->index] -= 1; in do_detach()
1572 domain->dev_cnt -= 1; in do_detach()
1622 struct protection_domain *domain) in attach_device() argument
1629 spin_lock_irqsave(&domain->lock, flags); in attach_device()
1636 if (dev_data->domain != NULL) in attach_device()
1643 if (domain->flags & PD_IOMMUV2_MASK) { in attach_device()
1667 do_attach(dev_data, domain); in attach_device()
1674 amd_iommu_domain_flush_tlb_pde(domain); in attach_device()
1676 amd_iommu_domain_flush_complete(domain); in attach_device()
1681 spin_unlock_irqrestore(&domain->lock, flags); in attach_device()
1691 struct protection_domain *domain; in detach_device() local
1696 domain = dev_data->domain; in detach_device()
1698 spin_lock_irqsave(&domain->lock, flags); in detach_device()
1708 if (WARN_ON(!dev_data->domain)) in detach_device()
1716 if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2) in detach_device()
1726 spin_unlock_irqrestore(&domain->lock, flags); in detach_device()
1795 static void update_device_table(struct protection_domain *domain) in update_device_table() argument
1799 list_for_each_entry(dev_data, &domain->dev_list, list) { in update_device_table()
1800 set_dte_entry(dev_data->devid, domain, in update_device_table()
1806 void amd_iommu_update_and_flush_device_table(struct protection_domain *domain) in amd_iommu_update_and_flush_device_table() argument
1808 update_device_table(domain); in amd_iommu_update_and_flush_device_table()
1809 domain_flush_devices(domain); in amd_iommu_update_and_flush_device_table()
1812 void amd_iommu_domain_update(struct protection_domain *domain) in amd_iommu_domain_update() argument
1815 amd_iommu_update_and_flush_device_table(domain); in amd_iommu_domain_update()
1818 amd_iommu_domain_flush_tlb_pde(domain); in amd_iommu_domain_update()
1819 amd_iommu_domain_flush_complete(domain); in amd_iommu_domain_update()
1851 static void cleanup_domain(struct protection_domain *domain) in cleanup_domain() argument
1856 spin_lock_irqsave(&domain->lock, flags); in cleanup_domain()
1858 while (!list_empty(&domain->dev_list)) { in cleanup_domain()
1859 entry = list_first_entry(&domain->dev_list, in cleanup_domain()
1861 BUG_ON(!entry->domain); in cleanup_domain()
1865 spin_unlock_irqrestore(&domain->lock, flags); in cleanup_domain()
1868 static void protection_domain_free(struct protection_domain *domain) in protection_domain_free() argument
1870 if (!domain) in protection_domain_free()
1873 if (domain->id) in protection_domain_free()
1874 domain_id_free(domain->id); in protection_domain_free()
1876 if (domain->iop.pgtbl_cfg.tlb) in protection_domain_free()
1877 free_io_pgtable_ops(&domain->iop.iop.ops); in protection_domain_free()
1879 kfree(domain); in protection_domain_free()
1882 static int protection_domain_init_v1(struct protection_domain *domain, int mode) in protection_domain_init_v1() argument
1888 spin_lock_init(&domain->lock); in protection_domain_init_v1()
1889 domain->id = domain_id_alloc(); in protection_domain_init_v1()
1890 if (!domain->id) in protection_domain_init_v1()
1892 INIT_LIST_HEAD(&domain->dev_list); in protection_domain_init_v1()
1900 amd_iommu_domain_set_pgtable(domain, pt_root, mode); in protection_domain_init_v1()
1908 struct protection_domain *domain; in protection_domain_alloc() local
1913 domain = kzalloc(sizeof(*domain), GFP_KERNEL); in protection_domain_alloc()
1914 if (!domain) in protection_domain_alloc()
1930 ret = protection_domain_init_v1(domain, mode); in protection_domain_alloc()
1939 pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain); in protection_domain_alloc()
1943 return domain; in protection_domain_alloc()
1945 kfree(domain); in protection_domain_alloc()
1951 struct protection_domain *domain; in amd_iommu_domain_alloc() local
1953 domain = protection_domain_alloc(type); in amd_iommu_domain_alloc()
1954 if (!domain) in amd_iommu_domain_alloc()
1957 domain->domain.geometry.aperture_start = 0; in amd_iommu_domain_alloc()
1958 domain->domain.geometry.aperture_end = ~0ULL; in amd_iommu_domain_alloc()
1959 domain->domain.geometry.force_aperture = true; in amd_iommu_domain_alloc()
1961 return &domain->domain; in amd_iommu_domain_alloc()
1966 struct protection_domain *domain; in amd_iommu_domain_free() local
1968 domain = to_pdomain(dom); in amd_iommu_domain_free()
1970 if (domain->dev_cnt > 0) in amd_iommu_domain_free()
1971 cleanup_domain(domain); in amd_iommu_domain_free()
1973 BUG_ON(domain->dev_cnt != 0); in amd_iommu_domain_free()
1978 if (domain->flags & PD_IOMMUV2_MASK) in amd_iommu_domain_free()
1979 free_gcr3_table(domain); in amd_iommu_domain_free()
1981 protection_domain_free(domain); in amd_iommu_domain_free()
1994 if (dev_data->domain != NULL) in amd_iommu_detach_device()
2013 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_attach_device() local
2028 if (dev_data->domain) in amd_iommu_attach_device()
2031 ret = attach_device(dev, domain); in amd_iommu_attach_device()
2050 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_iotlb_sync_map() local
2051 struct io_pgtable_ops *ops = &domain->iop.iop.ops; in amd_iommu_iotlb_sync_map()
2054 domain_flush_np_cache(domain, iova, size); in amd_iommu_iotlb_sync_map()
2061 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_map() local
2062 struct io_pgtable_ops *ops = &domain->iop.iop.ops; in amd_iommu_map()
2067 (domain->iop.mode == PAGE_MODE_NONE)) in amd_iommu_map()
2081 static void amd_iommu_iotlb_gather_add_page(struct iommu_domain *domain, in amd_iommu_iotlb_gather_add_page() argument
2097 iommu_iotlb_sync(domain, gather); in amd_iommu_iotlb_gather_add_page()
2106 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_unmap() local
2107 struct io_pgtable_ops *ops = &domain->iop.iop.ops; in amd_iommu_unmap()
2111 (domain->iop.mode == PAGE_MODE_NONE)) in amd_iommu_unmap()
2124 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_iova_to_phys() local
2125 struct io_pgtable_ops *ops = &domain->iop.iop.ops; in amd_iommu_iova_to_phys()
2198 bool amd_iommu_is_attach_deferred(struct iommu_domain *domain, in amd_iommu_is_attach_deferred() argument
2207 static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain) in amd_iommu_flush_iotlb_all() argument
2209 struct protection_domain *dom = to_pdomain(domain); in amd_iommu_flush_iotlb_all()
2218 static void amd_iommu_iotlb_sync(struct iommu_domain *domain, in amd_iommu_iotlb_sync() argument
2221 struct protection_domain *dom = to_pdomain(domain); in amd_iommu_iotlb_sync()
2297 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_domain_direct_map() local
2300 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_direct_map()
2302 if (domain->iop.pgtbl_cfg.tlb) in amd_iommu_domain_direct_map()
2303 free_io_pgtable_ops(&domain->iop.iop.ops); in amd_iommu_domain_direct_map()
2305 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_direct_map()
2311 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_domain_enable_v2() local
2322 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_enable_v2()
2330 if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK) in amd_iommu_domain_enable_v2()
2334 domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC); in amd_iommu_domain_enable_v2()
2335 if (domain->gcr3_tbl == NULL) in amd_iommu_domain_enable_v2()
2338 domain->glx = levels; in amd_iommu_domain_enable_v2()
2339 domain->flags |= PD_IOMMUV2_MASK; in amd_iommu_domain_enable_v2()
2341 amd_iommu_domain_update(domain); in amd_iommu_domain_enable_v2()
2346 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_enable_v2()
2352 static int __flush_pasid(struct protection_domain *domain, u32 pasid, in __flush_pasid() argument
2359 if (!(domain->flags & PD_IOMMUV2_MASK)) in __flush_pasid()
2362 build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size); in __flush_pasid()
2369 if (domain->dev_iommu[i] == 0) in __flush_pasid()
2378 amd_iommu_domain_flush_complete(domain); in __flush_pasid()
2381 list_for_each_entry(dev_data, &domain->dev_list, list) { in __flush_pasid()
2404 amd_iommu_domain_flush_complete(domain); in __flush_pasid()
2413 static int __amd_iommu_flush_page(struct protection_domain *domain, u32 pasid, in __amd_iommu_flush_page() argument
2416 return __flush_pasid(domain, pasid, address, false); in __amd_iommu_flush_page()
2422 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_flush_page() local
2426 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_flush_page()
2427 ret = __amd_iommu_flush_page(domain, pasid, address); in amd_iommu_flush_page()
2428 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_flush_page()
2434 static int __amd_iommu_flush_tlb(struct protection_domain *domain, u32 pasid) in __amd_iommu_flush_tlb() argument
2436 return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, in __amd_iommu_flush_tlb()
2442 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_flush_tlb() local
2446 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_flush_tlb()
2447 ret = __amd_iommu_flush_tlb(domain, pasid); in amd_iommu_flush_tlb()
2448 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_flush_tlb()
2486 static int __set_gcr3(struct protection_domain *domain, u32 pasid, in __set_gcr3() argument
2491 if (domain->iop.mode != PAGE_MODE_NONE) in __set_gcr3()
2494 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true); in __set_gcr3()
2500 return __amd_iommu_flush_tlb(domain, pasid); in __set_gcr3()
2503 static int __clear_gcr3(struct protection_domain *domain, u32 pasid) in __clear_gcr3() argument
2507 if (domain->iop.mode != PAGE_MODE_NONE) in __clear_gcr3()
2510 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false); in __clear_gcr3()
2516 return __amd_iommu_flush_tlb(domain, pasid); in __clear_gcr3()
2522 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_domain_set_gcr3() local
2526 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_set_gcr3()
2527 ret = __set_gcr3(domain, pasid, cr3); in amd_iommu_domain_set_gcr3()
2528 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_set_gcr3()
2536 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_domain_clear_gcr3() local
2540 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_clear_gcr3()
2541 ret = __clear_gcr3(domain, pasid); in amd_iommu_domain_clear_gcr3()
2542 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_clear_gcr3()
3116 static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq, in irq_remapping_alloc() argument
3143 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); in irq_remapping_alloc()
3185 irq_data = irq_domain_get_irq_data(domain, virq + i); in irq_remapping_alloc()
3218 irq_data = irq_domain_get_irq_data(domain, virq + i); in irq_remapping_alloc()
3225 irq_domain_free_irqs_common(domain, virq, nr_irqs); in irq_remapping_alloc()
3229 static void irq_remapping_free(struct irq_domain *domain, unsigned int virq, in irq_remapping_free() argument
3238 irq_data = irq_domain_get_irq_data(domain, virq + i); in irq_remapping_free()
3247 irq_domain_free_irqs_common(domain, virq, nr_irqs); in irq_remapping_free()
3255 static int irq_remapping_activate(struct irq_domain *domain, in irq_remapping_activate() argument
3272 static void irq_remapping_deactivate(struct irq_domain *domain, in irq_remapping_deactivate() argument
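For reference, the pattern repeated across these matches (for example in domain_flush_np_cache(), source lines 1348-1351) is: take the per-domain spinlock, flush the affected IOTLB range, wait for the flush to complete, then unlock. Below is a minimal sketch of that pattern only, assuming the driver's own struct protection_domain and flush helpers are in scope; flush_domain_range() is a hypothetical wrapper named here purely for illustration, not a function in the driver.

	/* Hypothetical helper illustrating the flush pattern seen above
	 * (compare domain_flush_np_cache()); assumes the AMD IOMMU driver's
	 * declarations are in scope. */
	static void flush_domain_range(struct protection_domain *domain,
				       u64 iova, size_t size)
	{
		unsigned long flags;

		spin_lock_irqsave(&domain->lock, flags);    /* serialize against attach/detach    */
		domain_flush_pages(domain, iova, size, 1);  /* invalidate the IOTLB range (pde=1)  */
		amd_iommu_domain_flush_complete(domain);    /* wait until the invalidation is done */
		spin_unlock_irqrestore(&domain->lock, flags);
	}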