Lines matching refs: domain (AMD IOMMU driver)
102 static void update_domain(struct protection_domain *domain);
103 static int protection_domain_init(struct protection_domain *domain);
112 struct protection_domain domain; member
187 return container_of(dom, struct protection_domain, domain); in to_pdomain()
190 static struct dma_ops_domain* to_dma_ops_domain(struct protection_domain *domain) in to_dma_ops_domain() argument
192 BUG_ON(domain->flags != PD_DMA_OPS_MASK); in to_dma_ops_domain()
193 return container_of(domain, struct dma_ops_domain, domain); in to_dma_ops_domain()
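The to_pdomain()/to_dma_ops_domain() references above rely on the kernel's container_of() idiom: the generic struct iommu_domain is embedded inside the driver-private protection_domain, which in turn is embedded inside dma_ops_domain, and the wrapper is recovered by subtracting the member offset. Below is a minimal userspace sketch of that idiom; the structure layouts and the PD_DMA_OPS_MASK value are simplified stand-ins, not the real kernel definitions.

/*
 * Minimal userspace sketch of the container_of() idiom used by
 * to_pdomain() and to_dma_ops_domain(). Structures and flag values
 * are simplified stand-ins, not the kernel definitions.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

#define PD_DMA_OPS_MASK 0x1 /* placeholder flag value */

struct iommu_domain { int type; };

struct protection_domain {
    int id;
    unsigned long flags;
    struct iommu_domain domain;     /* embedded generic domain */
};

struct dma_ops_domain {
    struct protection_domain domain;   /* embedded protection domain */
    void *iovad;                       /* stand-in for the IOVA allocator */
};

static struct protection_domain *to_pdomain(struct iommu_domain *dom)
{
    return container_of(dom, struct protection_domain, domain);
}

static struct dma_ops_domain *to_dma_ops_domain(struct protection_domain *d)
{
    assert(d->flags == PD_DMA_OPS_MASK);   /* stands in for the BUG_ON() */
    return container_of(d, struct dma_ops_domain, domain);
}

int main(void)
{
    struct dma_ops_domain dma_dom = {
        .domain = { .id = 42, .flags = PD_DMA_OPS_MASK },
    };
    struct iommu_domain *generic = &dma_dom.domain.domain;

    /* Walk back out through both embedding layers. */
    struct protection_domain *pd = to_pdomain(generic);
    struct dma_ops_domain *dd = to_dma_ops_domain(pd);

    printf("id=%d dma_dom=%p\n", pd->id, (void *)dd);
    return 0;
}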
474 if (dev_data->domain) in iommu_uninit_device()
1269 static void __domain_flush_pages(struct protection_domain *domain, in __domain_flush_pages() argument
1276 build_inv_iommu_pages(&cmd, address, size, domain->id, pde); in __domain_flush_pages()
1279 if (!domain->dev_iommu[i]) in __domain_flush_pages()
1289 list_for_each_entry(dev_data, &domain->dev_list, list) { in __domain_flush_pages()
1300 static void domain_flush_pages(struct protection_domain *domain, in domain_flush_pages() argument
1303 __domain_flush_pages(domain, address, size, 0); in domain_flush_pages()
1307 static void domain_flush_tlb(struct protection_domain *domain) in domain_flush_tlb() argument
1309 __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0); in domain_flush_tlb()
1313 static void domain_flush_tlb_pde(struct protection_domain *domain) in domain_flush_tlb_pde() argument
1315 __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1); in domain_flush_tlb_pde()
1318 static void domain_flush_complete(struct protection_domain *domain) in domain_flush_complete() argument
1323 if (domain && !domain->dev_iommu[i]) in domain_flush_complete()
1335 static void domain_flush_np_cache(struct protection_domain *domain, in domain_flush_np_cache() argument
1341 spin_lock_irqsave(&domain->lock, flags); in domain_flush_np_cache()
1342 domain_flush_pages(domain, iova, size); in domain_flush_np_cache()
1343 domain_flush_complete(domain); in domain_flush_np_cache()
1344 spin_unlock_irqrestore(&domain->lock, flags); in domain_flush_np_cache()
1352 static void domain_flush_devices(struct protection_domain *domain) in domain_flush_devices() argument
1356 list_for_each_entry(dev_data, &domain->dev_list, list) in domain_flush_devices()
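The flush helpers referenced above share one shape: __domain_flush_pages() builds a single invalidate-pages command and queues it only to IOMMUs that actually have devices in the domain (non-zero dev_iommu[] counters), while the wrappers pick the address range and the PDE flag and domain_flush_complete() waits for the hardware afterwards. The following is a simplified userspace sketch of that selection logic; the types, the printf "command queue", and the CMD_INV_IOMMU_ALL_PAGES_ADDRESS value are illustrative placeholders, not the driver's definitions.

/*
 * Simplified sketch of the __domain_flush_pages() pattern: one
 * invalidation command, queued only to IOMMUs whose dev_iommu[]
 * counter shows they have devices attached to this domain.
 */
#include <stdio.h>

#define MAX_IOMMUS 4
#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS (~0ULL)   /* placeholder sentinel */

struct protection_domain {
    int id;
    int dev_iommu[MAX_IOMMUS];   /* devices per IOMMU in this domain */
};

static void queue_inv_pages(int iommu, int domid,
                            unsigned long long addr,
                            unsigned long long size, int pde)
{
    /* stand-in for building and queueing the real command */
    printf("IOMMU %d: inv pages domid=%d addr=%#llx size=%#llx pde=%d\n",
           iommu, domid, addr, size, pde);
}

static void __domain_flush_pages(struct protection_domain *d,
                                 unsigned long long addr,
                                 unsigned long long size, int pde)
{
    for (int i = 0; i < MAX_IOMMUS; i++) {
        if (!d->dev_iommu[i])
            continue;            /* nothing behind this IOMMU */
        queue_inv_pages(i, d->id, addr, size, pde);
    }
}

static void domain_flush_tlb_pde(struct protection_domain *d)
{
    /* whole address space, including page-directory entries */
    __domain_flush_pages(d, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
}

int main(void)
{
    struct protection_domain d = { .id = 7, .dev_iommu = { 1, 0, 2, 0 } };

    __domain_flush_pages(&d, 0x100000, 0x4000, 0);
    domain_flush_tlb_pde(&d);
    return 0;
}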
1449 static void free_pagetable(struct protection_domain *domain) in free_pagetable() argument
1451 unsigned long root = (unsigned long)domain->pt_root; in free_pagetable()
1454 BUG_ON(domain->mode < PAGE_MODE_NONE || in free_pagetable()
1455 domain->mode > PAGE_MODE_6_LEVEL); in free_pagetable()
1457 freelist = free_sub_pt(root, domain->mode, freelist); in free_pagetable()
1467 static bool increase_address_space(struct protection_domain *domain, in increase_address_space() argument
1475 spin_lock_irqsave(&domain->lock, flags); in increase_address_space()
1477 if (address <= PM_LEVEL_SIZE(domain->mode) || in increase_address_space()
1478 WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL)) in increase_address_space()
1485 *pte = PM_LEVEL_PDE(domain->mode, in increase_address_space()
1486 iommu_virt_to_phys(domain->pt_root)); in increase_address_space()
1487 domain->pt_root = pte; in increase_address_space()
1488 domain->mode += 1; in increase_address_space()
1493 spin_unlock_irqrestore(&domain->lock, flags); in increase_address_space()
1498 static u64 *alloc_pte(struct protection_domain *domain, in alloc_pte() argument
1510 while (address > PM_LEVEL_SIZE(domain->mode)) in alloc_pte()
1511 *updated = increase_address_space(domain, address, gfp) || *updated; in alloc_pte()
1513 level = domain->mode - 1; in alloc_pte()
1514 pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; in alloc_pte()
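increase_address_space() and alloc_pte() above grow the page table on demand: when the target IOVA lies beyond what the current mode (level count) can address, a fresh root level is allocated, its first entry is pointed at the old root, and mode is bumped, up to the 6-level maximum. Below is a simplified pointer-based sketch of that growth step; the macro names mirror the driver but the values are written out for illustration, the table stores plain pointers rather than encoded physical addresses, and the real code runs under domain->lock.

/*
 * Simplified model of increase_address_space(): add levels on top of
 * the radix page table until the address fits. Pointer-based and
 * unlocked, purely for illustration.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ENTRIES_PER_LEVEL  512
#define PAGE_MODE_6_LEVEL  6
#define PM_LEVEL_SHIFT(lvl) (12 + 9 * (lvl))
#define PM_LEVEL_SIZE(mode) ((mode) < PAGE_MODE_6_LEVEL ? \
                             ((1ULL << PM_LEVEL_SHIFT(mode)) - 1) : ~0ULL)

struct pt_level {
    struct pt_level *child[ENTRIES_PER_LEVEL];
};

struct domain {
    struct pt_level *pt_root;
    int mode;                   /* current number of levels */
};

static int increase_address_space(struct domain *d, uint64_t address)
{
    struct pt_level *new_root;

    if (address <= PM_LEVEL_SIZE(d->mode) || d->mode == PAGE_MODE_6_LEVEL)
        return 0;               /* already reachable, or at max depth */

    new_root = calloc(1, sizeof(*new_root));
    if (!new_root)
        return -1;

    new_root->child[0] = d->pt_root;   /* old root becomes child 0 */
    d->pt_root = new_root;
    d->mode += 1;
    return 1;
}

int main(void)
{
    struct domain d = { .pt_root = calloc(1, sizeof(struct pt_level)),
                        .mode = 3 };
    uint64_t iova = 1ULL << 45;        /* needs more than 3 levels */

    while (iova > PM_LEVEL_SIZE(d.mode))
        if (increase_address_space(&d, iova) < 0)
            break;

    printf("mode is now %d\n", d.mode);
    return 0;
}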
1586 static u64 *fetch_pte(struct protection_domain *domain, in fetch_pte() argument
1595 if (address > PM_LEVEL_SIZE(domain->mode)) in fetch_pte()
1598 level = domain->mode - 1; in fetch_pte()
1599 pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; in fetch_pte()
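fetch_pte() above walks the table downwards from pt_root, using PM_LEVEL_INDEX() to pick nine IOVA bits per level above the 12-bit page offset. The shift/mask arithmetic is shown standalone below; the macro bodies follow the common AMD v1 layout of 512 entries per level and are written out here for illustration rather than copied from the driver headers.

/* Standalone illustration of the per-level index arithmetic behind
 * fetch_pte(): 9 IOVA bits per level above the 12-bit page offset. */
#include <stdint.h>
#include <stdio.h>

#define PM_LEVEL_SHIFT(lvl)       (12 + 9 * (lvl))
#define PM_LEVEL_INDEX(lvl, addr) (((addr) >> PM_LEVEL_SHIFT(lvl)) & 0x1ffULL)

int main(void)
{
    uint64_t iova = 0x1234567000ULL;   /* fits in a 3-level table */
    int mode = 3;                      /* PAGE_MODE_3_LEVEL */

    for (int level = mode - 1; level >= 0; level--)
        printf("level %d -> index %llu\n", level,
               (unsigned long long)PM_LEVEL_INDEX(level, iova));
    return 0;
}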
1858 static void free_gcr3_table(struct protection_domain *domain) in free_gcr3_table() argument
1860 if (domain->glx == 2) in free_gcr3_table()
1861 free_gcr3_tbl_level2(domain->gcr3_tbl); in free_gcr3_table()
1862 else if (domain->glx == 1) in free_gcr3_table()
1863 free_gcr3_tbl_level1(domain->gcr3_tbl); in free_gcr3_table()
1865 BUG_ON(domain->glx != 0); in free_gcr3_table()
1867 free_page((unsigned long)domain->gcr3_tbl); in free_gcr3_table()
1874 spin_lock_irqsave(&dom->domain.lock, flags); in dma_ops_domain_flush_tlb()
1875 domain_flush_tlb(&dom->domain); in dma_ops_domain_flush_tlb()
1876 domain_flush_complete(&dom->domain); in dma_ops_domain_flush_tlb()
1877 spin_unlock_irqrestore(&dom->domain.lock, flags); in dma_ops_domain_flush_tlb()
1900 free_pagetable(&dom->domain); in dma_ops_domain_free()
1902 if (dom->domain.id) in dma_ops_domain_free()
1903 domain_id_free(dom->domain.id); in dma_ops_domain_free()
1921 if (protection_domain_init(&dma_dom->domain)) in dma_ops_domain_alloc()
1924 dma_dom->domain.mode = PAGE_MODE_3_LEVEL; in dma_ops_domain_alloc()
1925 dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); in dma_ops_domain_alloc()
1926 dma_dom->domain.flags = PD_DMA_OPS_MASK; in dma_ops_domain_alloc()
1927 if (!dma_dom->domain.pt_root) in dma_ops_domain_alloc()
1950 static bool dma_ops_domain(struct protection_domain *domain) in dma_ops_domain() argument
1952 return domain->flags & PD_DMA_OPS_MASK; in dma_ops_domain()
1955 static void set_dte_entry(u16 devid, struct protection_domain *domain, in set_dte_entry() argument
1962 if (domain->mode != PAGE_MODE_NONE) in set_dte_entry()
1963 pte_root = iommu_virt_to_phys(domain->pt_root); in set_dte_entry()
1965 pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK) in set_dte_entry()
1981 if (domain->flags & PD_IOMMUV2_MASK) { in set_dte_entry()
1982 u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl); in set_dte_entry()
1983 u64 glx = domain->glx; in set_dte_entry()
2008 flags |= domain->id; in set_dte_entry()
2036 struct protection_domain *domain) in do_attach() argument
2045 dev_data->domain = domain; in do_attach()
2046 list_add(&dev_data->list, &domain->dev_list); in do_attach()
2049 domain->dev_iommu[iommu->index] += 1; in do_attach()
2050 domain->dev_cnt += 1; in do_attach()
2053 set_dte_entry(dev_data->devid, domain, ats, dev_data->iommu_v2); in do_attach()
2061 struct protection_domain *domain = dev_data->domain; in do_detach() local
2067 dev_data->domain = NULL; in do_detach()
2076 domain_flush_tlb_pde(domain); in do_detach()
2079 domain_flush_complete(domain); in do_detach()
2082 domain->dev_iommu[iommu->index] -= 1; in do_detach()
2083 domain->dev_cnt -= 1; in do_detach()
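do_attach()/do_detach() above keep the domain bookkeeping in step with the device table: attach links the device into domain->dev_list, bumps the per-IOMMU dev_iommu[] counter and dev_cnt, and writes the DTE; detach undoes each step, flushing the IOTLB before the counters drop. The sketch below reduces this to just the counter side; the set_dte_entry() stub, the iommu_index field, and the omitted dev_list handling are stand-ins for illustration.

/*
 * Reduced sketch of the do_attach()/do_detach() bookkeeping: the
 * per-IOMMU counter records how many devices in the domain sit behind
 * each IOMMU and later drives which IOMMUs receive flush commands.
 */
#include <stdio.h>

#define MAX_IOMMUS 4

struct protection_domain {
    int id;
    int dev_iommu[MAX_IOMMUS];
    int dev_cnt;
};

struct iommu_dev_data {
    int devid;
    int iommu_index;                  /* which IOMMU serves this device */
    struct protection_domain *domain;
};

static void set_dte_entry(int devid, struct protection_domain *d)
{
    printf("DTE[%#x] -> domain %d\n", devid, d->id);   /* stand-in */
}

static void do_attach(struct iommu_dev_data *dd, struct protection_domain *d)
{
    dd->domain = d;
    d->dev_iommu[dd->iommu_index] += 1;
    d->dev_cnt += 1;
    set_dte_entry(dd->devid, d);
}

static void do_detach(struct iommu_dev_data *dd)
{
    struct protection_domain *d = dd->domain;

    dd->domain = NULL;
    /* the real code clears the DTE and flushes the IOTLB here */
    d->dev_iommu[dd->iommu_index] -= 1;
    d->dev_cnt -= 1;
}

int main(void)
{
    struct protection_domain d = { .id = 3 };
    struct iommu_dev_data dev = { .devid = 0x28, .iommu_index = 1 };

    do_attach(&dev, &d);
    printf("dev_cnt=%d behind IOMMU1=%d\n", d.dev_cnt, d.dev_iommu[1]);
    do_detach(&dev);
    printf("dev_cnt=%d behind IOMMU1=%d\n", d.dev_cnt, d.dev_iommu[1]);
    return 0;
}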
2160 struct protection_domain *domain) in attach_device() argument
2167 spin_lock_irqsave(&domain->lock, flags); in attach_device()
2174 if (dev_data->domain != NULL) in attach_device()
2181 if (domain->flags & PD_IOMMUV2_MASK) { in attach_device()
2203 do_attach(dev_data, domain); in attach_device()
2210 domain_flush_tlb_pde(domain); in attach_device()
2212 domain_flush_complete(domain); in attach_device()
2217 spin_unlock_irqrestore(&domain->lock, flags); in attach_device()
2227 struct protection_domain *domain; in detach_device() local
2232 domain = dev_data->domain; in detach_device()
2234 spin_lock_irqsave(&domain->lock, flags); in detach_device()
2244 if (WARN_ON(!dev_data->domain)) in detach_device()
2252 if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2) in detach_device()
2262 spin_unlock_irqrestore(&domain->lock, flags); in detach_device()
2268 struct iommu_domain *domain; in amd_iommu_add_device() local
2300 domain = iommu_get_domain_for_dev(dev); in amd_iommu_add_device()
2301 if (domain->type == IOMMU_DOMAIN_IDENTITY) in amd_iommu_add_device()
2353 struct protection_domain *domain; in get_domain() local
2359 domain = get_dev_data(dev)->domain; in get_domain()
2360 if (domain == NULL && get_dev_data(dev)->defer_attach) { in get_domain()
2363 domain = to_pdomain(io_domain); in get_domain()
2364 attach_device(dev, domain); in get_domain()
2366 if (domain == NULL) in get_domain()
2369 if (!dma_ops_domain(domain)) in get_domain()
2372 return domain; in get_domain()
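get_domain() and its callers (map_page(), alloc_coherent(), ...) above use the kernel's ERR_PTR/IS_ERR/PTR_ERR idiom: a small negative errno is encoded in the pointer itself, so one return value carries either a valid domain or an error code, and -EINVAL in particular is treated as "no translation domain, fall back to direct mapping". The helpers are real kernel functions re-implemented minimally below so the sketch runs standalone; the get_domain() body and its argument are hypothetical.

/*
 * Userspace re-implementation of the ERR_PTR/IS_ERR idiom, for
 * illustration of how get_domain() errors are propagated.
 */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct protection_domain { int id; };

static struct protection_domain *get_domain(int have_domain)
{
    static struct protection_domain pd = { .id = 1 };

    if (!have_domain)
        return ERR_PTR(-EINVAL);   /* caller may fall back to direct map */
    return &pd;
}

int main(void)
{
    struct protection_domain *d = get_domain(0);

    if (PTR_ERR(d) == -EINVAL)
        printf("no translation domain, using identity mapping\n");
    else if (IS_ERR(d))
        printf("error %ld\n", PTR_ERR(d));
    else
        printf("domain id %d\n", d->id);
    return 0;
}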
2375 static void update_device_table(struct protection_domain *domain) in update_device_table() argument
2379 list_for_each_entry(dev_data, &domain->dev_list, list) { in update_device_table()
2380 set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled, in update_device_table()
2386 static void update_domain(struct protection_domain *domain) in update_domain() argument
2388 update_device_table(domain); in update_domain()
2390 domain_flush_devices(domain); in update_domain()
2391 domain_flush_tlb_pde(domain); in update_domain()
2437 ret = iommu_map_page(&dma_dom->domain, start, paddr, in __map_single()
2447 domain_flush_np_cache(&dma_dom->domain, address, size); in __map_single()
2456 iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE); in __map_single()
2459 spin_lock_irqsave(&dma_dom->domain.lock, flags); in __map_single()
2460 domain_flush_tlb(&dma_dom->domain); in __map_single()
2461 domain_flush_complete(&dma_dom->domain); in __map_single()
2462 spin_unlock_irqrestore(&dma_dom->domain.lock, flags); in __map_single()
2486 iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE); in __unmap_single()
2493 spin_lock_irqsave(&dma_dom->domain.lock, flags); in __unmap_single()
2494 domain_flush_tlb(&dma_dom->domain); in __unmap_single()
2495 domain_flush_complete(&dma_dom->domain); in __unmap_single()
2496 spin_unlock_irqrestore(&dma_dom->domain.lock, flags); in __unmap_single()
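__map_single()/__unmap_single() above map and unmap a DMA range one page at a time; if a mapping fails partway through, the pages mapped so far are unmapped again and the whole domain TLB is flushed under the domain lock. The sketch below shows that page-by-page loop with rollback; iommu_map_page()/iommu_unmap_page() are reduced to an in-memory bitmap with invented signatures, and the locking and TLB flush are only noted in comments.

/*
 * Sketch of the __map_single() rollback pattern: map page by page,
 * and on failure unwind everything mapped before the failure.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define NR_PAGES   64

static bool mapped[NR_PAGES];

static int iommu_map_page(unsigned long iova)
{
    unsigned long idx = iova >> PAGE_SHIFT;

    if (idx >= NR_PAGES)
        return -1;               /* simulate a mapping failure */
    mapped[idx] = true;
    return 0;
}

static void iommu_unmap_page(unsigned long iova)
{
    mapped[iova >> PAGE_SHIFT] = false;
}

static long map_range(unsigned long iova, unsigned long size)
{
    unsigned long start = iova, mapped_size = 0;

    while (mapped_size < size) {
        if (iommu_map_page(start))
            goto out_unmap;
        start += PAGE_SIZE;
        mapped_size += PAGE_SIZE;
    }
    return 0;

out_unmap:
    /* roll back everything mapped before the failure */
    for (start = iova; mapped_size; mapped_size -= PAGE_SIZE) {
        iommu_unmap_page(start);
        start += PAGE_SIZE;
    }
    /* the driver follows this with domain_flush_tlb() + complete
     * under the domain spinlock */
    return -1;
}

int main(void)
{
    printf("small range: %ld\n", map_range(0, 4 * PAGE_SIZE));
    printf("oversized range: %ld\n", map_range(0, 2 * NR_PAGES * PAGE_SIZE));
    return 0;
}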
2513 struct protection_domain *domain; in map_page() local
2517 domain = get_domain(dev); in map_page()
2518 if (PTR_ERR(domain) == -EINVAL) in map_page()
2520 else if (IS_ERR(domain)) in map_page()
2524 dma_dom = to_dma_ops_domain(domain); in map_page()
2535 struct protection_domain *domain; in unmap_page() local
2538 domain = get_domain(dev); in unmap_page()
2539 if (IS_ERR(domain)) in unmap_page()
2542 dma_dom = to_dma_ops_domain(domain); in unmap_page()
2582 struct protection_domain *domain; in map_sg() local
2589 domain = get_domain(dev); in map_sg()
2590 if (IS_ERR(domain)) in map_sg()
2593 dma_dom = to_dma_ops_domain(domain); in map_sg()
2613 ret = iommu_map_page(domain, bus_addr, phys_addr, in map_sg()
2635 domain_flush_np_cache(domain, s->dma_address, s->dma_length); in map_sg()
2650 iommu_unmap_page(domain, bus_addr, PAGE_SIZE); in map_sg()
2672 struct protection_domain *domain; in unmap_sg() local
2677 domain = get_domain(dev); in unmap_sg()
2678 if (IS_ERR(domain)) in unmap_sg()
2682 dma_dom = to_dma_ops_domain(domain); in unmap_sg()
2696 struct protection_domain *domain; in alloc_coherent() local
2700 domain = get_domain(dev); in alloc_coherent()
2701 if (PTR_ERR(domain) == -EINVAL) { in alloc_coherent()
2705 } else if (IS_ERR(domain)) in alloc_coherent()
2708 dma_dom = to_dma_ops_domain(domain); in alloc_coherent()
2751 struct protection_domain *domain; in free_coherent() local
2758 domain = get_domain(dev); in free_coherent()
2759 if (IS_ERR(domain)) in free_coherent()
2762 dma_dom = to_dma_ops_domain(domain); in free_coherent()
2897 static void cleanup_domain(struct protection_domain *domain) in cleanup_domain() argument
2902 spin_lock_irqsave(&domain->lock, flags); in cleanup_domain()
2904 while (!list_empty(&domain->dev_list)) { in cleanup_domain()
2905 entry = list_first_entry(&domain->dev_list, in cleanup_domain()
2907 BUG_ON(!entry->domain); in cleanup_domain()
2911 spin_unlock_irqrestore(&domain->lock, flags); in cleanup_domain()
2914 static void protection_domain_free(struct protection_domain *domain) in protection_domain_free() argument
2916 if (!domain) in protection_domain_free()
2919 if (domain->id) in protection_domain_free()
2920 domain_id_free(domain->id); in protection_domain_free()
2922 kfree(domain); in protection_domain_free()
2925 static int protection_domain_init(struct protection_domain *domain) in protection_domain_init() argument
2927 spin_lock_init(&domain->lock); in protection_domain_init()
2928 mutex_init(&domain->api_lock); in protection_domain_init()
2929 domain->id = domain_id_alloc(); in protection_domain_init()
2930 if (!domain->id) in protection_domain_init()
2932 INIT_LIST_HEAD(&domain->dev_list); in protection_domain_init()
2939 struct protection_domain *domain; in protection_domain_alloc() local
2941 domain = kzalloc(sizeof(*domain), GFP_KERNEL); in protection_domain_alloc()
2942 if (!domain) in protection_domain_alloc()
2945 if (protection_domain_init(domain)) in protection_domain_alloc()
2948 return domain; in protection_domain_alloc()
2951 kfree(domain); in protection_domain_alloc()
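protection_domain_alloc()/protection_domain_init()/protection_domain_free() above follow the usual allocate, initialise, unwind-on-error shape: a zeroed allocation, an init step that can fail (here, domain ID allocation), a success return, and an error label that frees what was allocated. A generic sketch of that shape follows; the trivial counter stands in for domain_id_alloc(), and the real init also sets up the spinlock, api_lock mutex and dev_list.

/*
 * Generic sketch of the allocate / init / unwind-on-error shape used
 * by protection_domain_alloc().
 */
#include <stdio.h>
#include <stdlib.h>

struct protection_domain {
    int id;
    /* real struct also has lock, api_lock, dev_list, pt_root, ... */
};

static int next_id = 1;

static int  domain_id_alloc(void)  { return next_id++; }   /* stand-in */
static void domain_id_free(int id) { (void)id; }           /* stand-in */

static int protection_domain_init(struct protection_domain *d)
{
    d->id = domain_id_alloc();
    if (!d->id)
        return -1;        /* 0 means no ID could be allocated */
    return 0;
}

static struct protection_domain *protection_domain_alloc(void)
{
    struct protection_domain *d = calloc(1, sizeof(*d));

    if (!d)
        return NULL;
    if (protection_domain_init(d))
        goto out_err;
    return d;

out_err:
    free(d);
    return NULL;
}

static void protection_domain_free(struct protection_domain *d)
{
    if (!d)
        return;
    if (d->id)
        domain_id_free(d->id);
    free(d);
}

int main(void)
{
    struct protection_domain *d = protection_domain_alloc();

    printf("domain id %d\n", d ? d->id : -1);
    protection_domain_free(d);
    return 0;
}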
2974 pdomain->domain.geometry.aperture_start = 0; in amd_iommu_domain_alloc()
2975 pdomain->domain.geometry.aperture_end = ~0ULL; in amd_iommu_domain_alloc()
2976 pdomain->domain.geometry.force_aperture = true; in amd_iommu_domain_alloc()
2985 pdomain = &dma_domain->domain; in amd_iommu_domain_alloc()
2998 return &pdomain->domain; in amd_iommu_domain_alloc()
3003 struct protection_domain *domain; in amd_iommu_domain_free() local
3006 domain = to_pdomain(dom); in amd_iommu_domain_free()
3008 if (domain->dev_cnt > 0) in amd_iommu_domain_free()
3009 cleanup_domain(domain); in amd_iommu_domain_free()
3011 BUG_ON(domain->dev_cnt != 0); in amd_iommu_domain_free()
3019 dma_dom = to_dma_ops_domain(domain); in amd_iommu_domain_free()
3023 if (domain->mode != PAGE_MODE_NONE) in amd_iommu_domain_free()
3024 free_pagetable(domain); in amd_iommu_domain_free()
3026 if (domain->flags & PD_IOMMUV2_MASK) in amd_iommu_domain_free()
3027 free_gcr3_table(domain); in amd_iommu_domain_free()
3029 protection_domain_free(domain); in amd_iommu_domain_free()
3048 if (dev_data->domain != NULL) in amd_iommu_detach_device()
3067 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_attach_device() local
3081 if (dev_data->domain) in amd_iommu_attach_device()
3084 ret = attach_device(dev, domain); in amd_iommu_attach_device()
3103 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_map() local
3107 if (domain->mode == PAGE_MODE_NONE) in amd_iommu_map()
3115 mutex_lock(&domain->api_lock); in amd_iommu_map()
3116 ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL); in amd_iommu_map()
3117 mutex_unlock(&domain->api_lock); in amd_iommu_map()
3119 domain_flush_np_cache(domain, iova, page_size); in amd_iommu_map()
3128 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_unmap() local
3131 if (domain->mode == PAGE_MODE_NONE) in amd_iommu_unmap()
3134 mutex_lock(&domain->api_lock); in amd_iommu_unmap()
3135 unmap_size = iommu_unmap_page(domain, iova, page_size); in amd_iommu_unmap()
3136 mutex_unlock(&domain->api_lock); in amd_iommu_unmap()
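amd_iommu_map()/amd_iommu_unmap() above serialise page-table updates with domain->api_lock, a mutex rather than the spinlock because the update path may sleep while allocating table pages, and the non-present-cache flush happens after the lock is dropped. The pthread-based stand-in below illustrates only that serialisation pattern; the "page table" is a counter and the flush is a printf, nothing here reflects the real driver structures.

/*
 * Stand-in for the domain->api_lock usage: serialise updates with a
 * mutex, flush after dropping it.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t api_lock = PTHREAD_MUTEX_INITIALIZER;
static long mapped_pages;

static void flush_after_update(void)
{
    /* stand-in for domain_flush_np_cache() */
    printf("flush after update, %ld pages mapped\n", mapped_pages);
}

static void *mapper(void *arg)
{
    (void)arg;
    for (int i = 0; i < 1000; i++) {
        pthread_mutex_lock(&api_lock);
        mapped_pages++;                 /* iommu_map_page() stand-in */
        pthread_mutex_unlock(&api_lock);
    }
    flush_after_update();
    return NULL;
}

int main(void)
{
    pthread_t a, b;

    pthread_create(&a, NULL, mapper, NULL);
    pthread_create(&b, NULL, mapper, NULL);
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    printf("total %ld\n", mapped_pages);
    return 0;
}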
3144 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_iova_to_phys() local
3148 if (domain->mode == PAGE_MODE_NONE) in amd_iommu_iova_to_phys()
3151 pte = fetch_pte(domain, iova, &pte_pgsize); in amd_iommu_iova_to_phys()
3240 struct iommu_domain *domain, in amd_iommu_apply_resv_region() argument
3243 struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain)); in amd_iommu_apply_resv_region()
3252 static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain, in amd_iommu_is_attach_deferred() argument
3259 static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain) in amd_iommu_flush_iotlb_all() argument
3261 struct protection_domain *dom = to_pdomain(domain); in amd_iommu_flush_iotlb_all()
3270 static void amd_iommu_iotlb_sync(struct iommu_domain *domain, in amd_iommu_iotlb_sync() argument
3273 amd_iommu_flush_iotlb_all(domain); in amd_iommu_iotlb_sync()
3322 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_domain_direct_map() local
3325 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_direct_map()
3328 domain->mode = PAGE_MODE_NONE; in amd_iommu_domain_direct_map()
3331 update_domain(domain); in amd_iommu_domain_direct_map()
3334 free_pagetable(domain); in amd_iommu_domain_direct_map()
3336 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_direct_map()
3342 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_domain_enable_v2() local
3356 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_enable_v2()
3364 if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK) in amd_iommu_domain_enable_v2()
3368 domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC); in amd_iommu_domain_enable_v2()
3369 if (domain->gcr3_tbl == NULL) in amd_iommu_domain_enable_v2()
3372 domain->glx = levels; in amd_iommu_domain_enable_v2()
3373 domain->flags |= PD_IOMMUV2_MASK; in amd_iommu_domain_enable_v2()
3375 update_domain(domain); in amd_iommu_domain_enable_v2()
3380 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_enable_v2()
3386 static int __flush_pasid(struct protection_domain *domain, int pasid, in __flush_pasid() argument
3393 if (!(domain->flags & PD_IOMMUV2_MASK)) in __flush_pasid()
3396 build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size); in __flush_pasid()
3403 if (domain->dev_iommu[i] == 0) in __flush_pasid()
3412 domain_flush_complete(domain); in __flush_pasid()
3415 list_for_each_entry(dev_data, &domain->dev_list, list) { in __flush_pasid()
3438 domain_flush_complete(domain); in __flush_pasid()
3447 static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid, in __amd_iommu_flush_page() argument
3450 return __flush_pasid(domain, pasid, address, false); in __amd_iommu_flush_page()
3456 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_flush_page() local
3460 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_flush_page()
3461 ret = __amd_iommu_flush_page(domain, pasid, address); in amd_iommu_flush_page()
3462 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_flush_page()
3468 static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid) in __amd_iommu_flush_tlb() argument
3470 return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, in __amd_iommu_flush_tlb()
3476 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_flush_tlb() local
3480 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_flush_tlb()
3481 ret = __amd_iommu_flush_tlb(domain, pasid); in amd_iommu_flush_tlb()
3482 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_flush_tlb()
3520 static int __set_gcr3(struct protection_domain *domain, int pasid, in __set_gcr3() argument
3525 if (domain->mode != PAGE_MODE_NONE) in __set_gcr3()
3528 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true); in __set_gcr3()
3534 return __amd_iommu_flush_tlb(domain, pasid); in __set_gcr3()
3537 static int __clear_gcr3(struct protection_domain *domain, int pasid) in __clear_gcr3() argument
3541 if (domain->mode != PAGE_MODE_NONE) in __clear_gcr3()
3544 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false); in __clear_gcr3()
3550 return __amd_iommu_flush_tlb(domain, pasid); in __clear_gcr3()
3556 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_domain_set_gcr3() local
3560 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_set_gcr3()
3561 ret = __set_gcr3(domain, pasid, cr3); in amd_iommu_domain_set_gcr3()
3562 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_set_gcr3()
3570 struct protection_domain *domain = to_pdomain(dom); in amd_iommu_domain_clear_gcr3() local
3574 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_clear_gcr3()
3575 ret = __clear_gcr3(domain, pasid); in amd_iommu_domain_clear_gcr3()
3576 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_clear_gcr3()
3611 return &pdomain->domain; in amd_iommu_get_v2_domain()
4234 static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq, in irq_remapping_alloc() argument
4261 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); in irq_remapping_alloc()
4302 irq_data = irq_domain_get_irq_data(domain, virq + i); in irq_remapping_alloc()
4335 irq_data = irq_domain_get_irq_data(domain, virq + i); in irq_remapping_alloc()
4342 irq_domain_free_irqs_common(domain, virq, nr_irqs); in irq_remapping_alloc()
4346 static void irq_remapping_free(struct irq_domain *domain, unsigned int virq, in irq_remapping_free() argument
4355 irq_data = irq_domain_get_irq_data(domain, virq + i); in irq_remapping_free()
4364 irq_domain_free_irqs_common(domain, virq, nr_irqs); in irq_remapping_free()
4372 static int irq_remapping_activate(struct irq_domain *domain, in irq_remapping_activate() argument
4389 static void irq_remapping_deactivate(struct irq_domain *domain, in irq_remapping_deactivate() argument