Lines Matching +full:pci +full:- +full:domain
1 // SPDX-License-Identifier: GPL-2.0-only
8 #define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__
40 return -ENOMEM; in iommu_init_mempool()
51 return -ENOMEM; in iommu_init_mempool()
59 u32 win_cnt = dma_domain->win_cnt; in get_phys_addr()
60 struct dma_window *win_ptr = &dma_domain->win_arr[0]; in get_phys_addr()
63 geom = &dma_domain->iommu_domain.geometry; in get_phys_addr()
65 if (!win_cnt || !dma_domain->geom_size) { in get_phys_addr()
66 pr_debug("Number of windows/geometry not configured for the domain\n"); in get_phys_addr()
75 subwin_size = dma_domain->geom_size >> ilog2(win_cnt); in get_phys_addr()
76 subwin_iova = iova & ~(subwin_size - 1); in get_phys_addr()
77 wnd = (subwin_iova - geom->aperture_start) >> ilog2(subwin_size); in get_phys_addr()
78 win_ptr = &dma_domain->win_arr[wnd]; in get_phys_addr()
81 if (win_ptr->valid) in get_phys_addr()
82 return win_ptr->paddr + (iova & (win_ptr->size - 1)); in get_phys_addr()
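The get_phys_addr() lines above implement the PAMU sub-window lookup: the geometry is split into win_cnt equal power-of-two sub-windows, the IOVA is rounded down to its sub-window base, and the window index is that base's distance from the aperture start divided by the sub-window size. Below is a minimal user-space sketch of the same arithmetic with made-up geometry values, assuming each window's size equals the sub-window size; it is an illustration, not the driver's code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t aperture_start = 0;           /* assumed geometry aperture start */
	uint64_t geom_size = 1ULL << 30;       /* assumed 1 GiB aperture */
	uint64_t win_cnt = 16;                 /* assumed sub-window count (power of two) */
	uint64_t iova = 0x2345678;             /* example I/O virtual address */

	uint64_t subwin_size = geom_size / win_cnt;          /* geom_size >> ilog2(win_cnt) */
	uint64_t subwin_iova = iova & ~(subwin_size - 1);    /* round down to sub-window base */
	uint64_t wnd = (subwin_iova - aperture_start) / subwin_size;
	uint64_t offset = iova & (subwin_size - 1);          /* added to win_arr[wnd].paddr */

	printf("iova 0x%jx -> sub-window %ju, offset 0x%jx\n",
	       (uintmax_t)iova, (uintmax_t)wnd, (uintmax_t)offset);
	return 0;
}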
89 struct dma_window *sub_win_ptr = &dma_domain->win_arr[0]; in map_subwins()
93 for (i = 0; i < dma_domain->win_cnt; i++) { in map_subwins()
97 ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i, in map_subwins()
101 dma_domain->snoop_id, in map_subwins()
102 dma_domain->stash_id, in map_subwins()
120 struct dma_window *wnd = &dma_domain->win_arr[0]; in map_win()
121 phys_addr_t wnd_addr = dma_domain->iommu_domain.geometry.aperture_start; in map_win()
126 wnd->size, in map_win()
128 wnd->paddr >> PAMU_PAGE_SHIFT, in map_win()
129 dma_domain->snoop_id, dma_domain->stash_id, in map_win()
130 0, wnd->prot); in map_win()
141 if (dma_domain->win_cnt > 1) in map_liodn()
151 struct dma_window *wnd = &dma_domain->win_arr[wnd_nr]; in update_liodn()
155 if (dma_domain->win_cnt > 1) { in update_liodn()
156 ret = pamu_config_spaace(liodn, dma_domain->win_cnt, wnd_nr, in update_liodn()
157 wnd->size, in update_liodn()
159 wnd->paddr >> PAMU_PAGE_SHIFT, in update_liodn()
160 dma_domain->snoop_id, in update_liodn()
161 dma_domain->stash_id, in update_liodn()
163 wnd->prot); in update_liodn()
170 wnd_addr = dma_domain->iommu_domain.geometry.aperture_start; in update_liodn()
173 wnd->size, in update_liodn()
175 wnd->paddr >> PAMU_PAGE_SHIFT, in update_liodn()
176 dma_domain->snoop_id, dma_domain->stash_id, in update_liodn()
177 0, wnd->prot); in update_liodn()
195 if (!dma_domain->win_arr) { in update_liodn_stash()
199 return -EINVAL; in update_liodn_stash()
202 for (i = 0; i < dma_domain->win_cnt; i++) { in update_liodn_stash()
236 window_addr = geom_attr->aperture_start; in pamu_set_liodn()
237 window_size = dma_domain->geom_size; in pamu_set_liodn()
243 0, dma_domain->snoop_id, in pamu_set_liodn()
244 dma_domain->stash_id, win_cnt, 0); in pamu_set_liodn()
260 0, dma_domain->snoop_id, in pamu_set_liodn()
261 dma_domain->stash_id, in pamu_set_liodn()
281 if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) { in check_size()
283 return -EINVAL; in check_size()
287 if (iova & (size - 1)) { in check_size()
289 return -EINVAL; in check_size()
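check_size() enforces two constraints visible in the lines above: the window size must be a power of two no smaller than one PAMU page, and the window start address must be naturally aligned to that size. A stand-alone sketch of the same checks follows; PAMU_PAGE_SIZE is assumed to be 4 KiB here and window_size_ok() is a hypothetical name, not the driver's function.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAMU_PAGE_SIZE 4096ULL	/* assumed 4 KiB PAMU page for illustration */

static bool window_size_ok(uint64_t size, uint64_t iova)
{
	/* Power-of-two size, at least one PAMU page. */
	if ((size & (size - 1)) || size < PAMU_PAGE_SIZE)
		return false;
	/* Window start naturally aligned to its size. */
	if (iova & (size - 1))
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", window_size_ok(1ULL << 20, 0x100000));	/* 1: 1 MiB window, aligned */
	printf("%d\n", window_size_ok(0x3000, 0));		/* 0: not a power of two */
	printf("%d\n", window_size_ok(1ULL << 20, 0x1000));	/* 0: start not size-aligned */
	return 0;
}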
297 struct fsl_dma_domain *domain; in iommu_alloc_dma_domain() local
299 domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL); in iommu_alloc_dma_domain()
300 if (!domain) in iommu_alloc_dma_domain()
303 domain->stash_id = ~(u32)0; in iommu_alloc_dma_domain()
304 domain->snoop_id = ~(u32)0; in iommu_alloc_dma_domain()
305 domain->win_cnt = pamu_get_max_subwin_cnt(); in iommu_alloc_dma_domain()
306 domain->geom_size = 0; in iommu_alloc_dma_domain()
308 INIT_LIST_HEAD(&domain->devices); in iommu_alloc_dma_domain()
310 spin_lock_init(&domain->domain_lock); in iommu_alloc_dma_domain()
312 return domain; in iommu_alloc_dma_domain()
319 list_del(&info->link); in remove_device_ref()
322 pamu_free_subwins(info->liodn); in remove_device_ref()
323 pamu_disable_liodn(info->liodn); in remove_device_ref()
326 dev_iommu_priv_set(info->dev, NULL); in remove_device_ref()
336 spin_lock_irqsave(&dma_domain->domain_lock, flags); in detach_device()
337 /* Remove the device from the domain device list */ in detach_device()
338 list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) { in detach_device()
339 if (!dev || (info->dev == dev)) in detach_device()
340 remove_device_ref(info, dma_domain->win_cnt); in detach_device()
342 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); in detach_device()
352 	 * Check here whether the device is already attached to a domain. in attach_device()
353 	 * If the device is already attached to a domain, detach it. in attach_device()
356 if (old_domain_info && old_domain_info->domain != dma_domain) { in attach_device()
358 detach_device(dev, old_domain_info->domain); in attach_device()
364 info->dev = dev; in attach_device()
365 info->liodn = liodn; in attach_device()
366 info->domain = dma_domain; in attach_device()
368 list_add(&info->link, &dma_domain->devices); in attach_device()
372 * LIODNs share the same domain in attach_device()
379 static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain, in fsl_pamu_iova_to_phys() argument
382 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); in fsl_pamu_iova_to_phys()
384 if (iova < domain->geometry.aperture_start || in fsl_pamu_iova_to_phys()
385 iova > domain->geometry.aperture_end) in fsl_pamu_iova_to_phys()
396 static void fsl_pamu_domain_free(struct iommu_domain *domain) in fsl_pamu_domain_free() argument
398 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); in fsl_pamu_domain_free()
403 dma_domain->enabled = 0; in fsl_pamu_domain_free()
404 dma_domain->mapped = 0; in fsl_pamu_domain_free()
422 	dma_domain->iommu_domain.geometry.aperture_start = 0; in fsl_pamu_domain_alloc()
423 dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1; in fsl_pamu_domain_alloc()
424 dma_domain->iommu_domain.geometry.force_aperture = true; in fsl_pamu_domain_alloc()
426 return &dma_domain->iommu_domain; in fsl_pamu_domain_alloc()
429 /* Configure geometry settings for all LIODNs associated with domain */
437 list_for_each_entry(info, &dma_domain->devices, link) { in pamu_set_domain_geometry()
438 ret = pamu_set_liodn(info->liodn, info->dev, dma_domain, in pamu_set_domain_geometry()
447 /* Update stash destination for all LIODNs associated with the domain */
453 list_for_each_entry(info, &dma_domain->devices, link) { in update_domain_stash()
454 ret = update_liodn_stash(info->liodn, dma_domain, val); in update_domain_stash()
462 /* Update domain mappings for all LIODNs associated with the domain */
468 list_for_each_entry(info, &dma_domain->devices, link) { in update_domain_mapping()
469 ret = update_liodn(info->liodn, dma_domain, wnd_nr); in update_domain_mapping()
481 list_for_each_entry(info, &dma_domain->devices, link) { in disable_domain_win()
482 if (dma_domain->win_cnt == 1 && dma_domain->enabled) { in disable_domain_win()
483 ret = pamu_disable_liodn(info->liodn); in disable_domain_win()
485 dma_domain->enabled = 0; in disable_domain_win()
487 ret = pamu_disable_spaace(info->liodn, wnd_nr); in disable_domain_win()
494 static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr) in fsl_pamu_window_disable() argument
496 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); in fsl_pamu_window_disable()
500 spin_lock_irqsave(&dma_domain->domain_lock, flags); in fsl_pamu_window_disable()
501 if (!dma_domain->win_arr) { in fsl_pamu_window_disable()
503 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); in fsl_pamu_window_disable()
507 if (wnd_nr >= dma_domain->win_cnt) { in fsl_pamu_window_disable()
509 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); in fsl_pamu_window_disable()
513 if (dma_domain->win_arr[wnd_nr].valid) { in fsl_pamu_window_disable()
516 dma_domain->win_arr[wnd_nr].valid = 0; in fsl_pamu_window_disable()
517 dma_domain->mapped--; in fsl_pamu_window_disable()
521 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); in fsl_pamu_window_disable()
524 static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr, in fsl_pamu_window_enable() argument
527 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); in fsl_pamu_window_enable()
539 spin_lock_irqsave(&dma_domain->domain_lock, flags); in fsl_pamu_window_enable()
540 if (!dma_domain->win_arr) { in fsl_pamu_window_enable()
542 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); in fsl_pamu_window_enable()
543 return -ENODEV; in fsl_pamu_window_enable()
546 if (wnd_nr >= dma_domain->win_cnt) { in fsl_pamu_window_enable()
548 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); in fsl_pamu_window_enable()
549 return -EINVAL; in fsl_pamu_window_enable()
552 win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt); in fsl_pamu_window_enable()
555 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); in fsl_pamu_window_enable()
556 return -EINVAL; in fsl_pamu_window_enable()
559 if (dma_domain->win_cnt == 1) { in fsl_pamu_window_enable()
560 if (dma_domain->enabled) { in fsl_pamu_window_enable()
562 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); in fsl_pamu_window_enable()
563 return -EBUSY; in fsl_pamu_window_enable()
566 ret = check_size(size, domain->geometry.aperture_start); in fsl_pamu_window_enable()
569 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); in fsl_pamu_window_enable()
570 return -EINVAL; in fsl_pamu_window_enable()
574 wnd = &dma_domain->win_arr[wnd_nr]; in fsl_pamu_window_enable()
575 if (!wnd->valid) { in fsl_pamu_window_enable()
576 wnd->paddr = paddr; in fsl_pamu_window_enable()
577 wnd->size = size; in fsl_pamu_window_enable()
578 wnd->prot = pamu_prot; in fsl_pamu_window_enable()
582 wnd->valid = 1; in fsl_pamu_window_enable()
583 dma_domain->mapped++; in fsl_pamu_window_enable()
587 ret = -EBUSY; in fsl_pamu_window_enable()
590 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); in fsl_pamu_window_enable()
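Taken together, the fsl_pamu_window_enable() fragments above validate a window request in two steps: the requested size may not exceed the per-window share of the geometry (geom_size >> ilog2(win_cnt)), and a single-window domain must not already be enabled and must additionally pass the check_size() test against the aperture start. The sketch below restates that flow; the function names, error values, and the simplified alignment check are illustrative only, not the driver's API.

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in for check_size(); see the sketch after check_size() above. */
static bool window_size_ok(uint64_t size, uint64_t iova)
{
	return size && !(size & (size - 1)) && !(iova & (size - 1));
}

/* Returns 0 if the request would be accepted, a negative value otherwise. */
static int validate_window(uint64_t size, uint64_t geom_size, uint32_t win_cnt,
			   uint64_t aperture_start, bool domain_enabled)
{
	uint64_t win_size = geom_size / win_cnt;   /* geom_size >> ilog2(win_cnt) */

	if (size > win_size)
		return -1;                         /* window larger than its share */

	if (win_cnt == 1) {
		if (domain_enabled)
			return -2;                 /* single-window domain already enabled */
		if (!window_size_ok(size, aperture_start))
			return -3;                 /* size/alignment constraint violated */
	}
	return 0;
}

int main(void)
{
	/* 1 MiB window in a 1 GiB geometry split into 16 sub-windows: accepted. */
	return validate_window(1ULL << 20, 1ULL << 30, 16, 0, false);
}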
596 * Attach the LIODN to the DMA domain and configure the geometry
604 struct iommu_domain *domain = &dma_domain->iommu_domain; in handle_attach_device() local
608 spin_lock_irqsave(&dma_domain->domain_lock, flags); in handle_attach_device()
613 liodn[i], dev->of_node); in handle_attach_device()
614 ret = -EINVAL; in handle_attach_device()
621 * for the domain. If yes, set the geometry for in handle_attach_device()
624 if (dma_domain->win_arr) { in handle_attach_device()
625 u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0; in handle_attach_device()
628 &domain->geometry, win_cnt); in handle_attach_device()
631 if (dma_domain->mapped) { in handle_attach_device()
642 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); in handle_attach_device()
647 static int fsl_pamu_attach_device(struct iommu_domain *domain, in fsl_pamu_attach_device() argument
650 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); in fsl_pamu_attach_device()
658 * Use LIODN of the PCI controller while attaching a in fsl_pamu_attach_device()
659 * PCI device. in fsl_pamu_attach_device()
663 pci_ctl = pci_bus_to_host(pdev->bus); in fsl_pamu_attach_device()
665 * make dev point to pci controller device in fsl_pamu_attach_device()
667 * u-boot. in fsl_pamu_attach_device()
669 dev = pci_ctl->parent; in fsl_pamu_attach_device()
672 liodn = of_get_property(dev->of_node, "fsl,liodn", &len); in fsl_pamu_attach_device()
677 pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node); in fsl_pamu_attach_device()
678 ret = -EINVAL; in fsl_pamu_attach_device()
684 static void fsl_pamu_detach_device(struct iommu_domain *domain, in fsl_pamu_detach_device() argument
687 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); in fsl_pamu_detach_device()
694 * Use LIODN of the PCI controller while detaching a in fsl_pamu_detach_device()
695 * PCI device. in fsl_pamu_detach_device()
699 pci_ctl = pci_bus_to_host(pdev->bus); in fsl_pamu_detach_device()
701 * make dev point to pci controller device in fsl_pamu_detach_device()
703 * u-boot. in fsl_pamu_detach_device()
705 dev = pci_ctl->parent; in fsl_pamu_detach_device()
708 prop = of_get_property(dev->of_node, "fsl,liodn", &len); in fsl_pamu_detach_device()
712 pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node); in fsl_pamu_detach_device()
715 static int configure_domain_geometry(struct iommu_domain *domain, void *data) in configure_domain_geometry() argument
718 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); in configure_domain_geometry()
722 geom_size = geom_attr->aperture_end - geom_attr->aperture_start + 1; in configure_domain_geometry()
727 if (check_size(geom_size, geom_attr->aperture_start) || in configure_domain_geometry()
728 !geom_attr->force_aperture) { in configure_domain_geometry()
730 return -EINVAL; in configure_domain_geometry()
733 spin_lock_irqsave(&dma_domain->domain_lock, flags); in configure_domain_geometry()
734 if (dma_domain->enabled) { in configure_domain_geometry()
735 pr_debug("Can't set geometry attributes as domain is active\n"); in configure_domain_geometry()
736 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); in configure_domain_geometry()
737 return -EBUSY; in configure_domain_geometry()
740 /* Copy the domain geometry information */ in configure_domain_geometry()
741 memcpy(&domain->geometry, geom_attr, in configure_domain_geometry()
743 dma_domain->geom_size = geom_size; in configure_domain_geometry()
745 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); in configure_domain_geometry()
750 /* Set the domain stash attribute */
757 spin_lock_irqsave(&dma_domain->domain_lock, flags); in configure_domain_stash()
759 memcpy(&dma_domain->dma_stash, stash_attr, in configure_domain_stash()
762 dma_domain->stash_id = get_stash_id(stash_attr->cache, in configure_domain_stash()
763 stash_attr->cpu); in configure_domain_stash()
764 if (dma_domain->stash_id == ~(u32)0) { in configure_domain_stash()
766 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); in configure_domain_stash()
767 return -EINVAL; in configure_domain_stash()
770 ret = update_domain_stash(dma_domain, dma_domain->stash_id); in configure_domain_stash()
772 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); in configure_domain_stash()
777 /* Configure domain dma state i.e. enable/disable DMA */
784 spin_lock_irqsave(&dma_domain->domain_lock, flags); in configure_domain_dma_state()
786 if (enable && !dma_domain->mapped) { in configure_domain_dma_state()
787 pr_debug("Can't enable DMA domain without valid mapping\n"); in configure_domain_dma_state()
788 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); in configure_domain_dma_state()
789 return -ENODEV; in configure_domain_dma_state()
792 dma_domain->enabled = enable; in configure_domain_dma_state()
793 list_for_each_entry(info, &dma_domain->devices, link) { in configure_domain_dma_state()
794 ret = (enable) ? pamu_enable_liodn(info->liodn) : in configure_domain_dma_state()
795 pamu_disable_liodn(info->liodn); in configure_domain_dma_state()
798 info->liodn); in configure_domain_dma_state()
800 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); in configure_domain_dma_state()
805 static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count) in fsl_pamu_set_windows() argument
807 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); in fsl_pamu_set_windows()
811 spin_lock_irqsave(&dma_domain->domain_lock, flags); in fsl_pamu_set_windows()
812 /* Ensure domain is inactive i.e. DMA should be disabled for the domain */ in fsl_pamu_set_windows()
813 if (dma_domain->enabled) { in fsl_pamu_set_windows()
814 pr_debug("Can't set geometry attributes as domain is active\n"); in fsl_pamu_set_windows()
815 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); in fsl_pamu_set_windows()
816 return -EBUSY; in fsl_pamu_set_windows()
819 /* Ensure that the geometry has been set for the domain */ in fsl_pamu_set_windows()
820 if (!dma_domain->geom_size) { in fsl_pamu_set_windows()
822 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); in fsl_pamu_set_windows()
823 return -EINVAL; in fsl_pamu_set_windows()
832 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); in fsl_pamu_set_windows()
833 return -EINVAL; in fsl_pamu_set_windows()
836 ret = pamu_set_domain_geometry(dma_domain, &domain->geometry, in fsl_pamu_set_windows()
839 kfree(dma_domain->win_arr); in fsl_pamu_set_windows()
840 dma_domain->win_arr = kcalloc(w_count, in fsl_pamu_set_windows()
841 sizeof(*dma_domain->win_arr), in fsl_pamu_set_windows()
843 if (!dma_domain->win_arr) { in fsl_pamu_set_windows()
844 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); in fsl_pamu_set_windows()
845 return -ENOMEM; in fsl_pamu_set_windows()
847 dma_domain->win_cnt = w_count; in fsl_pamu_set_windows()
849 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); in fsl_pamu_set_windows()
854 static int fsl_pamu_set_domain_attr(struct iommu_domain *domain, in fsl_pamu_set_domain_attr() argument
857 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); in fsl_pamu_set_domain_attr()
862 ret = configure_domain_geometry(domain, data); in fsl_pamu_set_domain_attr()
871 ret = fsl_pamu_set_windows(domain, *(u32 *)data); in fsl_pamu_set_domain_attr()
875 ret = -EINVAL; in fsl_pamu_set_domain_attr()
882 static int fsl_pamu_get_domain_attr(struct iommu_domain *domain, in fsl_pamu_get_domain_attr() argument
885 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); in fsl_pamu_get_domain_attr()
890 memcpy(data, &dma_domain->dma_stash, in fsl_pamu_get_domain_attr()
894 *(int *)data = dma_domain->enabled; in fsl_pamu_get_domain_attr()
900 *(u32 *)data = dma_domain->win_cnt; in fsl_pamu_get_domain_attr()
904 ret = -EINVAL; in fsl_pamu_get_domain_attr()
926 	/* Check the PCI controller version number by reading the BRR1 register */ in check_pci_ctl_endpt_part()
927 version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2)); in check_pci_ctl_endpt_part()
929 /* If PCI controller version is >= 0x204 we can partition endpoints */ in check_pci_ctl_endpt_part()
938 struct pci_bus *bus = pdev->bus; in get_shared_pci_device_group()
941 	 * Traverse the pci bus device list to get in get_shared_pci_device_group()
945 list_for_each_entry(tmp, &bus->devices, bus_list) { in get_shared_pci_device_group()
948 group = iommu_group_get(&tmp->dev); in get_shared_pci_device_group()
953 bus = bus->parent; in get_shared_pci_device_group()
965 pci_ctl = pci_bus_to_host(pdev->bus); in get_pci_device_group()
969 group = pci_device_group(&pdev->dev); in get_pci_device_group()
975 if (pci_ctl->parent->iommu_group) in get_pci_device_group()
976 iommu_group_remove_device(pci_ctl->parent); in get_pci_device_group()
980 * PCI controllers device group. If this is the first in get_pci_device_group()
981 * device to be probed for the pci controller, copy the in get_pci_device_group()
982 * device group information from the PCI controller device in get_pci_device_group()
983 * node and remove the PCI controller iommu group. in get_pci_device_group()
988 if (pci_ctl->parent->iommu_group) { in get_pci_device_group()
989 group = get_device_iommu_group(pci_ctl->parent); in get_pci_device_group()
990 iommu_group_remove_device(pci_ctl->parent); in get_pci_device_group()
997 group = ERR_PTR(-ENODEV); in get_pci_device_group()
1004 struct iommu_group *group = ERR_PTR(-ENODEV); in fsl_pamu_device_group()
1013 else if (of_get_property(dev->of_node, "fsl,liodn", &len)) in fsl_pamu_device_group()