
Lines Matching refs:gart

58 #define FLUSH_GART_REGS(gart)	readl_relaxed((gart)->regs + GART_CONFIG)  argument
60 #define for_each_gart_pte(gart, iova) \ argument
61 for (iova = gart->iovmm_base; \
62 iova < gart->iovmm_end; \
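The two macros above are the driver's basic plumbing: FLUSH_GART_REGS() does a dummy read-back of GART_CONFIG so that posted MMIO writes are guaranteed to have reached the hardware, and for_each_gart_pte() walks the IOVA aperture one page at a time. A minimal sketch of the iteration macro, assuming the step clause (not shown by the match listing) advances by GART_PAGE_SIZE:

	/* Sketch only: the increment clause is an assumption, since the match
	 * listing shows just the lines that reference "gart". */
	#define for_each_gart_pte(gart, iova)				\
		for (iova = gart->iovmm_base;				\
		     iova < gart->iovmm_end;				\
		     iova += GART_PAGE_SIZE)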
65 static inline void gart_set_pte(struct gart_device *gart, in gart_set_pte() argument
68 writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR); in gart_set_pte()
69 writel_relaxed(pte, gart->regs + GART_ENTRY_DATA); in gart_set_pte()
72 static inline unsigned long gart_read_pte(struct gart_device *gart, in gart_read_pte() argument
77 writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR); in gart_read_pte()
78 pte = readl_relaxed(gart->regs + GART_ENTRY_DATA); in gart_read_pte()
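gart_set_pte() and gart_read_pte() show the GART's indirect register interface: the target IOVA is first written to GART_ENTRY_ADDR to select a slot, and the entry itself is then written to or read from GART_ENTRY_DATA. A sketch of both helpers with the argument lists the listing elides (parameter types are assumed from the call sites):

	static inline void gart_set_pte(struct gart_device *gart,
					unsigned long iova, unsigned long pte)
	{
		writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);	/* select the slot */
		writel_relaxed(pte, gart->regs + GART_ENTRY_DATA);	/* write the entry */
	}

	static inline unsigned long gart_read_pte(struct gart_device *gart,
						  unsigned long iova)
	{
		writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);	/* select the slot */
		return readl_relaxed(gart->regs + GART_ENTRY_DATA);	/* read the entry */
	}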
83 static void do_gart_setup(struct gart_device *gart, const u32 *data) in do_gart_setup() argument
87 for_each_gart_pte(gart, iova) in do_gart_setup()
88 gart_set_pte(gart, iova, data ? *(data++) : 0); in do_gart_setup()
90 writel_relaxed(1, gart->regs + GART_CONFIG); in do_gart_setup()
91 FLUSH_GART_REGS(gart); in do_gart_setup()
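do_gart_setup() programs every PTE in the aperture, either from a saved image (data) or to zero, then enables translation through GART_CONFIG and flushes the posted writes. Reconstructed as a sketch from the fragments above:

	static void do_gart_setup(struct gart_device *gart, const u32 *data)
	{
		unsigned long iova;

		/* Restore saved entries, or clear the whole table when data is NULL. */
		for_each_gart_pte(gart, iova)
			gart_set_pte(gart, iova, data ? *(data++) : 0);

		/* Enable translation, then read back so the writes take effect. */
		writel_relaxed(1, gart->regs + GART_CONFIG);
		FLUSH_GART_REGS(gart);
	}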
94 static inline bool gart_iova_range_invalid(struct gart_device *gart, in gart_iova_range_invalid() argument
97 return unlikely(iova < gart->iovmm_base || bytes != GART_PAGE_SIZE || in gart_iova_range_invalid()
98 iova + bytes > gart->iovmm_end); in gart_iova_range_invalid()
101 static inline bool gart_pte_valid(struct gart_device *gart, unsigned long iova) in gart_pte_valid() argument
103 return !!(gart_read_pte(gart, iova) & GART_ENTRY_PHYS_ADDR_VALID); in gart_pte_valid()
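These two predicates encode the hardware's constraints: the GART maps only single GART_PAGE_SIZE pages, so any request that is not exactly one page inside the [iovmm_base, iovmm_end) aperture is rejected, and a PTE counts as valid when GART_ENTRY_PHYS_ADDR_VALID is set. Sketch (parameter types assumed):

	static inline bool gart_iova_range_invalid(struct gart_device *gart,
						   unsigned long iova, size_t bytes)
	{
		return unlikely(iova < gart->iovmm_base || bytes != GART_PAGE_SIZE ||
				iova + bytes > gart->iovmm_end);
	}

	static inline bool gart_pte_valid(struct gart_device *gart, unsigned long iova)
	{
		return !!(gart_read_pte(gart, iova) & GART_ENTRY_PHYS_ADDR_VALID);
	}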
109 struct gart_device *gart = gart_handle; in gart_iommu_attach_dev() local
112 spin_lock(&gart->dom_lock); in gart_iommu_attach_dev()
114 if (gart->active_domain && gart->active_domain != domain) { in gart_iommu_attach_dev()
118 gart->active_domain = domain; in gart_iommu_attach_dev()
119 gart->active_devices++; in gart_iommu_attach_dev()
122 spin_unlock(&gart->dom_lock); in gart_iommu_attach_dev()
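The GART has a single translation aperture, so only one IOMMU domain can be active at a time: attach takes dom_lock, refuses a second, different domain, and otherwise records the domain and counts the attaching device. A simplified sketch of that bookkeeping (per-device state handling and the exact error code are assumptions):

	static int gart_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
	{
		struct gart_device *gart = gart_handle;
		int ret = 0;

		spin_lock(&gart->dom_lock);
		if (gart->active_domain && gart->active_domain != domain) {
			ret = -EBUSY;		/* another domain already owns the GART */
		} else {
			gart->active_domain = domain;
			gart->active_devices++;	/* devices sharing the active domain */
		}
		spin_unlock(&gart->dom_lock);

		return ret;
	}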
130 struct gart_device *gart = gart_handle; in gart_iommu_detach_dev() local
132 spin_lock(&gart->dom_lock); in gart_iommu_detach_dev()
137 if (--gart->active_devices == 0) in gart_iommu_detach_dev()
138 gart->active_domain = NULL; in gart_iommu_detach_dev()
141 spin_unlock(&gart->dom_lock); in gart_iommu_detach_dev()
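Detach is the mirror image: under the same lock it drops the device count and, when the last device goes away, clears active_domain so another domain can attach. Sketch:

	static void gart_iommu_detach_dev(struct iommu_domain *domain, struct device *dev)
	{
		struct gart_device *gart = gart_handle;

		spin_lock(&gart->dom_lock);
		if (--gart->active_devices == 0)
			gart->active_domain = NULL;	/* last user gone, release the GART */
		spin_unlock(&gart->dom_lock);
	}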
167 static inline int __gart_iommu_map(struct gart_device *gart, unsigned long iova, in __gart_iommu_map() argument
170 if (unlikely(gart_debug && gart_pte_valid(gart, iova))) { in __gart_iommu_map()
171 dev_err(gart->dev, "Page entry is in-use\n"); in __gart_iommu_map()
175 gart_set_pte(gart, iova, GART_ENTRY_PHYS_ADDR_VALID | pa); in __gart_iommu_map()
183 struct gart_device *gart = gart_handle; in gart_iommu_map() local
186 if (gart_iova_range_invalid(gart, iova, bytes)) in gart_iommu_map()
189 spin_lock(&gart->pte_lock); in gart_iommu_map()
190 ret = __gart_iommu_map(gart, iova, (unsigned long)pa); in gart_iommu_map()
191 spin_unlock(&gart->pte_lock); in gart_iommu_map()
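Mapping is split in two: gart_iommu_map() validates the range and serializes PTE updates with pte_lock, while __gart_iommu_map() optionally complains (when gart_debug is set) about overwriting a live entry and then writes the PTE as the physical address ORed with the valid bit. A sketch of the pair; the iommu_ops map prototype differs between kernel versions, so the one used here is an assumption:

	static inline int __gart_iommu_map(struct gart_device *gart, unsigned long iova,
					   unsigned long pa)
	{
		if (unlikely(gart_debug && gart_pte_valid(gart, iova))) {
			dev_err(gart->dev, "Page entry is in-use\n");
			return -EINVAL;
		}

		gart_set_pte(gart, iova, GART_ENTRY_PHYS_ADDR_VALID | pa);

		return 0;
	}

	static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
				  phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
	{
		struct gart_device *gart = gart_handle;
		int ret;

		if (gart_iova_range_invalid(gart, iova, bytes))
			return -EINVAL;

		spin_lock(&gart->pte_lock);
		ret = __gart_iommu_map(gart, iova, (unsigned long)pa);
		spin_unlock(&gart->pte_lock);

		return ret;
	}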
196 static inline int __gart_iommu_unmap(struct gart_device *gart, in __gart_iommu_unmap() argument
199 if (unlikely(gart_debug && !gart_pte_valid(gart, iova))) { in __gart_iommu_unmap()
200 dev_err(gart->dev, "Page entry is invalid\n"); in __gart_iommu_unmap()
204 gart_set_pte(gart, iova, 0); in __gart_iommu_unmap()
212 struct gart_device *gart = gart_handle; in gart_iommu_unmap() local
215 if (gart_iova_range_invalid(gart, iova, bytes)) in gart_iommu_unmap()
218 spin_lock(&gart->pte_lock); in gart_iommu_unmap()
219 err = __gart_iommu_unmap(gart, iova); in gart_iommu_unmap()
220 spin_unlock(&gart->pte_lock); in gart_iommu_unmap()
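Unmapping follows the same pattern: the outer gart_iommu_unmap() checks the range and takes pte_lock, and the inner helper clears the entry, again with an optional gart_debug warning if the PTE was not mapped in the first place. Sketch of the inner helper:

	static inline int __gart_iommu_unmap(struct gart_device *gart,
					     unsigned long iova)
	{
		if (unlikely(gart_debug && !gart_pte_valid(gart, iova))) {
			dev_err(gart->dev, "Page entry is invalid\n");
			return -EINVAL;
		}

		gart_set_pte(gart, iova, 0);	/* clear address and valid bit */

		return 0;
	}

The outer wrapper then translates that result into the iommu_ops contract, reporting how many bytes were actually unmapped.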
228 struct gart_device *gart = gart_handle; in gart_iommu_iova_to_phys() local
231 if (gart_iova_range_invalid(gart, iova, GART_PAGE_SIZE)) in gart_iommu_iova_to_phys()
234 spin_lock(&gart->pte_lock); in gart_iommu_iova_to_phys()
235 pte = gart_read_pte(gart, iova); in gart_iommu_iova_to_phys()
236 spin_unlock(&gart->pte_lock); in gart_iommu_iova_to_phys()
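gart_iommu_iova_to_phys() validates the address, reads the PTE under pte_lock, and masks off the control bits to recover the physical page address. Sketch; the mask name and the behaviour for an out-of-range address are assumptions:

	static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
						   dma_addr_t iova)
	{
		struct gart_device *gart = gart_handle;
		unsigned long pte;

		if (gart_iova_range_invalid(gart, iova, GART_PAGE_SIZE))
			return 0;		/* no translation for this address */

		spin_lock(&gart->pte_lock);
		pte = gart_read_pte(gart, iova);
		spin_unlock(&gart->pte_lock);

		return pte & GART_PAGE_MASK;	/* strip GART_ENTRY_PHYS_ADDR_VALID */
	}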
293 int tegra_gart_suspend(struct gart_device *gart) in tegra_gart_suspend() argument
295 u32 *data = gart->savedata; in tegra_gart_suspend()
303 writel_relaxed(0, gart->regs + GART_CONFIG); in tegra_gart_suspend()
304 FLUSH_GART_REGS(gart); in tegra_gart_suspend()
306 for_each_gart_pte(gart, iova) in tegra_gart_suspend()
307 *(data++) = gart_read_pte(gart, iova); in tegra_gart_suspend()
312 int tegra_gart_resume(struct gart_device *gart) in tegra_gart_resume() argument
314 do_gart_setup(gart, gart->savedata); in tegra_gart_resume()
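The suspend/resume pair assumes register state is lost across power gating: tegra_gart_suspend() disables translation and snapshots every PTE into gart->savedata, and tegra_gart_resume() replays that snapshot through do_gart_setup(). Sketch:

	int tegra_gart_suspend(struct gart_device *gart)
	{
		u32 *data = gart->savedata;
		unsigned long iova;

		/* Stop translation and flush before dumping the table. */
		writel_relaxed(0, gart->regs + GART_CONFIG);
		FLUSH_GART_REGS(gart);

		for_each_gart_pte(gart, iova)
			*(data++) = gart_read_pte(gart, iova);

		return 0;
	}

	int tegra_gart_resume(struct gart_device *gart)
	{
		do_gart_setup(gart, gart->savedata);

		return 0;
	}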
321 struct gart_device *gart; in tegra_gart_probe() local
334 gart = kzalloc(sizeof(*gart), GFP_KERNEL); in tegra_gart_probe()
335 if (!gart) in tegra_gart_probe()
338 gart_handle = gart; in tegra_gart_probe()
340 gart->dev = dev; in tegra_gart_probe()
341 gart->regs = mc->regs + GART_REG_BASE; in tegra_gart_probe()
342 gart->iovmm_base = res->start; in tegra_gart_probe()
343 gart->iovmm_end = res->end + 1; in tegra_gart_probe()
344 spin_lock_init(&gart->pte_lock); in tegra_gart_probe()
345 spin_lock_init(&gart->dom_lock); in tegra_gart_probe()
347 do_gart_setup(gart, NULL); in tegra_gart_probe()
349 err = iommu_device_sysfs_add(&gart->iommu, dev, NULL, "gart"); in tegra_gart_probe()
353 iommu_device_set_ops(&gart->iommu, &gart_iommu_ops); in tegra_gart_probe()
354 iommu_device_set_fwnode(&gart->iommu, dev->fwnode); in tegra_gart_probe()
356 err = iommu_device_register(&gart->iommu); in tegra_gart_probe()
360 gart->savedata = vmalloc(resource_size(res) / GART_PAGE_SIZE * in tegra_gart_probe()
362 if (!gart->savedata) { in tegra_gart_probe()
367 return gart; in tegra_gart_probe()
370 iommu_device_unregister(&gart->iommu); in tegra_gart_probe()
372 iommu_device_sysfs_remove(&gart->iommu); in tegra_gart_probe()
374 kfree(gart); in tegra_gart_probe()
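The probe fragments outline the usual allocate/register/unwind pattern: a gart_device is allocated and wired to the memory-controller register window (GART_REG_BASE into mc->regs) and to the aperture described by the resource, the hardware is cleared with do_gart_setup(gart, NULL), the IOMMU core objects are registered, and a savedata buffer of one u32 per page is vmalloc'ed for suspend. On failure the steps are undone in reverse order. A sketch of that unwinding skeleton; the probe prototype and the goto label names are assumptions:

	struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc,
					     const struct resource *res)
	{
		struct gart_device *gart;
		int err;

		gart = kzalloc(sizeof(*gart), GFP_KERNEL);
		if (!gart)
			return ERR_PTR(-ENOMEM);

		gart_handle = gart;

		gart->dev = dev;
		gart->regs = mc->regs + GART_REG_BASE;
		gart->iovmm_base = res->start;
		gart->iovmm_end = res->end + 1;
		spin_lock_init(&gart->pte_lock);
		spin_lock_init(&gart->dom_lock);

		do_gart_setup(gart, NULL);		/* start with an empty table */

		err = iommu_device_sysfs_add(&gart->iommu, dev, NULL, "gart");
		if (err)
			goto free_gart;

		iommu_device_set_ops(&gart->iommu, &gart_iommu_ops);
		iommu_device_set_fwnode(&gart->iommu, dev->fwnode);

		err = iommu_device_register(&gart->iommu);
		if (err)
			goto remove_sysfs;

		/* One u32 snapshot slot per GART page, used by tegra_gart_suspend(). */
		gart->savedata = vmalloc(resource_size(res) / GART_PAGE_SIZE * sizeof(u32));
		if (!gart->savedata) {
			err = -ENOMEM;
			goto unregister_iommu;
		}

		return gart;

	unregister_iommu:
		iommu_device_unregister(&gart->iommu);
	remove_sysfs:
		iommu_device_sysfs_remove(&gart->iommu);
	free_gart:
		kfree(gart);

		return ERR_PTR(err);
	}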