// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/adreno-smmu-priv.h>
#include <linux/io-pgtable.h>
#include "msm_drv.h"
#include "msm_mmu.h"

struct msm_iommu {
	struct msm_mmu base;
	struct iommu_domain *domain;
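	/* Number of per-process (TTBR0) pagetables attached, see msm_iommu_pagetable_create()/destroy() */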
	atomic_t pagetables;
};

#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)

struct msm_iommu_pagetable {
	struct msm_mmu base;
	struct msm_mmu *parent;
	struct io_pgtable_ops *pgtbl_ops;
	const struct iommu_flush_ops *tlb;
	struct device *iommu_dev;
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	phys_addr_t ttbr;
	u32 asid;
};

static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
{
	return container_of(mmu, struct msm_iommu_pagetable, base);
}

/* based on iommu_pgsize() in iommu.c: */
static size_t calc_pgsize(struct msm_iommu_pagetable *pagetable,
		unsigned long iova, phys_addr_t paddr,
		size_t size, size_t *count)
{
	unsigned int pgsize_idx, pgsize_idx_next;
	unsigned long pgsizes;
	size_t offset, pgsize, pgsize_next;
	unsigned long addr_merge = paddr | iova;

	/* Page sizes supported by the hardware and small enough for @size */
	pgsizes = pagetable->pgsize_bitmap & GENMASK(__fls(size), 0);

	/* Constrain the page sizes further based on the maximum alignment */
	if (likely(addr_merge))
		pgsizes &= GENMASK(__ffs(addr_merge), 0);

	/* Make sure we have at least one suitable page size */
	BUG_ON(!pgsizes);

	/* Pick the biggest page size remaining */
	pgsize_idx = __fls(pgsizes);
	pgsize = BIT(pgsize_idx);
	if (!count)
		return pgsize;

	/* Find the next biggest supported page size, if it exists */
	pgsizes = pagetable->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
	if (!pgsizes)
		goto out_set_count;

	pgsize_idx_next = __ffs(pgsizes);
	pgsize_next = BIT(pgsize_idx_next);

	/*
	 * There's no point trying a bigger page size unless the virtual
	 * and physical addresses are similarly offset within the larger page.
	 */
	if ((iova ^ paddr) & (pgsize_next - 1))
		goto out_set_count;

	/* Calculate the offset to the next page size alignment boundary */
	offset = pgsize_next - (addr_merge & (pgsize_next - 1));

	/*
	 * If size is big enough to accommodate the larger page, reduce
	 * the number of smaller pages.
	 */
	if (offset + pgsize_next <= size)
		size = offset;

out_set_count:
	*count = size >> pgsize_idx;
	return pgsize;
}

static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
		size_t size)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct io_pgtable_ops *ops = pagetable->pgtbl_ops;

	while (size) {
		size_t unmapped, pgsize, count;

		pgsize = calc_pgsize(pagetable, iova, iova, size, &count);

		unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL);
		if (!unmapped)
			break;

		iova += unmapped;
		size -= unmapped;
	}
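	/*
	 * The tlb_add_page() callback is a no-op for these pagetables (see
	 * tlb_ops below), so do a full TLB flush on the parent's domain once
	 * the range has been unmapped.
	 */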
	iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain);

	return (size == 0) ? 0 : -EINVAL;
}

static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
		struct sg_table *sgt, size_t len, int prot)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
	struct scatterlist *sg;
	u64 addr = iova;
	unsigned int i;

	for_each_sgtable_sg(sgt, sg, i) {
		size_t size = sg->length;
		phys_addr_t phys = sg_phys(sg);

		while (size) {
			size_t pgsize, count, mapped = 0;
			int ret;

			pgsize = calc_pgsize(pagetable, addr, phys, size, &count);

			ret = ops->map_pages(ops, addr, phys, pgsize, count,
					prot, GFP_KERNEL, &mapped);

			/* map_pages could fail after mapping some of the pages,
			 * so update the counters before error handling.
			 */
			phys += mapped;
			addr += mapped;
			size -= mapped;

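			/* On failure, tear down everything mapped so far for this buffer */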
			if (ret) {
				msm_iommu_pagetable_unmap(mmu, iova, addr - iova);
				return -EINVAL;
			}
		}
	}

	return 0;
}

static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);
	struct adreno_smmu_priv *adreno_smmu =
		dev_get_drvdata(pagetable->parent->dev);

	/*
	 * If this is the last attached pagetable for the parent,
	 * disable TTBR0 in the arm-smmu driver
	 */
	if (atomic_dec_return(&iommu->pagetables) == 0)
		adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);

	free_io_pgtable_ops(pagetable->pgtbl_ops);
	kfree(pagetable);
}

int msm_iommu_pagetable_params(struct msm_mmu *mmu,
		phys_addr_t *ttbr, int *asid)
{
	struct msm_iommu_pagetable *pagetable;

	if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)
		return -EINVAL;

	pagetable = to_pagetable(mmu);

	if (ttbr)
		*ttbr = pagetable->ttbr;

	if (asid)
		*asid = pagetable->asid;

	return 0;
}

struct iommu_domain_geometry *msm_iommu_get_geometry(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	return &iommu->domain->geometry;
}

int
msm_iommu_pagetable_walk(struct msm_mmu *mmu, unsigned long iova, uint64_t ptes[4])
{
	struct msm_iommu_pagetable *pagetable;
	struct arm_lpae_io_pgtable_walk_data wd = {};
	struct io_pgtable_walk_common walker = {
		.data = &wd,
	};

	if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)
		return -EINVAL;

	pagetable = to_pagetable(mmu);

	if (!pagetable->pgtbl_ops->pgtable_walk)
		return -EINVAL;

	pagetable->pgtbl_ops->pgtable_walk(pagetable->pgtbl_ops, iova, 1, &walker);

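	/* Copy back the PTE observed at each level of the walk */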
	for (int i = 0; i < ARRAY_SIZE(wd.ptes); i++)
		ptes[i] = wd.ptes[i];

	return 0;
}

static const struct msm_mmu_funcs pagetable_funcs = {
	.map = msm_iommu_pagetable_map,
	.unmap = msm_iommu_pagetable_unmap,
	.destroy = msm_iommu_pagetable_destroy,
};

static void msm_iommu_tlb_flush_all(void *cookie)
{
	struct msm_iommu_pagetable *pagetable = cookie;
	struct adreno_smmu_priv *adreno_smmu;

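	/* Only flush if the SMMU is currently powered on and in use */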
	if (!pm_runtime_get_if_in_use(pagetable->iommu_dev))
		return;

	adreno_smmu = dev_get_drvdata(pagetable->parent->dev);

	pagetable->tlb->tlb_flush_all((void *)adreno_smmu->cookie);

	pm_runtime_put_autosuspend(pagetable->iommu_dev);
}

static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
		size_t granule, void *cookie)
{
	struct msm_iommu_pagetable *pagetable = cookie;
	struct adreno_smmu_priv *adreno_smmu;

	if (!pm_runtime_get_if_in_use(pagetable->iommu_dev))
		return;

	adreno_smmu = dev_get_drvdata(pagetable->parent->dev);

	pagetable->tlb->tlb_flush_walk(iova, size, granule, (void *)adreno_smmu->cookie);

	pm_runtime_put_autosuspend(pagetable->iommu_dev);
}

static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
		unsigned long iova, size_t granule, void *cookie)
{
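	/* No-op: msm_iommu_pagetable_unmap() does a full TLB flush instead */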
}

static const struct iommu_flush_ops tlb_ops = {
	.tlb_flush_all = msm_iommu_tlb_flush_all,
	.tlb_flush_walk = msm_iommu_tlb_flush_walk,
	.tlb_add_page = msm_iommu_tlb_add_page,
};

static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
		unsigned long iova, int flags, void *arg);

struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
	struct msm_iommu *iommu = to_msm_iommu(parent);
	struct msm_iommu_pagetable *pagetable;
	const struct io_pgtable_cfg *ttbr1_cfg = NULL;
	struct io_pgtable_cfg ttbr0_cfg;
	int ret;

	/* Get the pagetable configuration from the domain */
	if (adreno_smmu->cookie)
		ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);

	/*
	 * If you hit this WARN_ONCE() you are probably missing an entry in
	 * qcom_smmu_impl_of_match[] in arm-smmu-qcom.c
	 */
	if (WARN_ONCE(!ttbr1_cfg, "No per-process page tables"))
		return ERR_PTR(-ENODEV);

	pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
	if (!pagetable)
		return ERR_PTR(-ENOMEM);

	msm_mmu_init(&pagetable->base, parent->dev, &pagetable_funcs,
		MSM_MMU_IOMMU_PAGETABLE);

	/* Clone the TTBR1 cfg as starting point for TTBR0 cfg: */
	ttbr0_cfg = *ttbr1_cfg;

	/* The incoming cfg will have the TTBR1 quirk enabled */
	ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
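	/* Route TLB maintenance through the runtime-PM-aware hooks above */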
	ttbr0_cfg.tlb = &tlb_ops;

	pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
		&ttbr0_cfg, pagetable);

	if (!pagetable->pgtbl_ops) {
		kfree(pagetable);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * If this is the first pagetable that we've allocated, send it back to
	 * the arm-smmu driver as a trigger to set up TTBR0
	 */
	if (atomic_inc_return(&iommu->pagetables) == 1) {
		ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg);
		if (ret) {
			free_io_pgtable_ops(pagetable->pgtbl_ops);
			kfree(pagetable);
			return ERR_PTR(ret);
		}
	}

	/* Needed later for TLB flush */
	pagetable->parent = parent;
	pagetable->tlb = ttbr1_cfg->tlb;
	pagetable->iommu_dev = ttbr1_cfg->iommu_dev;
	pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
	pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;

	/*
	 * TODO we would like each set of page tables to have a unique ASID
	 * to optimize TLB invalidation. But iommu_flush_iotlb_all() will
	 * end up flushing the ASID used for TTBR1 pagetables, which is not
	 * what we want. So for now just use the same ASID as TTBR1.
	 */
	pagetable->asid = 0;

	return &pagetable->base;
}

static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
		unsigned long iova, int flags, void *arg)
{
	struct msm_iommu *iommu = arg;
	struct msm_mmu *mmu = &iommu->base;
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(iommu->base.dev);
	struct adreno_smmu_fault_info info, *ptr = NULL;

	if (adreno_smmu->get_fault_info) {
		adreno_smmu->get_fault_info(adreno_smmu->cookie, &info);
		ptr = &info;
	}

	if (iommu->base.handler)
		return iommu->base.handler(iommu->base.arg, iova, flags, ptr);

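	/* No driver fault handler installed, just log the fault and resume translation */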
	pr_warn_ratelimited("*** fault: iova=%16lx, flags=%d\n", iova, flags);

	if (mmu->funcs->resume_translation)
		mmu->funcs->resume_translation(mmu);

	return 0;
}

static void msm_iommu_resume_translation(struct msm_mmu *mmu)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(mmu->dev);

	if (adreno_smmu->resume_translation)
		adreno_smmu->resume_translation(adreno_smmu->cookie, true);
}

static void msm_iommu_detach(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	iommu_detach_device(iommu->domain, mmu->dev);
}

static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt, size_t len, int prot)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	size_t ret;

	/* The arm-smmu driver expects the addresses to be sign extended */
	if (iova & BIT_ULL(48))
		iova |= GENMASK_ULL(63, 49);

	ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot);
	WARN_ON(!ret);

	return (ret == len) ? 0 : -EINVAL;
}

static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

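	/* The arm-smmu driver expects the addresses to be sign extended (see msm_iommu_map()) */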
	if (iova & BIT_ULL(48))
		iova |= GENMASK_ULL(63, 49);

	iommu_unmap(iommu->domain, iova, len);

	return 0;
}

static void msm_iommu_destroy(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	iommu_domain_free(iommu->domain);
	kfree(iommu);
}

static const struct msm_mmu_funcs funcs = {
	.detach = msm_iommu_detach,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.destroy = msm_iommu_destroy,
	.resume_translation = msm_iommu_resume_translation,
};

struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks)
{
	struct iommu_domain *domain;
	struct msm_iommu *iommu;
	int ret;

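	/* Return NULL (not an error pointer) when the device is not behind an IOMMU */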
	if (!device_iommu_mapped(dev))
		return NULL;

	domain = iommu_paging_domain_alloc(dev);
	if (IS_ERR(domain))
		return ERR_CAST(domain);

	iommu_set_pgtable_quirks(domain, quirks);

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu) {
		iommu_domain_free(domain);
		return ERR_PTR(-ENOMEM);
	}

	iommu->domain = domain;
	msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);

	atomic_set(&iommu->pagetables, 0);

	ret = iommu_attach_device(iommu->domain, dev);
	if (ret) {
		iommu_domain_free(domain);
		kfree(iommu);
		return ERR_PTR(ret);
	}

	return &iommu->base;
}

struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsigned long quirks)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);
	struct msm_iommu *iommu;
	struct msm_mmu *mmu;

	mmu = msm_iommu_new(dev, quirks);
	if (IS_ERR_OR_NULL(mmu))
		return mmu;

	iommu = to_msm_iommu(mmu);
	iommu_set_fault_handler(iommu->domain, msm_fault_handler, iommu);

	/* Enable stall on iommu fault: */
	if (adreno_smmu->set_stall)
		adreno_smmu->set_stall(adreno_smmu->cookie, true);

	return mmu;
}