// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */

#include <drm/panfrost_drm.h>

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/shmem_fs.h>
#include <linux/sizes.h>

#include "panfrost_device.h"
#include "panfrost_mmu.h"
#include "panfrost_gem.h"
#include "panfrost_features.h"
#include "panfrost_regs.h"

#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define mmu_read(dev, reg) readl(dev->iomem + reg)

static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
	int ret;
	u32 val;

	/* Wait for the MMU status to indicate there is no active command, in
	 * case one is pending. */
	ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
		val, !(val & AS_STATUS_AS_ACTIVE), 10, 1000);

	if (ret)
		dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");

	return ret;
}

static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
{
	int status;

	/* write AS_COMMAND when MMU is ready to accept another command */
	status = wait_ready(pfdev, as_nr);
	if (!status)
		mmu_write(pfdev, AS_COMMAND(as_nr), cmd);

	return status;
}

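/*
 * Program the AS lock region covering [iova, iova + size) so that a
 * following AS command (LOCK/FLUSH) operates on that range only.
 */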
static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
			u64 iova, u64 size)
{
	u8 region_width;
	u64 region = iova & PAGE_MASK;

	/* The size is encoded as ceil(log2(size)) - 1, which may be
	 * calculated with fls. The size must be clamped to hardware bounds.
	 */
	size = max_t(u64, size, AS_LOCK_REGION_MIN_SIZE);
	region_width = fls64(size - 1) - 1;
	region |= region_width;

	/* Lock the region that needs to be updated */
	mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xFFFFFFFFUL);
	mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), (region >> 32) & 0xFFFFFFFFUL);
	write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
}

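/* Issue an AS command. The caller must already hold pfdev->as_lock. */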
static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
				      u64 iova, u64 size, u32 op)
{
	if (as_nr < 0)
		return 0;

	if (op != AS_COMMAND_UNLOCK)
		lock_region(pfdev, as_nr, iova, size);

	/* Run the MMU operation */
	write_cmd(pfdev, as_nr, op);

	/* Wait for the flush to complete */
	return wait_ready(pfdev, as_nr);
}

static int mmu_hw_do_operation(struct panfrost_device *pfdev,
			       struct panfrost_mmu *mmu,
			       u64 iova, u64 size, u32 op)
{
	int ret;

	spin_lock(&pfdev->as_lock);
	ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);
	spin_unlock(&pfdev->as_lock);
	return ret;
}

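/*
 * Program the translation table base and memory attributes for an AS,
 * flushing any stale state first, then latch them with AS_COMMAND_UPDATE.
 */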
static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	int as_nr = mmu->as;
	struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg;
	u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
	u64 memattr = cfg->arm_mali_lpae_cfg.memattr;

	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);

	/* Need to revisit mem attrs.
	 * NC is the default, Mali driver is inner WT.
	 */
	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), memattr & 0xffffffffUL);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), memattr >> 32);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);

	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

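/*
 * Bind an MMU context to a hardware address space. Reuse the AS already
 * assigned to this context if it has one, otherwise grab a free AS or
 * reclaim the least-recently-used AS that has no active users.
 */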
u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	int as;

	spin_lock(&pfdev->as_lock);

	as = mmu->as;
	if (as >= 0) {
		int en = atomic_inc_return(&mmu->as_count);

		/*
		 * AS can be retained by active jobs or a perfcnt context,
		 * hence the '+ 1' here.
		 */
		WARN_ON(en >= (NUM_JOB_SLOTS + 1));

		list_move(&mmu->list, &pfdev->as_lru_list);
		goto out;
	}

	/* Check for a free AS */
	as = ffz(pfdev->as_alloc_mask);
	if (!(BIT(as) & pfdev->features.as_present)) {
		struct panfrost_mmu *lru_mmu;

		list_for_each_entry_reverse(lru_mmu, &pfdev->as_lru_list, list) {
			if (!atomic_read(&lru_mmu->as_count))
				break;
		}
		WARN_ON(&lru_mmu->list == &pfdev->as_lru_list);

		list_del_init(&lru_mmu->list);
		as = lru_mmu->as;

		WARN_ON(as < 0);
		lru_mmu->as = -1;
	}

	/* Assign the free or reclaimed AS to the FD */
	mmu->as = as;
	set_bit(as, &pfdev->as_alloc_mask);
	atomic_set(&mmu->as_count, 1);
	list_add(&mmu->list, &pfdev->as_lru_list);

	dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu, pfdev->as_alloc_mask);

	panfrost_mmu_enable(pfdev, mmu);

out:
	spin_unlock(&pfdev->as_lock);
	return as;
}

void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	atomic_dec(&mmu->as_count);
	WARN_ON(atomic_read(&mmu->as_count) < 0);
}

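/*
 * Drop all software AS assignments (e.g. across a GPU reset) and re-enable
 * the MMU interrupts.
 */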
void panfrost_mmu_reset(struct panfrost_device *pfdev)
{
	struct panfrost_mmu *mmu, *mmu_tmp;

	spin_lock(&pfdev->as_lock);

	pfdev->as_alloc_mask = 0;

	list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) {
		mmu->as = -1;
		atomic_set(&mmu->as_count, 0);
		list_del_init(&mmu->list);
	}

	spin_unlock(&pfdev->as_lock);

	mmu_write(pfdev, MMU_INT_CLEAR, ~0);
	mmu_write(pfdev, MMU_INT_MASK, ~0);
}

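/*
 * Use a 2MB block mapping when the address is 2MB aligned and at least 2MB
 * remains to be mapped, otherwise fall back to 4KB pages.
 */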
static size_t get_pgsize(u64 addr, size_t size)
{
	if (addr & (SZ_2M - 1) || size < SZ_2M)
		return SZ_4K;

	return SZ_2M;
}

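/*
 * Flush the GPU-side page table caches for a range. This only touches the
 * hardware when the context currently owns an AS and the device is awake;
 * otherwise panfrost_mmu_enable() issues a full FLUSH_MEM the next time the
 * AS is programmed anyway.
 */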
static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
				     struct panfrost_mmu *mmu,
				     u64 iova, u64 size)
{
	if (mmu->as < 0)
		return;

	pm_runtime_get_noresume(pfdev->dev);

	/* Flush the PTs only if we're already awake */
	if (pm_runtime_active(pfdev->dev))
		mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);

	pm_runtime_put_autosuspend(pfdev->dev);
}

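/*
 * Map a DMA-mapped scatterlist into the context's io-pgtable, splitting each
 * segment into the largest page size the alignment allows, then flush the
 * affected IOVA range.
 */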
static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
		      u64 iova, int prot, struct sg_table *sgt)
{
	unsigned int count;
	struct scatterlist *sgl;
	struct io_pgtable_ops *ops = mmu->pgtbl_ops;
	u64 start_iova = iova;

	for_each_sgtable_dma_sg(sgt, sgl, count) {
		unsigned long paddr = sg_dma_address(sgl);
		size_t len = sg_dma_len(sgl);

		dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);

		while (len) {
			size_t pgsize = get_pgsize(iova | paddr, len);

			ops->map(ops, iova, paddr, pgsize, prot, GFP_KERNEL);
			iova += pgsize;
			paddr += pgsize;
			len -= pgsize;
		}
	}

	panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);

	return 0;
}

int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
{
	struct panfrost_gem_object *bo = mapping->obj;
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct sg_table *sgt;
	int prot = IOMMU_READ | IOMMU_WRITE;

	if (WARN_ON(mapping->active))
		return 0;

	if (bo->noexec)
		prot |= IOMMU_NOEXEC;

	sgt = drm_gem_shmem_get_pages_sgt(obj);
	if (WARN_ON(IS_ERR(sgt)))
		return PTR_ERR(sgt);

	mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
		   prot, sgt);
	mapping->active = true;

	return 0;
}

void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
{
	struct panfrost_gem_object *bo = mapping->obj;
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
	u64 iova = mapping->mmnode.start << PAGE_SHIFT;
	size_t len = mapping->mmnode.size << PAGE_SHIFT;
	size_t unmapped_len = 0;

	if (WARN_ON(!mapping->active))
		return;

	dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
		mapping->mmu->as, iova, len);

	while (unmapped_len < len) {
		size_t unmapped_page;
		size_t pgsize = get_pgsize(iova, len - unmapped_len);

		if (ops->iova_to_phys(ops, iova)) {
			unmapped_page = ops->unmap(ops, iova, pgsize, NULL);
			WARN_ON(unmapped_page != pgsize);
		}
		iova += pgsize;
		unmapped_len += pgsize;
	}

	panfrost_mmu_flush_range(pfdev, mapping->mmu,
				 mapping->mmnode.start << PAGE_SHIFT, len);
	mapping->active = false;
}

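/*
 * The io-pgtable flush callbacks are effectively no-ops here: TLB and page
 * table cache maintenance is driven explicitly through the AS_COMMAND
 * interface in panfrost_mmu_flush_range() instead.
 */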
static void mmu_tlb_inv_context_s1(void *cookie)
{}

static void mmu_tlb_sync_context(void *cookie)
{
	//struct panfrost_mmu *mmu = cookie;
	// TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
}

static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule,
			       void *cookie)
{
	mmu_tlb_sync_context(cookie);
}

static const struct iommu_flush_ops mmu_tlb_ops = {
	.tlb_flush_all	= mmu_tlb_inv_context_s1,
	.tlb_flush_walk	= mmu_tlb_flush_walk,
};

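/*
 * Translate a faulting GPU VA within an AS back to the GEM mapping that
 * covers it, taking a reference on the mapping. Returns NULL if no context
 * currently owns that AS or the address falls outside any mapping.
 */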
static struct panfrost_gem_mapping *
addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
{
	struct panfrost_gem_mapping *mapping = NULL;
	struct drm_mm_node *node;
	u64 offset = addr >> PAGE_SHIFT;
	struct panfrost_mmu *mmu;

	spin_lock(&pfdev->as_lock);
	list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
		if (as == mmu->as)
			goto found_mmu;
	}
	goto out;

found_mmu:

	spin_lock(&mmu->mm_lock);

	drm_mm_for_each_node(node, &mmu->mm) {
		if (offset >= node->start &&
		    offset < (node->start + node->size)) {
			mapping = drm_mm_node_to_panfrost_mapping(node);

			kref_get(&mapping->refcount);
			break;
		}
	}

	spin_unlock(&mmu->mm_lock);
out:
	spin_unlock(&pfdev->as_lock);
	return mapping;
}

#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)

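/*
 * Handle a translation fault on a growable (heap) BO by allocating and
 * mapping the 2MB-aligned chunk around the faulting address on demand.
 */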
static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
				       u64 addr)
{
	int ret, i;
	struct panfrost_gem_mapping *bomapping;
	struct panfrost_gem_object *bo;
	struct address_space *mapping;
	pgoff_t page_offset;
	struct sg_table *sgt;
	struct page **pages;

	bomapping = addr_to_mapping(pfdev, as, addr);
	if (!bomapping)
		return -ENOENT;

	bo = bomapping->obj;
	if (!bo->is_heap) {
		dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
			 bomapping->mmnode.start << PAGE_SHIFT);
		ret = -EINVAL;
		goto err_bo;
	}
	WARN_ON(bomapping->mmu->as != as);

	/* Assume 2MB alignment and size multiple */
	addr &= ~((u64)SZ_2M - 1);
	page_offset = addr >> PAGE_SHIFT;
	page_offset -= bomapping->mmnode.start;

	mutex_lock(&bo->base.pages_lock);

	if (!bo->base.pages) {
		bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
					  sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
		if (!bo->sgts) {
			mutex_unlock(&bo->base.pages_lock);
			ret = -ENOMEM;
			goto err_bo;
		}

		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
				       sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
		if (!pages) {
			kvfree(bo->sgts);
			bo->sgts = NULL;
			mutex_unlock(&bo->base.pages_lock);
			ret = -ENOMEM;
			goto err_bo;
		}
		bo->base.pages = pages;
		bo->base.pages_use_count = 1;
	} else {
		pages = bo->base.pages;
		if (pages[page_offset]) {
			/* Pages are already mapped, bail out. */
			mutex_unlock(&bo->base.pages_lock);
			goto out;
		}
	}

	mapping = bo->base.base.filp->f_mapping;
	mapping_set_unevictable(mapping);

	for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
		pages[i] = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(pages[i])) {
			mutex_unlock(&bo->base.pages_lock);
			ret = PTR_ERR(pages[i]);
			pages[i] = NULL;
			goto err_pages;
		}
	}

	mutex_unlock(&bo->base.pages_lock);

	sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
	ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
					NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
	if (ret)
		goto err_pages;

	ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_map;

	mmu_map_sg(pfdev, bomapping->mmu, addr,
		   IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);

	bomapping->active = true;

	dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);

out:
	panfrost_gem_mapping_put(bomapping);

	return 0;

err_map:
	sg_free_table(sgt);
err_pages:
	drm_gem_shmem_put_pages(&bo->base);
err_bo:
	panfrost_gem_mapping_put(bomapping);
	return ret;
}

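/* Final kref release: tear down the AS binding, page tables and VA space. */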
static void panfrost_mmu_release_ctx(struct kref *kref)
{
	struct panfrost_mmu *mmu = container_of(kref, struct panfrost_mmu,
						refcount);
	struct panfrost_device *pfdev = mmu->pfdev;

	spin_lock(&pfdev->as_lock);
	if (mmu->as >= 0) {
		pm_runtime_get_noresume(pfdev->dev);
		if (pm_runtime_active(pfdev->dev))
			panfrost_mmu_disable(pfdev, mmu->as);
		pm_runtime_put_autosuspend(pfdev->dev);

		clear_bit(mmu->as, &pfdev->as_alloc_mask);
		clear_bit(mmu->as, &pfdev->as_in_use_mask);
		list_del(&mmu->list);
	}
	spin_unlock(&pfdev->as_lock);

	free_io_pgtable_ops(mmu->pgtbl_ops);
	drm_mm_takedown(&mmu->mm);
	kfree(mmu);
}

void panfrost_mmu_ctx_put(struct panfrost_mmu *mmu)
{
	kref_put(&mmu->refcount, panfrost_mmu_release_ctx);
}

struct panfrost_mmu *panfrost_mmu_ctx_get(struct panfrost_mmu *mmu)
{
	kref_get(&mmu->refcount);

	return mmu;
}

#define PFN_4G		(SZ_4G >> PAGE_SHIFT)
#define PFN_4G_MASK	(PFN_4G - 1)
#define PFN_16M		(SZ_16M >> PAGE_SHIFT)

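/*
 * drm_mm color_adjust callback, working in page-frame units: executable
 * ranges are trimmed so they never start or end on a 4GB boundary, never
 * begin within 16MB below one, and stay within a single 4GB segment.
 */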
static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
					 unsigned long color,
					 u64 *start, u64 *end)
{
	/* Executable buffers can't start or end on a 4GB boundary */
	if (!(color & PANFROST_BO_NOEXEC)) {
		u64 next_seg;

		if ((*start & PFN_4G_MASK) == 0)
			(*start)++;

		if ((*end & PFN_4G_MASK) == 0)
			(*end)--;

		next_seg = ALIGN(*start, PFN_4G);
		if (next_seg - *start <= PFN_16M)
			*start = next_seg + 1;

		*end = min(*end, ALIGN(*start, PFN_4G) - 1);
	}
}

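/*
 * Allocate a per-FD MMU context: a GPU VA space managed by drm_mm plus an
 * ARM_MALI_LPAE io-pgtable. No hardware AS is bound yet (mmu->as = -1);
 * that happens later in panfrost_mmu_as_get().
 */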
struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
{
	struct panfrost_mmu *mmu;

	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	mmu->pfdev = pfdev;
	spin_lock_init(&mmu->mm_lock);

	/* 4G enough for now. can be 48-bit */
	drm_mm_init(&mmu->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
	mmu->mm.color_adjust = panfrost_drm_mm_color_adjust;

	INIT_LIST_HEAD(&mmu->list);
	mmu->as = -1;

	mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= SZ_4K | SZ_2M,
		.ias		= FIELD_GET(0xff, pfdev->features.mmu_features),
		.oas		= FIELD_GET(0xff00, pfdev->features.mmu_features),
		.coherent_walk	= pfdev->coherent,
		.tlb		= &mmu_tlb_ops,
		.iommu_dev	= pfdev->dev,
	};

	mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
					      mmu);
	if (!mmu->pgtbl_ops) {
		kfree(mmu);
		return ERR_PTR(-EINVAL);
	}

	kref_init(&mmu->refcount);

	return mmu;
}

static const char *access_type_name(struct panfrost_device *pfdev,
				    u32 fault_status)
{
	switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
	case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
		if (panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU))
			return "ATOMIC";
		else
			return "UNKNOWN";
	case AS_FAULTSTATUS_ACCESS_TYPE_READ:
		return "READ";
	case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
		return "WRITE";
	case AS_FAULTSTATUS_ACCESS_TYPE_EX:
		return "EXECUTE";
	default:
		WARN_ON(1);
		return NULL;
	}
}

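/* Hard IRQ handler: mask MMU interrupts and hand off to the threaded handler. */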
static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;

	if (!mmu_read(pfdev, MMU_INT_STAT))
		return IRQ_NONE;

	mmu_write(pfdev, MMU_INT_MASK, 0);
	return IRQ_WAKE_THREAD;
}

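/*
 * Threaded handler: walk the per-AS interrupt bits (BIT(as) and BIT(as + 16)),
 * grow heap BOs on translation faults via panfrost_mmu_map_fault_addr(), log
 * everything else as an unhandled fault, then unmask the MMU interrupts.
 */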
static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
	int i, ret;

	for (i = 0; status; i++) {
		u32 mask = BIT(i) | BIT(i + 16);
		u64 addr;
		u32 fault_status;
		u32 exception_type;
		u32 access_type;
		u32 source_id;

		if (!(status & mask))
			continue;

		fault_status = mmu_read(pfdev, AS_FAULTSTATUS(i));
		addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(i));
		addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(i)) << 32;

		/* decode the fault status */
		exception_type = fault_status & 0xFF;
		access_type = (fault_status >> 8) & 0x3;
		source_id = (fault_status >> 16);

		mmu_write(pfdev, MMU_INT_CLEAR, mask);

		/* Page fault only */
		ret = -1;
		if ((status & mask) == BIT(i) && (exception_type & 0xF8) == 0xC0)
			ret = panfrost_mmu_map_fault_addr(pfdev, i, addr);

		if (ret)
			/* terminal fault, print info about the fault */
			dev_err(pfdev->dev,
				"Unhandled Page fault in AS%d at VA 0x%016llX\n"
				"Reason: %s\n"
				"raw fault status: 0x%X\n"
				"decoded fault status: %s\n"
				"exception type 0x%X: %s\n"
				"access type 0x%X: %s\n"
				"source id 0x%X\n",
				i, addr,
				"TODO",
				fault_status,
				(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
				exception_type, panfrost_exception_name(pfdev, exception_type),
				access_type, access_type_name(pfdev, fault_status),
				source_id);

		status &= ~mask;
	}

	mmu_write(pfdev, MMU_INT_MASK, ~0);
	return IRQ_HANDLED;
}

int panfrost_mmu_init(struct panfrost_device *pfdev)
{
	int err, irq;

	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
	if (irq <= 0)
		return -ENODEV;

	err = devm_request_threaded_irq(pfdev->dev, irq,
					panfrost_mmu_irq_handler,
					panfrost_mmu_irq_handler_thread,
					IRQF_SHARED, KBUILD_MODNAME "-mmu",
					pfdev);

	if (err) {
		dev_err(pfdev->dev, "failed to request mmu irq");
		return err;
	}

	return 0;
}

void panfrost_mmu_fini(struct panfrost_device *pfdev)
{
	mmu_write(pfdev, MMU_INT_MASK, 0);
}