// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */

#include <drm/panfrost_drm.h>

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/shmem_fs.h>
#include <linux/sizes.h>

#include "panfrost_device.h"
#include "panfrost_mmu.h"
#include "panfrost_gem.h"
#include "panfrost_features.h"
#include "panfrost_regs.h"

#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define mmu_read(dev, reg) readl(dev->iomem + reg)

static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
	int ret;
	u32 val;

	/* Wait for the MMU status to indicate there is no active command, in
	 * case one is pending. */
	ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
		val, !(val & AS_STATUS_AS_ACTIVE), 10, 100000);

	if (ret) {
		/* The GPU hung, let's trigger a reset */
		panfrost_device_schedule_reset(pfdev);
		dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");
	}

	return ret;
}

static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
{
	int status;

	/* write AS_COMMAND when MMU is ready to accept another command */
	status = wait_ready(pfdev, as_nr);
	if (!status)
		mmu_write(pfdev, AS_COMMAND(as_nr), cmd);

	return status;
}

static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
			u64 region_start, u64 size)
{
	u8 region_width;
	u64 region;
	u64 region_end = region_start + size;

	if (!size)
		return;

	/*
	 * The locked region is a naturally aligned power of 2 block encoded as
	 * its log2 minus 1.
	 * Calculate the desired start/end and look for the highest bit which
	 * differs. The smallest naturally aligned block must include this bit
	 * change, the desired region starts with this bit (and subsequent bits)
	 * zeroed and ends with the bit (and subsequent bits) set to one.
	 */
	region_width = max(fls64(region_start ^ (region_end - 1)),
			   const_ilog2(AS_LOCK_REGION_MIN_SIZE)) - 1;

	/*
	 * Mask off the low bits of region_start (which would be ignored by
	 * the hardware anyway)
	 */
	region_start &= GENMASK_ULL(63, region_width);

	region = region_width | region_start;

	/* Lock the region that needs to be updated */
	mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), lower_32_bits(region));
	mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), upper_32_bits(region));
	write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
}

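/*
 * Run an MMU operation on one address space with pfdev->as_lock already
 * held: lock the target region (except for UNLOCK commands), issue the
 * command, then wait for the AS to become idle again.
 */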
static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
				      u64 iova, u64 size, u32 op)
{
	if (as_nr < 0)
		return 0;

	if (op != AS_COMMAND_UNLOCK)
		lock_region(pfdev, as_nr, iova, size);

	/* Run the MMU operation */
	write_cmd(pfdev, as_nr, op);

	/* Wait for the flush to complete */
	return wait_ready(pfdev, as_nr);
}

static int mmu_hw_do_operation(struct panfrost_device *pfdev,
			       struct panfrost_mmu *mmu,
			       u64 iova, u64 size, u32 op)
{
	int ret;

	spin_lock(&pfdev->as_lock);
	ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);
	spin_unlock(&pfdev->as_lock);
	return ret;
}

static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	int as_nr = mmu->as;
	struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg;
	u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
	u64 memattr = cfg->arm_mali_lpae_cfg.memattr;

	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab));
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab));

	/* Need to revisit mem attrs.
	 * NC is the default, Mali driver is inner WT.
	 */
	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);

	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

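/*
 * Pick a hardware address space (AS) for this MMU context and program it.
 * An AS already assigned to the context is reused; otherwise a free AS is
 * allocated, or the least-recently-used idle one is reclaimed from another
 * context.
 */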
u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	int as;

	spin_lock(&pfdev->as_lock);

	as = mmu->as;
	if (as >= 0) {
		int en = atomic_inc_return(&mmu->as_count);
		u32 mask = BIT(as) | BIT(16 + as);

		/*
		 * AS can be retained by active jobs or a perfcnt context,
		 * hence the '+ 1' here.
		 */
		WARN_ON(en >= (NUM_JOB_SLOTS + 1));

		list_move(&mmu->list, &pfdev->as_lru_list);

		if (pfdev->as_faulty_mask & mask) {
			/* Unhandled pagefault on this AS, the MMU was
			 * disabled. We need to re-enable the MMU after
			 * clearing+unmasking the AS interrupts.
			 */
			mmu_write(pfdev, MMU_INT_CLEAR, mask);
			mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
			pfdev->as_faulty_mask &= ~mask;
			panfrost_mmu_enable(pfdev, mmu);
		}

		goto out;
	}

	/* Check for a free AS */
	as = ffz(pfdev->as_alloc_mask);
	if (!(BIT(as) & pfdev->features.as_present)) {
		struct panfrost_mmu *lru_mmu;

		list_for_each_entry_reverse(lru_mmu, &pfdev->as_lru_list, list) {
			if (!atomic_read(&lru_mmu->as_count))
				break;
		}
		WARN_ON(&lru_mmu->list == &pfdev->as_lru_list);

		list_del_init(&lru_mmu->list);
		as = lru_mmu->as;

		WARN_ON(as < 0);
		lru_mmu->as = -1;
	}

	/* Assign the free or reclaimed AS to the FD */
	mmu->as = as;
	set_bit(as, &pfdev->as_alloc_mask);
	atomic_set(&mmu->as_count, 1);
	list_add(&mmu->list, &pfdev->as_lru_list);

	dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu, pfdev->as_alloc_mask);

	panfrost_mmu_enable(pfdev, mmu);

out:
	spin_unlock(&pfdev->as_lock);
	return as;
}

void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	atomic_dec(&mmu->as_count);
	WARN_ON(atomic_read(&mmu->as_count) < 0);
}

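/*
 * Called after a GPU reset: drop every AS assignment so contexts get a fresh
 * AS on their next job, then clear and unmask all MMU interrupts.
 */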
void panfrost_mmu_reset(struct panfrost_device *pfdev)
{
	struct panfrost_mmu *mmu, *mmu_tmp;

	spin_lock(&pfdev->as_lock);

	pfdev->as_alloc_mask = 0;
	pfdev->as_faulty_mask = 0;

	list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) {
		mmu->as = -1;
		atomic_set(&mmu->as_count, 0);
		list_del_init(&mmu->list);
	}

	spin_unlock(&pfdev->as_lock);

	mmu_write(pfdev, MMU_INT_CLEAR, ~0);
	mmu_write(pfdev, MMU_INT_MASK, ~0);
}

static size_t get_pgsize(u64 addr, size_t size, size_t *count)
{
	/*
	 * io-pgtable only operates on multiple pages within a single table
	 * entry, so we need to split at boundaries of the table size, i.e.
	 * the next block size up. The distance from address A to the next
	 * boundary of block size B is logically B - A % B, but in unsigned
	 * two's complement where B is a power of two we get the equivalence
	 * B - A % B == (B - A) % B == (n * B - A) % B, and choose n = 0 :)
	 */
	size_t blk_offset = -addr % SZ_2M;

	if (blk_offset || size < SZ_2M) {
		*count = min_not_zero(blk_offset, size) / SZ_4K;
		return SZ_4K;
	}
	blk_offset = -addr % SZ_1G ?: SZ_1G;
	*count = min(blk_offset, size) / SZ_2M;
	return SZ_2M;
}

static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
				     struct panfrost_mmu *mmu,
				     u64 iova, u64 size)
{
	if (mmu->as < 0)
		return;

	pm_runtime_get_noresume(pfdev->dev);

	/* Flush the PTs only if we're already awake */
	if (pm_runtime_active(pfdev->dev))
		mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);

	pm_runtime_put_autosuspend(pfdev->dev);
}

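/*
 * Walk a DMA-mapped sg table and map each segment into the io-pgtable,
 * letting get_pgsize() pick 4K or 2M pages depending on the alignment of
 * the IOVA/physical address pair and the remaining length.
 */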
static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
		      u64 iova, int prot, struct sg_table *sgt)
{
	unsigned int count;
	struct scatterlist *sgl;
	struct io_pgtable_ops *ops = mmu->pgtbl_ops;
	u64 start_iova = iova;

	for_each_sgtable_dma_sg(sgt, sgl, count) {
		unsigned long paddr = sg_dma_address(sgl);
		size_t len = sg_dma_len(sgl);

		dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);

		while (len) {
			size_t pgcount, mapped = 0;
			size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);

			ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
				       GFP_KERNEL, &mapped);
			/* Don't get stuck if things have gone wrong */
			mapped = max(mapped, pgsize);
			iova += mapped;
			paddr += mapped;
			len -= mapped;
		}
	}

	panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);

	return 0;
}

int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
{
	struct panfrost_gem_object *bo = mapping->obj;
	struct drm_gem_shmem_object *shmem = &bo->base;
	struct drm_gem_object *obj = &shmem->base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct sg_table *sgt;
	int prot = IOMMU_READ | IOMMU_WRITE;

	if (WARN_ON(mapping->active))
		return 0;

	if (bo->noexec)
		prot |= IOMMU_NOEXEC;

	sgt = drm_gem_shmem_get_pages_sgt(shmem);
	if (WARN_ON(IS_ERR(sgt)))
		return PTR_ERR(sgt);

	mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
		   prot, sgt);
	mapping->active = true;

	return 0;
}

void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
{
	struct panfrost_gem_object *bo = mapping->obj;
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
	u64 iova = mapping->mmnode.start << PAGE_SHIFT;
	size_t len = mapping->mmnode.size << PAGE_SHIFT;
	size_t unmapped_len = 0;

	if (WARN_ON(!mapping->active))
		return;

	dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
		mapping->mmu->as, iova, len);

	while (unmapped_len < len) {
		size_t unmapped_page, pgcount;
		size_t pgsize = get_pgsize(iova, len - unmapped_len, &pgcount);

		if (bo->is_heap)
			pgcount = 1;
		if (!bo->is_heap || ops->iova_to_phys(ops, iova)) {
			unmapped_page = ops->unmap_pages(ops, iova, pgsize, pgcount, NULL);
			WARN_ON(unmapped_page != pgsize * pgcount);
		}
		iova += pgsize * pgcount;
		unmapped_len += pgsize * pgcount;
	}

	panfrost_mmu_flush_range(pfdev, mapping->mmu,
				 mapping->mmnode.start << PAGE_SHIFT, len);
	mapping->active = false;
}

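/*
 * The io-pgtable TLB callbacks are (mostly) no-ops: TLB maintenance is done
 * explicitly through the AS flush commands in panfrost_mmu_flush_range().
 */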
static void mmu_tlb_inv_context_s1(void *cookie)
{}

static void mmu_tlb_sync_context(void *cookie)
{
	//struct panfrost_mmu *mmu = cookie;
	// TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
}

static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule,
			       void *cookie)
{
	mmu_tlb_sync_context(cookie);
}

static const struct iommu_flush_ops mmu_tlb_ops = {
	.tlb_flush_all	= mmu_tlb_inv_context_s1,
	.tlb_flush_walk = mmu_tlb_flush_walk,
};

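/*
 * Translate a faulting GPU VA on a given AS back to the panfrost GEM mapping
 * that owns it, taking a reference on the mapping. Returns NULL if no
 * currently-assigned context or mapping covers the address.
 */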
static struct panfrost_gem_mapping *
addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
{
	struct panfrost_gem_mapping *mapping = NULL;
	struct drm_mm_node *node;
	u64 offset = addr >> PAGE_SHIFT;
	struct panfrost_mmu *mmu;

	spin_lock(&pfdev->as_lock);
	list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
		if (as == mmu->as)
			goto found_mmu;
	}
	goto out;

found_mmu:

	spin_lock(&mmu->mm_lock);

	drm_mm_for_each_node(node, &mmu->mm) {
		if (offset >= node->start &&
		    offset < (node->start + node->size)) {
			mapping = drm_mm_node_to_panfrost_mapping(node);

			kref_get(&mapping->refcount);
			break;
		}
	}

	spin_unlock(&mmu->mm_lock);
out:
	spin_unlock(&pfdev->as_lock);
	return mapping;
}

#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)

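/*
 * Grow a heap BO on demand: on a page fault inside a heap mapping, allocate
 * and map one 2MB-aligned chunk of shmem pages covering the faulting address.
 */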
static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
					u64 addr)
{
	int ret, i;
	struct panfrost_gem_mapping *bomapping;
	struct panfrost_gem_object *bo;
	struct address_space *mapping;
	pgoff_t page_offset;
	struct sg_table *sgt;
	struct page **pages;

	bomapping = addr_to_mapping(pfdev, as, addr);
	if (!bomapping)
		return -ENOENT;

	bo = bomapping->obj;
	if (!bo->is_heap) {
		dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
			 bomapping->mmnode.start << PAGE_SHIFT);
		ret = -EINVAL;
		goto err_bo;
	}
	WARN_ON(bomapping->mmu->as != as);

	/* Assume 2MB alignment and size multiple */
	addr &= ~((u64)SZ_2M - 1);
	page_offset = addr >> PAGE_SHIFT;
	page_offset -= bomapping->mmnode.start;

	mutex_lock(&bo->base.pages_lock);

	if (!bo->base.pages) {
		bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
				     sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
		if (!bo->sgts) {
			mutex_unlock(&bo->base.pages_lock);
			ret = -ENOMEM;
			goto err_bo;
		}

		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
				       sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
		if (!pages) {
			kvfree(bo->sgts);
			bo->sgts = NULL;
			mutex_unlock(&bo->base.pages_lock);
			ret = -ENOMEM;
			goto err_bo;
		}
		bo->base.pages = pages;
		bo->base.pages_use_count = 1;
	} else {
		pages = bo->base.pages;
		if (pages[page_offset]) {
			/* Pages are already mapped, bail out. */
			mutex_unlock(&bo->base.pages_lock);
			goto out;
		}
	}

	mapping = bo->base.base.filp->f_mapping;
	mapping_set_unevictable(mapping);

	for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
		pages[i] = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(pages[i])) {
			mutex_unlock(&bo->base.pages_lock);
			ret = PTR_ERR(pages[i]);
			pages[i] = NULL;
			goto err_pages;
		}
	}

	mutex_unlock(&bo->base.pages_lock);

	sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
	ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
					NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
	if (ret)
		goto err_pages;

	ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_map;

	mmu_map_sg(pfdev, bomapping->mmu, addr,
		   IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);

	bomapping->active = true;

	dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);

out:
	panfrost_gem_mapping_put(bomapping);

	return 0;

err_map:
	sg_free_table(sgt);
err_pages:
	drm_gem_shmem_put_pages(&bo->base);
err_bo:
	panfrost_gem_mapping_put(bomapping);
	return ret;
}

static void panfrost_mmu_release_ctx(struct kref *kref)
{
	struct panfrost_mmu *mmu = container_of(kref, struct panfrost_mmu,
						refcount);
	struct panfrost_device *pfdev = mmu->pfdev;

	spin_lock(&pfdev->as_lock);
	if (mmu->as >= 0) {
		pm_runtime_get_noresume(pfdev->dev);
		if (pm_runtime_active(pfdev->dev))
			panfrost_mmu_disable(pfdev, mmu->as);
		pm_runtime_put_autosuspend(pfdev->dev);

		clear_bit(mmu->as, &pfdev->as_alloc_mask);
		clear_bit(mmu->as, &pfdev->as_in_use_mask);
		list_del(&mmu->list);
	}
	spin_unlock(&pfdev->as_lock);

	free_io_pgtable_ops(mmu->pgtbl_ops);
	drm_mm_takedown(&mmu->mm);
	kfree(mmu);
}

void panfrost_mmu_ctx_put(struct panfrost_mmu *mmu)
{
	kref_put(&mmu->refcount, panfrost_mmu_release_ctx);
}

struct panfrost_mmu *panfrost_mmu_ctx_get(struct panfrost_mmu *mmu)
{
	kref_get(&mmu->refcount);

	return mmu;
}

#define PFN_4G		(SZ_4G >> PAGE_SHIFT)
#define PFN_4G_MASK	(PFN_4G - 1)
#define PFN_16M		(SZ_16M >> PAGE_SHIFT)

static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
					 unsigned long color,
					 u64 *start, u64 *end)
{
	/* Executable buffers can't start or end on a 4GB boundary */
	if (!(color & PANFROST_BO_NOEXEC)) {
		u64 next_seg;

		if ((*start & PFN_4G_MASK) == 0)
			(*start)++;

		if ((*end & PFN_4G_MASK) == 0)
			(*end)--;

		next_seg = ALIGN(*start, PFN_4G);
		if (next_seg - *start <= PFN_16M)
			*start = next_seg + 1;

		*end = min(*end, ALIGN(*start, PFN_4G) - 1);
	}
}

struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
{
	struct panfrost_mmu *mmu;

	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	mmu->pfdev = pfdev;
	spin_lock_init(&mmu->mm_lock);

	/* 4G enough for now. can be 48-bit */
	drm_mm_init(&mmu->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
	mmu->mm.color_adjust = panfrost_drm_mm_color_adjust;

	INIT_LIST_HEAD(&mmu->list);
	mmu->as = -1;

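	/*
	 * The io-pgtable input/output address sizes come from
	 * pfdev->features.mmu_features: VA bits in bits [7:0], PA bits in
	 * bits [15:8].
	 */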
	mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= SZ_4K | SZ_2M,
		.ias		= FIELD_GET(0xff, pfdev->features.mmu_features),
		.oas		= FIELD_GET(0xff00, pfdev->features.mmu_features),
		.coherent_walk	= pfdev->coherent,
		.tlb		= &mmu_tlb_ops,
		.iommu_dev	= pfdev->dev,
	};

	mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
					      mmu);
	if (!mmu->pgtbl_ops) {
		kfree(mmu);
		return ERR_PTR(-EINVAL);
	}

	kref_init(&mmu->refcount);

	return mmu;
}

static const char *access_type_name(struct panfrost_device *pfdev,
				    u32 fault_status)
{
	switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
	case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
		if (panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU))
			return "ATOMIC";
		else
			return "UNKNOWN";
	case AS_FAULTSTATUS_ACCESS_TYPE_READ:
		return "READ";
	case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
		return "WRITE";
	case AS_FAULTSTATUS_ACCESS_TYPE_EX:
		return "EXECUTE";
	default:
		WARN_ON(1);
		return NULL;
	}
}

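/*
 * Hard IRQ handler: mask all MMU interrupts and defer the actual fault
 * handling (which may allocate and map pages) to the threaded handler.
 */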
static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;

	if (!mmu_read(pfdev, MMU_INT_STAT))
		return IRQ_NONE;

	mmu_write(pfdev, MMU_INT_MASK, 0);
	return IRQ_WAKE_THREAD;
}

static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
	int ret;

	while (status) {
		u32 as = ffs(status | (status >> 16)) - 1;
		u32 mask = BIT(as) | BIT(as + 16);
		u64 addr;
		u32 fault_status;
		u32 exception_type;
		u32 access_type;
		u32 source_id;

		fault_status = mmu_read(pfdev, AS_FAULTSTATUS(as));
		addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(as));
		addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(as)) << 32;

		/* decode the fault status */
		exception_type = fault_status & 0xFF;
		access_type = (fault_status >> 8) & 0x3;
		source_id = (fault_status >> 16);

		mmu_write(pfdev, MMU_INT_CLEAR, mask);

		/* Page fault only */
		ret = -1;
		if ((status & mask) == BIT(as) && (exception_type & 0xF8) == 0xC0)
			ret = panfrost_mmu_map_fault_addr(pfdev, as, addr);

		if (ret) {
			/* terminal fault, print info about the fault */
			dev_err(pfdev->dev,
				"Unhandled Page fault in AS%d at VA 0x%016llX\n"
				"Reason: %s\n"
				"raw fault status: 0x%X\n"
				"decoded fault status: %s\n"
				"exception type 0x%X: %s\n"
				"access type 0x%X: %s\n"
				"source id 0x%X\n",
				as, addr,
				"TODO",
				fault_status,
				(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
				exception_type, panfrost_exception_name(exception_type),
				access_type, access_type_name(pfdev, fault_status),
				source_id);

			spin_lock(&pfdev->as_lock);
			/* Ignore MMU interrupts on this AS until it's been
			 * re-enabled.
			 */
			pfdev->as_faulty_mask |= mask;

			/* Disable the MMU to kill jobs on this AS. */
			panfrost_mmu_disable(pfdev, as);
			spin_unlock(&pfdev->as_lock);
		}

		status &= ~mask;

		/* If we received new MMU interrupts, process them before returning. */
		if (!status)
			status = mmu_read(pfdev, MMU_INT_RAWSTAT) & ~pfdev->as_faulty_mask;
	}

	spin_lock(&pfdev->as_lock);
	mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
	spin_unlock(&pfdev->as_lock);

	return IRQ_HANDLED;
}

int panfrost_mmu_init(struct panfrost_device *pfdev)
{
	int err, irq;

	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
	if (irq <= 0)
		return -ENODEV;

	err = devm_request_threaded_irq(pfdev->dev, irq,
					panfrost_mmu_irq_handler,
					panfrost_mmu_irq_handler_thread,
					IRQF_SHARED, KBUILD_MODNAME "-mmu",
					pfdev);

	if (err) {
		dev_err(pfdev->dev, "failed to request mmu irq");
		return err;
	}

	return 0;
}

void panfrost_mmu_fini(struct panfrost_device *pfdev)
{
	mmu_write(pfdev, MMU_INT_MASK, 0);
}