// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

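/*
 * Unmap a virtually contiguous range in 4K steps. Stops early if the
 * MMU backend reports that a page could not be unmapped.
 */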
static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
				  unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	size_t pgsize = SZ_4K;

	if (!IS_ALIGNED(iova | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
		       iova, size, pgsize);
		return;
	}

	while (unmapped < size) {
		unmapped_page = context->global->ops->unmap(context, iova,
							    pgsize);
		if (!unmapped_page)
			break;

		iova += unmapped_page;
		unmapped += unmapped_page;
	}
}

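/*
 * Map a physically contiguous range in 4K steps. On failure any pages
 * mapped so far are torn down again before returning the error.
 */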
static int etnaviv_context_map(struct etnaviv_iommu_context *context,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	unsigned long orig_iova = iova;
	size_t pgsize = SZ_4K;
	size_t orig_size = size;
	int ret = 0;

	if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
		       iova, &paddr, size, pgsize);
		return -EINVAL;
	}

	while (size) {
		ret = context->global->ops->map(context, iova, paddr, pgsize,
						prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		etnaviv_context_unmap(context, orig_iova, orig_size - size);

	return ret;
}

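/*
 * Map a scatter-gather table at the given IOVA. The entries are mapped
 * back-to-back; on failure the partially built mapping is unwound.
 */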
static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
			     struct sg_table *sgt, unsigned len, int prot)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i;
	int ret;

	if (!context || !sgt)
		return -EINVAL;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		u32 pa = sg_dma_address(sg) - sg->offset;
		size_t bytes = sg_dma_len(sg) + sg->offset;

		VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

		ret = etnaviv_context_map(context, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	etnaviv_context_unmap(context, iova, da - iova);
	return ret;
}

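/*
 * Unmap all entries of a scatter-gather table starting at the given IOVA.
 */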
static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
				struct sg_table *sgt, unsigned len)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		etnaviv_context_unmap(context, da, bytes);

		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

		BUG_ON(!PAGE_ALIGNED(bytes));

		da += bytes;
	}
}

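/*
 * Tear down the GPU mapping of a GEM object and give its IOVA range back
 * to the address space manager.
 */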
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
					 struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	etnaviv_iommu_unmap(context, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}

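/*
 * Find a free IOVA range of the requested size. If the address space is
 * full, scan the existing mappings and evict unused ones until the
 * allocation succeeds or no more eviction candidates are left.
 */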
static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
				   struct drm_mm_node *node, size_t size)
{
	struct etnaviv_vram_mapping *free = NULL;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
	int ret;

	lockdep_assert_held(&context->lock);

	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct drm_mm_scan scan;
		struct list_head list;
		bool found;

		ret = drm_mm_insert_node_in_range(&context->mm, node,
						  size, 0, 0, 0, U64_MAX, mode);
		if (ret != -ENOSPC)
			break;

		/* Try to retire some entries */
		drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);

		found = 0;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &context->mappings, mmu_node) {
			/* If this vram node has not been used, skip this. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the mmu pointer to prevent the mapping_get finding
		 * this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_remove_mapping(context, m);
			etnaviv_iommu_context_put(m->context);
			m->context = NULL;
			list_del_init(&m->mmu_node);
			list_del_init(&m->scan_node);
		}

		mode = DRM_MM_INSERT_EVICT;

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed, retry the allocation one more time.
		 */
	}

	return ret;
}

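/*
 * Reserve the exact IOVA range [va, va + size) in the address space
 * manager; fails if that range is not free.
 */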
static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
				      struct drm_mm_node *node, size_t size, u64 va)
{
	return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
					   va + size, DRM_MM_INSERT_LOWEST);
}

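/*
 * Map a GEM object into the GPU address space. For MMUv1 a contiguous
 * object that already sits inside the linear window is used directly
 * without touching the pagetables; otherwise an IOVA range is allocated
 * (at the requested address if va is non-zero) and the object's pages
 * are mapped there with read/write permissions.
 */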
int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping, u64 va)
{
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&context->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (context->global->version == ETNAVIV_IOMMU_V1 &&
	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			list_add_tail(&mapping->mmu_node, &context->mappings);
			ret = 0;
			goto unlock;
		}
	}

	node = &mapping->vram_node;

	if (va)
		ret = etnaviv_iommu_insert_exact(context, node,
						 etnaviv_obj->base.size, va);
	else
		ret = etnaviv_iommu_find_iova(context, node,
					      etnaviv_obj->base.size);
	if (ret < 0)
		goto unlock;

	mapping->iova = node->start;
	ret = etnaviv_iommu_map(context, node->start, sgt, etnaviv_obj->base.size,
				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);

	if (ret < 0) {
		drm_mm_remove_node(node);
		goto unlock;
	}

	list_add_tail(&mapping->mmu_node, &context->mappings);
	context->flush_seq++;
unlock:
	mutex_unlock(&context->lock);

	return ret;
}

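/*
 * Remove a GEM object's mapping from the GPU address space. The mapping
 * must no longer be in use (use count of zero).
 */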
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
			     struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&context->lock);

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &context->mm)
		etnaviv_iommu_remove_mapping(context, mapping);

	list_del(&mapping->mmu_node);
	context->flush_seq++;
	mutex_unlock(&context->lock);
}

static void etnaviv_iommu_context_free(struct kref *kref)
{
	struct etnaviv_iommu_context *context =
		container_of(kref, struct etnaviv_iommu_context, refcount);

	etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);

	context->global->ops->free(context);
}

void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
{
	kref_put(&context->refcount, etnaviv_iommu_context_free);
}

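/*
 * Allocate a new MMU context on top of the shared global state and map
 * the command buffer suballocator into it. For MMUv1 the command buffers
 * must lie below 0x80000000, i.e. inside the linear window.
 */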
struct etnaviv_iommu_context *
etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
			   struct etnaviv_cmdbuf_suballoc *suballoc)
{
	struct etnaviv_iommu_context *ctx;
	int ret;

	if (global->version == ETNAVIV_IOMMU_V1)
		ctx = etnaviv_iommuv1_context_alloc(global);
	else
		ctx = etnaviv_iommuv2_context_alloc(global);

	if (!ctx)
		return NULL;

	ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
					  global->memory_base);
	if (ret)
		goto out_free;

	if (global->version == ETNAVIV_IOMMU_V1 &&
	    ctx->cmdbuf_mapping.iova > 0x80000000) {
		dev_err(global->dev,
			"command buffer outside valid memory window\n");
		goto out_unmap;
	}

	return ctx;

out_unmap:
	etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
out_free:
	global->ops->free(ctx);
	return NULL;
}

void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
			   struct etnaviv_iommu_context *context)
{
	context->global->ops->restore(gpu, context);
}

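/*
 * Get a GPU-visible address for a suballocator region, taking a use count
 * reference. MMUv1 accesses the region through the linear window, so only
 * the offset from memory_base is recorded; otherwise an IOVA range is
 * allocated and the region is mapped read-only.
 */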
int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
				  struct etnaviv_vram_mapping *mapping,
				  u32 memory_base, dma_addr_t paddr,
				  size_t size)
{
	mutex_lock(&context->lock);

	if (mapping->use > 0) {
		mapping->use++;
		mutex_unlock(&context->lock);
		return 0;
	}

	/*
	 * For MMUv1 we don't add the suballoc region to the pagetables, as
	 * those GPUs can only work with cmdbufs accessed through the linear
	 * window. Instead we manufacture a mapping to make it look uniform
	 * to the upper layers.
	 */
	if (context->global->version == ETNAVIV_IOMMU_V1) {
		mapping->iova = paddr - memory_base;
	} else {
		struct drm_mm_node *node = &mapping->vram_node;
		int ret;

		ret = etnaviv_iommu_find_iova(context, node, size);
		if (ret < 0) {
			mutex_unlock(&context->lock);
			return ret;
		}

		mapping->iova = node->start;
		ret = etnaviv_context_map(context, node->start, paddr, size,
					  ETNAVIV_PROT_READ);
		if (ret < 0) {
			drm_mm_remove_node(node);
			mutex_unlock(&context->lock);
			return ret;
		}

		context->flush_seq++;
	}

	list_add_tail(&mapping->mmu_node, &context->mappings);
	mapping->use = 1;

	mutex_unlock(&context->lock);

	return 0;
}

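/*
 * Drop a use count reference on a suballocator mapping and, except on
 * MMUv1, unmap it and release its IOVA range once the last user is gone.
 */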
void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
				   struct etnaviv_vram_mapping *mapping)
{
	struct drm_mm_node *node = &mapping->vram_node;

	mutex_lock(&context->lock);
	mapping->use--;

	if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
		mutex_unlock(&context->lock);
		return;
	}

	etnaviv_context_unmap(context, node->start, node->size);
	drm_mm_remove_node(node);
	mutex_unlock(&context->lock);
}

size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
{
	return context->global->ops->dump_size(context);
}

void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
{
	context->global->ops->dump(context, buf);
}

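/*
 * Set up the MMU state shared between the GPU cores of a device: pick the
 * MMU version from the core's feature bits, allocate the bad page (filled
 * with a poison pattern) and, for MMUv2, the PTA buffer. If the global
 * state already exists it is reused with an incremented use count,
 * provided the versions match.
 */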
int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
{
	enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global;
	struct device *dev = gpu->drm->dev;

	if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
		version = ETNAVIV_IOMMU_V2;

	if (priv->mmu_global) {
		if (priv->mmu_global->version != version) {
			dev_err(gpu->dev,
				"MMU version doesn't match global version\n");
			return -ENXIO;
		}

		priv->mmu_global->use++;
		return 0;
	}

	global = kzalloc(sizeof(*global), GFP_KERNEL);
	if (!global)
		return -ENOMEM;

	global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
					    GFP_KERNEL);
	if (!global->bad_page_cpu)
		goto free_global;

	memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));

	if (version == ETNAVIV_IOMMU_V2) {
		global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
						  &global->v2.pta_dma, GFP_KERNEL);
		if (!global->v2.pta_cpu)
			goto free_bad_page;
	}

	global->dev = dev;
	global->version = version;
	global->use = 1;
	mutex_init(&global->lock);

	if (version == ETNAVIV_IOMMU_V1)
		global->ops = &etnaviv_iommuv1_ops;
	else
		global->ops = &etnaviv_iommuv2_ops;

	priv->mmu_global = global;

	return 0;

free_bad_page:
	dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
free_global:
	kfree(global);

	return -ENOMEM;
}

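/*
 * Drop a reference on the shared MMU state and free the backing DMA
 * allocations once the last GPU core has released it.
 */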
void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
{
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global = priv->mmu_global;

	if (--global->use > 0)
		return;

	if (global->v2.pta_cpu)
		dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
			    global->v2.pta_cpu, global->v2.pta_dma);

	if (global->bad_page_cpu)
		dma_free_wc(global->dev, SZ_4K,
			    global->bad_page_cpu, global->bad_page_dma);

	mutex_destroy(&global->lock);
	kfree(global);

	priv->mmu_global = NULL;
}