// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

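/*
 * Tear down a linear IOVA range one 4K page at a time, stopping early if the
 * backend reports that a page could not be unmapped.
 */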
static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
				 unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	size_t pgsize = SZ_4K;

	if (!IS_ALIGNED(iova | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
		       iova, size, pgsize);
		return;
	}

	while (unmapped < size) {
		unmapped_page = context->global->ops->unmap(context, iova,
							    pgsize);
		if (!unmapped_page)
			break;

		iova += unmapped_page;
		unmapped += unmapped_page;
	}
}

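/*
 * Map a physically contiguous range into the context page by page. On any
 * failure the pages mapped so far are unwound before returning the error.
 */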
static int etnaviv_context_map(struct etnaviv_iommu_context *context,
			      unsigned long iova, phys_addr_t paddr,
			      size_t size, int prot)
{
	unsigned long orig_iova = iova;
	size_t pgsize = SZ_4K;
	size_t orig_size = size;
	int ret = 0;

	if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
		       iova, &paddr, size, pgsize);
		return -EINVAL;
	}

	while (size) {
		ret = context->global->ops->map(context, iova, paddr, pgsize,
						prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		etnaviv_context_unmap(context, orig_iova, orig_size - size);

	return ret;
}

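/*
 * Map each DMA segment of a scatter-gather table at consecutive addresses
 * starting at @iova. Partially completed mappings are torn down on failure.
 */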
static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
			     struct sg_table *sgt, unsigned len, int prot)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i;
	int ret;

	if (!context || !sgt)
		return -EINVAL;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		phys_addr_t pa = sg_dma_address(sg) - sg->offset;
		size_t bytes = sg_dma_len(sg) + sg->offset;

		VERB("map[%d]: %08x %pap(%zx)", i, iova, &pa, bytes);

		ret = etnaviv_context_map(context, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	etnaviv_context_unmap(context, iova, da - iova);
	return ret;
}

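/*
 * Reverse of etnaviv_iommu_map(): unmap every scatter-gather segment starting
 * at @iova.
 */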
static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
				struct sg_table *sgt, unsigned len)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		etnaviv_context_unmap(context, da, bytes);

		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

		BUG_ON(!PAGE_ALIGNED(bytes));

		da += bytes;
	}
}

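/*
 * Drop the GPU-side mapping of a buffer object and release its address space
 * reservation. Caller must hold the context lock.
 */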
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
	struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	lockdep_assert_held(&context->lock);

	etnaviv_iommu_unmap(context, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}

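/*
 * Find a free IOVA range of the requested size. If the address space is full,
 * scan for idle (unpinned) mappings, evict them and retry the allocation.
 */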
static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
				   struct drm_mm_node *node, size_t size)
{
	struct etnaviv_vram_mapping *free = NULL;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
	int ret;

	lockdep_assert_held(&context->lock);

	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct drm_mm_scan scan;
		struct list_head list;
		bool found;

		ret = drm_mm_insert_node_in_range(&context->mm, node,
						  size, 0, 0, 0, U64_MAX, mode);
		if (ret != -ENOSPC)
			break;

		/* Try to retire some entries */
		drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);

		found = false;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &context->mappings, mmu_node) {
			/* If this vram node has not been used, skip this. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the mmu pointer to prevent the mapping_get finding
		 * this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_remove_mapping(context, m);
			etnaviv_iommu_context_put(m->context);
			m->context = NULL;
			list_del_init(&m->mmu_node);
			list_del_init(&m->scan_node);
		}

		mode = DRM_MM_INSERT_EVICT;

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed, retry the allocation one more time.
		 */
	}

	return ret;
}

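/*
 * Reserve the exact range [va, va + size) in the context's address space,
 * used when the caller asked for a mapping at a specific virtual address.
 */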
static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
		   struct drm_mm_node *node, size_t size, u64 va)
{
	lockdep_assert_held(&context->lock);

	return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
					   va + size, DRM_MM_INSERT_LOWEST);
}

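/*
 * Map a GEM object into the context. On MMUv1, contiguous buffers that fit
 * inside the linear window are addressed directly without page tables;
 * otherwise an IOVA range is reserved (at @va if non-zero) and the
 * scatterlist is mapped read/write.
 */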
int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping, u64 va)
{
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&context->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (context->global->version == ETNAVIV_IOMMU_V1 &&
	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			list_add_tail(&mapping->mmu_node, &context->mappings);
			ret = 0;
			goto unlock;
		}
	}

	node = &mapping->vram_node;

	if (va)
		ret = etnaviv_iommu_insert_exact(context, node,
						 etnaviv_obj->base.size, va);
	else
		ret = etnaviv_iommu_find_iova(context, node,
					      etnaviv_obj->base.size);
	if (ret < 0)
		goto unlock;

	mapping->iova = node->start;
	ret = etnaviv_iommu_map(context, node->start, sgt, etnaviv_obj->base.size,
				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);

	if (ret < 0) {
		drm_mm_remove_node(node);
		goto unlock;
	}

	list_add_tail(&mapping->mmu_node, &context->mappings);
	context->flush_seq++;
unlock:
	mutex_unlock(&context->lock);

	return ret;
}

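/*
 * Remove a GEM object mapping from the context, unless it has already been
 * reaped by the eviction path in etnaviv_iommu_find_iova().
 */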
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&context->lock);

	/* Bail if the mapping has been reaped by another thread */
	if (!mapping->context) {
		mutex_unlock(&context->lock);
		return;
	}

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &context->mm)
		etnaviv_iommu_remove_mapping(context, mapping);

	list_del(&mapping->mmu_node);
	context->flush_seq++;
	mutex_unlock(&context->lock);
}

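/* kref release: unmap the cmdbuf suballocator region and free the context */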
static void etnaviv_iommu_context_free(struct kref *kref)
{
	struct etnaviv_iommu_context *context =
		container_of(kref, struct etnaviv_iommu_context, refcount);

	etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);

	context->global->ops->free(context);
}

void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
{
	kref_put(&context->refcount, etnaviv_iommu_context_free);
}

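/*
 * Allocate a version-specific MMU context and map the cmdbuf suballocator
 * into it. MMUv1 requires the command buffers to live below 0x80000000,
 * which is checked here.
 */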
struct etnaviv_iommu_context *
etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
			   struct etnaviv_cmdbuf_suballoc *suballoc)
{
	struct etnaviv_iommu_context *ctx;
	int ret;

	if (global->version == ETNAVIV_IOMMU_V1)
		ctx = etnaviv_iommuv1_context_alloc(global);
	else
		ctx = etnaviv_iommuv2_context_alloc(global);

	if (!ctx)
		return NULL;

	ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
					  global->memory_base);
	if (ret)
		goto out_free;

	if (global->version == ETNAVIV_IOMMU_V1 &&
	    ctx->cmdbuf_mapping.iova > 0x80000000) {
		dev_err(global->dev,
			"command buffer outside valid memory window\n");
		goto out_unmap;
	}

	return ctx;

out_unmap:
	etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
out_free:
	global->ops->free(ctx);
	return NULL;
}

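/* Switch the GPU over to this context via the version-specific restore op */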
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
			   struct etnaviv_iommu_context *context)
{
	context->global->ops->restore(gpu, context);
}

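/*
 * Get (or reuse) a GPU address for a suballocator region. The mapping is
 * reference counted; the first user creates it, later callers just bump the
 * use count.
 */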
int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
				  struct etnaviv_vram_mapping *mapping,
				  u32 memory_base, dma_addr_t paddr,
				  size_t size)
{
	mutex_lock(&context->lock);

	if (mapping->use > 0) {
		mapping->use++;
		mutex_unlock(&context->lock);
		return 0;
	}

	/*
	 * For MMUv1 we don't add the suballoc region to the pagetables, as
	 * those GPUs can only work with cmdbufs accessed through the linear
	 * window. Instead we manufacture a mapping to make it look uniform
	 * to the upper layers.
	 */
	if (context->global->version == ETNAVIV_IOMMU_V1) {
		mapping->iova = paddr - memory_base;
	} else {
		struct drm_mm_node *node = &mapping->vram_node;
		int ret;

		ret = etnaviv_iommu_find_iova(context, node, size);
		if (ret < 0) {
			mutex_unlock(&context->lock);
			return ret;
		}

		mapping->iova = node->start;
		ret = etnaviv_context_map(context, node->start, paddr, size,
					  ETNAVIV_PROT_READ);
		if (ret < 0) {
			drm_mm_remove_node(node);
			mutex_unlock(&context->lock);
			return ret;
		}

		context->flush_seq++;
	}

	list_add_tail(&mapping->mmu_node, &context->mappings);
	mapping->use = 1;

	mutex_unlock(&context->lock);

	return 0;
}

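/*
 * Drop one reference on a suballocator mapping; the last put tears down the
 * page table entries on MMUv2, while MMUv1 mappings never had any.
 */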
void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
		  struct etnaviv_vram_mapping *mapping)
{
	struct drm_mm_node *node = &mapping->vram_node;

	mutex_lock(&context->lock);
	mapping->use--;

	if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
		mutex_unlock(&context->lock);
		return;
	}

	etnaviv_context_unmap(context, node->start, node->size);
	drm_mm_remove_node(node);
	mutex_unlock(&context->lock);
}

size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
{
	return context->global->ops->dump_size(context);
}

void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
{
	context->global->ops->dump(context, buf);
}

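/*
 * Set up the per-device global MMU state: detect the MMU version, allocate
 * the bad-page scratch buffer (and the PTA on MMUv2) and select the matching
 * ops. The global state is shared and reference counted between users.
 */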
int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
{
	enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global;
	struct device *dev = gpu->drm->dev;

	if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
		version = ETNAVIV_IOMMU_V2;

	if (priv->mmu_global) {
		if (priv->mmu_global->version != version) {
			dev_err(gpu->dev,
				"MMU version doesn't match global version\n");
			return -ENXIO;
		}

		priv->mmu_global->use++;
		return 0;
	}

	global = kzalloc(sizeof(*global), GFP_KERNEL);
	if (!global)
		return -ENOMEM;

	global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
					    GFP_KERNEL);
	if (!global->bad_page_cpu)
		goto free_global;

	memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));

	if (version == ETNAVIV_IOMMU_V2) {
		global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
					       &global->v2.pta_dma, GFP_KERNEL);
		if (!global->v2.pta_cpu)
			goto free_bad_page;
	}

	global->dev = dev;
	global->version = version;
	global->use = 1;
	mutex_init(&global->lock);

	if (version == ETNAVIV_IOMMU_V1)
		global->ops = &etnaviv_iommuv1_ops;
	else
		global->ops = &etnaviv_iommuv2_ops;

	priv->mmu_global = global;

	return 0;

free_bad_page:
	dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
free_global:
	kfree(global);

	return -ENOMEM;
}

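/* Drop one user of the global MMU state and free it once the last user is gone */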
void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
{
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global = priv->mmu_global;

	if (--global->use > 0)
		return;

	if (global->v2.pta_cpu)
		dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
			    global->v2.pta_cpu, global->v2.pta_dma);

	if (global->bad_page_cpu)
		dma_free_wc(global->dev, SZ_4K,
			    global->bad_page_cpu, global->bad_page_dma);

	mutex_destroy(&global->lock);
	kfree(global);

	priv->mmu_global = NULL;
}