/*
 *  Copyright © 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/* DOC: VC4 GEM BO management support.
 *
 * The VC4 GPU architecture (both scanout and rendering) has direct
 * access to system memory with no MMU in between.  To support it, we
 * use the GEM CMA helper functions to allocate contiguous ranges of
 * physical memory for our BOs.
 *
 * Since the CMA allocator is very slow, we keep a cache of recently
 * freed BOs around so that the kernel's allocation of objects for 3D
 * rendering can return quickly.
 */
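
/* The cache itself lives on two overlapping lists: bo_cache.size_list
 * is an array of list heads indexed by the BO's size in pages, for
 * fast same-size reuse, while bo_cache.time_list keeps all cached BOs
 * in the order they were freed so that stale entries can be expired.
 */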

#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"

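/* Dump the BO allocation and cache statistics to the kernel log. */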
static void vc4_bo_stats_dump(struct vc4_dev *vc4)
{
	DRM_INFO("num bos allocated: %d\n",
		 vc4->bo_stats.num_allocated);
	DRM_INFO("size bos allocated: %dkb\n",
		 vc4->bo_stats.size_allocated / 1024);
	DRM_INFO("num bos used: %d\n",
		 vc4->bo_stats.num_allocated - vc4->bo_stats.num_cached);
	DRM_INFO("size bos used: %dkb\n",
		 (vc4->bo_stats.size_allocated -
		  vc4->bo_stats.size_cached) / 1024);
	DRM_INFO("num bos cached: %d\n",
		 vc4->bo_stats.num_cached);
	DRM_INFO("size bos cached: %dkb\n",
		 vc4->bo_stats.size_cached / 1024);
}

#ifdef CONFIG_DEBUG_FS
int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo_stats stats;

	/* Take a snapshot of the current stats with the lock held. */
	mutex_lock(&vc4->bo_lock);
	stats = vc4->bo_stats;
	mutex_unlock(&vc4->bo_lock);

	seq_printf(m, "num bos allocated: %d\n",
		   stats.num_allocated);
	seq_printf(m, "size bos allocated: %dkb\n",
		   stats.size_allocated / 1024);
	seq_printf(m, "num bos used: %d\n",
		   stats.num_allocated - stats.num_cached);
	seq_printf(m, "size bos used: %dkb\n",
		   (stats.size_allocated - stats.size_cached) / 1024);
	seq_printf(m, "num bos cached: %d\n",
		   stats.num_cached);
	seq_printf(m, "size bos cached: %dkb\n",
		   stats.size_cached / 1024);

	return 0;
}
#endif

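/* Map a BO size to its index in bo_cache.size_list: a one-page BO
 * lands at index 0, a two-page BO at index 1, and so on.
 */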
static uint32_t bo_page_index(size_t size)
{
	return (size / PAGE_SIZE) - 1;
}

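/* Free a BO's validated-shader state and its CMA backing storage, and
 * update the allocation stats to match.
 */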
/* Must be called with bo_lock held. */
static void vc4_bo_destroy(struct vc4_bo *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

	if (bo->validated_shader) {
		kfree(bo->validated_shader->uniform_addr_offsets);
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	vc4->bo_stats.num_allocated--;
	vc4->bo_stats.size_allocated -= obj->size;
	drm_gem_cma_free_object(obj);
}

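/* Unlink a BO from both cache lists and update the cache stats. */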
/* Must be called with bo_lock held. */
static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

	vc4->bo_stats.num_cached--;
	vc4->bo_stats.size_cached -= obj->size;

	list_del(&bo->unref_head);
	list_del(&bo->size_head);
}

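/* Return the size_list head for BOs of the given size, growing the
 * array (at least doubling it) when this size hasn't been seen before.
 * Returns NULL if the new array can't be allocated.
 */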
static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
						     size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);

	if (vc4->bo_cache.size_list_size <= page_index) {
		uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
					page_index + 1);
		struct list_head *new_list;
		uint32_t i;

		new_list = kmalloc_array(new_size, sizeof(struct list_head),
					 GFP_KERNEL);
		if (!new_list)
			return NULL;

		/* Rebase the old cached BO lists to their new list
		 * head locations.
		 */
		for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
			struct list_head *old_list =
				&vc4->bo_cache.size_list[i];

			if (list_empty(old_list))
				INIT_LIST_HEAD(&new_list[i]);
			else
				list_replace(old_list, &new_list[i]);
		}
		/* And initialize the brand new BO list heads. */
		for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
			INIT_LIST_HEAD(&new_list[i]);

		kfree(vc4->bo_cache.size_list);
		vc4->bo_cache.size_list = new_list;
		vc4->bo_cache.size_list_size = new_size;
	}

	return &vc4->bo_cache.size_list[page_index];
}

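/* Immediately free every BO in the cache; used when CMA runs dry and
 * on driver teardown.
 */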
static void vc4_bo_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->bo_lock);
	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
	mutex_unlock(&vc4->bo_lock);
}

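/* Try to satisfy an allocation from the cache: take the most recently
 * freed BO of the matching size, if any, and give it a fresh refcount.
 */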
static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
					    uint32_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);
	struct vc4_bo *bo = NULL;

	size = roundup(size, PAGE_SIZE);

	mutex_lock(&vc4->bo_lock);
	if (page_index >= vc4->bo_cache.size_list_size)
		goto out;

	if (list_empty(&vc4->bo_cache.size_list[page_index]))
		goto out;

	bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
			      struct vc4_bo, size_head);
	vc4_bo_remove_from_cache(bo);
	kref_init(&bo->base.base.refcount);

out:
	mutex_unlock(&vc4->bo_lock);
	return bo;
}

/**
 * vc4_create_object - Implementation of driver->gem_create_object.
 *
 * This lets the CMA helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&vc4->bo_lock);
	vc4->bo_stats.num_allocated++;
	vc4->bo_stats.size_allocated += size;
	mutex_unlock(&vc4->bo_lock);

	return &bo->base.base;
}

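/* Allocate a BO of at least unaligned_size bytes, optionally trying
 * the kernel BO cache first.  If CMA is exhausted, the cache is purged
 * and the allocation is retried once before giving up.
 */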
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
			     bool from_cache)
{
	size_t size = roundup(unaligned_size, PAGE_SIZE);
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_gem_cma_object *cma_obj;

	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* First, try to get a vc4_bo from the kernel BO cache. */
	if (from_cache) {
		struct vc4_bo *bo = vc4_bo_get_from_cache(dev, size);

		if (bo)
			return bo;
	}

	cma_obj = drm_gem_cma_create(dev, size);
	if (IS_ERR(cma_obj)) {
		/*
		 * If we've run out of CMA memory, kill the cache of
		 * CMA allocations we've got lying around and try again.
		 */
		vc4_bo_cache_purge(dev);

		cma_obj = drm_gem_cma_create(dev, size);
		if (IS_ERR(cma_obj)) {
			DRM_ERROR("Failed to allocate from CMA:\n");
			vc4_bo_stats_dump(vc4);
			return ERR_PTR(-ENOMEM);
		}
	}

	return to_vc4_bo(&cma_obj->base);
}

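/* Implementation of the dumb-buffer creation hook: round the pitch and
 * size up to something usable, allocate a fresh BO (bypassing the
 * cache), and return a handle to it.
 */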
int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	bo = vc4_bo_create(dev, args->size, false);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_unreference_unlocked(&bo->base.base);

	return ret;
}

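/* Walk the time list from the oldest entry, destroying BOs that have
 * sat in the cache for more than a second, and rearm the timer if
 * newer entries remain.
 */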
/* Must be called with bo_lock held. */
static void vc4_bo_cache_free_old(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long expire_time = jiffies - msecs_to_jiffies(1000);

	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		if (time_before(expire_time, bo->free_time)) {
			mod_timer(&vc4->bo_cache.time_timer,
				  round_jiffies_up(jiffies +
						   msecs_to_jiffies(1000)));
			return;
		}

		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
}

/* Called on the last userspace/kernel unreference of the BO.  Returns
 * it to the BO cache if possible, otherwise frees it.
 */
void vc4_free_object(struct drm_gem_object *gem_bo)
{
	struct drm_device *dev = gem_bo->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = to_vc4_bo(gem_bo);
	struct list_head *cache_list;

	mutex_lock(&vc4->bo_lock);
	/* If the object references someone else's memory, we can't
	 * cache it.
	 */
	if (gem_bo->import_attach) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* Don't cache if it was publicly named. */
	if (gem_bo->name) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* If this object was partially constructed but CMA allocation
	 * had failed, just free it.
	 */
	if (!bo->base.vaddr) {
		vc4_bo_destroy(bo);
		goto out;
	}

	cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
	if (!cache_list) {
		vc4_bo_destroy(bo);
		goto out;
	}

	if (bo->validated_shader) {
		kfree(bo->validated_shader->uniform_addr_offsets);
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	bo->free_time = jiffies;
	list_add(&bo->size_head, cache_list);
	list_add(&bo->unref_head, &vc4->bo_cache.time_list);

	vc4->bo_stats.num_cached++;
	vc4->bo_stats.size_cached += gem_bo->size;

	vc4_bo_cache_free_old(dev);

out:
	mutex_unlock(&vc4->bo_lock);
}

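/* Workqueue handler backing the cache timer: expiry has to happen in
 * process context because it takes bo_lock, a mutex.
 */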
static void vc4_bo_cache_time_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, bo_cache.time_work);
	struct drm_device *dev = vc4->dev;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_cache_free_old(dev);
	mutex_unlock(&vc4->bo_lock);
}

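/* Timer callback: punt cache expiry to the workqueue. */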
static void vc4_bo_cache_time_timer(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	schedule_work(&vc4->bo_cache.time_work);
}

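/* Refuse to export validated shader BOs through PRIME; an importer
 * could otherwise write to them and bypass the shader validator.
 */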
struct dma_buf *
vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader) {
		DRM_ERROR("Attempting to export shader BO\n");
		return ERR_PTR(-EINVAL);
	}

	return drm_gem_prime_export(dev, obj, flags);
}

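/* mmap of a BO through the DRM file: let drm_gem_mmap() set up the
 * VMA, then replace its fault-based setup with a direct write-combined
 * mapping of the CMA buffer.
 */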
int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	gem_obj = vma->vm_private_data;
	bo = to_vc4_bo(gem_obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_ERROR("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
			  bo->base.paddr, vma->vm_end - vma->vm_start);
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

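/* PRIME mmap: disallow writable mappings of validated shader BOs, then
 * defer to the CMA helper.
 */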
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_ERROR("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	return drm_gem_cma_prime_mmap(obj, vma);
}

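/* PRIME vmap: disallow kernel mappings of validated shader BOs, then
 * defer to the CMA helper.
 */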
void *vc4_prime_vmap(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader) {
		DRM_ERROR("mmapping of shader BOs not allowed.\n");
		return ERR_PTR(-EINVAL);
	}

	return drm_gem_cma_prime_vmap(obj);
}

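/* ioctl: allocate a BO of the requested size and return a handle to
 * it.  A rough sketch of the userspace side (error handling omitted):
 *
 *	struct drm_vc4_create_bo create = { .size = 4096 };
 *
 *	ioctl(fd, DRM_IOCTL_VC4_CREATE_BO, &create);
 *	(create.handle now names the new BO)
 */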
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_vc4_create_bo *args = data;
	struct vc4_bo *bo = NULL;
	int ret;

	/*
	 * We can't allocate from the BO cache, because the BOs don't
	 * get zeroed, and that might leak data between users.
	 */
	bo = vc4_bo_create(dev, args->size, false);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_unreference_unlocked(&bo->base.base);

	return ret;
}

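/* ioctl: report the fake mmap offset that userspace should pass to
 * mmap() for the given BO handle, roughly (sketch, error handling
 * omitted):
 *
 *	struct drm_vc4_mmap_bo map = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_VC4_MMAP_BO, &map);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 */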
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_vc4_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}

	/* The mmap offset was set up at BO allocation time. */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	drm_gem_object_unreference_unlocked(gem_obj);
	return 0;
}

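/* ioctl: create a BO holding shader code, copied in from userspace and
 * run through the shader validator before a handle is exposed.
 */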
int
vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vc4_create_shader_bo *args = data;
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->size == 0)
		return -EINVAL;

	if (args->size % sizeof(u64) != 0)
		return -EINVAL;

	if (args->flags != 0) {
		DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
		return -EINVAL;
	}

	if (args->pad != 0) {
		DRM_INFO("Pad set: 0x%08x\n", args->pad);
		return -EINVAL;
	}

	bo = vc4_bo_create(dev, args->size, true);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	if (copy_from_user(bo->base.vaddr,
			   (void __user *)(uintptr_t)args->data,
			   args->size)) {
		ret = -EFAULT;
		goto fail;
	}
	/* Clear the rest of the BO: since it may have come from the
	 * cache, it could still hold another user's stale data.
	 */
	memset(bo->base.vaddr + args->size, 0,
	       bo->base.base.size - args->size);

	bo->validated_shader = vc4_validate_shader(&bo->base);
	if (!bo->validated_shader) {
		ret = -EINVAL;
		goto fail;
	}

	/* We have to create the handle after validation, to avoid
	 * races where userspace could mmap the shader BO before it has
	 * been validated.
	 */
	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);

 fail:
	drm_gem_object_unreference_unlocked(&bo->base.base);

	return ret;
}

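/* Set up the BO cache state at driver load: the lock, the time list,
 * and the work/timer pair that expires stale cache entries.
 */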
void vc4_bo_cache_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_init(&vc4->bo_lock);

	INIT_LIST_HEAD(&vc4->bo_cache.time_list);

	INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
	setup_timer(&vc4->bo_cache.time_timer,
		    vc4_bo_cache_time_timer,
		    (unsigned long)dev);
}

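/* Tear down the cache at driver unload, warning if BOs have leaked. */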
void vc4_bo_cache_destroy(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	del_timer(&vc4->bo_cache.time_timer);
	cancel_work_sync(&vc4->bo_cache.time_work);

	vc4_bo_cache_purge(dev);

	if (vc4->bo_stats.num_allocated) {
		DRM_ERROR("Destroying BO cache while BOs still allocated:\n");
		vc4_bo_stats_dump(vc4);
	}
}