/*
 * Copyright © 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 GEM BO management support
 *
 * The VC4 GPU architecture (both scanout and rendering) has direct
 * access to system memory with no MMU in between. To support it, we
 * use the GEM CMA helper functions to allocate contiguous ranges of
 * physical memory for our BOs.
 *
 * Since the CMA allocator is very slow, we keep a cache of recently
 * freed BOs around so that the kernel's allocation of objects for 3D
 * rendering can return quickly.
 */

#include <linux/dma-buf.h>

#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"

static const char * const bo_type_names[] = {
	"kernel",
	"V3D",
	"V3D shader",
	"dumb",
	"binner",
	"RCL",
	"BCL",
	"kernel BO cache",
};

static bool is_user_label(int label)
{
	return label >= VC4_BO_TYPE_COUNT;
}

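/* Dumps the per-label BO allocation counts and sizes to the kernel
 * log. Used to diagnose CMA exhaustion when an allocation fails.
 */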
static void vc4_bo_stats_dump(struct vc4_dev *vc4)
{
	int i;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].num_allocated)
			continue;

		DRM_INFO("%30s: %6dkb BOs (%d)\n",
			 vc4->bo_labels[i].name,
			 vc4->bo_labels[i].size_allocated / 1024,
			 vc4->bo_labels[i].num_allocated);
	}
}

#ifdef CONFIG_DEBUG_FS
int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	mutex_lock(&vc4->bo_lock);
	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].num_allocated)
			continue;

		seq_printf(m, "%30s: %6dkb BOs (%d)\n",
			   vc4->bo_labels[i].name,
			   vc4->bo_labels[i].size_allocated / 1024,
			   vc4->bo_labels[i].num_allocated);
	}
	mutex_unlock(&vc4->bo_lock);

	return 0;
}
#endif

/* Takes ownership of *name and returns the appropriate slot for it in
 * the bo_labels[] array, extending it as necessary.
 *
 * This is inefficient and could use a hash table instead of walking
 * an array and strcmp()ing. However, the assumption is that user
 * labeling will be infrequent (scanout buffers and other long-lived
 * objects, or debug driver builds), so we can live with it for now.
 */
static int vc4_get_user_label(struct vc4_dev *vc4, const char *name)
{
	int i;
	int free_slot = -1;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].name) {
			free_slot = i;
		} else if (strcmp(vc4->bo_labels[i].name, name) == 0) {
			kfree(name);
			return i;
		}
	}

	if (free_slot != -1) {
		WARN_ON(vc4->bo_labels[free_slot].num_allocated != 0);
		vc4->bo_labels[free_slot].name = name;
		return free_slot;
	} else {
		u32 new_label_count = vc4->num_labels + 1;
		struct vc4_label *new_labels =
			krealloc(vc4->bo_labels,
				 new_label_count * sizeof(*new_labels),
				 GFP_KERNEL);

		if (!new_labels) {
			kfree(name);
			return -1;
		}

		free_slot = vc4->num_labels;
		vc4->bo_labels = new_labels;
		vc4->num_labels = new_label_count;

		vc4->bo_labels[free_slot].name = name;
		vc4->bo_labels[free_slot].num_allocated = 0;
		vc4->bo_labels[free_slot].size_allocated = 0;

		return free_slot;
	}
}

static void vc4_bo_set_label(struct drm_gem_object *gem_obj, int label)
{
	struct vc4_bo *bo = to_vc4_bo(gem_obj);
	struct vc4_dev *vc4 = to_vc4_dev(gem_obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	if (label != -1) {
		vc4->bo_labels[label].num_allocated++;
		vc4->bo_labels[label].size_allocated += gem_obj->size;
	}

	vc4->bo_labels[bo->label].num_allocated--;
	vc4->bo_labels[bo->label].size_allocated -= gem_obj->size;

	if (vc4->bo_labels[bo->label].num_allocated == 0 &&
	    is_user_label(bo->label)) {
		/* Free user BO label slots on last unreference.
		 * Slots are just where we track the stats for a given
		 * name, and once a name is unused we can reuse that
		 * slot.
		 */
		kfree(vc4->bo_labels[bo->label].name);
		vc4->bo_labels[bo->label].name = NULL;
	}

	bo->label = label;
}

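/* The BO cache buckets BOs by page count: a BO of N pages is tracked
 * on size_list[N - 1].
 */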
static uint32_t bo_page_index(size_t size)
{
	return (size / PAGE_SIZE) - 1;
}

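/* Actually frees the BO's backing memory and bookkeeping, as opposed
 * to returning it to the cache. Callers must hold bo_lock, since the
 * label stats get updated.
 */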
static void vc4_bo_destroy(struct vc4_bo *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	vc4_bo_set_label(obj, -1);

	if (bo->validated_shader) {
		kfree(bo->validated_shader->uniform_addr_offsets);
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	reservation_object_fini(&bo->_resv);

	drm_gem_cma_free_object(obj);
}

static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	lockdep_assert_held(&vc4->bo_lock);
	list_del(&bo->unref_head);
	list_del(&bo->size_head);
}

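/* Returns the cache list for BOs of the given size, growing the
 * size_list[] array (and rebasing the existing list heads) if this
 * size has never been cached before.
 */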
static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
						     size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);

	if (vc4->bo_cache.size_list_size <= page_index) {
		uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
					page_index + 1);
		struct list_head *new_list;
		uint32_t i;

		new_list = kmalloc_array(new_size, sizeof(struct list_head),
					 GFP_KERNEL);
		if (!new_list)
			return NULL;

		/* Rebase the old cached BO lists to their new list
		 * head locations.
		 */
		for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
			struct list_head *old_list =
				&vc4->bo_cache.size_list[i];

			if (list_empty(old_list))
				INIT_LIST_HEAD(&new_list[i]);
			else
				list_replace(old_list, &new_list[i]);
		}
		/* And initialize the brand new BO list heads. */
		for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
			INIT_LIST_HEAD(&new_list[i]);

		kfree(vc4->bo_cache.size_list);
		vc4->bo_cache.size_list = new_list;
		vc4->bo_cache.size_list_size = new_size;
	}

	return &vc4->bo_cache.size_list[page_index];
}

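/* Frees every BO currently sitting in the cache, for example when CMA
 * is exhausted or the driver is being torn down.
 */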
static void vc4_bo_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->bo_lock);
	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
	mutex_unlock(&vc4->bo_lock);
}

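/* Tries to satisfy an allocation with a cached BO of the same page
 * count, relabeling it to the new type on success. Returns NULL if
 * the cache has nothing of that size.
 */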
static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
					    uint32_t size,
					    enum vc4_kernel_bo_type type)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);
	struct vc4_bo *bo = NULL;

	size = roundup(size, PAGE_SIZE);

	mutex_lock(&vc4->bo_lock);
	if (page_index >= vc4->bo_cache.size_list_size)
		goto out;

	if (list_empty(&vc4->bo_cache.size_list[page_index]))
		goto out;

	bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
			      struct vc4_bo, size_head);
	vc4_bo_remove_from_cache(bo);
	kref_init(&bo->base.base.refcount);

out:
	if (bo)
		vc4_bo_set_label(&bo->base.base, type);
	mutex_unlock(&vc4->bo_lock);
	return bo;
}

/**
 * vc4_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the CMA helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&vc4->bo_lock);
	bo->label = VC4_BO_TYPE_KERNEL;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
	mutex_unlock(&vc4->bo_lock);
	bo->resv = &bo->_resv;
	reservation_object_init(bo->resv);

	return &bo->base.base;
}

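/**
 * vc4_bo_create() - Allocates a VC4 BO, preferably from the cache.
 * @dev: DRM device
 * @unaligned_size: Requested size, which will be page-aligned
 * @allow_unzeroed: Skip zeroing BOs returned from the cache (callers
 *		    must ensure stale contents can't leak to userspace)
 * @type: Kernel BO type label for the allocation statistics
 *
 * On a cache miss, falls back to a fresh CMA allocation, purging the
 * cache and retrying once if CMA is exhausted.
 */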
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
			     bool allow_unzeroed, enum vc4_kernel_bo_type type)
{
	size_t size = roundup(unaligned_size, PAGE_SIZE);
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_gem_cma_object *cma_obj;
	struct vc4_bo *bo;

	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* First, try to get a vc4_bo from the kernel BO cache. */
	bo = vc4_bo_get_from_cache(dev, size, type);
	if (bo) {
		if (!allow_unzeroed)
			memset(bo->base.vaddr, 0, bo->base.base.size);
		return bo;
	}

	cma_obj = drm_gem_cma_create(dev, size);
	if (IS_ERR(cma_obj)) {
		/*
		 * If we've run out of CMA memory, kill the cache of
		 * CMA allocations we've got lying around and try again.
		 */
		vc4_bo_cache_purge(dev);

		cma_obj = drm_gem_cma_create(dev, size);
		if (IS_ERR(cma_obj)) {
			DRM_ERROR("Failed to allocate from CMA:\n");
			vc4_bo_stats_dump(vc4);
			return ERR_PTR(-ENOMEM);
		}
	}
	bo = to_vc4_bo(&cma_obj->base);

	mutex_lock(&vc4->bo_lock);
	vc4_bo_set_label(&cma_obj->base, type);
	mutex_unlock(&vc4->bo_lock);

	return bo;
}

int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put_unlocked(&bo->base.base);

	return ret;
}

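/* Evicts cached BOs that have gone unused for more than a second,
 * rearming the cache timer if newer entries remain.
 */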
static void vc4_bo_cache_free_old(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long expire_time = jiffies - msecs_to_jiffies(1000);

	lockdep_assert_held(&vc4->bo_lock);

	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		if (time_before(expire_time, bo->free_time)) {
			mod_timer(&vc4->bo_cache.time_timer,
				  round_jiffies_up(jiffies +
						   msecs_to_jiffies(1000)));
			return;
		}

		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
}

/* Called on the last userspace/kernel unreference of the BO. Returns
 * it to the BO cache if possible, otherwise frees it.
 */
void vc4_free_object(struct drm_gem_object *gem_bo)
{
	struct drm_device *dev = gem_bo->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = to_vc4_bo(gem_bo);
	struct list_head *cache_list;

	mutex_lock(&vc4->bo_lock);
	/* If the object references someone else's memory, we can't cache it.
	 */
	if (gem_bo->import_attach) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* Don't cache if it was publicly named. */
	if (gem_bo->name) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* If this object was partially constructed but CMA allocation
	 * had failed, just free it.
	 */
	if (!bo->base.vaddr) {
		vc4_bo_destroy(bo);
		goto out;
	}

	cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
	if (!cache_list) {
		vc4_bo_destroy(bo);
		goto out;
	}

	if (bo->validated_shader) {
		kfree(bo->validated_shader->uniform_addr_offsets);
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	bo->t_format = false;
	bo->free_time = jiffies;
	list_add(&bo->size_head, cache_list);
	list_add(&bo->unref_head, &vc4->bo_cache.time_list);

	vc4_bo_set_label(&bo->base.base, VC4_BO_TYPE_KERNEL_CACHE);

	vc4_bo_cache_free_old(dev);

out:
	mutex_unlock(&vc4->bo_lock);
}

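/* Work item that does the actual stale-BO eviction, since the timer
 * callback runs in atomic context and can't take bo_lock (a mutex).
 */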
static void vc4_bo_cache_time_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, bo_cache.time_work);
	struct drm_device *dev = vc4->dev;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_cache_free_old(dev);
	mutex_unlock(&vc4->bo_lock);
}

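/* Timer callback: just kicks the work item above to do the eviction. */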
static void vc4_bo_cache_time_timer(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	schedule_work(&vc4->bo_cache.time_work);
}

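/* Exposes the BO's reservation object for PRIME, so that fencing is
 * shared with importers and exporters of the buffer.
 */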
struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	return bo->resv;
}

struct dma_buf *
vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader) {
		DRM_DEBUG("Attempting to export shader BO\n");
		return ERR_PTR(-EINVAL);
	}

	return drm_gem_prime_export(dev, obj, flags);
}

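/* Maps a BO into userspace. Beyond the normal drm_gem_mmap() setup,
 * this rejects writable mappings of validated shader BOs and maps the
 * buffer write-combined through the CMA backing.
 */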
int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	gem_obj = vma->vm_private_data;
	bo = to_vc4_bo(gem_obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
			  bo->base.paddr, vma->vm_end - vma->vm_start);
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	return drm_gem_cma_prime_mmap(obj, vma);
}

void *vc4_prime_vmap(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader) {
		DRM_DEBUG("vmapping of shader BOs not allowed.\n");
		return ERR_PTR(-EINVAL);
	}

	return drm_gem_cma_prime_vmap(obj);
}

struct drm_gem_object *
vc4_prime_import_sg_table(struct drm_device *dev,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sgt)
{
	struct drm_gem_object *obj;
	struct vc4_bo *bo;

	obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj))
		return obj;

	bo = to_vc4_bo(obj);
	bo->resv = attach->dmabuf->resv;

	return obj;
}

int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_vc4_create_bo *args = data;
	struct vc4_bo *bo = NULL;
	int ret;

	/*
	 * Don't allow unzeroed BOs from the cache: stale contents
	 * could leak data between users.
	 */
	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_V3D);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put_unlocked(&bo->base.base);

	return ret;
}

int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_vc4_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}

	/* The mmap offset was set up at BO allocation time. */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	drm_gem_object_put_unlocked(gem_obj);
	return 0;
}

int
vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vc4_create_shader_bo *args = data;
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->size == 0)
		return -EINVAL;

	if (args->size % sizeof(u64) != 0)
		return -EINVAL;

	if (args->flags != 0) {
		DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
		return -EINVAL;
	}

	if (args->pad != 0) {
		DRM_INFO("Pad set: 0x%08x\n", args->pad);
		return -EINVAL;
	}

	bo = vc4_bo_create(dev, args->size, true, VC4_BO_TYPE_V3D_SHADER);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	if (copy_from_user(bo->base.vaddr,
			   (void __user *)(uintptr_t)args->data,
			   args->size)) {
		ret = -EFAULT;
		goto fail;
	}
	/* Clear the rest of the memory, which may hold stale data if
	 * the BO came from the cache.
	 */
	memset(bo->base.vaddr + args->size, 0,
	       bo->base.base.size - args->size);

	bo->validated_shader = vc4_validate_shader(&bo->base);
	if (!bo->validated_shader) {
		ret = -EINVAL;
		goto fail;
	}

	/* We have to create the handle after validation, to avoid
	 * races where users mmap the shader BO before it's validated.
	 */
	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);

fail:
	drm_gem_object_put_unlocked(&bo->base.base);

	return ret;
}

/**
 * vc4_set_tiling_ioctl() - Sets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * The tiling state of the BO determines the default modifier of a
 * framebuffer if no explicit modifier was set by userspace, and the
 * return value of vc4_get_tiling_ioctl() (so that userspace importing
 * a BO from dmabuf can treat it as using the same tiling format as
 * the producer).
 */
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_set_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	bool t_format;

	if (args->flags != 0)
		return -EINVAL;

	switch (args->modifier) {
	case DRM_FORMAT_MOD_NONE:
		t_format = false;
		break;
	case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
		t_format = true;
		break;
	default:
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);
	bo->t_format = t_format;

	drm_gem_object_put_unlocked(gem_obj);

	return 0;
}

/**
 * vc4_get_tiling_ioctl() - Gets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Returns the tiling modifier for a BO as set by vc4_set_tiling_ioctl().
 */
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_get_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (args->flags != 0 || args->modifier != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);

	if (bo->t_format)
		args->modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
	else
		args->modifier = DRM_FORMAT_MOD_NONE;

	drm_gem_object_put_unlocked(gem_obj);

	return 0;
}

int vc4_bo_cache_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	/* Create the initial set of BO labels that the kernel will
	 * use. This lets us avoid a bunch of string reallocation in
	 * the kernel's draw and BO allocation paths.
	 */
	vc4->bo_labels = kcalloc(VC4_BO_TYPE_COUNT, sizeof(*vc4->bo_labels),
				 GFP_KERNEL);
	if (!vc4->bo_labels)
		return -ENOMEM;
	vc4->num_labels = VC4_BO_TYPE_COUNT;

	BUILD_BUG_ON(ARRAY_SIZE(bo_type_names) != VC4_BO_TYPE_COUNT);
	for (i = 0; i < VC4_BO_TYPE_COUNT; i++)
		vc4->bo_labels[i].name = bo_type_names[i];

	mutex_init(&vc4->bo_lock);

	INIT_LIST_HEAD(&vc4->bo_cache.time_list);

	INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
	setup_timer(&vc4->bo_cache.time_timer,
		    vc4_bo_cache_time_timer,
		    (unsigned long)dev);

	return 0;
}

void vc4_bo_cache_destroy(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	del_timer(&vc4->bo_cache.time_timer);
	cancel_work_sync(&vc4->bo_cache.time_work);

	vc4_bo_cache_purge(dev);

	for (i = 0; i < vc4->num_labels; i++) {
		if (vc4->bo_labels[i].num_allocated) {
			DRM_ERROR("Destroying BO cache with %d %s "
				  "BOs still allocated\n",
				  vc4->bo_labels[i].num_allocated,
				  vc4->bo_labels[i].name);
		}

		if (is_user_label(i))
			kfree(vc4->bo_labels[i].name);
	}
	kfree(vc4->bo_labels);
}

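/* Implements DRM_IOCTL_VC4_LABEL_BO: applies a userspace-supplied
 * name to a BO for the allocation statistics reported through
 * vc4_bo_stats_debugfs().
 */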
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_label_bo *args = data;
	char *name;
	struct drm_gem_object *gem_obj;
	int ret = 0, label;

	if (!args->len)
		return -EINVAL;

	name = strndup_user(u64_to_user_ptr(args->name), args->len + 1);
	if (IS_ERR(name))
		return PTR_ERR(name);

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
		kfree(name);
		return -ENOENT;
	}

	mutex_lock(&vc4->bo_lock);
	label = vc4_get_user_label(vc4, name);
	if (label != -1)
		vc4_bo_set_label(gem_obj, label);
	else
		ret = -ENOMEM;
	mutex_unlock(&vc4->bo_lock);

	drm_gem_object_put_unlocked(gem_obj);

	return ret;
}