// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

MODULE_IMPORT_NS(DMA_BUF);

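/*
 * Count the number of contiguous DMA chunks in an SG list: entries whose
 * DMA addresses turn out to be adjacent after mapping are folded into a
 * single chunk.
 */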
static unsigned int sg_dma_count_chunks(struct scatterlist *sgl, unsigned int nents)
{
	dma_addr_t next = ~(dma_addr_t)0;
	unsigned int count = 0, i;
	struct scatterlist *s;

	for_each_sg(sgl, s, nents, i) {
		/* sg_dma_address(s) is only valid for entries that have sg_dma_len(s) != 0. */
		if (!sg_dma_len(s))
			continue;

		if (sg_dma_address(s) != next) {
			next = sg_dma_address(s) + sg_dma_len(s);
			count++;
		}
	}

	return count;
}

static inline unsigned int sgt_dma_count_chunks(struct sg_table *sgt)
{
	return sg_dma_count_chunks(sgt->sgl, sgt->nents);
}

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put(&obj->gem);
}

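/*
 * Pin a buffer object for DMA access by the given device and return the
 * resulting mapping. Imported buffers are attached and mapped through the
 * DMA-BUF API; native buffers get an SG table built either from their pages
 * or, for DMA API allocations, via dma_get_sgtable().
 */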
static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
					      enum dma_data_direction direction)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_gem_object *gem = &obj->gem;
	struct host1x_bo_mapping *map;
	int err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return ERR_PTR(-ENOMEM);

	kref_init(&map->ref);
	map->bo = host1x_bo_get(bo);
	map->direction = direction;
	map->dev = dev;

	/*
	 * Imported buffers need special treatment to satisfy the semantics of DMA-BUF.
	 */
	if (gem->import_attach) {
		struct dma_buf *buf = gem->import_attach->dmabuf;

		map->attach = dma_buf_attach(buf, dev);
		if (IS_ERR(map->attach)) {
			err = PTR_ERR(map->attach);
			goto free;
		}

		map->sgt = dma_buf_map_attachment_unlocked(map->attach, direction);
		if (IS_ERR(map->sgt)) {
			dma_buf_detach(buf, map->attach);
			err = PTR_ERR(map->sgt);
			map->sgt = NULL;
			goto free;
		}

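		/* reuse "err" to carry the DMA chunk count down to the "out" label */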
		err = sgt_dma_count_chunks(map->sgt);
		map->size = gem->size;

		goto out;
	}

	/*
	 * If we don't have a mapping for this buffer yet, return an SG table
	 * so that host1x can do the mapping for us via the DMA API.
	 */
	map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
	if (!map->sgt) {
		err = -ENOMEM;
		goto free;
	}

	if (obj->pages) {
		/*
		 * If the buffer object was allocated from the explicit IOMMU
		 * API code paths, construct an SG table from the pages.
		 */
		err = sg_alloc_table_from_pages(map->sgt, obj->pages, obj->num_pages, 0, gem->size,
						GFP_KERNEL);
		if (err < 0)
			goto free;
	} else {
		/*
		 * If the buffer object had no pages allocated and if it was
		 * not imported, it had to be allocated with the DMA API, so
		 * the DMA API helper can be used.
		 */
		err = dma_get_sgtable(dev, map->sgt, obj->vaddr, obj->iova, gem->size);
		if (err < 0)
			goto free;
	}

	err = dma_map_sgtable(dev, map->sgt, direction, 0);
	if (err)
		goto free_sgt;

out:
	/*
	 * If we've manually mapped the buffer object through the IOMMU, make sure to return the
	 * existing IOVA address of our mapping.
	 */
	if (!obj->mm) {
		map->phys = sg_dma_address(map->sgt->sgl);
		map->chunks = err;
	} else {
		map->phys = obj->iova;
		map->chunks = 1;
	}

	map->size = gem->size;

	return map;

free_sgt:
	sg_free_table(map->sgt);
free:
	kfree(map->sgt);
	kfree(map);
	return ERR_PTR(err);
}

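/*
 * Undo tegra_bo_pin(): release the DMA-BUF attachment for imported buffers,
 * or unmap and free the SG table for native ones, then drop the reference
 * taken at pin time.
 */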
static void tegra_bo_unpin(struct host1x_bo_mapping *map)
{
	if (map->attach) {
		dma_buf_unmap_attachment_unlocked(map->attach, map->sgt,
						  map->direction);
		dma_buf_detach(map->attach->dmabuf, map->attach);
	} else {
		dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
		sg_free_table(map->sgt);
		kfree(map->sgt);
	}

	host1x_bo_put(map->bo);
	kfree(map);
}

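/*
 * Return a kernel virtual address for the buffer: the existing vaddr for
 * DMA API allocations, a DMA-BUF vmap for imports, or a fresh
 * write-combined vmap() of the pages otherwise.
 */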
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct iosys_map map = { 0 };
	int ret;

	if (obj->vaddr) {
		return obj->vaddr;
	} else if (obj->gem.import_attach) {
		ret = dma_buf_vmap_unlocked(obj->gem.import_attach->dmabuf, &map);
		return ret ? NULL : map.vaddr;
	} else {
		return vmap(obj->pages, obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
	}
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(addr);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_vunmap_unlocked(obj->gem.import_attach->dmabuf, &map);
	else
		vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
};

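/*
 * Reserve a region of IOVA space from the drm_mm allocator and map the
 * buffer's SG table into the shared IOMMU domain at that address.
 */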
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->iova = bo->mm->start;

	bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->iova, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}

static const struct drm_gem_object_funcs tegra_gem_object_funcs = {
	.free = tegra_bo_free_object,
	.export = tegra_gem_prime_export,
	.vm_ops = &tegra_bo_vm_ops,
};

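/*
 * Allocate and initialize the GEM and host1x parts of a buffer object,
 * rounding the size up to a multiple of the page size and creating the
 * mmap offset. No backing storage is allocated here.
 */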
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->gem.funcs = &tegra_gem_object_funcs;

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
	}
}

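/*
 * Back the buffer object with pages from the shmem backing store, build an
 * SG table for them and map that table for DMA. Used when the buffer will
 * be mapped through the IOMMU rather than allocated via the DMA API.
 */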
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
	if (err)
		goto free_sgt;

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}

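/*
 * Allocate backing storage: page-based with an IOMMU mapping when a domain
 * is available, or a contiguous write-combined DMA allocation otherwise.
 */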
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

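/*
 * Create a fully backed buffer object and apply the layout options
 * requested through the DRM_TEGRA_GEM_CREATE_* flags.
 */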
struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

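/*
 * Create a buffer object along with a userspace handle for it. The handle
 * ends up holding the only reference: the local reference is dropped before
 * returning, so the object is destroyed once the handle is closed.
 */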
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_put(&bo->gem);

	return bo;
}

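/*
 * Wrap a foreign DMA-BUF in a buffer object: attach to the buffer, map it
 * for DMA and, if an IOMMU domain exists, map the resulting SG table into
 * IOVA space. A reference to the DMA-BUF is held for the object's lifetime.
 */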
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment_unlocked(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

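/*
 * Final cleanup once the last GEM reference is gone: drop cached host1x
 * mappings, undo the IOMMU mapping and release either the import
 * attachment or the native backing storage.
 */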
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct host1x_bo_mapping *mapping, *tmp;
	struct tegra_bo *bo = to_tegra_bo(gem);

	/* remove all mappings of this buffer object from any caches */
	list_for_each_entry_safe(mapping, tmp, &bo->base.mappings, list) {
		if (mapping->cache)
			host1x_bo_unpin(mapping);
		else
			dev_err(gem->dev->dev, "mapping %p stale for device %s\n", mapping,
				dev_name(mapping->dev));
	}

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
						  DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

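/*
 * Page fault handler for mmap()ed buffers: only page-backed objects fault
 * pages in lazily; contiguous DMA allocations are mapped up front in
 * __tegra_gem_mmap() and should never fault.
 */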
static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	return vmf_insert_page(vma, vmf->address, page);
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

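/*
 * Set up the VMA for a buffer object: contiguous DMA allocations are
 * remapped immediately via dma_mmap_wc(), while page-backed objects are
 * populated lazily through tegra_bo_fault().
 */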
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vm_flags_clear(vma, VM_PFNMAP);
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

	return __tegra_gem_mmap(gem, vma);
}

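/*
 * DMA-BUF exporter callbacks: build and map a fresh SG table for each
 * importing device, either from the object's pages or via the DMA API
 * helper for contiguous allocations.
 */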
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
					      0, gem->size, GFP_KERNEL) < 0)
			goto free;
	} else {
		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				    gem->size) < 0)
			goto free;
	}

	if (dma_map_sgtable(attach->dev, sgt, dir, 0))
		goto free;

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);

	return 0;
}

static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);

	return 0;
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem = buf->priv;
	int err;

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

	return __tegra_gem_mmap(gem, vma);
}

static int tegra_gem_prime_vmap(struct dma_buf *buf, struct iosys_map *map)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	void *vaddr;

	vaddr = tegra_bo_mmap(&bo->base);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct iosys_map *map)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	tegra_bo_munmap(&bo->base, map->vaddr);
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

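/*
 * Export a GEM object as a DMA-BUF using the driver's own dma_buf_ops,
 * which also lets tegra_gem_prime_import() recognize and short-circuit
 * self-imports.
 */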
struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = gem->dev->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(gem->dev, &exp_info);
}

struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}

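/*
 * Resolve a userspace GEM handle to the host1x_bo embedded in the buffer
 * object. The reference taken by drm_gem_object_lookup() is handed to the
 * caller, who releases it through host1x_bo_put().
 */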
struct host1x_bo *tegra_gem_lookup(struct drm_file *file, u32 handle)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return NULL;

	bo = to_tegra_bo(gem);
	return &bo->base;
}