/* sunxi_drm_gem.c
2 *
3 * Copyright (C) 2022 Allwinnertech Co., Ltd.
4 * Authors: zhengwanyu <zhengwanyu@allwinnertech.com>
5 * Authors: hongyaobin <hongyaobin@allwinnertech.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 */
12
13 #include <linux/mm.h>
14 #include <linux/slab.h>
15 #include <linux/mutex.h>
16 #include <linux/export.h>
17 #include <linux/dma-buf.h>
18 #include <linux/dma-mapping.h>
19
20 #include <drm/drm.h>
21 #include <drm/drm_vma_manager.h>
22 #include <drm/drm_prime.h>
23 #include "sunxi_drm_gem.h"
24 #include "sunxi_drm_drv.h"
25 #include "sunxi_drm_iommu.h"
26 #include "drm/sunxi_drm.h"
27
/*
 * sunxi_drm_alloc_buf - allocate the DMA backing store for a GEM object
 * @sunxi_gem_obj: object whose ->size and ->flags are already set
 *
 * Allocates sunxi_gem_obj->size bytes via the DMA API, records the bus
 * address in ->dma_addr and fills ->pages with the struct page pointers
 * backing the allocation (needed later for PRIME export).
 *
 * Returns 0 on success or a negative error code.
 */
static int sunxi_drm_alloc_buf(struct sunxi_drm_gem_object *sunxi_gem_obj)
{
	struct drm_device *dev = sunxi_gem_obj->base.dev;
	unsigned long attr = 0;
	unsigned int nr_pages;
	struct sg_table sgt;
	int ret = -ENOMEM;

	/* Idempotent: a second call on an already-backed object is a no-op. */
	if (sunxi_gem_obj->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

	sunxi_gem_obj->dma_attrs = 0;

	/*
	 * if SUNXI_BO_CONTIG, fully physically contiguous memory
	 * region will be allocated else physically contiguous
	 * as possible.
	 */
	if (!(sunxi_gem_obj->flags & SUNXI_BO_NONCONTIG))
		sunxi_gem_obj->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;

	/*
	 * if SUNXI_BO_WC or SUNXI_BO_NONCACHABLE, writecombine mapping
	 * else cachable mapping.
	 */
	if (sunxi_gem_obj->flags & SUNXI_BO_WC ||
	    !(sunxi_gem_obj->flags & SUNXI_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;

	sunxi_gem_obj->dma_attrs |= attr;
	/*
	 * NOTE(review): with DMA_ATTR_NO_KERNEL_MAPPING the ->vaddr stored
	 * below is an opaque cookie for dma_free/dma_mmap, not necessarily a
	 * dereferenceable kernel address — confirm no caller reads through it.
	 */
	sunxi_gem_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	/* ->size was page-aligned by the caller (sunxi_drm_gem_create()). */
	nr_pages = sunxi_gem_obj->size >> PAGE_SHIFT;

	sunxi_gem_obj->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
			GFP_KERNEL | __GFP_ZERO);
	if (!sunxi_gem_obj->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		return -ENOMEM;
	}

	sunxi_gem_obj->vaddr = dma_alloc_attrs(dev->dev, sunxi_gem_obj->size,
					     &sunxi_gem_obj->dma_addr, GFP_KERNEL,
					     sunxi_gem_obj->dma_attrs);
	if (!sunxi_gem_obj->vaddr) {
		DRM_ERROR("failed to allocate buffer.\n");
		goto err_free;
	}

	/*
	 * Build a temporary (stack-owned) sg_table just to recover the
	 * struct page pointers of the allocation; it is freed again below.
	 */
	ret = dma_get_sgtable_attrs(dev->dev, &sgt, sunxi_gem_obj->vaddr,
				    sunxi_gem_obj->dma_addr, sunxi_gem_obj->size,
				    sunxi_gem_obj->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to get sgtable.\n");
		goto err_dma_free;
	}

	if (drm_prime_sg_to_page_addr_arrays(&sgt, sunxi_gem_obj->pages, NULL,
					     nr_pages)) {
		DRM_ERROR("invalid sgtable.\n");
		ret = -EINVAL;
		goto err_sgt_free;
	}

	sg_free_table(&sgt);

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)sunxi_gem_obj->dma_addr, sunxi_gem_obj->size);

	return 0;

err_sgt_free:
	sg_free_table(&sgt);
err_dma_free:
	dma_free_attrs(dev->dev, sunxi_gem_obj->size, sunxi_gem_obj->vaddr,
		       sunxi_gem_obj->dma_addr, sunxi_gem_obj->dma_attrs);
err_free:
	kvfree(sunxi_gem_obj->pages);

	return ret;
}
111
sunxi_drm_free_buf(struct sunxi_drm_gem_object * sunxi_gem_obj)112 static void sunxi_drm_free_buf(struct sunxi_drm_gem_object *sunxi_gem_obj)
113 {
114 struct drm_device *dev = sunxi_gem_obj->base.dev;
115
116 if (!sunxi_gem_obj->dma_addr) {
117 DRM_DEBUG_KMS("dma_addr is invalid.\n");
118 return;
119 }
120
121 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
122 (unsigned long)sunxi_gem_obj->dma_addr, sunxi_gem_obj->size);
123
124 dma_free_attrs(dev->dev, sunxi_gem_obj->size, sunxi_gem_obj->vaddr,
125 (dma_addr_t)sunxi_gem_obj->dma_addr,
126 sunxi_gem_obj->dma_attrs);
127
128 kvfree(sunxi_gem_obj->pages);
129 }
130
sunxi_gem_dump(struct drm_gem_object * gem_obj)131 void sunxi_gem_dump(struct drm_gem_object *gem_obj)
132 {
133 struct sunxi_drm_gem_object *sunxi_gem_obj = to_sunxi_drm_gem_obj(gem_obj);
134
135 DRM_INFO("gem_obj: size:%u name:%d handle count:%u\n",
136 (unsigned int)gem_obj->size, gem_obj->name,
137 gem_obj->handle_count);
138 DRM_INFO("phy_addr:0x%lx virt_addr:0x%lx sgt:%p\n",
139 (unsigned long)sunxi_gem_obj->dma_addr,
140 (unsigned long)sunxi_gem_obj->vaddr,
141 sunxi_gem_obj->sgt);
142 }
143
sunxi_drm_gem_show(char * buf,struct sunxi_drm_gem_object * sunxi_gem_obj,unsigned int offset,unsigned int pitches)144 ssize_t sunxi_drm_gem_show(char *buf,
145 struct sunxi_drm_gem_object *sunxi_gem_obj,
146 unsigned int offset, unsigned int pitches)
147 {
148 ssize_t n = 0;
149 struct drm_gem_object *gem_obj = &sunxi_gem_obj->base;
150
151 n += sprintf(buf + n, "gem_obj: size:%u name:%d handle count:%u\n",
152 (unsigned int)gem_obj->size, gem_obj->name,
153 gem_obj->handle_count);
154 n += sprintf(buf + n, "phy_addr:0x%lx virt_addr:0x%lx sgt:%p\n",
155 (unsigned long)sunxi_gem_obj->dma_addr + offset,
156 (unsigned long)sunxi_gem_obj->vaddr + offset,
157 sunxi_gem_obj->sgt);
158 return n;
159 }
160
sunxi_drm_gem_get_phyaddr_ioctl(struct drm_device * dev,void * data,struct drm_file * file)161 int sunxi_drm_gem_get_phyaddr_ioctl(struct drm_device *dev,
162 void *data, struct drm_file *file)
163 {
164 struct sunxi_drm_phyaddr *arg = data;
165 struct drm_gem_object *gem_obj = NULL;
166 struct sg_table *sgt = NULL;
167 struct page *page = NULL;
168 int ret;
169 uint32_t handle = 0;
170 ret = drm_gem_prime_fd_to_handle(dev, file, arg->fd, &handle);
171 if (ret) {
172 DRM_ERROR("fd %d to handle failed", arg->fd);
173 return -1;
174 }
175 gem_obj = drm_gem_object_lookup(file, handle);
176 if (gem_obj == NULL) {
177 DRM_ERROR("gem object not finde fd %d, handle 0x%x", arg->fd, handle);
178 return -1;
179 }
180 sgt = sunxi_drm_gem_prime_get_sg_table(gem_obj);
181 if (sgt == NULL) {
182 DRM_ERROR("gem prime get sg_table failed");
183 drm_gem_object_put(gem_obj);
184 return -1;
185 }
186 page = sg_page(sgt->sgl);
187 arg->phyaddr = PFN_PHYS(page_to_pfn(page));
188 drm_gem_object_put(gem_obj);
189 return 0;
190 }
191
/*
 * sunxi_drm_gem_destroy - final teardown of a sunxi GEM object
 * @sunxi_gem_obj: object to destroy (all handles/references already gone)
 *
 * Frees the backing store (or detaches from the exporting dma-buf for
 * imported objects), releases the base GEM object and frees the wrapper.
 */
void sunxi_drm_gem_destroy(struct sunxi_drm_gem_object *sunxi_gem_obj)
{
	struct drm_gem_object *obj = &sunxi_gem_obj->base;

	DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);

	/*
	 * do not release memory region from exporter.
	 *
	 * the region will be released by exporter
	 * once dmabuf's refcount becomes 0.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, sunxi_gem_obj->sgt);
	else
		sunxi_drm_free_buf(sunxi_gem_obj);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(sunxi_gem_obj);
}
214
215 /**
216 * sunxi_drm_gem_obj_init - Create a sunxi GEM object
217 * WITHOUT allocating memory
218 * @dev: DRM device
219 * @size: size of the object to allocate
220 *
221 * This function creates and initializes a sunxi GEM object of the given size,
222 * but doesn't allocate any memory to back the object.
223 *
224 * Returns:
225 * A struct sunxi_drm_gem_object * on success or an ERR_PTR()-encoded negative
226 * error code on failure.
227 */
228 static struct sunxi_drm_gem_object *
sunxi_drm_gem_obj_init(struct drm_device * dev,size_t size)229 sunxi_drm_gem_obj_init(struct drm_device *dev, size_t size)
230 {
231 struct sunxi_drm_gem_object *sunxi_gem_obj;
232 struct drm_gem_object *gem_obj;
233 int ret;
234
235 sunxi_gem_obj = kzalloc(sizeof(*sunxi_gem_obj), GFP_KERNEL);
236 if (!sunxi_gem_obj)
237 return ERR_PTR(-ENOMEM);
238
239 sunxi_gem_obj->size = size;
240 gem_obj = &sunxi_gem_obj->base;
241
242 ret = drm_gem_object_init(dev, gem_obj, size);
243 if (ret) {
244 DRM_ERROR("failed to initialize gem object\n");
245 goto error;
246 }
247 ret = drm_gem_create_mmap_offset(gem_obj);
248 if (ret) {
249 drm_gem_object_release(gem_obj);
250 goto error;
251 }
252
253 return sunxi_gem_obj;
254
255 error:
256 kfree(sunxi_gem_obj);
257 return ERR_PTR(ret);
258 }
259
260 /**
261 * sunxi_drm_gem_create - allocate an object with the given size
262 * @drm: DRM device
263 * @size: size of the object to allocate
264 *
265 * This function creates a SUNXI GEM object and allocates a contiguous chunk of
266 * memory as backing store. The backing memory has the writecombine attribute
267 * set.
268 *
269 * Returns:
270 * A struct sunxi_drm_gem_object * on success or an ERR_PTR()-encoded negative
271 * error code on failure.
272 */
273 struct sunxi_drm_gem_object *
sunxi_drm_gem_create(struct drm_device * dev,size_t flags,size_t size)274 sunxi_drm_gem_create(struct drm_device *dev,
275 size_t flags,
276 size_t size)
277 {
278 struct sunxi_drm_gem_object *sunxi_gem_obj;
279 int ret;
280
281 if (flags & ~(SUNXI_BO_MASK)) {
282 DRM_ERROR("invalid GEM buffer flags: %lu\n", flags);
283 return ERR_PTR(-EINVAL);
284 }
285
286 if (!size) {
287 DRM_ERROR("invalid GEM buffer size: %lu\n", size);
288 return ERR_PTR(-EINVAL);
289 }
290
291 size = round_up(size, PAGE_SIZE);
292
293 sunxi_gem_obj = sunxi_drm_gem_obj_init(dev, size);
294 if (IS_ERR(sunxi_gem_obj))
295 return sunxi_gem_obj;
296
297 if (!is_drm_iommu_supported(dev) && (flags & SUNXI_BO_NONCONTIG)) {
298 /*
299 * when no IOMMU is available, all allocated buffers are
300 * contiguous anyway, so drop SUNXI_BO_NONCONTIG flag
301 */
302 flags &= ~SUNXI_BO_NONCONTIG;
303 DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
304 }
305
306 /* set memory type and cache attribute from user side. */
307 sunxi_gem_obj->flags = flags;
308
309 ret = sunxi_drm_alloc_buf(sunxi_gem_obj);
310 if (ret < 0) {
311 drm_gem_object_release(&sunxi_gem_obj->base);
312 kfree(sunxi_gem_obj);
313 return ERR_PTR(ret);
314 }
315
316 return sunxi_gem_obj;
317 }
318
/*
 * Register @obj in @file_priv's handle table and hand ownership of the
 * creation reference over to that handle.
 */
static int sunxi_drm_gem_handle_create(struct drm_gem_object *obj,
				       struct drm_file *file_priv,
				       unsigned int *handle)
{
	int err;

	/* The returned id is what userspace will use to name the object. */
	err = drm_gem_handle_create(file_priv, obj, handle);
	if (err)
		return err;

	/* The handle now holds the reference taken at allocation time. */
	drm_gem_object_put(obj);

	return 0;
}
338
sunxi_drm_gem_create_ioctl(struct drm_device * dev,void * data,struct drm_file * file_priv)339 int sunxi_drm_gem_create_ioctl(struct drm_device *dev, void *data,
340 struct drm_file *file_priv)
341 {
342 struct drm_sunxi_gem_create *args = data;
343 struct sunxi_drm_gem_object *sunxi_gem_obj;
344 int ret;
345
346 sunxi_gem_obj = sunxi_drm_gem_create(dev, args->flags, args->size);
347 if (IS_ERR(sunxi_gem_obj))
348 return PTR_ERR(sunxi_gem_obj);
349
350 ret = sunxi_drm_gem_handle_create(&sunxi_gem_obj->base, file_priv,
351 &args->handle);
352 if (ret) {
353 sunxi_drm_gem_destroy(sunxi_gem_obj);
354 return ret;
355 }
356
357 return 0;
358 }
359
360 /**
361 * sunxi_drm_gem_free_object - free resources associated with a SUNXI GEM object
362 * @gem_obj: GEM object to free
363 *
364 * This function frees the backing memory of the SUNXI GEM object, cleans up the
365 * GEM object state and frees the memory used to store the object itself.
366 */
void sunxi_drm_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct sunxi_drm_gem_object *sunxi_obj = to_sunxi_drm_gem_obj(gem_obj);

	sunxi_drm_gem_destroy(sunxi_obj);
}
371
372 /**
373 * sunxi_drm_gem_dumb_create - create a dumb buffer object
374 * @file_priv: DRM file-private structure to create the dumb buffer for
375 * @drm: DRM device
376 * @args: IOCTL data
377 *
378 * This function computes the pitch of the dumb buffer and rounds it up to an
379 * integer number of bytes per pixel. Drivers for hardware that doesn't have
380 * any additional restrictions on the pitch can directly use this function as
381 * their ->dumb_create() callback.
382 *
383 * NOTE!!!!!!:
384 * DRM_IOCTL_MODE_CREATE_DUMB--->(dev->driver->)dumb_create()
385 * --->sunxi_drm_gem_dumb_create()
386 *
387 * For hardware with additional restrictions, drivers can adjust the fields
388 * set up by userspace and pass the IOCTL data along to the
389 * sunxi_drm_gem_dumb_create_internal() function.
390 *
391 * Returns:
392 * 0 on success or a negative error code on failure.
393 */
sunxi_drm_gem_dumb_create(struct drm_file * file_priv,struct drm_device * dev,struct drm_mode_create_dumb * args)394 int sunxi_drm_gem_dumb_create(struct drm_file *file_priv,
395 struct drm_device *dev,
396 struct drm_mode_create_dumb *args)
397 {
398 struct sunxi_drm_gem_object *sunxi_gem_obj;
399 unsigned int flags;
400 int ret;
401
402 args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
403 args->size = args->pitch * args->height;
404
405 if (is_drm_iommu_supported(dev))
406 flags = SUNXI_BO_NONCONTIG | SUNXI_BO_WC;
407 else
408 flags = SUNXI_BO_CONTIG | SUNXI_BO_WC;
409
410 sunxi_gem_obj = sunxi_drm_gem_create(dev, flags, args->size);
411 if (IS_ERR(sunxi_gem_obj))
412 return PTR_ERR(sunxi_gem_obj);
413
414 ret = sunxi_drm_gem_handle_create(&sunxi_gem_obj->base, file_priv,
415 &args->handle);
416 if (ret) {
417 sunxi_drm_gem_destroy(sunxi_gem_obj);
418 return ret;
419 }
420
421 return 0;
422 }
423
424 /**
425 * sunxi_drm_gem_dumb_map_offset - return the fake mmap offset for a SUNXI GEM
426 * object
427 * @file_priv: DRM file-private structure containing the GEM object
428 * @drm: DRM device
429 * @handle: GEM object handle
430 * @offset: return location for the fake mmap offset
431 *
432 * This function look up an object by its handle and returns the fake mmap
433 * offset associated with it. Drivers using the SUNXI function should set this
434 * as their DRM driver's ->dumb_map_offset() callback.
435 *
436 * NOTE!!!!!!:
437 * DRM_IOCTL_MODE_MAP_DUMB--->(dev->driver->)dumb_map_offset
438 * --->sunxi_drm_gem_dumb_map_offset()
439 *
440 * Returns:
441 * 0 on success or a negative error code on failure.
442 */
int sunxi_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				  struct drm_device *drm, u32 handle,
				  u64 *offset)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		dev_err(drm->dev, "failed to lookup GEM object\n");
		return -EINVAL;
	}

	/* The fake offset was reserved in sunxi_drm_gem_obj_init(). */
	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	drm_gem_object_put(obj);

	return 0;
}
461
/*
 * Plain GEM refcounting for VMAs.  No fault handler is installed because
 * sunxi_drm_gem_mmap_buffer() maps the whole buffer at mmap time.
 */
const struct vm_operations_struct sunxi_drm_gem_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
466
/*
 * sunxi_drm_gem_mmap_buffer - map the whole DMA buffer into @vma
 * @sunxi_gem_obj: object providing vaddr/dma_addr/size/attrs
 * @vma: userspace VMA to populate
 *
 * Delegates to dma_mmap_attrs() so the mapping honours the attributes the
 * buffer was allocated with.  Returns 0 or a negative error code.
 */
static int sunxi_drm_gem_mmap_buffer(struct sunxi_drm_gem_object *sunxi_gem_obj,
				     struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = sunxi_gem_obj->base.dev;
	unsigned long vm_size;
	int ret;

	/*
	 * drm_gem_mmap() sets VM_PFNMAP and a pgoff based on the fake mmap
	 * offset; dma_mmap_attrs() expects neither, so undo both here.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;
	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > sunxi_gem_obj->size)
		return -EINVAL;

	ret = dma_mmap_attrs(drm_dev->dev, vma, sunxi_gem_obj->vaddr,
			     sunxi_gem_obj->dma_addr, sunxi_gem_obj->size,
			     sunxi_gem_obj->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}
492
sunxi_drm_gem_mmap_obj(struct drm_gem_object * obj,struct vm_area_struct * vma)493 static int sunxi_drm_gem_mmap_obj(struct drm_gem_object *obj,
494 struct vm_area_struct *vma)
495 {
496 int ret;
497 struct sunxi_drm_gem_object *sunxi_gem_obj = to_sunxi_drm_gem_obj(obj);
498
499 /* non-cachable as default. */
500 if (sunxi_gem_obj->flags & SUNXI_BO_CACHABLE)
501 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
502 else if (sunxi_gem_obj->flags & SUNXI_BO_WC)
503 vma->vm_page_prot =
504 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
505 else
506 vma->vm_page_prot =
507 pgprot_noncached(vm_get_page_prot(vma->vm_flags));
508
509 ret = sunxi_drm_gem_mmap_buffer(sunxi_gem_obj, vma);
510 if (ret)
511 goto err_close_vm;
512
513 return ret;
514
515 err_close_vm:
516 drm_gem_vm_close(vma);
517
518 return ret;
519 }
520
521 /**
522 * sunxi_drm_gem_mmap - memory-map a SUNXI GEM object, For fops->mmap()
523 * @filp: file object
524 * @vma: VMA for the area to be mapped
525 *
526 * This function implements an augmented version of the GEM DRM file mmap
527 * operation for SUNXI objects: In addition to the usual GEM VMA setup it
 * immediately faults in the entire object instead of using on-demand
529 * faulting.
530 *
531 * NOTE!!!!!!:
532 * (user-space)mmap--->sunxi_drm_gem_mmap()
533 *
534 * Returns:
535 * 0 on success or a negative error code on failure.
536 */
sunxi_drm_gem_mmap(struct file * filp,struct vm_area_struct * vma)537 int sunxi_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
538 {
539 struct drm_gem_object *gem_obj;
540 int ret;
541
542 ret = drm_gem_mmap(filp, vma);
543 if (ret < 0) {
544 DRM_ERROR("failed to mmap.\n");
545 return ret;
546 }
547
548 gem_obj = vma->vm_private_data;
549
550 if (gem_obj->import_attach)
551 return dma_buf_mmap(gem_obj->dma_buf, vma, 0);
552
553 return sunxi_drm_gem_mmap_obj(gem_obj, vma);
554 }
555
556 /**
557 * sunxi_drm_gem_prime_get_sg_table - provide a scatter/gather table of pinned
558 * pages for a SUNXI GEM object
559 * @obj: GEM object
560 *
561 * This function exports a scatter/gather table suitable for PRIME usage by
562 * calling the standard DMA mapping API. Drivers should set this as their DRM
563 * driver's ->gem_prime_get_sg_table() callback.
564 *
565 * Returns:
566 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
567 */
sunxi_drm_gem_prime_get_sg_table(struct drm_gem_object * obj)568 struct sg_table *sunxi_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
569 {
570 struct sunxi_drm_gem_object *sunxi_gem_obj = to_sunxi_drm_gem_obj(obj);
571 int npages;
572 struct drm_device *drm_dev = obj->dev;
573
574 npages = sunxi_gem_obj->size >> PAGE_SHIFT;
575
576 return drm_prime_pages_to_sg(drm_dev, sunxi_gem_obj->pages, npages);
577 }
578
579 /**
580 * sunxi_drm_gem_prime_import_sg_table - produce a SUNXI GEM object from another
581 * driver's scatter/gather table of pinned pages
582 * @dev: device to import into
583 * @attach: DMA-BUF attachment
584 * @sgt: scatter/gather table of pinned pages
585 *
586 * This function imports a scatter/gather table exported via DMA-BUF by
587 * another driver. Imported buffers must be physically contiguous in memory
588 * (i.e. the scatter/gather table must contain a single entry). Drivers should
589 * set this as their DRM driver's ->gem_prime_import_sg_table() callback.
590 *
591 * Returns:
592 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
593 * error code on failure.
594 */
struct drm_gem_object *
sunxi_drm_gem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	struct sunxi_drm_gem_object *sunxi_gem_obj;
	int npages;
	int ret;

	/* Wrap the imported buffer in a GEM object of the dma-buf's size. */
	sunxi_gem_obj = sunxi_drm_gem_obj_init(dev, attach->dmabuf->size);
	if (IS_ERR(sunxi_gem_obj)) {
		ret = PTR_ERR(sunxi_gem_obj);
		return ERR_PTR(ret);
	}

	/*
	 * NOTE(review): only the first segment's bus address is recorded;
	 * for multi-segment tables ->dma_addr does not cover the whole
	 * buffer — confirm callers only use it for contiguous imports.
	 */
	sunxi_gem_obj->dma_addr = sg_dma_address(sgt->sgl);

	npages = sunxi_gem_obj->size >> PAGE_SHIFT;
	sunxi_gem_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!sunxi_gem_obj->pages) {
		ret = -ENOMEM;
		goto err;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, sunxi_gem_obj->pages, NULL,
					       npages);
	if (ret < 0)
		goto err_free_large;

	/* Keep the table; drm_prime_gem_destroy() releases it at teardown. */
	sunxi_gem_obj->sgt = sgt;

	if (sgt->nents == 1) {
		/* always physically continuous memory if sgt->nents is 1. */
		sunxi_gem_obj->flags |= SUNXI_BO_CONTIG;
	} else {
		/*
		 * this case could be CONTIG or NONCONTIG type but for now
		 * sets NONCONTIG.
		 */
		sunxi_gem_obj->flags |= SUNXI_BO_NONCONTIG;
	}

	return &sunxi_gem_obj->base;

err_free_large:
	kvfree(sunxi_gem_obj->pages);
err:
	drm_gem_object_release(&sunxi_gem_obj->base);
	kfree(sunxi_gem_obj);
	return ERR_PTR(ret);
}
646
647 /**
648 * sunxi_drm_gem_prime_mmap - memory-map an exported SUNXI GEM object
649 * @obj: GEM object
650 * @vma: VMA for the area to be mapped
651 *
652 * This function maps a buffer imported via DRM PRIME into a userspace
653 * process's address space. Drivers should set this as their DRM
654 * driver's ->gem_prime_mmap() callback.
655 *
656 * Returns:
657 * 0 on success or a negative error code on failure.
658 */
sunxi_drm_gem_prime_mmap(struct drm_gem_object * obj,struct vm_area_struct * vma)659 int sunxi_drm_gem_prime_mmap(struct drm_gem_object *obj,
660 struct vm_area_struct *vma)
661 {
662 int ret;
663
664 ret = drm_gem_mmap_obj(obj, obj->size, vma);
665 if (ret < 0)
666 return ret;
667
668 return sunxi_drm_gem_mmap_obj(obj, vma);
669 }
670
671 /**
672 * sunxi_drm_gem_prime_vmap - map a SUNXI GEM object into the kernel's virtual
673 * address space
674 * @obj: GEM object
675 *
676 * This function maps a buffer exported via DRM PRIME into the kernel's
677 * virtual address space. Since the SUNXI buffers are already mapped into the
678 * kernel virtual address space this simply returns the cached virtual
679 * address. Drivers should set this as their
680 * DRM driver's ->gem_prime_vmap() callback.
681 *
682 * Returns:
683 * The kernel virtual address of the SUNXI GEM object's backing store.
684 */
void *sunxi_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct sunxi_drm_gem_object *sunxi_obj = to_sunxi_drm_gem_obj(obj);

	/*
	 * NOTE(review): buffers allocated in sunxi_drm_alloc_buf() use
	 * DMA_ATTR_NO_KERNEL_MAPPING, so ->vaddr may be an opaque cookie
	 * rather than a dereferenceable kernel address — confirm importers
	 * do not read through this pointer.
	 */
	return sunxi_obj->vaddr;
}
691
692 /**
693 * sunxi_drm_gem_prime_vunmap - unmap a SUNXI GEM object from the kernel's
694 * virtual address space
695 * @obj: GEM object
696 * @vaddr: kernel virtual address where the SUNXI GEM object was mapped
697 *
698 * This function removes a buffer exported via DRM PRIME from the kernel's
699 * virtual address space. This is a no-op because SUNXI buffers cannot be
700 * unmapped from kernel space. Drivers should set this as their
701 * DRM driver's ->gem_prime_vunmap() callback.
702 */
void sunxi_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do: vmap above returns the cached address, so there is
	 * no per-call mapping to tear down. */
}
707
708