/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

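/* physical address of a page in the VRAM carveout (no-IOMMU case):
 * the drm_mm_node tracks page offsets within the carveout, so scale
 * by PAGE_SIZE and add the carveout base address:
 */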
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
		int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = drm_malloc_ab(npages, sizeof(struct page *));
	if (!p)
		return ERR_PTR(-ENOMEM);

	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
			npages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		drm_free_large(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (iommu_present(&platform_bus_type))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

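/* release the backing pages and sg table set up by get_pages() */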
static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, undo the DMA mapping done in
		 * get_pages() before releasing the pages:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (iommu_present(&platform_bus_type))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else {
			drm_mm_remove_node(msm_obj->vram_node);
			drm_free_large(msm_obj->pages);
		}

		msm_obj->pages = NULL;
	}
}

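/* convenience wrapper that takes struct_mutex around get_pages() */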
struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, do something here */
}

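/* set up vma flags and page protection according to the object's
 * caching mode (write-combined, uncached, or cached):
 */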
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

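/* drm_gem_mmap() does the fake-offset lookup and basic vma setup; then
 * apply the object-specific mapping attributes:
 */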
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

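/* page fault handler: get the backing pages (allocated on first fault)
 * and insert the pfn for the faulting address into the vma:
 */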
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* Make sure we don't race with another fault updating the table,
	 * and that nothing is moved or removed from beneath our feet:
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}


/* get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/* Should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means that when support for unpinning is eventually added,
 * the refcnt counter will need to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		if (iommu_present(&platform_bus_type)) {
			struct msm_mmu *mmu = priv->mmus[id];
			uint32_t offset;

			if (WARN_ON(!mmu))
				return -EINVAL;

			offset = (uint32_t)mmap_offset(obj);
			ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
					obj->size, IOMMU_READ | IOMMU_WRITE);
			msm_obj->domain[id].iova = offset;
		} else {
			msm_obj->domain[id].iova = physaddr(obj);
		}
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}

int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	/* this is safe right now because we don't unmap until the
	 * bo is deleted:
	 */
	if (msm_obj->domain[id].iova) {
		*iova = msm_obj->domain[id].iova;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, id, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

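/* dumb-buffer support: allocate a linear, write-combined scanout bo
 * sized from the requested width/height/bpp:
 */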
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

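/* get a kernel virtual mapping of the bo, creating it on first use;
 * caller must hold struct_mutex:
 */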
void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return msm_obj->vaddr;
}

void *msm_gem_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* set up a callback for when the bo is no longer busy..
 * TODO probably want to differentiate read vs write..
 */
int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
		struct msm_fence_cb *cb)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	if (!list_empty(&cb->work.entry)) {
		ret = -EINVAL;
	} else if (is_active(msm_obj)) {
		cb->fence = max(msm_obj->read_fence, msm_obj->write_fence);
		list_add_tail(&cb->work.entry, &priv->fence_cbs);
	} else {
		queue_work(priv->wq, &cb->work);
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

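/* move the bo to the gpu's active list, recording the fence that will
 * signal when the gpu is done reading (or writing) it:
 */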
void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool write, uint32_t fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	msm_obj->gpu = gpu;
	if (write)
		msm_obj->write_fence = fence;
	else
		msm_obj->read_fence = fence;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	msm_obj->read_fence = 0;
	msm_obj->write_fence = 0;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

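/* prepare the bo for cpu access: wait for any gpu access that would
 * conflict with the requested op (reads wait on the write fence, writes
 * on both fences); MSM_PREP_NOSYNC drops the timeout so the fence is
 * just checked rather than waited on:
 */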
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
		struct timespec *timeout)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (is_active(msm_obj)) {
		uint32_t fence = 0;

		if (op & MSM_PREP_READ)
			fence = msm_obj->write_fence;
		if (op & MSM_PREP_WRITE)
			fence = max(fence, msm_obj->read_fence);
		if (op & MSM_PREP_NOSYNC)
			timeout = NULL;

		ret = msm_wait_fence_interruptable(dev, fence, timeout);
	}

	/* TODO cache maintenance */

	return ret;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	uint64_t off = drm_vma_node_start(&obj->vma_node);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %d\n",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			msm_obj->read_fence, msm_obj->write_fence,
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr, obj->size);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, " ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

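/* tear down the bo: unmap it from all mmu domains, release the backing
 * pages (or, for imports, just the page array we allocated), and free
 * the object; called with struct_mutex held:
 */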
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		struct msm_mmu *mmu = priv->mmus[id];
		if (mmu && msm_obj->domain[id].iova) {
			uint32_t offset = (uint32_t)mmap_offset(obj);
			mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
		}
	}

	drm_gem_free_mmap_offset(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			drm_free_large(msm_obj->pages);

	} else {
		if (msm_obj->vaddr)
			vunmap(msm_obj->vaddr);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

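/* common initialization shared by msm_gem_new() and msm_gem_import();
 * in the no-IOMMU case, space for the VRAM drm_mm_node is tacked onto
 * the end of the msm_obj allocation:
 */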
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	unsigned sz;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	sz = sizeof(*msm_obj);
	if (!iommu_present(&platform_bus_type))
		sz += sizeof(struct drm_mm_node);

	msm_obj = kzalloc(sz, GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	if (!iommu_present(&platform_bus_type))
		msm_obj->vram_node = (void *)&msm_obj[1];

	msm_obj->flags = flags;

	msm_obj->resv = &msm_obj->_resv;
	reservation_object_init(msm_obj->resv);

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}

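/* allocate a new GEM bo, shmem-backed when an IOMMU is present;
 * otherwise backing comes from the VRAM carveout, so a private
 * (non-shmem) object is used; called with struct_mutex held:
 */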
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		goto fail;

	if (iommu_present(&platform_bus_type)) {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	} else {
		drm_gem_private_object_init(dev, obj, size);
	}

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference(obj);

	return ERR_PTR(ret);
}

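/* construct a GEM bo backed by an imported dma-buf's sg table; the
 * pages belong to the exporter, we just build a page array from the
 * sgt for our own bookkeeping:
 */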
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		uint32_t size, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference_unlocked(obj);

	return ERR_PTR(ret);
}