/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

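/*
 * Physical address of a page in the VRAM carveout. When no IOMMU is
 * present, buffers are sub-allocated from a contiguous "vram" region
 * (priv->vram) via a drm_mm node, and devices address them by physical
 * address instead of a per-domain iova (see get_pages_vram() and the
 * no-IOMMU branch of msm_gem_get_iova_locked() below).
 */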
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
		int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = drm_malloc_ab(npages, sizeof(struct page *));
	if (!p)
		return ERR_PTR(-ENOMEM);

	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
			npages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		drm_free_large(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			dev_err(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, the pages were mapped for DMA when
		 * allocated (see get_pages()); unmap them before they are
		 * released:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);

		if (msm_obj->sgt)
			sg_free_table(msm_obj->sgt);

		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else {
			drm_mm_remove_node(msm_obj->vram_node);
			drm_free_large(msm_obj->pages);
		}

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

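/*
 * To summarize the mapping modes handled above (illustrative only):
 *
 *	MSM_BO_WC       - pages mapped write-combined in userspace
 *	MSM_BO_UNCACHED - pages mapped uncached in userspace
 *	MSM_BO_CACHED   - normal cached mapping, redirected to the shmem
 *	                  file backing the object so unmap_mapping_range()
 *	                  on that address_space behaves as expected
 */
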
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* This should only happen if userspace tries to pass a mmap'd
	 * but unfaulted gem bo vaddr into submit ioctl, triggering
	 * a page fault while struct_mutex is already held. This is
	 * not a valid use-case so just bail.
	 */
	if (priv->struct_mutex_task == current)
		return VM_FAULT_SIGBUS;

	/* Make sure we don't race a parallel update on a fault, and that
	 * nothing is moved or removed from beneath our feet:
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

static void
put_iova(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		struct msm_mmu *mmu = priv->mmus[id];
		if (mmu && msm_obj->domain[id].iova) {
			uint32_t offset = msm_obj->domain[id].iova;
			mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
			msm_obj->domain[id].iova = 0;
		}
	}
}

/* Should be called under struct_mutex. It can, however, be called from
 * atomic context without struct_mutex to acquire an extra iova ref, if
 * you know one is already held.
 *
 * That means that when support for unpinning is eventually added, the
 * refcnt counter will need to become an atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		if (iommu_present(&platform_bus_type)) {
			struct msm_mmu *mmu = priv->mmus[id];
			uint32_t offset;

			if (WARN_ON(!mmu))
				return -EINVAL;

			offset = (uint32_t)mmap_offset(obj);
			ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
					obj->size, IOMMU_READ | IOMMU_WRITE);
			msm_obj->domain[id].iova = offset;
		} else {
			msm_obj->domain[id].iova = physaddr(obj);
		}
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}

/* get iova, taking a reference. Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	/* this is safe right now because we don't unmap until the
	 * bo is deleted:
	 */
	if (msm_obj->domain[id].iova) {
		*iova = msm_obj->domain[id].iova;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, id, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!msm_obj->domain[id].iova);
	return msm_obj->domain[id].iova;
}

void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}
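
/*
 * Illustrative use of the iova helpers above (a sketch, not lifted from
 * an actual caller; real users live in the submit and scanout paths):
 *
 *	uint32_t iova;
 *	int ret = msm_gem_get_iova(obj, id, &iova);  // pins pages, maps into MMU 'id'
 *	if (ret)
 *		return ret;
 *	... hand 'iova' to the GPU or display controller ...
 *	msm_gem_put_iova(obj, id);                   // currently a no-op, see above
 */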

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}
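
/*
 * For example (assuming a 1920x1080 dumb buffer at 32 bpp): align_pitch()
 * rounds the 1920 * 4 = 7680 byte row up to the hardware's pitch alignment
 * (the exact rule lives in align_pitch() in msm_drv.h), and the allocation
 * size is PAGE_ALIGN(pitch * 1080), i.e. at least 7680 * 1080 bytes rounded
 * up to a whole number of pages.
 */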

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL)
			return ERR_PTR(-ENOMEM);
	}
	msm_obj->vmap_count++;
	return msm_obj->vaddr;
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	mutex_lock(&obj->dev->struct_mutex);
	msm_gem_put_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
}
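
/*
 * Illustrative kernel-CPU access pattern using the vmap helpers above
 * (a sketch; 'data' and 'len' are placeholders):
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, data, len);      // len must not exceed obj->size
 *	msm_gem_put_vaddr(obj);
 */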

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	return (msm_obj->madv != __MSM_MADV_PURGED);
}

void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	put_iova(obj);

	msm_gem_vunmap(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i, ret;

	if (!exclusive) {
		/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
		 * which makes this a slightly strange place to call it. OTOH this
		 * is a convenient can-fail point to hook it in. (And similar to
		 * how etnaviv and nouveau handle this.)
		 */
		ret = reservation_object_reserve_shared(msm_obj->resv);
		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
				reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}
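
/*
 * In other words (summarizing the rules implemented above): a shared
 * (read) submission only waits on another context's exclusive fence,
 * while an exclusive (write) submission waits on all shared fences from
 * other contexts, or on the exclusive fence when no shared fences are
 * attached. Fences from our own fence context are never waited on,
 * since the ring is FIFO.
 */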

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
			true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}
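
/*
 * Note on the return convention above: with MSM_PREP_NOSYNC the wait
 * timeout is zero, so a still-busy object yields -EBUSY immediately;
 * otherwise an expired timeout yields -ETIMEDOUT.
 */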

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct fence *fence, const char *type,
		struct seq_file *m)
{
	if (!fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr, obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, " ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			drm_free_large(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	unsigned sz;
	bool use_vram = false;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return -EINVAL;

	sz = sizeof(*msm_obj);
	if (use_vram)
		sz += sizeof(struct drm_mm_node);

	msm_obj = kzalloc(sz, GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	if (use_vram)
		msm_obj->vram_node = (void *)&msm_obj[1];

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
	if (ret)
		goto fail;

	if (use_pages(obj)) {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	} else {
		drm_gem_private_object_init(dev, obj, size);
	}

	return obj;

fail:
	drm_gem_object_unreference(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	/* Take mutex so we can modify the inactive list in msm_gem_new_impl */
	mutex_lock(&dev->struct_mutex);
	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}