1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013 Red Hat
4  * Author: Rob Clark <robdclark@gmail.com>
5  */
6 
7 #include <linux/dma-map-ops.h>
8 #include <linux/vmalloc.h>
9 #include <linux/spinlock.h>
10 #include <linux/shmem_fs.h>
11 #include <linux/dma-buf.h>
12 #include <linux/pfn_t.h>
13 
14 #include <drm/drm_prime.h>
15 #include <drm/drm_file.h>
16 
17 #include <trace/events/gpu_mem.h>
18 
19 #include "msm_drv.h"
20 #include "msm_fence.h"
21 #include "msm_gem.h"
22 #include "msm_gpu.h"
23 #include "msm_mmu.h"
24 
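/* Physical address of a buffer allocated from the VRAM carveout: */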
25 static dma_addr_t physaddr(struct drm_gem_object *obj)
26 {
27 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
28 	struct msm_drm_private *priv = obj->dev->dev_private;
29 	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
30 			priv->vram.paddr;
31 }
32 
33 static bool use_pages(struct drm_gem_object *obj)
34 {
35 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
36 	return !msm_obj->vram_node;
37 }
38 
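/*
 * Accounting helpers: adjust the driver-wide (total_mem) and per-file
 * (ctx_mem) byte counters by +/- size and emit the gpu_mem_total
 * tracepoint so userspace can track GPU memory usage:
 */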
39 static void update_device_mem(struct msm_drm_private *priv, ssize_t size)
40 {
41 	uint64_t total_mem = atomic64_add_return(size, &priv->total_mem);
42 	trace_gpu_mem_total(0, 0, total_mem);
43 }
44 
45 static void update_ctx_mem(struct drm_file *file, ssize_t size)
46 {
47 	struct msm_file_private *ctx = file->driver_priv;
48 	uint64_t ctx_mem = atomic64_add_return(size, &ctx->ctx_mem);
49 
50 	rcu_read_lock(); /* Locks file->pid! */
51 	trace_gpu_mem_total(0, pid_nr(rcu_dereference(file->pid)), ctx_mem);
52 	rcu_read_unlock();
53 
54 }
55 
56 static int msm_gem_open(struct drm_gem_object *obj, struct drm_file *file)
57 {
58 	update_ctx_mem(file, obj->size);
59 	return 0;
60 }
61 
62 static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file)
63 {
64 	update_ctx_mem(file, -obj->size);
65 }
66 
67 /*
68  * Cache sync.. this is a bit over-complicated, to fit dma-mapping
69  * API.  Really GPU cache is out of scope here (handled on cmdstream)
70  * and all we need to do is invalidate newly allocated pages before
71  * mapping to CPU as uncached/writecombine.
72  *
73  * On top of this, we have the added headache, that depending on
74  * display generation, the display's iommu may be wired up to either
75  * the toplevel drm device (mdss), or to the mdp sub-node, meaning
76  * that here we either have dma-direct or iommu ops.
77  *
78  * Let this be a cautionary tale of abstraction gone wrong.
79  */
80 
81 static void sync_for_device(struct msm_gem_object *msm_obj)
82 {
83 	struct device *dev = msm_obj->base.dev->dev;
84 
85 	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
86 }
87 
88 static void sync_for_cpu(struct msm_gem_object *msm_obj)
89 {
90 	struct device *dev = msm_obj->base.dev->dev;
91 
92 	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
93 }
94 
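/* Move the object to the LRU list matching its pin count and madvise state: */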
95 static void update_lru_active(struct drm_gem_object *obj)
96 {
97 	struct msm_drm_private *priv = obj->dev->dev_private;
98 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
99 
100 	GEM_WARN_ON(!msm_obj->pages);
101 
102 	if (msm_obj->pin_count) {
103 		drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
104 	} else if (msm_obj->madv == MSM_MADV_WILLNEED) {
105 		drm_gem_lru_move_tail_locked(&priv->lru.willneed, obj);
106 	} else {
107 		GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);
108 
109 		drm_gem_lru_move_tail_locked(&priv->lru.dontneed, obj);
110 	}
111 }
112 
113 static void update_lru_locked(struct drm_gem_object *obj)
114 {
115 	struct msm_drm_private *priv = obj->dev->dev_private;
116 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
117 
118 	msm_gem_assert_locked(&msm_obj->base);
119 
120 	if (!msm_obj->pages) {
121 		GEM_WARN_ON(msm_obj->pin_count);
122 
123 		drm_gem_lru_move_tail_locked(&priv->lru.unbacked, obj);
124 	} else {
125 		update_lru_active(obj);
126 	}
127 }
128 
129 static void update_lru(struct drm_gem_object *obj)
130 {
131 	struct msm_drm_private *priv = obj->dev->dev_private;
132 
133 	mutex_lock(&priv->lru.lock);
134 	update_lru_locked(obj);
135 	mutex_unlock(&priv->lru.lock);
136 }
137 
138 /* allocate pages from VRAM carveout, used when no IOMMU: */
139 static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
140 {
141 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
142 	struct msm_drm_private *priv = obj->dev->dev_private;
143 	dma_addr_t paddr;
144 	struct page **p;
145 	int ret, i;
146 
147 	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
148 	if (!p)
149 		return ERR_PTR(-ENOMEM);
150 
151 	spin_lock(&priv->vram.lock);
152 	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
153 	spin_unlock(&priv->vram.lock);
154 	if (ret) {
155 		kvfree(p);
156 		return ERR_PTR(ret);
157 	}
158 
159 	paddr = physaddr(obj);
160 	for (i = 0; i < npages; i++) {
161 		p[i] = pfn_to_page(__phys_to_pfn(paddr));
162 		paddr += PAGE_SIZE;
163 	}
164 
165 	return p;
166 }
167 
168 static struct page **get_pages(struct drm_gem_object *obj)
169 {
170 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
171 
172 	msm_gem_assert_locked(obj);
173 
174 	if (!msm_obj->pages) {
175 		struct drm_device *dev = obj->dev;
176 		struct page **p;
177 		int npages = obj->size >> PAGE_SHIFT;
178 
179 		if (use_pages(obj))
180 			p = drm_gem_get_pages(obj);
181 		else
182 			p = get_pages_vram(obj, npages);
183 
184 		if (IS_ERR(p)) {
185 			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
186 					PTR_ERR(p));
187 			return p;
188 		}
189 
190 		update_device_mem(dev->dev_private, obj->size);
191 
192 		msm_obj->pages = p;
193 
194 		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
195 		if (IS_ERR(msm_obj->sgt)) {
196 			void *ptr = ERR_CAST(msm_obj->sgt);
197 
198 			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
199 			msm_obj->sgt = NULL;
200 			return ptr;
201 		}
202 
203 		/* For non-cached buffers, ensure the new pages are clean
204 		 * because display controller, GPU, etc. are not coherent:
205 		 */
206 		if (msm_obj->flags & MSM_BO_WC)
207 			sync_for_device(msm_obj);
208 
209 		update_lru(obj);
210 	}
211 
212 	return msm_obj->pages;
213 }
214 
215 static void put_pages_vram(struct drm_gem_object *obj)
216 {
217 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
218 	struct msm_drm_private *priv = obj->dev->dev_private;
219 
220 	spin_lock(&priv->vram.lock);
221 	drm_mm_remove_node(msm_obj->vram_node);
222 	spin_unlock(&priv->vram.lock);
223 
224 	kvfree(msm_obj->pages);
225 }
226 
227 static void put_pages(struct drm_gem_object *obj)
228 {
229 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
230 
231 	if (msm_obj->pages) {
232 		if (msm_obj->sgt) {
233 			/* For non-cached buffers, ensure the new
234 			 * pages are clean because display controller,
235 			 * GPU, etc. are not coherent:
236 			 */
237 			if (msm_obj->flags & MSM_BO_WC)
238 				sync_for_cpu(msm_obj);
239 
240 			sg_free_table(msm_obj->sgt);
241 			kfree(msm_obj->sgt);
242 			msm_obj->sgt = NULL;
243 		}
244 
245 		update_device_mem(obj->dev->dev_private, -obj->size);
246 
247 		if (use_pages(obj))
248 			drm_gem_put_pages(obj, msm_obj->pages, true, false);
249 		else
250 			put_pages_vram(obj);
251 
252 		msm_obj->pages = NULL;
253 		update_lru(obj);
254 	}
255 }
256 
257 static struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj,
258 					      unsigned madv)
259 {
260 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
261 
262 	msm_gem_assert_locked(obj);
263 
264 	if (msm_obj->madv > madv) {
265 		DRM_DEV_DEBUG_DRIVER(obj->dev->dev, "Invalid madv state: %u vs %u\n",
266 				     msm_obj->madv, madv);
267 		return ERR_PTR(-EBUSY);
268 	}
269 
270 	return get_pages(obj);
271 }
272 
273 /*
274  * Update the pin count of the object, call under lru.lock
275  */
276 void msm_gem_pin_obj_locked(struct drm_gem_object *obj)
277 {
278 	struct msm_drm_private *priv = obj->dev->dev_private;
279 
280 	msm_gem_assert_locked(obj);
281 
282 	to_msm_bo(obj)->pin_count++;
283 	drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
284 }
285 
286 static void pin_obj_locked(struct drm_gem_object *obj)
287 {
288 	struct msm_drm_private *priv = obj->dev->dev_private;
289 
290 	mutex_lock(&priv->lru.lock);
291 	msm_gem_pin_obj_locked(obj);
292 	mutex_unlock(&priv->lru.lock);
293 }
294 
295 struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
296 {
297 	struct page **p;
298 
299 	msm_gem_assert_locked(obj);
300 
301 	p = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
302 	if (!IS_ERR(p))
303 		pin_obj_locked(obj);
304 
305 	return p;
306 }
307 
308 void msm_gem_unpin_pages_locked(struct drm_gem_object *obj)
309 {
310 	msm_gem_assert_locked(obj);
311 
312 	msm_gem_unpin_locked(obj);
313 }
314 
315 static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
316 {
317 	if (msm_obj->flags & MSM_BO_WC)
318 		return pgprot_writecombine(prot);
319 	return prot;
320 }
321 
322 static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
323 {
324 	struct vm_area_struct *vma = vmf->vma;
325 	struct drm_gem_object *obj = vma->vm_private_data;
326 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
327 	struct page **pages;
328 	unsigned long pfn;
329 	pgoff_t pgoff;
330 	int err;
331 	vm_fault_t ret;
332 
333 	/*
334 	 * vm_ops.open/drm_gem_mmap_obj and close get and put
335 	 * a reference on obj. So, we don't need to hold one here.
336 	 */
337 	err = msm_gem_lock_interruptible(obj);
338 	if (err) {
339 		ret = VM_FAULT_NOPAGE;
340 		goto out;
341 	}
342 
343 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
344 		msm_gem_unlock(obj);
345 		return VM_FAULT_SIGBUS;
346 	}
347 
348 	/* make sure we have pages attached now */
349 	pages = get_pages(obj);
350 	if (IS_ERR(pages)) {
351 		ret = vmf_error(PTR_ERR(pages));
352 		goto out_unlock;
353 	}
354 
355 	/* We don't use vmf->pgoff since that has the fake offset: */
356 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
357 
358 	pfn = page_to_pfn(pages[pgoff]);
359 
360 	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
361 			pfn, pfn << PAGE_SHIFT);
362 
363 	ret = vmf_insert_pfn(vma, vmf->address, pfn);
364 
365 out_unlock:
366 	msm_gem_unlock(obj);
367 out:
368 	return ret;
369 }
370 
371 /** get mmap offset */
372 static uint64_t mmap_offset(struct drm_gem_object *obj)
373 {
374 	struct drm_device *dev = obj->dev;
375 	int ret;
376 
377 	msm_gem_assert_locked(obj);
378 
379 	/* Make it mmapable */
380 	ret = drm_gem_create_mmap_offset(obj);
381 
382 	if (ret) {
383 		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
384 		return 0;
385 	}
386 
387 	return drm_vma_node_offset_addr(&obj->vma_node);
388 }
389 
390 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
391 {
392 	uint64_t offset;
393 
394 	msm_gem_lock(obj);
395 	offset = mmap_offset(obj);
396 	msm_gem_unlock(obj);
397 	return offset;
398 }
399 
400 static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
401 		struct msm_gem_address_space *aspace)
402 {
403 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
404 	struct msm_gem_vma *vma;
405 
406 	msm_gem_assert_locked(obj);
407 
408 	vma = msm_gem_vma_new(aspace);
409 	if (!vma)
410 		return ERR_PTR(-ENOMEM);
411 
412 	list_add_tail(&vma->list, &msm_obj->vmas);
413 
414 	return vma;
415 }
416 
417 static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
418 		struct msm_gem_address_space *aspace)
419 {
420 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
421 	struct msm_gem_vma *vma;
422 
423 	msm_gem_assert_locked(obj);
424 
425 	list_for_each_entry(vma, &msm_obj->vmas, list) {
426 		if (vma->aspace == aspace)
427 			return vma;
428 	}
429 
430 	return NULL;
431 }
432 
433 static void del_vma(struct msm_gem_vma *vma)
434 {
435 	if (!vma)
436 		return;
437 
438 	list_del(&vma->list);
439 	kfree(vma);
440 }
441 
442 /*
443  * If close is true, this also closes the VMA (releasing the allocated
444  * iova range) in addition to removing the iommu mapping.  In the eviction
445  * case (!close), we keep the iova allocated, but only remove the iommu
446  * mapping.
447  */
448 static void
449 put_iova_spaces(struct drm_gem_object *obj, bool close)
450 {
451 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
452 	struct msm_gem_vma *vma;
453 
454 	msm_gem_assert_locked(obj);
455 
456 	list_for_each_entry(vma, &msm_obj->vmas, list) {
457 		if (vma->aspace) {
458 			msm_gem_vma_purge(vma);
459 			if (close)
460 				msm_gem_vma_close(vma);
461 		}
462 	}
463 }
464 
465 /* Called with msm_obj locked */
466 static void
467 put_iova_vmas(struct drm_gem_object *obj)
468 {
469 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
470 	struct msm_gem_vma *vma, *tmp;
471 
472 	msm_gem_assert_locked(obj);
473 
474 	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
475 		del_vma(vma);
476 	}
477 }
478 
479 static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
480 		struct msm_gem_address_space *aspace,
481 		u64 range_start, u64 range_end)
482 {
483 	struct msm_gem_vma *vma;
484 
485 	msm_gem_assert_locked(obj);
486 
487 	vma = lookup_vma(obj, aspace);
488 
489 	if (!vma) {
490 		int ret;
491 
492 		vma = add_vma(obj, aspace);
493 		if (IS_ERR(vma))
494 			return vma;
495 
496 		ret = msm_gem_vma_init(vma, obj->size,
497 			range_start, range_end);
498 		if (ret) {
499 			del_vma(vma);
500 			return ERR_PTR(ret);
501 		}
502 	} else {
503 		GEM_WARN_ON(vma->iova < range_start);
504 		GEM_WARN_ON((vma->iova + obj->size) > range_end);
505 	}
506 
507 	return vma;
508 }
509 
510 int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
511 {
512 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
513 	struct page **pages;
514 	int prot = IOMMU_READ;
515 
516 	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
517 		prot |= IOMMU_WRITE;
518 
519 	if (msm_obj->flags & MSM_BO_MAP_PRIV)
520 		prot |= IOMMU_PRIV;
521 
522 	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
523 		prot |= IOMMU_CACHE;
524 
525 	msm_gem_assert_locked(obj);
526 
527 	pages = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
528 	if (IS_ERR(pages))
529 		return PTR_ERR(pages);
530 
531 	return msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
532 }
533 
534 void msm_gem_unpin_locked(struct drm_gem_object *obj)
535 {
536 	struct msm_drm_private *priv = obj->dev->dev_private;
537 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
538 
539 	msm_gem_assert_locked(obj);
540 
541 	mutex_lock(&priv->lru.lock);
542 	msm_obj->pin_count--;
543 	GEM_WARN_ON(msm_obj->pin_count < 0);
544 	update_lru_locked(obj);
545 	mutex_unlock(&priv->lru.lock);
546 }
547 
548 /* Special unpin path for use in the fence-signaling path, avoiding the need
549  * to hold the obj lock by only depending on things that are protected by
550  * the LRU lock.  In particular we know that we already have backing
551  * and that the object's dma_resv has the fence for the current
552  * submit/job, which will prevent us racing against page eviction.
553  */
554 void msm_gem_unpin_active(struct drm_gem_object *obj)
555 {
556 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
557 
558 	msm_obj->pin_count--;
559 	GEM_WARN_ON(msm_obj->pin_count < 0);
560 	update_lru_active(obj);
561 }
562 
563 struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
564 					   struct msm_gem_address_space *aspace)
565 {
566 	return get_vma_locked(obj, aspace, 0, U64_MAX);
567 }
568 
569 static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
570 		struct msm_gem_address_space *aspace, uint64_t *iova,
571 		u64 range_start, u64 range_end)
572 {
573 	struct msm_gem_vma *vma;
574 	int ret;
575 
576 	msm_gem_assert_locked(obj);
577 
578 	vma = get_vma_locked(obj, aspace, range_start, range_end);
579 	if (IS_ERR(vma))
580 		return PTR_ERR(vma);
581 
582 	ret = msm_gem_pin_vma_locked(obj, vma);
583 	if (!ret) {
584 		*iova = vma->iova;
585 		pin_obj_locked(obj);
586 	}
587 
588 	return ret;
589 }
590 
591 /*
592  * get iova and pin it. Should have a matching put
593  * limits iova to specified range (in pages)
594  */
595 int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
596 		struct msm_gem_address_space *aspace, uint64_t *iova,
597 		u64 range_start, u64 range_end)
598 {
599 	int ret;
600 
601 	msm_gem_lock(obj);
602 	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
603 	msm_gem_unlock(obj);
604 
605 	return ret;
606 }
607 
608 /* get iova and pin it. Should have a matching put */
609 int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
610 		struct msm_gem_address_space *aspace, uint64_t *iova)
611 {
612 	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
613 }
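/*
 * A minimal usage sketch (illustrative only, assuming a valid obj and
 * aspace from the caller): pin an iova for the hardware and release it
 * again with the matching put:
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	... hand iova to the GPU or display hardware ...
 *	msm_gem_unpin_iova(obj, aspace);
 */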
614 
615 /*
616  * Get an iova but don't pin it. Doesn't need a put because iovas are currently
617  * valid for the life of the object
618  */
619 int msm_gem_get_iova(struct drm_gem_object *obj,
620 		struct msm_gem_address_space *aspace, uint64_t *iova)
621 {
622 	struct msm_gem_vma *vma;
623 	int ret = 0;
624 
625 	msm_gem_lock(obj);
626 	vma = get_vma_locked(obj, aspace, 0, U64_MAX);
627 	if (IS_ERR(vma)) {
628 		ret = PTR_ERR(vma);
629 	} else {
630 		*iova = vma->iova;
631 	}
632 	msm_gem_unlock(obj);
633 
634 	return ret;
635 }
636 
637 static int clear_iova(struct drm_gem_object *obj,
638 		      struct msm_gem_address_space *aspace)
639 {
640 	struct msm_gem_vma *vma = lookup_vma(obj, aspace);
641 
642 	if (!vma)
643 		return 0;
644 
645 	msm_gem_vma_purge(vma);
646 	msm_gem_vma_close(vma);
647 	del_vma(vma);
648 
649 	return 0;
650 }
651 
652 /*
653  * Get the requested iova but don't pin it.  Fails if the requested iova is
654  * not available.  Doesn't need a put because iovas are currently valid for
655  * the life of the object.
656  *
657  * Setting an iova of zero will clear the vma.
658  */
659 int msm_gem_set_iova(struct drm_gem_object *obj,
660 		     struct msm_gem_address_space *aspace, uint64_t iova)
661 {
662 	int ret = 0;
663 
664 	msm_gem_lock(obj);
665 	if (!iova) {
666 		ret = clear_iova(obj, aspace);
667 	} else {
668 		struct msm_gem_vma *vma;
669 		vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
670 		if (IS_ERR(vma)) {
671 			ret = PTR_ERR(vma);
672 		} else if (GEM_WARN_ON(vma->iova != iova)) {
673 			clear_iova(obj, aspace);
674 			ret = -EBUSY;
675 		}
676 	}
677 	msm_gem_unlock(obj);
678 
679 	return ret;
680 }
681 
682 /*
683  * Unpin an iova by updating the reference counts. The memory isn't actually
684  * purged until something else (shrinker, mm_notifier, destroy, etc) decides
685  * to get rid of it.
686  */
687 void msm_gem_unpin_iova(struct drm_gem_object *obj,
688 		struct msm_gem_address_space *aspace)
689 {
690 	struct msm_gem_vma *vma;
691 
692 	msm_gem_lock(obj);
693 	vma = lookup_vma(obj, aspace);
694 	if (!GEM_WARN_ON(!vma)) {
695 		msm_gem_unpin_locked(obj);
696 	}
697 	msm_gem_unlock(obj);
698 }
699 
700 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
701 		struct drm_mode_create_dumb *args)
702 {
703 	args->pitch = align_pitch(args->width, args->bpp);
704 	args->size  = PAGE_ALIGN(args->pitch * args->height);
705 	return msm_gem_new_handle(dev, file, args->size,
706 			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
707 }
708 
709 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
710 		uint32_t handle, uint64_t *offset)
711 {
712 	struct drm_gem_object *obj;
713 	int ret = 0;
714 
715 	/* GEM does all our handle to object mapping */
716 	obj = drm_gem_object_lookup(file, handle);
717 	if (obj == NULL) {
718 		ret = -ENOENT;
719 		goto fail;
720 	}
721 
722 	*offset = msm_gem_mmap_offset(obj);
723 
724 	drm_gem_object_put(obj);
725 
726 fail:
727 	return ret;
728 }
729 
730 static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
731 {
732 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
733 	struct page **pages;
734 	int ret = 0;
735 
736 	msm_gem_assert_locked(obj);
737 
738 	if (obj->import_attach)
739 		return ERR_PTR(-ENODEV);
740 
741 	pages = msm_gem_get_pages_locked(obj, madv);
742 	if (IS_ERR(pages))
743 		return ERR_CAST(pages);
744 
745 	pin_obj_locked(obj);
746 
747 	/* increment vmap_count *before* vmap() call, so shrinker can
748 	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
749 	 * This guarantees that we won't try to msm_gem_vunmap() this
750 	 * same object from within the vmap() call (while we already
751 	 * hold msm_obj lock)
752 	 */
753 	msm_obj->vmap_count++;
754 
755 	if (!msm_obj->vaddr) {
756 		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
757 				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
758 		if (msm_obj->vaddr == NULL) {
759 			ret = -ENOMEM;
760 			goto fail;
761 		}
762 	}
763 
764 	return msm_obj->vaddr;
765 
766 fail:
767 	msm_obj->vmap_count--;
768 	msm_gem_unpin_locked(obj);
769 	return ERR_PTR(ret);
770 }
771 
772 void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
773 {
774 	return get_vaddr(obj, MSM_MADV_WILLNEED);
775 }
776 
777 void *msm_gem_get_vaddr(struct drm_gem_object *obj)
778 {
779 	void *ret;
780 
781 	msm_gem_lock(obj);
782 	ret = msm_gem_get_vaddr_locked(obj);
783 	msm_gem_unlock(obj);
784 
785 	return ret;
786 }
787 
788 /*
789  * Don't use this!  It is for the very special case of dumping
790  * submits from GPU hangs or faults, were the bo may already
791  * be MSM_MADV_DONTNEED, but we know the buffer is still on the
792  * active list.
793  */
794 void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
795 {
796 	return get_vaddr(obj, __MSM_MADV_PURGED);
797 }
798 
799 void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
800 {
801 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
802 
803 	msm_gem_assert_locked(obj);
804 	GEM_WARN_ON(msm_obj->vmap_count < 1);
805 
806 	msm_obj->vmap_count--;
807 	msm_gem_unpin_locked(obj);
808 }
809 
810 void msm_gem_put_vaddr(struct drm_gem_object *obj)
811 {
812 	msm_gem_lock(obj);
813 	msm_gem_put_vaddr_locked(obj);
814 	msm_gem_unlock(obj);
815 }
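/*
 * A minimal usage sketch (illustrative only; 'src' and 'len' stand in for
 * the caller's data): map the buffer for CPU access, fill it, then drop
 * the mapping so it can be vunmapped again when no longer needed:
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, src, len);
 *	msm_gem_put_vaddr(obj);
 */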
816 
817 /* Update madvise status, returns true if not purged, else
818  * false or -errno.
819  */
820 int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
821 {
822 	struct msm_drm_private *priv = obj->dev->dev_private;
823 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
824 
825 	msm_gem_lock(obj);
826 
827 	mutex_lock(&priv->lru.lock);
828 
829 	if (msm_obj->madv != __MSM_MADV_PURGED)
830 		msm_obj->madv = madv;
831 
832 	madv = msm_obj->madv;
833 
834 	/* If the obj is inactive, we might need to move it
835 	 * between inactive lists
836 	 */
837 	update_lru_locked(obj);
838 
839 	mutex_unlock(&priv->lru.lock);
840 
841 	msm_gem_unlock(obj);
842 
843 	return (madv != __MSM_MADV_PURGED);
844 }
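/*
 * A minimal usage sketch (illustrative only): mark a cached buffer as
 * purgeable while idle, then reclaim it before reuse; a zero return on
 * the WILLNEED call means the backing pages were purged in between:
 *
 *	msm_gem_madvise(obj, MSM_MADV_DONTNEED);
 *	...
 *	if (!msm_gem_madvise(obj, MSM_MADV_WILLNEED))
 *		... contents were lost, the caller must regenerate them ...
 */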
845 
846 void msm_gem_purge(struct drm_gem_object *obj)
847 {
848 	struct drm_device *dev = obj->dev;
849 	struct msm_drm_private *priv = obj->dev->dev_private;
850 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
851 
852 	msm_gem_assert_locked(obj);
853 	GEM_WARN_ON(!is_purgeable(msm_obj));
854 
855 	/* Get rid of any iommu mapping(s): */
856 	put_iova_spaces(obj, true);
857 
858 	msm_gem_vunmap(obj);
859 
860 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
861 
862 	put_pages(obj);
863 
864 	put_iova_vmas(obj);
865 
866 	mutex_lock(&priv->lru.lock);
867 	/* A one-way transition: */
868 	msm_obj->madv = __MSM_MADV_PURGED;
869 	mutex_unlock(&priv->lru.lock);
870 
871 	drm_gem_free_mmap_offset(obj);
872 
873 	/* Our goal here is to return as much of the memory as
874 	 * possible back to the system, as we are called from OOM.
875 	 * To do this we must instruct the shmfs to drop all of its
876 	 * backing pages, *now*.
877 	 */
878 	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
879 
880 	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
881 			0, (loff_t)-1);
882 }
883 
884 /*
885  * Unpin the backing pages and make them available to be swapped out.
886  */
887 void msm_gem_evict(struct drm_gem_object *obj)
888 {
889 	struct drm_device *dev = obj->dev;
890 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
891 
892 	msm_gem_assert_locked(obj);
893 	GEM_WARN_ON(is_unevictable(msm_obj));
894 
895 	/* Get rid of any iommu mapping(s): */
896 	put_iova_spaces(obj, false);
897 
898 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
899 
900 	put_pages(obj);
901 }
902 
903 void msm_gem_vunmap(struct drm_gem_object *obj)
904 {
905 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
906 
907 	msm_gem_assert_locked(obj);
908 
909 	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
910 		return;
911 
912 	vunmap(msm_obj->vaddr);
913 	msm_obj->vaddr = NULL;
914 }
915 
916 bool msm_gem_active(struct drm_gem_object *obj)
917 {
918 	msm_gem_assert_locked(obj);
919 
920 	if (to_msm_bo(obj)->pin_count)
921 		return true;
922 
923 	return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
924 }
925 
926 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
927 {
928 	bool write = !!(op & MSM_PREP_WRITE);
929 	unsigned long remain =
930 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
931 	long ret;
932 
933 	if (op & MSM_PREP_BOOST) {
934 		dma_resv_set_deadline(obj->resv, dma_resv_usage_rw(write),
935 				      ktime_get());
936 	}
937 
938 	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
939 				    true,  remain);
940 	if (ret == 0)
941 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
942 	else if (ret < 0)
943 		return ret;
944 
945 	/* TODO cache maintenance */
946 
947 	return 0;
948 }
949 
950 int msm_gem_cpu_fini(struct drm_gem_object *obj)
951 {
952 	/* TODO cache maintenance */
953 	return 0;
954 }
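/*
 * A minimal usage sketch (illustrative only, with a caller-supplied
 * timeout): wait for pending GPU access before touching the buffer from
 * the CPU, and balance with the _fini call afterwards:
 *
 *	ret = msm_gem_cpu_prep(obj, MSM_PREP_WRITE, &timeout);
 *	if (ret)
 *		return ret;
 *	... CPU writes the buffer ...
 *	msm_gem_cpu_fini(obj);
 */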
955 
956 #ifdef CONFIG_DEBUG_FS
957 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
958 		struct msm_gem_stats *stats)
959 {
960 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
961 	struct dma_resv *robj = obj->resv;
962 	struct msm_gem_vma *vma;
963 	uint64_t off = drm_vma_node_start(&obj->vma_node);
964 	const char *madv;
965 
966 	if (!msm_gem_trylock(obj))
967 		return;
968 
969 	stats->all.count++;
970 	stats->all.size += obj->size;
971 
972 	if (msm_gem_active(obj)) {
973 		stats->active.count++;
974 		stats->active.size += obj->size;
975 	}
976 
977 	if (msm_obj->pages) {
978 		stats->resident.count++;
979 		stats->resident.size += obj->size;
980 	}
981 
982 	switch (msm_obj->madv) {
983 	case __MSM_MADV_PURGED:
984 		stats->purged.count++;
985 		stats->purged.size += obj->size;
986 		madv = " purged";
987 		break;
988 	case MSM_MADV_DONTNEED:
989 		stats->purgeable.count++;
990 		stats->purgeable.size += obj->size;
991 		madv = " purgeable";
992 		break;
993 	case MSM_MADV_WILLNEED:
994 	default:
995 		madv = "";
996 		break;
997 	}
998 
999 	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
1000 			msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
1001 			obj->name, kref_read(&obj->refcount),
1002 			off, msm_obj->vaddr);
1003 
1004 	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
1005 
1006 	if (!list_empty(&msm_obj->vmas)) {
1007 
1008 		seq_puts(m, "      vmas:");
1009 
1010 		list_for_each_entry(vma, &msm_obj->vmas, list) {
1011 			const char *name, *comm;
1012 			if (vma->aspace) {
1013 				struct msm_gem_address_space *aspace = vma->aspace;
1014 				struct task_struct *task =
1015 					get_pid_task(aspace->pid, PIDTYPE_PID);
1016 				if (task) {
1017 					comm = kstrdup(task->comm, GFP_KERNEL);
1018 					put_task_struct(task);
1019 				} else {
1020 					comm = NULL;
1021 				}
1022 				name = aspace->name;
1023 			} else {
1024 				name = comm = NULL;
1025 			}
1026 			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s]",
1027 				name, comm ? ":" : "", comm ? comm : "",
1028 				vma->aspace, vma->iova,
1029 				vma->mapped ? "mapped" : "unmapped");
1030 			kfree(comm);
1031 		}
1032 
1033 		seq_puts(m, "\n");
1034 	}
1035 
1036 	dma_resv_describe(robj, m);
1037 	msm_gem_unlock(obj);
1038 }
1039 
1040 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
1041 {
1042 	struct msm_gem_stats stats = {};
1043 	struct msm_gem_object *msm_obj;
1044 
1045 	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
1046 	list_for_each_entry(msm_obj, list, node) {
1047 		struct drm_gem_object *obj = &msm_obj->base;
1048 		seq_puts(m, "   ");
1049 		msm_gem_describe(obj, m, &stats);
1050 	}
1051 
1052 	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
1053 			stats.all.count, stats.all.size);
1054 	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
1055 			stats.active.count, stats.active.size);
1056 	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
1057 			stats.resident.count, stats.resident.size);
1058 	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
1059 			stats.purgeable.count, stats.purgeable.size);
1060 	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
1061 			stats.purged.count, stats.purged.size);
1062 }
1063 #endif
1064 
1065 /* don't call directly!  Use drm_gem_object_put() */
1066 static void msm_gem_free_object(struct drm_gem_object *obj)
1067 {
1068 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1069 	struct drm_device *dev = obj->dev;
1070 	struct msm_drm_private *priv = dev->dev_private;
1071 
1072 	mutex_lock(&priv->obj_lock);
1073 	list_del(&msm_obj->node);
1074 	mutex_unlock(&priv->obj_lock);
1075 
1076 	put_iova_spaces(obj, true);
1077 
1078 	if (obj->import_attach) {
1079 		GEM_WARN_ON(msm_obj->vaddr);
1080 
1081 		/* Don't drop the pages for imported dmabuf, as they are not
1082 		 * ours, just free the array we allocated:
1083 		 */
1084 		kvfree(msm_obj->pages);
1085 
1086 		put_iova_vmas(obj);
1087 
1088 		drm_prime_gem_destroy(obj, msm_obj->sgt);
1089 	} else {
1090 		msm_gem_vunmap(obj);
1091 		put_pages(obj);
1092 		put_iova_vmas(obj);
1093 	}
1094 
1095 	drm_gem_object_release(obj);
1096 
1097 	kfree(msm_obj->metadata);
1098 	kfree(msm_obj);
1099 }
1100 
1101 static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
1102 {
1103 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1104 
1105 	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1106 	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
1107 
1108 	return 0;
1109 }
1110 
1111 /* convenience method to construct a GEM buffer object, and userspace handle */
1112 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1113 		uint32_t size, uint32_t flags, uint32_t *handle,
1114 		char *name)
1115 {
1116 	struct drm_gem_object *obj;
1117 	int ret;
1118 
1119 	obj = msm_gem_new(dev, size, flags);
1120 
1121 	if (IS_ERR(obj))
1122 		return PTR_ERR(obj);
1123 
1124 	if (name)
1125 		msm_gem_object_set_name(obj, "%s", name);
1126 
1127 	ret = drm_gem_handle_create(file, obj, handle);
1128 
1129 	/* drop reference from allocate - handle holds it now */
1130 	drm_gem_object_put(obj);
1131 
1132 	return ret;
1133 }
1134 
1135 static enum drm_gem_object_status msm_gem_status(struct drm_gem_object *obj)
1136 {
1137 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1138 	enum drm_gem_object_status status = 0;
1139 
1140 	if (msm_obj->pages)
1141 		status |= DRM_GEM_OBJECT_RESIDENT;
1142 
1143 	if (msm_obj->madv == MSM_MADV_DONTNEED)
1144 		status |= DRM_GEM_OBJECT_PURGEABLE;
1145 
1146 	return status;
1147 }
1148 
1149 static const struct vm_operations_struct vm_ops = {
1150 	.fault = msm_gem_fault,
1151 	.open = drm_gem_vm_open,
1152 	.close = drm_gem_vm_close,
1153 };
1154 
1155 static const struct drm_gem_object_funcs msm_gem_object_funcs = {
1156 	.free = msm_gem_free_object,
1157 	.open = msm_gem_open,
1158 	.close = msm_gem_close,
1159 	.pin = msm_gem_prime_pin,
1160 	.unpin = msm_gem_prime_unpin,
1161 	.get_sg_table = msm_gem_prime_get_sg_table,
1162 	.vmap = msm_gem_prime_vmap,
1163 	.vunmap = msm_gem_prime_vunmap,
1164 	.mmap = msm_gem_object_mmap,
1165 	.status = msm_gem_status,
1166 	.vm_ops = &vm_ops,
1167 };
1168 
1169 static int msm_gem_new_impl(struct drm_device *dev,
1170 		uint32_t size, uint32_t flags,
1171 		struct drm_gem_object **obj)
1172 {
1173 	struct msm_drm_private *priv = dev->dev_private;
1174 	struct msm_gem_object *msm_obj;
1175 
1176 	switch (flags & MSM_BO_CACHE_MASK) {
1177 	case MSM_BO_CACHED:
1178 	case MSM_BO_WC:
1179 		break;
1180 	case MSM_BO_CACHED_COHERENT:
1181 		if (priv->has_cached_coherent)
1182 			break;
1183 		fallthrough;
1184 	default:
1185 		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
1186 				(flags & MSM_BO_CACHE_MASK));
1187 		return -EINVAL;
1188 	}
1189 
1190 	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
1191 	if (!msm_obj)
1192 		return -ENOMEM;
1193 
1194 	msm_obj->flags = flags;
1195 	msm_obj->madv = MSM_MADV_WILLNEED;
1196 
1197 	INIT_LIST_HEAD(&msm_obj->node);
1198 	INIT_LIST_HEAD(&msm_obj->vmas);
1199 
1200 	*obj = &msm_obj->base;
1201 	(*obj)->funcs = &msm_gem_object_funcs;
1202 
1203 	return 0;
1204 }
1205 
1206 struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
1207 {
1208 	struct msm_drm_private *priv = dev->dev_private;
1209 	struct msm_gem_object *msm_obj;
1210 	struct drm_gem_object *obj = NULL;
1211 	bool use_vram = false;
1212 	int ret;
1213 
1214 	size = PAGE_ALIGN(size);
1215 
1216 	if (!msm_use_mmu(dev))
1217 		use_vram = true;
1218 	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
1219 		use_vram = true;
1220 
1221 	if (GEM_WARN_ON(use_vram && !priv->vram.size))
1222 		return ERR_PTR(-EINVAL);
1223 
1224 	/* Disallow zero sized objects as they make the underlying
1225 	 * infrastructure grumpy
1226 	 */
1227 	if (size == 0)
1228 		return ERR_PTR(-EINVAL);
1229 
1230 	ret = msm_gem_new_impl(dev, size, flags, &obj);
1231 	if (ret)
1232 		return ERR_PTR(ret);
1233 
1234 	msm_obj = to_msm_bo(obj);
1235 
1236 	if (use_vram) {
1237 		struct msm_gem_vma *vma;
1238 		struct page **pages;
1239 
1240 		drm_gem_private_object_init(dev, obj, size);
1241 
1242 		msm_gem_lock(obj);
1243 
1244 		vma = add_vma(obj, NULL);
1245 		msm_gem_unlock(obj);
1246 		if (IS_ERR(vma)) {
1247 			ret = PTR_ERR(vma);
1248 			goto fail;
1249 		}
1250 
1251 		to_msm_bo(obj)->vram_node = &vma->node;
1252 
1253 		msm_gem_lock(obj);
1254 		pages = get_pages(obj);
1255 		msm_gem_unlock(obj);
1256 		if (IS_ERR(pages)) {
1257 			ret = PTR_ERR(pages);
1258 			goto fail;
1259 		}
1260 
1261 		vma->iova = physaddr(obj);
1262 	} else {
1263 		ret = drm_gem_object_init(dev, obj, size);
1264 		if (ret)
1265 			goto fail;
1266 		/*
1267 		 * Our buffers are kept pinned, so allocating them from the
1268 		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
1269 		 * See comments above new_inode() why this is required _and_
1270 		 * expected if you're going to pin these pages.
1271 		 */
1272 		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
1273 	}
1274 
1275 	drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
1276 
1277 	mutex_lock(&priv->obj_lock);
1278 	list_add_tail(&msm_obj->node, &priv->objects);
1279 	mutex_unlock(&priv->obj_lock);
1280 
1281 	ret = drm_gem_create_mmap_offset(obj);
1282 	if (ret)
1283 		goto fail;
1284 
1285 	return obj;
1286 
1287 fail:
1288 	drm_gem_object_put(obj);
1289 	return ERR_PTR(ret);
1290 }
1291 
1292 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
1293 		struct dma_buf *dmabuf, struct sg_table *sgt)
1294 {
1295 	struct msm_drm_private *priv = dev->dev_private;
1296 	struct msm_gem_object *msm_obj;
1297 	struct drm_gem_object *obj;
1298 	uint32_t size;
1299 	int ret, npages;
1300 
1301 	/* if we don't have IOMMU, don't bother pretending we can import: */
1302 	if (!msm_use_mmu(dev)) {
1303 		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
1304 		return ERR_PTR(-EINVAL);
1305 	}
1306 
1307 	size = PAGE_ALIGN(dmabuf->size);
1308 
1309 	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
1310 	if (ret)
1311 		return ERR_PTR(ret);
1312 
1313 	drm_gem_private_object_init(dev, obj, size);
1314 
1315 	npages = size / PAGE_SIZE;
1316 
1317 	msm_obj = to_msm_bo(obj);
1318 	msm_gem_lock(obj);
1319 	msm_obj->sgt = sgt;
1320 	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
1321 	if (!msm_obj->pages) {
1322 		msm_gem_unlock(obj);
1323 		ret = -ENOMEM;
1324 		goto fail;
1325 	}
1326 
1327 	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
1328 	if (ret) {
1329 		msm_gem_unlock(obj);
1330 		goto fail;
1331 	}
1332 
1333 	msm_gem_unlock(obj);
1334 
1335 	drm_gem_lru_move_tail(&priv->lru.pinned, obj);
1336 
1337 	mutex_lock(&priv->obj_lock);
1338 	list_add_tail(&msm_obj->node, &priv->objects);
1339 	mutex_unlock(&priv->obj_lock);
1340 
1341 	ret = drm_gem_create_mmap_offset(obj);
1342 	if (ret)
1343 		goto fail;
1344 
1345 	return obj;
1346 
1347 fail:
1348 	drm_gem_object_put(obj);
1349 	return ERR_PTR(ret);
1350 }
1351 
1352 void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
1353 		uint32_t flags, struct msm_gem_address_space *aspace,
1354 		struct drm_gem_object **bo, uint64_t *iova)
1355 {
1356 	void *vaddr;
1357 	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
1358 	int ret;
1359 
1360 	if (IS_ERR(obj))
1361 		return ERR_CAST(obj);
1362 
1363 	if (iova) {
1364 		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
1365 		if (ret)
1366 			goto err;
1367 	}
1368 
1369 	vaddr = msm_gem_get_vaddr(obj);
1370 	if (IS_ERR(vaddr)) {
1371 		msm_gem_unpin_iova(obj, aspace);
1372 		ret = PTR_ERR(vaddr);
1373 		goto err;
1374 	}
1375 
1376 	if (bo)
1377 		*bo = obj;
1378 
1379 	return vaddr;
1380 err:
1381 	drm_gem_object_put(obj);
1382 
1383 	return ERR_PTR(ret);
1384 
1385 }
1386 
1387 void msm_gem_kernel_put(struct drm_gem_object *bo,
1388 		struct msm_gem_address_space *aspace)
1389 {
1390 	if (IS_ERR_OR_NULL(bo))
1391 		return;
1392 
1393 	msm_gem_put_vaddr(bo);
1394 	msm_gem_unpin_iova(bo, aspace);
1395 	drm_gem_object_put(bo);
1396 }
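/*
 * A minimal usage sketch (illustrative only): allocate a kernel-internal
 * buffer with both a CPU mapping and a pinned iova, then tear it down
 * with the matching put:
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *ptr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace, &bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	... use ptr from the CPU and iova from the GPU ...
 *	msm_gem_kernel_put(bo, aspace);
 */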
1397 
1398 void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
1399 {
1400 	struct msm_gem_object *msm_obj = to_msm_bo(bo);
1401 	va_list ap;
1402 
1403 	if (!fmt)
1404 		return;
1405 
1406 	va_start(ap, fmt);
1407 	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
1408 	va_end(ap);
1409 }
1410