// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <drm/drm_prime.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area.  With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data.  If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

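/*
 * Drop the DMA mapping and scatter/gather table, then release the
 * backing shmem pages.
 */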
static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

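/*
 * Lazily populate the page array and the DMA scatter/gather table on
 * first use.  Must be called with etnaviv_obj->lock held; the usual
 * caller pattern (as in etnaviv_gem_fault() below) is:
 *
 *	mutex_lock(&etnaviv_obj->lock);
 *	pages = etnaviv_gem_get_pages(etnaviv_obj);
 *	mutex_unlock(&etnaviv_obj->lock);
 */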
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
					    etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}

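/*
 * Apply the caching mode requested at BO creation time to the mapping:
 * WC and uncached BOs get adjusted page protection bits, while cached
 * BOs are shunted to the shmem file's own address_space (see below).
 */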
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(etnaviv_obj->base.filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = etnaviv_obj->base.filp;

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	obj = to_etnaviv_bo(vma->vm_private_data);
	return obj->ops->mmap(obj, vma);
}

vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int err;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet.  Note that vmf_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	err = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (err)
		return VM_FAULT_NOPAGE;
	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		return vmf_error(err);
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	return vmf_insert_page(vma, vmf->address, page);
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

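/*
 * Find an existing mapping of this object in the given MMU context, if
 * any.  Called with etnaviv_obj->lock held.
 */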
static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu_context *context)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->context == context)
			return mapping;
	}

	return NULL;
}

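/*
 * Drop one use of a mapping together with the object reference taken by
 * etnaviv_gem_mapping_get().
 */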
void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_put(&etnaviv_obj->base);
}

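/*
 * Look up or create the mapping of this object into the given MMU
 * context at virtual address va.  On success a use count and an object
 * reference are taken; both are dropped again by
 * etnaviv_gem_mapping_unreference().
 */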
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
	u64 va)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us.  If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&mmu_context->lock);
			if (mapping->context == mmu_context)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&mmu_context->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->context = etnaviv_iommu_context_get(mmu_context);
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
				    mmu_context->global->memory_base,
				    mapping, va);
	if (ret < 0) {
		etnaviv_iommu_context_put(mmu_context);
		kfree(mapping);
	} else {
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
	}

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_get(obj);
	return mapping;
}

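/*
 * Return a kernel virtual mapping of the object, creating it on first
 * use.  The mapping is cached for the lifetime of the object.
 */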
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
			VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

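/*
 * Translate the ETNA_PREP_* access flags into the DMA direction used
 * for cache maintenance on cached BOs.
 */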
static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}

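/*
 * Prepare CPU access to the BO: wait for outstanding GPU work (unless
 * ETNA_PREP_NOSYNC is set) and, for cached BOs, sync the pages for the
 * CPU.  Paired with etnaviv_gem_cpu_fini().
 */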
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (!etnaviv_obj->sgt) {
		void *ret;

		mutex_lock(&etnaviv_obj->lock);
		ret = etnaviv_gem_get_pages(etnaviv_obj);
		mutex_unlock(&etnaviv_obj->lock);
		if (IS_ERR(ret))
			return PTR_ERR(ret);
	}

	if (op & ETNA_PREP_NOSYNC) {
		if (!dma_resv_test_signaled_rcu(obj->resv, write))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = dma_resv_wait_timeout_rcu(obj->resv, write, true,
						remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
					 etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

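/*
 * Finish CPU access to the BO: for cached BOs, hand the pages back to
 * the device by syncing in the direction recorded at prep time.
 */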
int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct dma_fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %llu\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, etnaviv_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, "   ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};

void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_drm_private *priv = obj->dev->dev_private;
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	mutex_lock(&priv->gem_lock);
	list_del(&etnaviv_obj->gem_node);
	mutex_unlock(&priv->gem_lock);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu_context *context = mapping->context;

		WARN_ON(mapping->use);

		if (context) {
			etnaviv_iommu_unmap_gem(context, mapping);
			etnaviv_iommu_context_put(context);
		}

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);
}

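/*
 * Common object setup shared by the shmem and userptr paths: validate
 * the requested cache mode, allocate the object and initialize its lock
 * and mapping list.  The caller initializes the GEM backing store.
 */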
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
	u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto fail;

	/*
	 * Our buffers are kept pinned, so allocating them from the MOVABLE
	 * zone is a really bad idea, and conflicts with CMA. See comments
	 * above new_inode() why this is required _and_ expected if you're
	 * going to pin these pages.
	 */
	mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);

	etnaviv_gem_obj_add(dev, obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
fail:
	drm_gem_object_put(obj);

	return ret;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

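/*
 * Pin the user pages backing a userptr BO.  Only the process that
 * created the BO may populate it, since the user pointer is only valid
 * in the mm recorded at creation time.
 */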
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	might_lock_read(&current->mm->mmap_lock);

	if (userptr->mm != current->mm)
		return -EPERM;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	do {
		unsigned num_pages = npages - pinned;
		uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
		struct page **pages = pvec + pinned;

		ret = pin_user_pages_fast(ptr, num_pages,
					  FOLL_WRITE | FOLL_FORCE, pages);
		if (ret < 0) {
			unpin_user_pages(pvec, pinned);
			kvfree(pvec);
			return ret;
		}

		pinned += ret;

	} while (pinned < npages);

	etnaviv_obj->pages = pvec;

	return 0;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		unpin_user_pages(etnaviv_obj->pages, npages);
		kvfree(etnaviv_obj->pages);
	}
}

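/*
 * userptr BOs cannot be mapped through the GEM mmap path; userspace
 * already has a mapping of the underlying memory.
 */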
static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.mm = current->mm;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

	etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&etnaviv_obj->base);
	return ret;
}