/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/iosys-map.h>
#include <linux/pci.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_prime.h"

struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
					int flags);
struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
int radeon_gem_prime_pin(struct drm_gem_object *obj);
void radeon_gem_prime_unpin(struct drm_gem_object *obj);

const struct drm_gem_object_funcs radeon_gem_object_funcs;

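/*
 * Fault handler for mapped radeon BOs. The pm mclk lock is held for
 * reading across the fault so a concurrent memory-clock change cannot
 * move the BO while its pages are faulted in; TTM does the actual
 * reserve and prefault work.
 */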
static vm_fault_t radeon_gem_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
	vm_fault_t ret;

	down_read(&rdev->pm.mclk_lock);

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		goto unlock_mclk;

	ret = radeon_bo_fault_reserve_notify(bo);
	if (ret)
		goto unlock_resv;

	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
				       TTM_BO_VM_NUM_PREFAULT);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		goto unlock_mclk;

unlock_resv:
	dma_resv_unlock(bo->base.resv);

unlock_mclk:
	up_read(&rdev->pm.mclk_lock);
	return ret;
}

static const struct vm_operations_struct radeon_gem_vm_ops = {
	.fault = radeon_gem_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};

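/*
 * GEM free callback: unregister any MMU notifier (userptr BOs) and drop
 * the reference on the underlying radeon_bo.
 */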
static void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

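/*
 * Allocate a radeon_bo of the requested size/alignment/domain and return
 * it as a GEM object. Sizes above the unpinned GTT size are rejected up
 * front, VRAM allocations fall back to GTT on failure, and the new BO is
 * added to the device's global object list.
 */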
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
				int alignment, int initial_domain,
				u32 flags, bool kernel,
				struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->tbo.base;
	(*obj)->funcs = &radeon_gem_object_funcs;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

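/*
 * Validate a read/write domain request from userspace. The write domain
 * takes precedence; a CPU-domain request just waits for the BO to go
 * idle, and dma-buf-backed BOs are refused migration to VRAM.
 */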
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		pr_warn("Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for object idle */
		r = dma_resv_wait_timeout(robj->tbo.base.resv,
					  DMA_RESV_USAGE_BOOKKEEP,
					  true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			pr_err("Failed to wait for object: %li\n", r);
			return r;
		}
	}
	if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
		/* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which is reached from both the new
 * and open ioctl paths.
 */
static int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

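/*
 * Drop this file's reference on the BO's VA mapping; the mapping itself
 * is removed once the last reference goes away.
 */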
static void radeon_gem_object_close(struct drm_gem_object *obj,
				    struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

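/*
 * Translate -EDEADLK (a detected GPU lockup) into a GPU reset; on a
 * successful reset the caller gets -EAGAIN so userspace can retry.
 */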
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

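/*
 * mmap is forbidden for userptr BOs (their pages already belong to a
 * userspace mapping); everything else is handled by the GEM TTM helper.
 */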
static int radeon_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = radeon_get_rdev(bo->tbo.bdev);

	if (radeon_ttm_tt_has_userptr(rdev, bo->tbo.ttm))
		return -EPERM;

	return drm_gem_ttm_mmap(obj, vma);
}

const struct drm_gem_object_funcs radeon_gem_object_funcs = {
	.free = radeon_gem_object_free,
	.open = radeon_gem_object_open,
	.close = radeon_gem_object_close,
	.export = radeon_gem_prime_export,
	.pin = radeon_gem_prime_pin,
	.unpin = radeon_gem_prime_unpin,
	.get_sg_table = radeon_gem_prime_get_sg_table,
	.vmap = drm_gem_ttm_vmap,
	.vunmap = drm_gem_ttm_vunmap,
	.mmap = radeon_gem_object_mmap,
	.vm_ops = &radeon_gem_vm_ops,
};

/*
 * GEM ioctls.
 */
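/*
 * Report VRAM and GART sizes to userspace, with the visible VRAM and
 * GART figures reduced by the amounts currently pinned.
 */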
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);

	args->vram_size = (u64)man->size << PAGE_SHIFT;
	args->vram_visible = rdev->mc.visible_vram_size;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

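/*
 * Create a BO of the requested size, alignment, domain and flags and
 * return a handle to it. exclusive_lock is taken read-side so creation
 * cannot race with a GPU reset, and lockup errors are translated for
 * userspace.
 */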
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

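/*
 * Wrap an existing anonymous userspace mapping in a BO. The address and
 * size must be page aligned; write access additionally requires the
 * ANONONLY and REGISTER (MMU notifier) flags so the pages cannot be
 * swapped out or remapped behind the GPU's back, and VALIDATE optionally
 * binds the pages into GTT right away.
 */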
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install an MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(rdev, bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		mmap_read_lock(current->mm);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			mmap_read_unlock(current->mm);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		radeon_bo_unreserve(bo);
		mmap_read_unlock(current->mm);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_put(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_put(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

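/*
 * Return the fake mmap offset for a BO so userspace can mmap it through
 * the DRM file; refused for userptr BOs, which are already user memory.
 */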
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) {
		drm_gem_object_put(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_put(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

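/*
 * Non-blocking busy check: returns -EBUSY while fences are outstanding
 * and reports the BO's current placement domain either way.
 */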
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = dma_resv_test_signaled(robj->tbo.base.resv, DMA_RESV_USAGE_READ);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_put(gobj);
	return r;
}

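/*
 * Blocking wait (up to 30s) for all fences on the BO, then flush the HDP
 * cache over MMIO if the BO lives in VRAM so CPU reads see current data.
 */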
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
				    true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_put(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

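/* Store userspace's tiling flags and pitch on the BO for later CS use. */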
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_put(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_put(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->resource->mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	kvfree(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

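/*
 * Map or unmap a BO in this client's GPU virtual address space. Only
 * MAP and UNMAP are accepted, vm_id must be zero, the offset must lie
 * outside the reserved VA range, and the VALID/SYSTEM page flags are
 * managed by the kernel rather than userspace.
 */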
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DON'T REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; that
	 * way, moving forward, we can use those fields without breaking
	 * existing userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(dev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped
	 * flag, otherwise we will end up with broken userspace and we
	 * won't be able to enable this feature without adding a new
	 * interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(dev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_put(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_put(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_put(gobj);
	return r;
}

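/*
 * Get or set a BO's initial placement domain; setting is masked to the
 * VRAM/GTT/CPU bits and refused for userptr BOs.
 */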
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_put(gobj);
	return r;
}

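/*
 * Allocate a dumb (unaccelerated scanout) buffer in VRAM, with the pitch
 * aligned for the display hardware and the size rounded to page size.
 */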
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
	struct radeon_device *rdev = (struct radeon_device *)m->private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.resource->mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_gem_info);
#endif

void radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	struct dentry *root = rdev->ddev->primary->debugfs_root;

	debugfs_create_file("radeon_gem_info", 0444, root, rdev,
			    &radeon_debugfs_gem_info_fops);

#endif
}