/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/pci.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_prime.h"

struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
                                        int flags);
struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
int radeon_gem_prime_pin(struct drm_gem_object *obj);
void radeon_gem_prime_unpin(struct drm_gem_object *obj);

const struct drm_gem_object_funcs radeon_gem_object_funcs;

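/**
 * radeon_gem_fault - VM fault handler for mmap'ed radeon BOs
 * @vmf: fault information
 *
 * Takes pm.mclk_lock for reading so reclocking cannot move buffers
 * while the fault is handled, reserves the BO, gives the driver a
 * chance to move it to a CPU-accessible placement and finally lets
 * TTM fill in the page tables.
 */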
static vm_fault_t radeon_gem_fault(struct vm_fault *vmf)
{
        struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
        struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
        vm_fault_t ret;

        down_read(&rdev->pm.mclk_lock);

        ret = ttm_bo_vm_reserve(bo, vmf);
        if (ret)
                goto unlock_mclk;

        ret = radeon_bo_fault_reserve_notify(bo);
        if (ret)
                goto unlock_resv;

        ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
                                       TTM_BO_VM_NUM_PREFAULT);
        if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                goto unlock_mclk;

unlock_resv:
        dma_resv_unlock(bo->base.resv);

unlock_mclk:
        up_read(&rdev->pm.mclk_lock);
        return ret;
}

static const struct vm_operations_struct radeon_gem_vm_ops = {
        .fault = radeon_gem_fault,
        .open = ttm_bo_vm_open,
        .close = ttm_bo_vm_close,
        .access = ttm_bo_vm_access
};

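/**
 * radeon_gem_object_free - free a GEM object
 * @gobj: GEM object being freed
 *
 * Unregisters any MMU notifier attached to the buffer and drops the
 * final radeon_bo reference, releasing the backing storage.
 */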
static void radeon_gem_object_free(struct drm_gem_object *gobj)
{
        struct radeon_bo *robj = gem_to_radeon_bo(gobj);

        if (robj) {
                radeon_mn_unregister(robj);
                radeon_bo_unref(&robj);
        }
}

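/**
 * radeon_gem_object_create - allocate a radeon BO wrapped in a GEM object
 * @rdev: radeon device
 * @size: size of the object in bytes
 * @alignment: alignment in bytes, raised to at least one page
 * @initial_domain: preferred placement domain(s)
 * @flags: RADEON_GEM_* creation flags
 * @kernel: true for kernel-internal objects
 * @obj: filled with the new GEM object on success
 *
 * Allocations larger than the unpinned GTT are rejected since they
 * could never be migrated between VRAM and the system pool; VRAM
 * allocations fall back to VRAM|GTT if the first attempt fails.
 */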
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
                             int alignment, int initial_domain,
                             u32 flags, bool kernel,
                             struct drm_gem_object **obj)
{
        struct radeon_bo *robj;
        unsigned long max_size;
        int r;

        *obj = NULL;
        /* At least align on page size */
        if (alignment < PAGE_SIZE) {
                alignment = PAGE_SIZE;
        }

        /* Maximum bo size is the unpinned gtt size since we use the gtt to
         * handle vram to system pool migrations.
         */
        max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
        if (size > max_size) {
                DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
                          size >> 20, max_size >> 20);
                return -ENOMEM;
        }

retry:
        r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
                             flags, NULL, NULL, &robj);
        if (r) {
                if (r != -ERESTARTSYS) {
                        if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
                                initial_domain |= RADEON_GEM_DOMAIN_GTT;
                                goto retry;
                        }
                        DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
                                  size, initial_domain, alignment, r);
                }
                return r;
        }
        *obj = &robj->tbo.base;
        (*obj)->funcs = &radeon_gem_object_funcs;
        robj->pid = task_pid_nr(current);

        mutex_lock(&rdev->gem.mutex);
        list_add_tail(&robj->list, &rdev->gem.objects);
        mutex_unlock(&rdev->gem.mutex);

        return 0;
}

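/**
 * radeon_gem_set_domain - handle a set_domain request
 * @gobj: GEM object affected
 * @rdomain: requested read domains
 * @wdomain: requested write domain
 *
 * A CPU domain request waits (up to 30s) for the object to become
 * idle; migrating a dma-buf shared object to VRAM is rejected.
 */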
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
                                 uint32_t rdomain, uint32_t wdomain)
{
        struct radeon_bo *robj;
        uint32_t domain;
        long r;

        /* FIXME: reimplement */
        robj = gem_to_radeon_bo(gobj);
        /* work out where to validate the buffer to */
        domain = wdomain;
        if (!domain) {
                domain = rdomain;
        }
        if (!domain) {
                /* Do nothing */
                pr_warn("Set domain without domain!\n");
                return 0;
        }
        if (domain == RADEON_GEM_DOMAIN_CPU) {
                /* Asking for cpu access, wait for the object to become idle */
                r = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
                if (!r)
                        r = -EBUSY;

                if (r < 0 && r != -EINTR) {
                        pr_err("Failed to wait for object: %li\n", r);
                        return r;
                }
        }
        if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
                /* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
                return -EINVAL;
        }
        return 0;
}

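/**
 * radeon_gem_init - initialize device-wide GEM state
 * @rdev: radeon device
 */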
int radeon_gem_init(struct radeon_device *rdev)
{
        INIT_LIST_HEAD(&rdev->gem.objects);
        return 0;
}

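/**
 * radeon_gem_fini - tear down device-wide GEM state
 * @rdev: radeon device
 *
 * Force-deletes any BOs that userspace leaked across release.
 */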
void radeon_gem_fini(struct radeon_device *rdev)
{
        radeon_bo_force_delete(rdev);
}

/**
 * radeon_gem_object_open - open a GEM object in a client's VM
 * @obj: GEM object being opened
 * @file_priv: DRM file of the client opening a handle to @obj
 *
 * Called from drm_gem_handle_create(), which is used by both the GEM
 * create and open ioctls. On VM-capable hardware this adds a bo_va for
 * the object to the client's VM, or takes another reference on an
 * existing one.
 */
static int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
        struct radeon_bo *rbo = gem_to_radeon_bo(obj);
        struct radeon_device *rdev = rbo->rdev;
        struct radeon_fpriv *fpriv = file_priv->driver_priv;
        struct radeon_vm *vm = &fpriv->vm;
        struct radeon_bo_va *bo_va;
        int r;

        if ((rdev->family < CHIP_CAYMAN) ||
            (!rdev->accel_working)) {
                return 0;
        }

        r = radeon_bo_reserve(rbo, false);
        if (r) {
                return r;
        }

        bo_va = radeon_vm_bo_find(vm, rbo);
        if (!bo_va) {
                bo_va = radeon_vm_bo_add(rdev, vm, rbo);
        } else {
                ++bo_va->ref_count;
        }
        radeon_bo_unreserve(rbo);

        return 0;
}

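/**
 * radeon_gem_object_close - drop a client's VM mapping of a GEM object
 * @obj: GEM object being closed
 * @file_priv: DRM file of the client closing its handle to @obj
 *
 * Drops one bo_va reference and removes the mapping from the client's
 * VM when the last reference goes away.
 */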
static void radeon_gem_object_close(struct drm_gem_object *obj,
                                    struct drm_file *file_priv)
{
        struct radeon_bo *rbo = gem_to_radeon_bo(obj);
        struct radeon_device *rdev = rbo->rdev;
        struct radeon_fpriv *fpriv = file_priv->driver_priv;
        struct radeon_vm *vm = &fpriv->vm;
        struct radeon_bo_va *bo_va;
        int r;

        if ((rdev->family < CHIP_CAYMAN) ||
            (!rdev->accel_working)) {
                return;
        }

        r = radeon_bo_reserve(rbo, true);
        if (r) {
                dev_err(rdev->dev, "leaking bo va because we failed to reserve bo (%d)\n", r);
                return;
        }
        bo_va = radeon_vm_bo_find(vm, rbo);
        if (bo_va) {
                if (--bo_va->ref_count == 0) {
                        radeon_vm_bo_rmv(rdev, bo_va);
                }
        }
        radeon_bo_unreserve(rbo);
}

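/**
 * radeon_gem_handle_lockup - translate a GPU lockup into an ioctl result
 * @rdev: radeon device
 * @r: error code from a wait or allocation
 *
 * -EDEADLK indicates a lockup was detected: try a GPU reset and, on
 * success, ask userspace to retry by returning -EAGAIN.
 */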
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
        if (r == -EDEADLK) {
                r = radeon_gpu_reset(rdev);
                if (!r)
                        r = -EAGAIN;
        }
        return r;
}

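/**
 * radeon_gem_object_mmap - map a GEM object into userspace
 * @obj: GEM object to map
 * @vma: VMA describing the mapping
 *
 * Userptr objects are already backed by user pages and may not be
 * mapped again; everything else goes through the GEM TTM helper.
 */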
static int radeon_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
        struct radeon_bo *bo = gem_to_radeon_bo(obj);
        struct radeon_device *rdev = radeon_get_rdev(bo->tbo.bdev);

        if (radeon_ttm_tt_has_userptr(rdev, bo->tbo.ttm))
                return -EPERM;

        return drm_gem_ttm_mmap(obj, vma);
}

const struct drm_gem_object_funcs radeon_gem_object_funcs = {
        .free = radeon_gem_object_free,
        .open = radeon_gem_object_open,
        .close = radeon_gem_object_close,
        .export = radeon_gem_prime_export,
        .pin = radeon_gem_prime_pin,
        .unpin = radeon_gem_prime_unpin,
        .get_sg_table = radeon_gem_prime_get_sg_table,
        .vmap = drm_gem_ttm_vmap,
        .vunmap = drm_gem_ttm_vunmap,
        .mmap = radeon_gem_object_mmap,
        .vm_ops = &radeon_gem_vm_ops,
};

/*
 * GEM ioctls.
 */

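/**
 * radeon_gem_info_ioctl - report VRAM and GART pool sizes to userspace
 * @dev: DRM device
 * @data: ioctl argument (struct drm_radeon_gem_info)
 * @filp: DRM file
 *
 * Visible VRAM and GART sizes are reported with currently pinned
 * memory already subtracted.
 */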
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_info *args = data;
        struct ttm_resource_manager *man;

        man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);

        args->vram_size = (u64)man->size << PAGE_SHIFT;
        args->vram_visible = rdev->mc.visible_vram_size;
        args->vram_visible -= rdev->vram_pin_size;
        args->gart_size = rdev->mc.gtt_size;
        args->gart_size -= rdev->gart_pin_size;

        return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *filp)
{
        /* TODO: implement */
        DRM_ERROR("unimplemented %s\n", __func__);
        return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        /* TODO: implement */
        DRM_ERROR("unimplemented %s\n", __func__);
        return -ENOSYS;
}

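/**
 * radeon_gem_create_ioctl - allocate a GEM object and return a handle
 * @dev: DRM device
 * @data: ioctl argument (struct drm_radeon_gem_create)
 * @filp: DRM file
 *
 * The requested size is rounded up to the page size before allocation.
 */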
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_create *args = data;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        down_read(&rdev->exclusive_lock);
        /* create a gem object to contain this object */
        args->size = roundup(args->size, PAGE_SIZE);
        r = radeon_gem_object_create(rdev, args->size, args->alignment,
                                     args->initial_domain, args->flags,
                                     false, &gobj);
        if (r) {
                up_read(&rdev->exclusive_lock);
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put(gobj);
        if (r) {
                up_read(&rdev->exclusive_lock);
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        args->handle = handle;
        up_read(&rdev->exclusive_lock);
        return 0;
}

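/**
 * radeon_gem_userptr_ioctl - create a GEM object backed by user memory
 * @dev: DRM device
 * @data: ioctl argument (struct drm_radeon_gem_userptr)
 * @filp: DRM file
 *
 * Writable mappings must be anonymous memory with an MMU notifier
 * registered; RADEON_GEM_USERPTR_VALIDATE additionally validates the
 * pages into GTT at creation time.
 */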
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *filp)
{
        struct ttm_operation_ctx ctx = { true, false };
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_userptr *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *bo;
        uint32_t handle;
        int r;

        args->addr = untagged_addr(args->addr);

        if (offset_in_page(args->addr | args->size))
                return -EINVAL;

        /* reject unknown flag values */
        if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
            RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
            RADEON_GEM_USERPTR_REGISTER))
                return -EINVAL;

        if (args->flags & RADEON_GEM_USERPTR_READONLY) {
                /* readonly pages not tested on older hardware */
                if (rdev->family < CHIP_R600)
                        return -EINVAL;

        } else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
                   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

                /* if we want to write to it we must require anonymous
                 * memory and install an MMU notifier
                 */
                return -EACCES;
        }

        down_read(&rdev->exclusive_lock);

        /* create a gem object to contain this object */
        r = radeon_gem_object_create(rdev, args->size, 0,
                                     RADEON_GEM_DOMAIN_CPU, 0,
                                     false, &gobj);
        if (r)
                goto handle_lockup;

        bo = gem_to_radeon_bo(gobj);
        r = radeon_ttm_tt_set_userptr(rdev, bo->tbo.ttm, args->addr, args->flags);
        if (r)
                goto release_object;

        if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
                r = radeon_mn_register(bo, args->addr);
                if (r)
                        goto release_object;
        }

        if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
                mmap_read_lock(current->mm);
                r = radeon_bo_reserve(bo, true);
                if (r) {
                        mmap_read_unlock(current->mm);
                        goto release_object;
                }

                radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                radeon_bo_unreserve(bo);
                mmap_read_unlock(current->mm);
                if (r)
                        goto release_object;
        }

        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put(gobj);
        if (r)
                goto handle_lockup;

        args->handle = handle;
        up_read(&rdev->exclusive_lock);
        return 0;

release_object:
        drm_gem_object_put(gobj);

handle_lockup:
        up_read(&rdev->exclusive_lock);
        r = radeon_gem_handle_lockup(rdev, r);

        return r;
}

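/**
 * radeon_gem_set_domain_ioctl - validate a BO into the requested domain
 * @dev: DRM device
 * @data: ioctl argument (struct drm_radeon_gem_set_domain)
 * @filp: DRM file
 */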
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        /* transition the BO to a domain -
         * just validate the BO into a certain domain */
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_set_domain *args = data;
        struct drm_gem_object *gobj;
        int r;

        /* for now if someone requests domain CPU -
         * just make sure the buffer is finished with */
        down_read(&rdev->exclusive_lock);

        /* just do a BO wait for now */
        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL) {
                up_read(&rdev->exclusive_lock);
                return -ENOENT;
        }

        r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

        drm_gem_object_put(gobj);
        up_read(&rdev->exclusive_lock);
        r = radeon_gem_handle_lockup(rdev, r);
        return r;
}

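/**
 * radeon_mode_dumb_mmap - look up the fake mmap offset for a BO
 * @filp: DRM file
 * @dev: DRM device
 * @handle: GEM handle of the BO
 * @offset_p: filled with the mmap offset
 *
 * Userptr objects cannot be mapped and yield -EPERM.
 */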
int radeon_mode_dumb_mmap(struct drm_file *filp,
                          struct drm_device *dev,
                          uint32_t handle, uint64_t *offset_p)
{
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;

        gobj = drm_gem_object_lookup(filp, handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
        if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) {
                drm_gem_object_put(gobj);
                return -EPERM;
        }
        *offset_p = radeon_bo_mmap_offset(robj);
        drm_gem_object_put(gobj);
        return 0;
}

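/**
 * radeon_gem_mmap_ioctl - ioctl wrapper around radeon_mode_dumb_mmap()
 * @dev: DRM device
 * @data: ioctl argument (struct drm_radeon_gem_mmap)
 * @filp: DRM file
 */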
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct drm_radeon_gem_mmap *args = data;

        return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

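/**
 * radeon_gem_busy_ioctl - non-blocking check whether a BO is busy
 * @dev: DRM device
 * @data: ioctl argument (struct drm_radeon_gem_busy)
 * @filp: DRM file
 *
 * Also reports the BO's current placement domain back to userspace.
 */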
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct drm_radeon_gem_busy *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;
        uint32_t cur_placement = 0;

        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);

        r = dma_resv_test_signaled(robj->tbo.base.resv, true);
        if (r == 0)
                r = -EBUSY;
        else
                r = 0;

        cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
        args->domain = radeon_mem_type_to_domain(cur_placement);
        drm_gem_object_put(gobj);
        return r;
}

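/**
 * radeon_gem_wait_idle_ioctl - block (up to 30s) until a BO is idle
 * @dev: DRM device
 * @data: ioctl argument (struct drm_radeon_gem_wait_idle)
 * @filp: DRM file
 */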
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r = 0;
        uint32_t cur_placement = 0;
        long ret;

        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);

        ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
        if (ret == 0)
                r = -EBUSY;
        else if (ret < 0)
                r = ret;

        /* Flush HDP cache via MMIO if necessary */
        cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
        if (rdev->asic->mmio_hdp_flush &&
            radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
                robj->rdev->asic->mmio_hdp_flush(rdev);
        drm_gem_object_put(gobj);
        r = radeon_gem_handle_lockup(rdev, r);
        return r;
}

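/**
 * radeon_gem_set_tiling_ioctl - set tiling flags and pitch for a BO
 * @dev: DRM device
 * @data: ioctl argument (struct drm_radeon_gem_set_tiling)
 * @filp: DRM file
 */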
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct drm_radeon_gem_set_tiling *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r = 0;

        DRM_DEBUG("%d\n", args->handle);
        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
        drm_gem_object_put(gobj);
        return r;
}

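/**
 * radeon_gem_get_tiling_ioctl - query tiling flags and pitch of a BO
 * @dev: DRM device
 * @data: ioctl argument (struct drm_radeon_gem_get_tiling)
 * @filp: DRM file
 */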
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct drm_radeon_gem_get_tiling *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *rbo;
        int r = 0;

        DRM_DEBUG("\n");
        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        rbo = gem_to_radeon_bo(gobj);
        r = radeon_bo_reserve(rbo, false);
        if (unlikely(r != 0))
                goto out;
        radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
        radeon_bo_unreserve(rbo);
out:
        drm_gem_object_put(gobj);
        return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
                                    struct radeon_bo_va *bo_va)
{
        struct ttm_validate_buffer tv, *entry;
        struct radeon_bo_list *vm_bos;
        struct ww_acquire_ctx ticket;
        struct list_head list;
        unsigned domain;
        int r;

        INIT_LIST_HEAD(&list);

        tv.bo = &bo_va->bo->tbo;
        tv.num_shared = 1;
        list_add(&tv.head, &list);

        vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
        if (!vm_bos)
                return;

        r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
        if (r)
                goto error_free;

        list_for_each_entry(entry, &list, head) {
                domain = radeon_mem_type_to_domain(entry->bo->resource->mem_type);
                /* if anything is swapped out don't swap it in here,
                 * just abort and wait for the next CS
                 */
                if (domain == RADEON_GEM_DOMAIN_CPU)
                        goto error_unreserve;
        }

        mutex_lock(&bo_va->vm->mutex);
        r = radeon_vm_clear_freed(rdev, bo_va->vm);
        if (r)
                goto error_unlock;

        if (bo_va->it.start)
                r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);

error_unlock:
        mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
        ttm_eu_backoff_reservation(&ticket, &list);

error_free:
        kvfree(vm_bos);

        if (r && r != -ERESTARTSYS)
                DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

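/**
 * radeon_gem_va_ioctl - map or unmap a BO in a client's virtual address space
 * @dev: DRM device
 * @data: ioctl argument (struct drm_radeon_gem_va)
 * @filp: DRM file
 *
 * Validates the requested operation, offset and flags before updating
 * the bo_va and flushing the change into the VM page tables.
 */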
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct drm_radeon_gem_va *args = data;
        struct drm_gem_object *gobj;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_fpriv *fpriv = filp->driver_priv;
        struct radeon_bo *rbo;
        struct radeon_bo_va *bo_va;
        u32 invalid_flags;
        int r = 0;

        if (!rdev->vm_manager.enabled) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -ENOTTY;
        }

        /* !! DONT REMOVE !!
         * We don't support vm_id yet. To be sure we don't have broken
         * userspace, reject anyone trying to use a non-zero value; that way
         * we can start using these fields later without breaking existing
         * userspace.
         */
        if (args->vm_id) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        if (args->offset < RADEON_VA_RESERVED_SIZE) {
                dev_err(dev->dev,
                        "offset 0x%lX is in reserved area 0x%X\n",
                        (unsigned long)args->offset,
                        RADEON_VA_RESERVED_SIZE);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        /* don't remove, we need to enforce userspace to set the snooped flag,
         * otherwise we will end up with broken userspace and we won't be able
         * to enable this feature without adding a new interface
         */
        invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
        if ((args->flags & invalid_flags)) {
                dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n",
                        args->flags, invalid_flags);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        switch (args->operation) {
        case RADEON_VA_MAP:
        case RADEON_VA_UNMAP:
                break;
        default:
                dev_err(dev->dev, "unsupported operation %d\n",
                        args->operation);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -ENOENT;
        }
        rbo = gem_to_radeon_bo(gobj);
        r = radeon_bo_reserve(rbo, false);
        if (r) {
                args->operation = RADEON_VA_RESULT_ERROR;
                drm_gem_object_put(gobj);
                return r;
        }
        bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
        if (!bo_va) {
                args->operation = RADEON_VA_RESULT_ERROR;
                radeon_bo_unreserve(rbo);
                drm_gem_object_put(gobj);
                return -ENOENT;
        }

        switch (args->operation) {
        case RADEON_VA_MAP:
                if (bo_va->it.start) {
                        args->operation = RADEON_VA_RESULT_VA_EXIST;
                        args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
                        radeon_bo_unreserve(rbo);
                        goto out;
                }
                r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
                break;
        case RADEON_VA_UNMAP:
                r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
                break;
        default:
                break;
        }
        if (!r) {
                radeon_gem_va_update_vm(rdev, bo_va);
                args->operation = RADEON_VA_RESULT_OK;
        } else {
                args->operation = RADEON_VA_RESULT_ERROR;
        }
out:
        drm_gem_object_put(gobj);
        return r;
}

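/**
 * radeon_gem_op_ioctl - get or set per-BO state
 * @dev: DRM device
 * @data: ioctl argument (struct drm_radeon_gem_op)
 * @filp: DRM file
 *
 * Currently only reads and updates the initial placement domain.
 */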
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct drm_radeon_gem_op *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;

        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);

        r = -EPERM;
        if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm))
                goto out;

        r = radeon_bo_reserve(robj, false);
        if (unlikely(r))
                goto out;

        switch (args->op) {
        case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
                args->value = robj->initial_domain;
                break;
        case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
                robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
                                                      RADEON_GEM_DOMAIN_GTT |
                                                      RADEON_GEM_DOMAIN_CPU);
                break;
        default:
                r = -EINVAL;
        }

        radeon_bo_unreserve(robj);
out:
        drm_gem_object_put(gobj);
        return r;
}

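/**
 * radeon_mode_dumb_create - create a dumb buffer for scanout
 * @file_priv: DRM file
 * @dev: DRM device
 * @args: dumb buffer parameters; pitch, size and handle are filled in
 *
 * The buffer is placed in VRAM with a hardware-aligned pitch and a
 * page-aligned size.
 */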
int radeon_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        args->pitch = radeon_align_pitch(rdev, args->width,
                                         DIV_ROUND_UP(args->bpp, 8), 0);
        args->size = args->pitch * args->height;
        args->size = ALIGN(args->size, PAGE_SIZE);

        r = radeon_gem_object_create(rdev, args->size, 0,
                                     RADEON_GEM_DOMAIN_VRAM, 0,
                                     false, &gobj);
        if (r)
                return -ENOMEM;

        r = drm_gem_handle_create(file_priv, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put(gobj);
        if (r) {
                return r;
        }
        args->handle = handle;
        return 0;
}

#if defined(CONFIG_DEBUG_FS)
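/* Dump every GEM object known to the device with its size, placement
 * and the pid of the task that allocated it.
 */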
static int radeon_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
        struct radeon_device *rdev = (struct radeon_device *)m->private;
        struct radeon_bo *rbo;
        unsigned i = 0;

        mutex_lock(&rdev->gem.mutex);
        list_for_each_entry(rbo, &rdev->gem.objects, list) {
                unsigned domain;
                const char *placement;

                domain = radeon_mem_type_to_domain(rbo->tbo.resource->mem_type);
                switch (domain) {
                case RADEON_GEM_DOMAIN_VRAM:
                        placement = "VRAM";
                        break;
                case RADEON_GEM_DOMAIN_GTT:
                        placement = " GTT";
                        break;
                case RADEON_GEM_DOMAIN_CPU:
                default:
                        placement = " CPU";
                        break;
                }
                seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
                           i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
                           placement, (unsigned long)rbo->pid);
                i++;
        }
        mutex_unlock(&rdev->gem.mutex);
        return 0;
}

DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_gem_info);
#endif

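/**
 * radeon_gem_debugfs_init - register the radeon_gem_info debugfs file
 * @rdev: radeon device
 */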
void radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        struct dentry *root = rdev->ddev->primary->debugfs_root;

        debugfs_create_file("radeon_gem_info", 0444, root, rdev,
                            &radeon_debugfs_gem_info_fops);

#endif
}