/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/pci.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/radeon_drm.h>

#include "radeon.h"

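/**
 * radeon_gem_object_free - free the radeon BO backing a GEM object
 *
 * @gobj: GEM object to free
 *
 * Unregisters the MMU notifier (if any) and drops the reference on the
 * underlying radeon_bo.
 */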
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
        struct radeon_bo *robj = gem_to_radeon_bo(gobj);

        if (robj) {
                radeon_mn_unregister(robj);
                radeon_bo_unref(&robj);
        }
}

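/**
 * radeon_gem_object_create - create a GEM object backed by a radeon BO
 *
 * @rdev: radeon_device pointer
 * @size: requested size in bytes
 * @alignment: requested alignment, raised to at least PAGE_SIZE
 * @initial_domain: initial placement (VRAM, GTT or CPU)
 * @flags: radeon BO creation flags
 * @kernel: whether this is a kernel-internal allocation
 * @obj: where to store the resulting GEM object
 *
 * The allocation is capped at the unpinned GTT size, since the GTT is used
 * for VRAM to system pool migrations. If a VRAM allocation fails for any
 * reason other than a signal, it is retried with GTT added as a fallback
 * domain. On success the BO is added to rdev->gem.objects and tagged with
 * the pid of the calling task.
 *
 * Returns 0 on success, negative error code on failure.
 */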
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
                             int alignment, int initial_domain,
                             u32 flags, bool kernel,
                             struct drm_gem_object **obj)
{
        struct radeon_bo *robj;
        unsigned long max_size;
        int r;

        *obj = NULL;
        /* At least align on page size */
        if (alignment < PAGE_SIZE) {
                alignment = PAGE_SIZE;
        }

        /* Maximum bo size is the unpinned gtt size since we use the gtt to
         * handle vram to system pool migrations.
         */
        max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
        if (size > max_size) {
                DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
                          size >> 20, max_size >> 20);
                return -ENOMEM;
        }

retry:
        r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
                             flags, NULL, NULL, &robj);
        if (r) {
                if (r != -ERESTARTSYS) {
                        if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
                                initial_domain |= RADEON_GEM_DOMAIN_GTT;
                                goto retry;
                        }
                        DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
                                  size, initial_domain, alignment, r);
                }
                return r;
        }
        *obj = &robj->tbo.base;
        robj->pid = task_pid_nr(current);

        mutex_lock(&rdev->gem.mutex);
        list_add_tail(&robj->list, &rdev->gem.objects);
        mutex_unlock(&rdev->gem.mutex);

        return 0;
}

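/**
 * radeon_gem_set_domain - validate a GEM object for a read/write domain
 *
 * @gobj: GEM object to validate
 * @rdomain: requested read domain
 * @wdomain: requested write domain
 *
 * The write domain takes precedence over the read domain. Asking for the
 * CPU domain just waits (up to 30s) for the object to go idle; asking for
 * VRAM is rejected for prime-shared BOs, which cannot sensibly be migrated
 * there.
 */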
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
                                 uint32_t rdomain, uint32_t wdomain)
{
        struct radeon_bo *robj;
        uint32_t domain;
        long r;

        /* FIXME: reimplement */
        robj = gem_to_radeon_bo(gobj);
        /* work out where to validate the buffer to */
        domain = wdomain;
        if (!domain) {
                domain = rdomain;
        }
        if (!domain) {
                /* Do nothing */
                pr_warn("Set domain without domain!\n");
                return 0;
        }
        if (domain == RADEON_GEM_DOMAIN_CPU) {
                /* Asking for cpu access, wait for object idle */
                r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
                if (!r)
                        r = -EBUSY;

                if (r < 0 && r != -EINTR) {
                        pr_err("Failed to wait for object: %li\n", r);
                        return r;
                }
        }
        if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
                /* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
                return -EINVAL;
        }
        return 0;
}

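/**
 * radeon_gem_init - initialize GEM state for the device
 *
 * @rdev: radeon_device pointer
 *
 * Just initializes the per-device list of GEM objects.
 */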
int radeon_gem_init(struct radeon_device *rdev)
{
        INIT_LIST_HEAD(&rdev->gem.objects);
        return 0;
}

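/**
 * radeon_gem_fini - tear down GEM state for the device
 *
 * @rdev: radeon_device pointer
 *
 * Force-deletes any BOs still on the GEM object list.
 */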
void radeon_gem_fini(struct radeon_device *rdev)
{
        radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create(), which appears in both the new and
 * open ioctl cases.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
        struct radeon_bo *rbo = gem_to_radeon_bo(obj);
        struct radeon_device *rdev = rbo->rdev;
        struct radeon_fpriv *fpriv = file_priv->driver_priv;
        struct radeon_vm *vm = &fpriv->vm;
        struct radeon_bo_va *bo_va;
        int r;

        if ((rdev->family < CHIP_CAYMAN) ||
            (!rdev->accel_working)) {
                return 0;
        }

        r = radeon_bo_reserve(rbo, false);
        if (r) {
                return r;
        }

        bo_va = radeon_vm_bo_find(vm, rbo);
        if (!bo_va) {
                bo_va = radeon_vm_bo_add(rdev, vm, rbo);
        } else {
                ++bo_va->ref_count;
        }
        radeon_bo_unreserve(rbo);

        return 0;
}

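/**
 * radeon_gem_object_close - handle a GEM handle being closed
 *
 * @obj: GEM object whose handle is closed
 * @file_priv: DRM file the handle belonged to
 *
 * Drops the per-file bo_va reference taken in radeon_gem_object_open() and
 * removes the VM mapping once the count reaches zero. A no-op on pre-CAYMAN
 * hardware or when acceleration is disabled.
 */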
void radeon_gem_object_close(struct drm_gem_object *obj,
                             struct drm_file *file_priv)
{
        struct radeon_bo *rbo = gem_to_radeon_bo(obj);
        struct radeon_device *rdev = rbo->rdev;
        struct radeon_fpriv *fpriv = file_priv->driver_priv;
        struct radeon_vm *vm = &fpriv->vm;
        struct radeon_bo_va *bo_va;
        int r;

        if ((rdev->family < CHIP_CAYMAN) ||
            (!rdev->accel_working)) {
                return;
        }

        r = radeon_bo_reserve(rbo, true);
        if (r) {
                dev_err(rdev->dev, "leaking bo va because "
                        "we failed to reserve the bo (%d)\n", r);
                return;
        }
        bo_va = radeon_vm_bo_find(vm, rbo);
        if (bo_va) {
                if (--bo_va->ref_count == 0) {
                        radeon_vm_bo_rmv(rdev, bo_va);
                }
        }
        radeon_bo_unreserve(rbo);
}

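/**
 * radeon_gem_handle_lockup - translate a lockup error into a GPU reset
 *
 * @rdev: radeon_device pointer
 * @r: error code returned by the operation
 *
 * -EDEADLK signals a GPU lockup; attempt a reset and, if it succeeds, ask
 * userspace to retry with -EAGAIN. All other codes pass through unchanged.
 */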
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
        if (r == -EDEADLK) {
                r = radeon_gpu_reset(rdev);
                if (!r)
                        r = -EAGAIN;
        }
        return r;
}

/*
 * GEM ioctls.
 */

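/**
 * radeon_gem_info_ioctl - report VRAM and GTT sizes to userspace
 *
 * @dev: DRM device
 * @data: drm_radeon_gem_info arguments
 * @filp: DRM file
 *
 * The visible VRAM and GART sizes are reduced by the currently pinned
 * amounts, i.e. reported as the space still available for new allocations.
 */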
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_info *args = data;
        struct ttm_resource_manager *man;

        man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);

        args->vram_size = (u64)man->size << PAGE_SHIFT;
        args->vram_visible = rdev->mc.visible_vram_size;
        args->vram_visible -= rdev->vram_pin_size;
        args->gart_size = rdev->mc.gtt_size;
        args->gart_size -= rdev->gart_pin_size;

        return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *filp)
{
        /* TODO: implement */
        DRM_ERROR("unimplemented %s\n", __func__);
        return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        /* TODO: implement */
        DRM_ERROR("unimplemented %s\n", __func__);
        return -ENOSYS;
}

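/**
 * radeon_gem_create_ioctl - create a GEM object and a handle to it
 *
 * @dev: DRM device
 * @data: drm_radeon_gem_create arguments
 * @filp: DRM file
 *
 * Rounds the requested size up to a page, creates the BO and installs a
 * handle for it in @filp; the creation reference is dropped once the
 * handle owns the object.
 *
 * A rough sketch of a userspace caller, assuming libdrm's
 * drmCommandWriteRead() wrapper (illustrative only, not part of this
 * driver):
 *
 *	struct drm_radeon_gem_create args = {
 *		.size = 4096,
 *		.alignment = 4096,
 *		.initial_domain = RADEON_GEM_DOMAIN_VRAM,
 *	};
 *	if (drmCommandWriteRead(fd, DRM_RADEON_GEM_CREATE,
 *				&args, sizeof(args)) == 0)
 *		use(args.handle);	// use() is a hypothetical consumer
 */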
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_create *args = data;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        down_read(&rdev->exclusive_lock);
        /* create a gem object to contain this object in */
        args->size = roundup(args->size, PAGE_SIZE);
        r = radeon_gem_object_create(rdev, args->size, args->alignment,
                                     args->initial_domain, args->flags,
                                     false, &gobj);
        if (r) {
                up_read(&rdev->exclusive_lock);
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put(gobj);
        if (r) {
                up_read(&rdev->exclusive_lock);
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        args->handle = handle;
        up_read(&rdev->exclusive_lock);
        return 0;
}

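/**
 * radeon_gem_userptr_ioctl - create a GEM object backed by user memory
 *
 * @dev: DRM device
 * @data: drm_radeon_gem_userptr arguments
 * @filp: DRM file
 *
 * Both the address and the size must be page aligned. READONLY mappings
 * require at least an R600; writable mappings must also pass ANONONLY and
 * REGISTER so that an MMU notifier keeps the pages coherent. VALIDATE
 * additionally binds the pages into the GTT right away.
 */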
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *filp)
{
        struct ttm_operation_ctx ctx = { true, false };
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_userptr *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *bo;
        uint32_t handle;
        int r;

        args->addr = untagged_addr(args->addr);

        if (offset_in_page(args->addr | args->size))
                return -EINVAL;

        /* reject unknown flag values */
        if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
            RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
            RADEON_GEM_USERPTR_REGISTER))
                return -EINVAL;

        if (args->flags & RADEON_GEM_USERPTR_READONLY) {
                /* readonly pages not tested on older hardware */
                if (rdev->family < CHIP_R600)
                        return -EINVAL;

        } else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
                   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

                /* if we want to write to it we must require anonymous
                 * memory and install an MMU notifier
                 */
                return -EACCES;
        }

        down_read(&rdev->exclusive_lock);

        /* create a gem object to contain this object in */
        r = radeon_gem_object_create(rdev, args->size, 0,
                                     RADEON_GEM_DOMAIN_CPU, 0,
                                     false, &gobj);
        if (r)
                goto handle_lockup;

        bo = gem_to_radeon_bo(gobj);
        r = radeon_ttm_tt_set_userptr(rdev, bo->tbo.ttm, args->addr, args->flags);
        if (r)
                goto release_object;

        if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
                r = radeon_mn_register(bo, args->addr);
                if (r)
                        goto release_object;
        }

        if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
                mmap_read_lock(current->mm);
                r = radeon_bo_reserve(bo, true);
                if (r) {
                        mmap_read_unlock(current->mm);
                        goto release_object;
                }

                radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                radeon_bo_unreserve(bo);
                mmap_read_unlock(current->mm);
                if (r)
                        goto release_object;
        }

        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put(gobj);
        if (r)
                goto handle_lockup;

        args->handle = handle;
        up_read(&rdev->exclusive_lock);
        return 0;

release_object:
        drm_gem_object_put(gobj);

handle_lockup:
        up_read(&rdev->exclusive_lock);
        r = radeon_gem_handle_lockup(rdev, r);

        return r;
}

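/**
 * radeon_gem_set_domain_ioctl - set the requested domain for a GEM object
 *
 * @dev: DRM device
 * @data: drm_radeon_gem_set_domain arguments
 * @filp: DRM file
 *
 * Looks up the handle and defers to radeon_gem_set_domain(); for now this
 * amounts to a BO wait when the CPU domain is requested.
 */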
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        /* transition the BO to a domain -
         * just validate the BO into a certain domain */
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_set_domain *args = data;
        struct drm_gem_object *gobj;
        int r;

        /* for now if someone requests domain CPU -
         * just make sure the buffer is finished with */
        down_read(&rdev->exclusive_lock);

        /* just do a BO wait for now */
        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL) {
                up_read(&rdev->exclusive_lock);
                return -ENOENT;
        }

        r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

        drm_gem_object_put(gobj);
        up_read(&rdev->exclusive_lock);
        r = radeon_gem_handle_lockup(rdev, r);
        return r;
}

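/**
 * radeon_mode_dumb_mmap - look up the fake mmap offset for a BO
 *
 * @filp: DRM file
 * @dev: DRM device
 * @handle: GEM handle of the BO
 * @offset_p: where to store the mmap offset
 *
 * Userptr BOs cannot be mapped through the fake offset and yield -EPERM.
 */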
int radeon_mode_dumb_mmap(struct drm_file *filp,
                          struct drm_device *dev,
                          uint32_t handle, uint64_t *offset_p)
{
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;

        gobj = drm_gem_object_lookup(filp, handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
        if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) {
                drm_gem_object_put(gobj);
                return -EPERM;
        }
        *offset_p = radeon_bo_mmap_offset(robj);
        drm_gem_object_put(gobj);
        return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct drm_radeon_gem_mmap *args = data;

        return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

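/**
 * radeon_gem_busy_ioctl - check whether a GEM object is still in use
 *
 * @dev: DRM device
 * @data: drm_radeon_gem_busy arguments
 * @filp: DRM file
 *
 * Returns -EBUSY while fences are still pending on the BO's reservation
 * object, and reports the BO's current placement domain either way.
 */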
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct drm_radeon_gem_busy *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;
        uint32_t cur_placement = 0;

        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);

        r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
        if (r == 0)
                r = -EBUSY;
        else
                r = 0;

        cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
        args->domain = radeon_mem_type_to_domain(cur_placement);
        drm_gem_object_put(gobj);
        return r;
}

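/**
 * radeon_gem_wait_idle_ioctl - wait for a GEM object to go idle
 *
 * @dev: DRM device
 * @data: drm_radeon_gem_wait_idle arguments
 * @filp: DRM file
 *
 * Waits up to 30 seconds for all fences on the BO, then flushes the HDP
 * cache via MMIO if the BO lives in VRAM and the ASIC requires it.
 */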
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r = 0;
        uint32_t cur_placement = 0;
        long ret;

        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);

        ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
        if (ret == 0)
                r = -EBUSY;
        else if (ret < 0)
                r = ret;

        /* Flush HDP cache via MMIO if necessary */
        cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
        if (rdev->asic->mmio_hdp_flush &&
            radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
                robj->rdev->asic->mmio_hdp_flush(rdev);
        drm_gem_object_put(gobj);
        r = radeon_gem_handle_lockup(rdev, r);
        return r;
}

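/**
 * radeon_gem_set_tiling_ioctl - set tiling flags and pitch for a BO
 *
 * @dev: DRM device
 * @data: drm_radeon_gem_set_tiling arguments
 * @filp: DRM file
 */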
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct drm_radeon_gem_set_tiling *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r = 0;

        DRM_DEBUG("%d\n", args->handle);
        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
        drm_gem_object_put(gobj);
        return r;
}

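/**
 * radeon_gem_get_tiling_ioctl - query tiling flags and pitch of a BO
 *
 * @dev: DRM device
 * @data: drm_radeon_gem_get_tiling arguments
 * @filp: DRM file
 */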
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct drm_radeon_gem_get_tiling *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *rbo;
        int r = 0;

        DRM_DEBUG("\n");
        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        rbo = gem_to_radeon_bo(gobj);
        r = radeon_bo_reserve(rbo, false);
        if (unlikely(r != 0))
                goto out;
        radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
        radeon_bo_unreserve(rbo);
out:
        drm_gem_object_put(gobj);
        return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
                                    struct radeon_bo_va *bo_va)
{
        struct ttm_validate_buffer tv, *entry;
        struct radeon_bo_list *vm_bos;
        struct ww_acquire_ctx ticket;
        struct list_head list;
        unsigned domain;
        int r;

        INIT_LIST_HEAD(&list);

        tv.bo = &bo_va->bo->tbo;
        tv.num_shared = 1;
        list_add(&tv.head, &list);

        vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
        if (!vm_bos)
                return;

        r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
        if (r)
                goto error_free;

        list_for_each_entry(entry, &list, head) {
                domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
                /* if anything is swapped out don't swap it in here,
                 * just abort and wait for the next CS
                 */
                if (domain == RADEON_GEM_DOMAIN_CPU)
                        goto error_unreserve;
        }

        mutex_lock(&bo_va->vm->mutex);
        r = radeon_vm_clear_freed(rdev, bo_va->vm);
        if (r)
                goto error_unlock;

        if (bo_va->it.start)
                r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
        mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
        ttm_eu_backoff_reservation(&ticket, &list);

error_free:
        kvfree(vm_bos);

        if (r && r != -ERESTARTSYS)
                DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

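/**
 * radeon_gem_va_ioctl - map or unmap a BO in the per-file GPU address space
 *
 * @dev: DRM device
 * @data: drm_radeon_gem_va arguments
 * @filp: DRM file
 *
 * Validates the requested operation, offset and flags, updates the bo_va
 * for the file's VM and reports the result in args->operation. Mapping an
 * already mapped BO returns its existing offset with
 * RADEON_VA_RESULT_VA_EXIST.
 */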
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct drm_radeon_gem_va *args = data;
        struct drm_gem_object *gobj;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_fpriv *fpriv = filp->driver_priv;
        struct radeon_bo *rbo;
        struct radeon_bo_va *bo_va;
        u32 invalid_flags;
        int r = 0;

        if (!rdev->vm_manager.enabled) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -ENOTTY;
        }

        /* !! DONT REMOVE !!
         * We don't support vm_id yet. To be sure we don't have broken
         * userspace, reject anyone trying to use a non-zero value; that
         * way we can start using those fields later without breaking
         * existing userspace.
         */
        if (args->vm_id) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        if (args->offset < RADEON_VA_RESERVED_SIZE) {
                dev_err(&dev->pdev->dev,
                        "offset 0x%lX is in reserved area 0x%X\n",
                        (unsigned long)args->offset,
                        RADEON_VA_RESERVED_SIZE);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        /* don't remove, we need to enforce userspace to set the snooped flag,
         * otherwise we will end up with broken userspace and we won't be able
         * to enable this feature without adding a new interface
         */
        invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
        if ((args->flags & invalid_flags)) {
                dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
                        args->flags, invalid_flags);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        switch (args->operation) {
        case RADEON_VA_MAP:
        case RADEON_VA_UNMAP:
                break;
        default:
                dev_err(&dev->pdev->dev, "unsupported operation %d\n",
                        args->operation);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -ENOENT;
        }
        rbo = gem_to_radeon_bo(gobj);
        r = radeon_bo_reserve(rbo, false);
        if (r) {
                args->operation = RADEON_VA_RESULT_ERROR;
                drm_gem_object_put(gobj);
                return r;
        }
        bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
        if (!bo_va) {
                args->operation = RADEON_VA_RESULT_ERROR;
                radeon_bo_unreserve(rbo);
                drm_gem_object_put(gobj);
                return -ENOENT;
        }

        switch (args->operation) {
        case RADEON_VA_MAP:
                if (bo_va->it.start) {
                        args->operation = RADEON_VA_RESULT_VA_EXIST;
                        args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
                        radeon_bo_unreserve(rbo);
                        goto out;
                }
                r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
                break;
        case RADEON_VA_UNMAP:
                r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
                break;
        default:
                break;
        }
        if (!r)
                radeon_gem_va_update_vm(rdev, bo_va);
        args->operation = RADEON_VA_RESULT_OK;
        if (r) {
                args->operation = RADEON_VA_RESULT_ERROR;
        }
out:
        drm_gem_object_put(gobj);
        return r;
}

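/**
 * radeon_gem_op_ioctl - get or set the initial placement domain of a BO
 *
 * @dev: DRM device
 * @data: drm_radeon_gem_op arguments
 * @filp: DRM file
 *
 * Not allowed on userptr BOs. Setting masks the value down to the valid
 * VRAM/GTT/CPU domain bits.
 */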
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct drm_radeon_gem_op *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;

        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);

        r = -EPERM;
        if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm))
                goto out;

        r = radeon_bo_reserve(robj, false);
        if (unlikely(r))
                goto out;

        switch (args->op) {
        case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
                args->value = robj->initial_domain;
                break;
        case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
                robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
                                                      RADEON_GEM_DOMAIN_GTT |
                                                      RADEON_GEM_DOMAIN_CPU);
                break;
        default:
                r = -EINVAL;
        }

        radeon_bo_unreserve(robj);
out:
        drm_gem_object_put(gobj);
        return r;
}

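/**
 * radeon_mode_dumb_create - create a dumb buffer for scanout
 *
 * @file_priv: DRM file
 * @dev: DRM device
 * @args: dumb buffer parameters
 *
 * Computes an aligned pitch and page-aligned size, then allocates the
 * buffer in VRAM and returns a handle to it.
 */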
int radeon_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        args->pitch = radeon_align_pitch(rdev, args->width,
                                         DIV_ROUND_UP(args->bpp, 8), 0);
        args->size = args->pitch * args->height;
        args->size = ALIGN(args->size, PAGE_SIZE);

        r = radeon_gem_object_create(rdev, args->size, 0,
                                     RADEON_GEM_DOMAIN_VRAM, 0,
                                     false, &gobj);
        if (r)
                return -ENOMEM;

        r = drm_gem_handle_create(file_priv, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put(gobj);
        if (r) {
                return r;
        }
        args->handle = handle;
        return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_bo *rbo;
        unsigned i = 0;

        mutex_lock(&rdev->gem.mutex);
        list_for_each_entry(rbo, &rdev->gem.objects, list) {
                unsigned domain;
                const char *placement;

                domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
                switch (domain) {
                case RADEON_GEM_DOMAIN_VRAM:
                        placement = "VRAM";
                        break;
                case RADEON_GEM_DOMAIN_GTT:
                        placement = " GTT";
                        break;
                case RADEON_GEM_DOMAIN_CPU:
                default:
                        placement = " CPU";
                        break;
                }
                seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
                           i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
                           placement, (unsigned long)rbo->pid);
                i++;
        }
        mutex_unlock(&rdev->gem.mutex);
        return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
        {"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
        return 0;
}