/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

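/*
 * amdgpu_gem_object_free - GEM object free callback
 *
 * Unregisters any MMU notifier attached to the buffer (userptr BOs) and
 * drops the driver's reference to the backing amdgpu_bo.
 */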
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}

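/*
 * amdgpu_gem_object_create - allocate a BO and wrap it in a GEM object
 *
 * Aligns the request to at least page size, rejects GTT/VRAM requests
 * larger than the unpinned GTT, and retries a failed VRAM placement with
 * VRAM|GTT.  Illustrative call (a sketch; the real callers are the ioctl
 * handlers below, which pass userspace-supplied domains and flags):
 *
 *	struct drm_gem_object *gobj;
 *	int r = amdgpu_gem_object_create(adev, PAGE_SIZE, 0,
 *					 AMDGPU_GEM_DOMAIN_GTT, 0,
 *					 false, &gobj);
 */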
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
		/* Maximum bo size is the unpinned gtt size since we use the gtt to
		 * handle vram to system pool migrations.
		 */
		max_size = adev->mc.gtt_size - adev->gart_pin_size;
		if (size > max_size) {
			DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
				  size >> 20, max_size >> 20);
			return -ENOMEM;
		}
	}
retry:
	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;

	return 0;
}

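/*
 * amdgpu_gem_force_release - release all GEM handles on device teardown
 *
 * Walks every open DRM file and destroys any GEM objects userspace still
 * holds handles for, warning once per kind of leak.  Only expected to do
 * real work when the device goes away while clients are still active.
 */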
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_unreference_unlocked(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = abo->adev;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	int r;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(abo);
	return 0;
}

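/*
 * amdgpu_gem_object_close - drop the per-VM mapping reference
 *
 * Counterpart to amdgpu_gem_object_open: reserves the BO together with
 * the VM page directory, drops one bo_va reference and removes the bo_va
 * entirely once the count reaches zero.
 */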
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = bo->adev;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list, duplicates;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_va *bo_va;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
	if (r) {
		dev_err(adev->dev, "leaking bo va because we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			amdgpu_vm_bo_rmv(adev, bo_va);
		}
	}
	ttm_eu_backoff_reservation(&ticket, &list);
}

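/*
 * amdgpu_gem_handle_lockup - translate a detected lockup into a reset
 *
 * A -EDEADLK from the paths below signals a wedged GPU; attempt a reset
 * and convert the error to -EAGAIN so that userspace retries the ioctl.
 */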
static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
{
	if (r == -EDEADLK) {
		r = amdgpu_gpu_reset(adev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_gem_create *args = data;
	uint64_t size = args->in.bo_size;
	struct drm_gem_object *gobj;
	uint32_t handle;
	bool kernel = false;
	int r;

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		kernel = true;
		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
			size = size << AMDGPU_GDS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
			size = size << AMDGPU_GWS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
			size = size << AMDGPU_OA_SHIFT;
		else {
			r = -EINVAL;
			goto error_unlock;
		}
	}
	size = roundup(size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     args->in.domain_flags,
				     kernel, &gobj);
	if (r)
		goto error_unlock;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto error_unlock;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;

error_unlock:
	r = amdgpu_gem_handle_lockup(adev, r);
	return r;
}

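/*
 * amdgpu_gem_userptr_ioctl - wrap anonymous user memory in a GEM object
 *
 * Illustrative userspace call (a sketch using libdrm's generic ioctl
 * wrapper; addr and size must both be page aligned, and writeable
 * mappings require the REGISTER flag so an MMU notifier gets installed):
 *
 *	struct drm_amdgpu_gem_userptr args = {
 *		.addr = (uintptr_t)buf,
 *		.size = buf_size,
 *		.flags = AMDGPU_GEM_USERPTR_ANONONLY |
 *			 AMDGPU_GEM_USERPTR_REGISTER |
 *			 AMDGPU_GEM_USERPTR_VALIDATE,
 *	};
 *	r = drmCommandWriteRead(fd, DRM_AMDGPU_GEM_USERPTR,
 *				&args, sizeof(args));
 */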
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	    !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must install a MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_CPU, 0,
				     0, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_amdgpu_bo(gobj);
	bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);

		r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
						 bo->tbo.ttm->pages);
		if (r)
			goto unlock_mmap_sem;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto free_pages;

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto free_pages;

		up_read(&current->mm->mmap_sem);
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	return 0;

free_pages:
	release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);

unlock_mmap_sem:
	up_read(&current->mm->mmap_sem);

release_object:
	drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
	r = amdgpu_gem_handle_lockup(adev, r);

	return r;
}

int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_unreference_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}
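
/*
 * Worked example (illustrative): with HZ=250, an absolute timeout one
 * second in the future leaves ktime_sub(...) == 10^9 ns, and
 * nsecs_to_jiffies(10^9) == 250 jiffies.  A timeout already in the past
 * returns 0 (poll once), and any value that is negative as an int64_t is
 * treated as "wait forever" via MAX_SCHEDULE_TIMEOUT.
 */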

int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (timeout == 0)
		ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
	else
		ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_unreference_unlocked(gobj);
	r = amdgpu_gem_handle_lockup(adev, r);
	return r;
}

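/*
 * amdgpu_gem_metadata_ioctl - get or set per-BO tiling flags and metadata
 *
 * GET returns the tiling flags plus the opaque metadata blob; SET
 * validates that the blob fits into the fixed-size args buffer before
 * storing it on the BO.
 */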
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to update
 * @operation: map or unmap operation
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_bo_va *bo_va, uint32_t operation)
{
	struct ttm_validate_buffer tv, *entry;
	struct amdgpu_bo_list_entry vm_pd;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo_va->bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd);

	/* Provide duplicates to avoid -EALREADY */
	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		goto error_print;

	amdgpu_vm_get_pt_bos(adev, bo_va->vm, &duplicates);
	list_for_each_entry(entry, &list, head) {
		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == AMDGPU_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}
	list_for_each_entry(entry, &duplicates, head) {
		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == AMDGPU_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
	if (r)
		goto error_unreserve;

	r = amdgpu_vm_clear_freed(adev, bo_va->vm);
	if (r)
		goto error_unreserve;

	if (operation == AMDGPU_VA_OP_MAP)
		r = amdgpu_vm_bo_update(adev, bo_va, false);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_print:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
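/*
 * amdgpu_gem_va_ioctl - map or unmap a BO in the process GPU address space
 *
 * Validates the requested VA range and flags, reserves the BO together
 * with the page directory, applies the MAP or UNMAP operation and, unless
 * delayed updates were requested, flushes it via amdgpu_gem_va_update_vm().
 */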
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct ttm_validate_buffer tv, tv_pd;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	uint32_t invalid_flags, va_flags = 0;
	int r = 0;

	if (!adev->vm_manager.enabled)
		return -ENOTTY;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"va_address 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->va_address,
			AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	invalid_flags = ~(AMDGPU_VM_DELAY_UPDATE | AMDGPU_VM_PAGE_READABLE |
			AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE);
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	abo = gem_to_amdgpu_bo(gobj);
	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
	tv.bo = &abo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	tv_pd.bo = &fpriv->vm.page_directory->tbo;
	tv_pd.shared = true;
	list_add(&tv_pd.head, &list);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r) {
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}

	bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
	if (!bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		if (args->flags & AMDGPU_VM_PAGE_READABLE)
			va_flags |= AMDGPU_PTE_READABLE;
		if (args->flags & AMDGPU_VM_PAGE_WRITEABLE)
			va_flags |= AMDGPU_PTE_WRITEABLE;
		if (args->flags & AMDGPU_VM_PAGE_EXECUTABLE)
			va_flags |= AMDGPU_PTE_EXECUTABLE;
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;
	default:
		break;
	}
	ttm_eu_backoff_reservation(&ticket, &list);
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
	    !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, bo_va, args->operation);

	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

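/*
 * amdgpu_gem_op_ioctl - query creation info or adjust BO placement
 *
 * GET_GEM_CREATE_INFO copies size, alignment, domains and flags back to
 * userspace; SET_PLACEMENT rewrites the preferred domains (refused for
 * userptr BOs, which must stay in GTT).
 */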
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = (void __user *)(long)args->value;

		info.bo_size = robj->gem_base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->prefered_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->prefered_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

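/*
 * amdgpu_mode_dumb_create - create a CPU-mappable dumb buffer in VRAM
 *
 * Worked example (illustrative): for a 1920x1080 buffer at bpp=32,
 * (bpp + 1) / 8 == 4 bytes per pixel, so the pitch is the hardware
 * aligned width times 4 and the total size is pitch * height rounded up
 * to a whole page.
 */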
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
	struct drm_gem_object *gobj = ptr;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct seq_file *m = data;

	unsigned domain;
	const char *placement;
	unsigned pin_count;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}
	seq_printf(m, "\t0x%08x: %12ld byte %s @ 0x%010Lx",
		   id, amdgpu_bo_size(bo), placement,
		   amdgpu_bo_gpu_offset(bo));

	pin_count = ACCESS_ONCE(bo->pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);
	seq_printf(m, "\n");

	return 0;
}

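/*
 * amdgpu_debugfs_gem_info - dump every client's GEM objects
 *
 * Iterates all open DRM files and prints one amdgpu_debugfs_gem_bo_info
 * line per object, grouped under the owning process name and pid.
 */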
static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}