1 /*
2 * Copyright 2014-2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22 #include <linux/dma-buf.h>
23 #include <linux/list.h>
24 #include <linux/pagemap.h>
25 #include <linux/sched/mm.h>
26 #include <linux/sched/task.h>
27
28 #include "amdgpu_object.h"
29 #include "amdgpu_vm.h"
30 #include "amdgpu_amdkfd.h"
31 #include "amdgpu_dma_buf.h"
32 #include <uapi/linux/kfd_ioctl.h>
33
34 /* BO flag to indicate a KFD userptr BO */
35 #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
36
37 /* Userptr restore delay, just long enough to allow consecutive VM
38 * changes to accumulate
39 */
40 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
41
42 /* Impose limit on how much memory KFD can use */
43 static struct {
44 uint64_t max_system_mem_limit;
45 uint64_t max_ttm_mem_limit;
46 int64_t system_mem_used;
47 int64_t ttm_mem_used;
48 spinlock_t mem_limit_lock;
49 } kfd_mem_limit;
50
51 static const char * const domain_bit_to_string[] = {
52 "CPU",
53 "GTT",
54 "VRAM",
55 "GDS",
56 "GWS",
57 "OA"
58 };
59
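/* Map a GEM domain bitmask to a printable name, e.g. AMDGPU_GEM_DOMAIN_VRAM -> "VRAM" */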
60 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
61
62 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
63
64
65 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
66 {
67 return (struct amdgpu_device *)kgd;
68 }
69
70 static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
71 struct kgd_mem *mem)
72 {
73 struct kfd_bo_va_list *entry;
74
75 list_for_each_entry(entry, &mem->bo_va_list, bo_list)
76 if (entry->bo_va->base.vm == avm)
77 return false;
78
79 return true;
80 }
81
82 /* Set memory usage limits. Currently, the limits are
83 * System (TTM + userptr) memory - 15/16th System RAM
84 * TTM memory - 3/8th System RAM
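 * For example, with roughly 64 GiB of system RAM this gives a ~60 GiB
 * system memory limit and a ~24 GiB TTM limit.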
85 */
86 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
87 {
88 struct sysinfo si;
89 uint64_t mem;
90
91 si_meminfo(&si);
92 mem = si.totalram - si.totalhigh;
93 mem *= si.mem_unit;
94
95 spin_lock_init(&kfd_mem_limit.mem_limit_lock);
96 kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
97 kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
98 pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
99 (kfd_mem_limit.max_system_mem_limit >> 20),
100 (kfd_mem_limit.max_ttm_mem_limit >> 20));
101 }
102
103 /* Estimate page table size needed to represent a given memory size
104 *
105 * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
106 * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
107 * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
108 * for 2MB pages for TLB efficiency. However, small allocations and
109 * fragmented system memory still need some 4KB pages. We choose a
110 * compromise that should work in most cases without reserving too
111 * much memory for page tables unnecessarily (factor 16K, >> 14).
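 * For example, ESTIMATE_PT_SIZE(64 GiB) = 64 GiB >> 14 = 4 MiB of page
 * table memory.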
112 */
113 #define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
114
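/* Check a requested allocation against the system, TTM and VRAM limits
 * and, if it fits, account for it. GTT BOs count fully against both the
 * system and TTM limits; userptr BOs count only their accounting overhead
 * against the TTM limit; VRAM BOs count against the VRAM budget minus the
 * memory reserved for page tables.
 */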
115 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
116 uint64_t size, u32 domain, bool sg)
117 {
118 uint64_t reserved_for_pt =
119 ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
120 size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
121 int ret = 0;
122
123 acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
124 sizeof(struct amdgpu_bo));
125
126 vram_needed = 0;
127 if (domain == AMDGPU_GEM_DOMAIN_GTT) {
128 /* TTM GTT memory */
129 system_mem_needed = acc_size + size;
130 ttm_mem_needed = acc_size + size;
131 } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
132 /* Userptr */
133 system_mem_needed = acc_size + size;
134 ttm_mem_needed = acc_size;
135 } else {
136 /* VRAM and SG */
137 system_mem_needed = acc_size;
138 ttm_mem_needed = acc_size;
139 if (domain == AMDGPU_GEM_DOMAIN_VRAM)
140 vram_needed = size;
141 }
142
143 spin_lock(&kfd_mem_limit.mem_limit_lock);
144
145 if (kfd_mem_limit.system_mem_used + system_mem_needed >
146 kfd_mem_limit.max_system_mem_limit)
147 pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
148
149 if ((kfd_mem_limit.system_mem_used + system_mem_needed >
150 kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
151 (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
152 kfd_mem_limit.max_ttm_mem_limit) ||
153 (adev->kfd.vram_used + vram_needed >
154 adev->gmc.real_vram_size - reserved_for_pt)) {
155 ret = -ENOMEM;
156 } else {
157 kfd_mem_limit.system_mem_used += system_mem_needed;
158 kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
159 adev->kfd.vram_used += vram_needed;
160 }
161
162 spin_unlock(&kfd_mem_limit.mem_limit_lock);
163 return ret;
164 }
165
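/* Undo the accounting done in amdgpu_amdkfd_reserve_mem_limit(). Must be
 * called with the same size/domain/sg combination as the corresponding
 * reserve call so the counters stay balanced.
 */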
166 static void unreserve_mem_limit(struct amdgpu_device *adev,
167 uint64_t size, u32 domain, bool sg)
168 {
169 size_t acc_size;
170
171 acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
172 sizeof(struct amdgpu_bo));
173
174 spin_lock(&kfd_mem_limit.mem_limit_lock);
175 if (domain == AMDGPU_GEM_DOMAIN_GTT) {
176 kfd_mem_limit.system_mem_used -= (acc_size + size);
177 kfd_mem_limit.ttm_mem_used -= (acc_size + size);
178 } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
179 kfd_mem_limit.system_mem_used -= (acc_size + size);
180 kfd_mem_limit.ttm_mem_used -= acc_size;
181 } else {
182 kfd_mem_limit.system_mem_used -= acc_size;
183 kfd_mem_limit.ttm_mem_used -= acc_size;
184 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
185 adev->kfd.vram_used -= size;
186 WARN_ONCE(adev->kfd.vram_used < 0,
187 "kfd VRAM memory accounting unbalanced");
188 }
189 }
190 WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
191 "kfd system memory accounting unbalanced");
192 WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
193 "kfd TTM memory accounting unbalanced");
194
195 spin_unlock(&kfd_mem_limit.mem_limit_lock);
196 }
197
198 void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
199 {
200 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
201 u32 domain = bo->preferred_domains;
202 bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
203
204 if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
205 domain = AMDGPU_GEM_DOMAIN_CPU;
206 sg = false;
207 }
208
209 unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
210 }
211
212
213 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
214 * reservation object.
215 *
216 * @bo: [IN] Remove eviction fence(s) from this BO
217 * @ef: [IN] This eviction fence is removed if it
218 * is present in the shared list.
219 *
220 * NOTE: Must be called with the BO reserved, i.e. bo->tbo.base.resv locked.
221 */
222 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
223 struct amdgpu_amdkfd_fence *ef)
224 {
225 struct dma_resv *resv = bo->tbo.base.resv;
226 struct dma_resv_list *old, *new;
227 unsigned int i, j, k;
228
229 if (!ef)
230 return -EINVAL;
231
232 old = dma_resv_get_list(resv);
233 if (!old)
234 return 0;
235
236 new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
237 GFP_KERNEL);
238 if (!new)
239 return -ENOMEM;
240
241 /* Go through all the shared fences in the reservation object and sort
242 * the interesting ones to the end of the list.
243 */
244 for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
245 struct dma_fence *f;
246
247 f = rcu_dereference_protected(old->shared[i],
248 dma_resv_held(resv));
249
250 if (f->context == ef->base.context)
251 RCU_INIT_POINTER(new->shared[--j], f);
252 else
253 RCU_INIT_POINTER(new->shared[k++], f);
254 }
255 new->shared_max = old->shared_max;
256 new->shared_count = k;
257
258 /* Install the new fence list, seqcount provides the barriers */
259 write_seqcount_begin(&resv->seq);
260 RCU_INIT_POINTER(resv->fence, new);
261 write_seqcount_end(&resv->seq);
262
263 /* Drop the references to the removed fences */
264 for (i = j, k = 0; i < old->shared_count; ++i) {
265 struct dma_fence *f;
266
267 f = rcu_dereference_protected(new->shared[i],
268 dma_resv_held(resv));
269 dma_fence_put(f);
270 }
271 kfree_rcu(old, rcu);
272
273 return 0;
274 }
275
276 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
277 {
278 struct amdgpu_bo *root = bo;
279 struct amdgpu_vm_bo_base *vm_bo;
280 struct amdgpu_vm *vm;
281 struct amdkfd_process_info *info;
282 struct amdgpu_amdkfd_fence *ef;
283 int ret;
284
285 /* We can always get vm_bo from the root PD bo. */
286 while (root->parent)
287 root = root->parent;
288
289 vm_bo = root->vm_bo;
290 if (!vm_bo)
291 return 0;
292
293 vm = vm_bo->vm;
294 if (!vm)
295 return 0;
296
297 info = vm->process_info;
298 if (!info || !info->eviction_fence)
299 return 0;
300
301 ef = container_of(dma_fence_get(&info->eviction_fence->base),
302 struct amdgpu_amdkfd_fence, base);
303
304 BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
305 ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
306 dma_resv_unlock(bo->tbo.base.resv);
307
308 dma_fence_put(&ef->base);
309 return ret;
310 }
311
312 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
313 bool wait)
314 {
315 struct ttm_operation_ctx ctx = { false, false };
316 int ret;
317
318 if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
319 "Called with userptr BO"))
320 return -EINVAL;
321
322 amdgpu_bo_placement_from_domain(bo, domain);
323
324 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
325 if (ret)
326 goto validate_fail;
327 if (wait)
328 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
329
330 validate_fail:
331 return ret;
332 }
333
334 static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
335 {
336 return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
337 }
338
339 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
340 *
341 * Page directories are not updated here because huge page handling
342 * during page table updates can invalidate page directory entries
343 * again. Page directories are only updated after updating page
344 * tables.
345 */
346 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
347 {
348 struct amdgpu_bo *pd = vm->root.base.bo;
349 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
350 int ret;
351
352 ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
353 if (ret) {
354 pr_err("failed to validate PT BOs\n");
355 return ret;
356 }
357
358 ret = amdgpu_amdkfd_validate_vm_bo(NULL, pd);
359 if (ret) {
360 pr_err("failed to validate PD\n");
361 return ret;
362 }
363
364 vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
365
366 if (vm->use_cpu_for_update) {
367 ret = amdgpu_bo_kmap(pd, NULL);
368 if (ret) {
369 pr_err("failed to kmap PD, ret=%d\n", ret);
370 return ret;
371 }
372 }
373
374 return 0;
375 }
376
377 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
378 {
379 struct amdgpu_bo *pd = vm->root.base.bo;
380 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
381 int ret;
382
383 ret = amdgpu_vm_update_pdes(adev, vm, false);
384 if (ret)
385 return ret;
386
387 return amdgpu_sync_fence(sync, vm->last_update);
388 }
389
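/* Derive the ASIC-specific PTE flags (read/write/execute permission and
 * memory type) for a KFD allocation from its KFD_IOC_ALLOC_MEM_FLAGS_* bits.
 */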
390 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
391 {
392 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
393 bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
394 uint32_t mapping_flags;
395
396 mapping_flags = AMDGPU_VM_PAGE_READABLE;
397 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
398 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
399 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
400 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
401
402 switch (adev->asic_type) {
403 case CHIP_ARCTURUS:
404 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
405 if (bo_adev == adev)
406 mapping_flags |= coherent ?
407 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
408 else
409 mapping_flags |= AMDGPU_VM_MTYPE_UC;
410 } else {
411 mapping_flags |= coherent ?
412 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
413 }
414 break;
415 default:
416 mapping_flags |= coherent ?
417 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
418 }
419
420 return amdgpu_gem_va_map_flags(adev, mapping_flags);
421 }
422
423 /* add_bo_to_vm - Add a BO to a VM
424 *
425 * Everything that needs to be done only once when a BO is first added
426 * to a VM. It can later be mapped and unmapped many times without
427 * repeating these steps.
428 *
429 * 1. Allocate and initialize BO VA entry data structure
430 * 2. Add BO to the VM
431 * 3. Determine ASIC-specific PTE flags
432 * 4. Alloc page tables and directories if needed
433 * 4a. Validate new page tables and directories
434 */
435 static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
436 struct amdgpu_vm *vm, bool is_aql,
437 struct kfd_bo_va_list **p_bo_va_entry)
438 {
439 int ret;
440 struct kfd_bo_va_list *bo_va_entry;
441 struct amdgpu_bo *bo = mem->bo;
442 uint64_t va = mem->va;
443 struct list_head *list_bo_va = &mem->bo_va_list;
444 unsigned long bo_size = bo->tbo.mem.size;
445
446 if (!va) {
447 pr_err("Invalid VA when adding BO to VM\n");
448 return -EINVAL;
449 }
450
451 if (is_aql)
452 va += bo_size;
453
454 bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
455 if (!bo_va_entry)
456 return -ENOMEM;
457
458 pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
459 va + bo_size, vm);
460
461 /* Add BO to VM internal data structures */
462 bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
463 if (!bo_va_entry->bo_va) {
464 ret = -EINVAL;
465 pr_err("Failed to add BO object to VM. ret == %d\n",
466 ret);
467 goto err_vmadd;
468 }
469
470 bo_va_entry->va = va;
471 bo_va_entry->pte_flags = get_pte_flags(adev, mem);
472 bo_va_entry->kgd_dev = (void *)adev;
473 list_add(&bo_va_entry->bo_list, list_bo_va);
474
475 if (p_bo_va_entry)
476 *p_bo_va_entry = bo_va_entry;
477
478 /* Allocate and validate page tables if needed */
479 ret = vm_validate_pt_pd_bos(vm);
480 if (ret) {
481 pr_err("validate_pt_pd_bos() failed\n");
482 goto err_alloc_pts;
483 }
484
485 return 0;
486
487 err_alloc_pts:
488 amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
489 list_del(&bo_va_entry->bo_list);
490 err_vmadd:
491 kfree(bo_va_entry);
492 return ret;
493 }
494
495 static void remove_bo_from_vm(struct amdgpu_device *adev,
496 struct kfd_bo_va_list *entry, unsigned long size)
497 {
498 pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
499 entry->va,
500 entry->va + size, entry);
501 amdgpu_vm_bo_rmv(adev, entry->bo_va);
502 list_del(&entry->bo_list);
503 kfree(entry);
504 }
505
506 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
507 struct amdkfd_process_info *process_info,
508 bool userptr)
509 {
510 struct ttm_validate_buffer *entry = &mem->validate_list;
511 struct amdgpu_bo *bo = mem->bo;
512
513 INIT_LIST_HEAD(&entry->head);
514 entry->num_shared = 1;
515 entry->bo = &bo->tbo;
516 mutex_lock(&process_info->lock);
517 if (userptr)
518 list_add_tail(&entry->head, &process_info->userptr_valid_list);
519 else
520 list_add_tail(&entry->head, &process_info->kfd_bo_list);
521 mutex_unlock(&process_info->lock);
522 }
523
524 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
525 struct amdkfd_process_info *process_info)
526 {
527 struct ttm_validate_buffer *bo_list_entry;
528
529 bo_list_entry = &mem->validate_list;
530 mutex_lock(&process_info->lock);
531 list_del(&bo_list_entry->head);
532 mutex_unlock(&process_info->lock);
533 }
534
535 /* Initializes user pages. It registers the MMU notifier and validates
536 * the userptr BO in the GTT domain.
537 *
538 * The BO must already be on the userptr_valid_list. Otherwise an
539 * eviction and restore may happen that leaves the new BO unmapped
540 * with the user mode queues running.
541 *
542 * Takes the process_info->lock to protect against concurrent restore
543 * workers.
544 *
545 * Returns 0 for success, negative errno for errors.
546 */
547 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
548 {
549 struct amdkfd_process_info *process_info = mem->process_info;
550 struct amdgpu_bo *bo = mem->bo;
551 struct ttm_operation_ctx ctx = { true, false };
552 int ret = 0;
553
554 mutex_lock(&process_info->lock);
555
556 ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
557 if (ret) {
558 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
559 goto out;
560 }
561
562 ret = amdgpu_mn_register(bo, user_addr);
563 if (ret) {
564 pr_err("%s: Failed to register MMU notifier: %d\n",
565 __func__, ret);
566 goto out;
567 }
568
569 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
570 if (ret) {
571 pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
572 goto unregister_out;
573 }
574
575 ret = amdgpu_bo_reserve(bo, true);
576 if (ret) {
577 pr_err("%s: Failed to reserve BO\n", __func__);
578 goto release_out;
579 }
580 amdgpu_bo_placement_from_domain(bo, mem->domain);
581 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
582 if (ret)
583 pr_err("%s: failed to validate BO\n", __func__);
584 amdgpu_bo_unreserve(bo);
585
586 release_out:
587 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
588 unregister_out:
589 if (ret)
590 amdgpu_mn_unregister(bo);
591 out:
592 mutex_unlock(&process_info->lock);
593 return ret;
594 }
595
596 /* Reserving a BO and its page table BOs must happen atomically to
597 * avoid deadlocks. Some operations update multiple VMs at once. Track
598 * all the reservation info in a context structure. Optionally a sync
599 * object can track VM updates.
600 */
601 struct bo_vm_reservation_context {
602 struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
603 unsigned int n_vms; /* Number of VMs reserved */
604 struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries */
605 struct ww_acquire_ctx ticket; /* Reservation ticket */
606 struct list_head list, duplicates; /* BO lists */
607 struct amdgpu_sync *sync; /* Pointer to sync object */
608 bool reserved; /* Whether BOs are reserved */
609 };
610
611 enum bo_vm_match {
612 BO_VM_NOT_MAPPED = 0, /* Match VMs where a BO is not mapped */
613 BO_VM_MAPPED, /* Match VMs where a BO is mapped */
614 BO_VM_ALL, /* Match all VMs a BO was added to */
615 };
616
617 /**
618 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
619 * @mem: KFD BO structure.
620 * @vm: the VM to reserve.
621 * @ctx: the struct that will be used in unreserve_bo_and_vms().
622 */
623 static int reserve_bo_and_vm(struct kgd_mem *mem,
624 struct amdgpu_vm *vm,
625 struct bo_vm_reservation_context *ctx)
626 {
627 struct amdgpu_bo *bo = mem->bo;
628 int ret;
629
630 WARN_ON(!vm);
631
632 ctx->reserved = false;
633 ctx->n_vms = 1;
634 ctx->sync = &mem->sync;
635
636 INIT_LIST_HEAD(&ctx->list);
637 INIT_LIST_HEAD(&ctx->duplicates);
638
639 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
640 if (!ctx->vm_pd)
641 return -ENOMEM;
642
643 ctx->kfd_bo.priority = 0;
644 ctx->kfd_bo.tv.bo = &bo->tbo;
645 ctx->kfd_bo.tv.num_shared = 1;
646 list_add(&ctx->kfd_bo.tv.head, &ctx->list);
647
648 amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
649
650 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
651 false, &ctx->duplicates);
652 if (ret) {
653 pr_err("Failed to reserve buffers in ttm.\n");
654 kfree(ctx->vm_pd);
655 ctx->vm_pd = NULL;
656 return ret;
657 }
658
659 ctx->reserved = true;
660 return 0;
661 }
662
663 /**
664 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
665 * @mem: KFD BO structure.
666 * @vm: the VM to reserve. If NULL, all VMs associated with the BO
667 * are used. Otherwise, only the single given VM is used.
668 * @map_type: the mapping status that will be used to filter the VMs.
669 * @ctx: the struct that will be used in unreserve_bo_and_vms().
670 *
671 * Returns 0 for success, negative for failure.
672 */
673 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
674 struct amdgpu_vm *vm, enum bo_vm_match map_type,
675 struct bo_vm_reservation_context *ctx)
676 {
677 struct amdgpu_bo *bo = mem->bo;
678 struct kfd_bo_va_list *entry;
679 unsigned int i;
680 int ret;
681
682 ctx->reserved = false;
683 ctx->n_vms = 0;
684 ctx->vm_pd = NULL;
685 ctx->sync = &mem->sync;
686
687 INIT_LIST_HEAD(&ctx->list);
688 INIT_LIST_HEAD(&ctx->duplicates);
689
690 list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
691 if ((vm && vm != entry->bo_va->base.vm) ||
692 (entry->is_mapped != map_type
693 && map_type != BO_VM_ALL))
694 continue;
695
696 ctx->n_vms++;
697 }
698
699 if (ctx->n_vms != 0) {
700 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
701 GFP_KERNEL);
702 if (!ctx->vm_pd)
703 return -ENOMEM;
704 }
705
706 ctx->kfd_bo.priority = 0;
707 ctx->kfd_bo.tv.bo = &bo->tbo;
708 ctx->kfd_bo.tv.num_shared = 1;
709 list_add(&ctx->kfd_bo.tv.head, &ctx->list);
710
711 i = 0;
712 list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
713 if ((vm && vm != entry->bo_va->base.vm) ||
714 (entry->is_mapped != map_type
715 && map_type != BO_VM_ALL))
716 continue;
717
718 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
719 &ctx->vm_pd[i]);
720 i++;
721 }
722
723 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
724 false, &ctx->duplicates);
725 if (ret) {
726 pr_err("Failed to reserve buffers in ttm.\n");
727 kfree(ctx->vm_pd);
728 ctx->vm_pd = NULL;
729 return ret;
730 }
731
732 ctx->reserved = true;
733 return 0;
734 }
735
736 /**
737 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
738 * @ctx: Reservation context to unreserve
739 * @wait: Optionally wait for a sync object representing pending VM updates
740 * @intr: Whether the wait is interruptible
741 *
742 * Also frees any resources allocated in
743 * reserve_bo_and_(cond_)vm(s). Returns the status from
744 * amdgpu_sync_wait.
745 */
746 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
747 bool wait, bool intr)
748 {
749 int ret = 0;
750
751 if (wait)
752 ret = amdgpu_sync_wait(ctx->sync, intr);
753
754 if (ctx->reserved)
755 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
756 kfree(ctx->vm_pd);
757
758 ctx->sync = NULL;
759
760 ctx->reserved = false;
761 ctx->vm_pd = NULL;
762
763 return ret;
764 }
765
766 static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
767 struct kfd_bo_va_list *entry,
768 struct amdgpu_sync *sync)
769 {
770 struct amdgpu_bo_va *bo_va = entry->bo_va;
771 struct amdgpu_vm *vm = bo_va->base.vm;
772
773 amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
774
775 amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
776
777 amdgpu_sync_fence(sync, bo_va->last_pt_update);
778
779 return 0;
780 }
781
782 static int update_gpuvm_pte(struct amdgpu_device *adev,
783 struct kfd_bo_va_list *entry,
784 struct amdgpu_sync *sync)
785 {
786 int ret;
787 struct amdgpu_bo_va *bo_va = entry->bo_va;
788
789 /* Update the page tables */
790 ret = amdgpu_vm_bo_update(adev, bo_va, false);
791 if (ret) {
792 pr_err("amdgpu_vm_bo_update failed\n");
793 return ret;
794 }
795
796 return amdgpu_sync_fence(sync, bo_va->last_pt_update);
797 }
798
799 static int map_bo_to_gpuvm(struct amdgpu_device *adev,
800 struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
801 bool no_update_pte)
802 {
803 int ret;
804
805 /* Set virtual address for the allocation */
806 ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
807 amdgpu_bo_size(entry->bo_va->base.bo),
808 entry->pte_flags);
809 if (ret) {
810 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
811 entry->va, ret);
812 return ret;
813 }
814
815 if (no_update_pte)
816 return 0;
817
818 ret = update_gpuvm_pte(adev, entry, sync);
819 if (ret) {
820 pr_err("update_gpuvm_pte() failed\n");
821 goto update_gpuvm_pte_failed;
822 }
823
824 return 0;
825
826 update_gpuvm_pte_failed:
827 unmap_bo_from_gpuvm(adev, entry, sync);
828 return ret;
829 }
830
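/* Build a single-entry sg_table describing an already-mapped doorbell or
 * MMIO page so it can be wrapped in a ttm_bo_type_sg BO.
 */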
831 static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
832 {
833 struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
834
835 if (!sg)
836 return NULL;
837 if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
838 kfree(sg);
839 return NULL;
840 }
841 sg->sgl->dma_address = addr;
842 sg->sgl->length = size;
843 #ifdef CONFIG_NEED_SG_DMA_LENGTH
844 sg->sgl->dma_length = size;
845 #endif
846 return sg;
847 }
848
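/* Validate the page table and page directory BOs of every VM belonging to
 * a KFD process.
 */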
849 static int process_validate_vms(struct amdkfd_process_info *process_info)
850 {
851 struct amdgpu_vm *peer_vm;
852 int ret;
853
854 list_for_each_entry(peer_vm, &process_info->vm_list_head,
855 vm_list_node) {
856 ret = vm_validate_pt_pd_bos(peer_vm);
857 if (ret)
858 return ret;
859 }
860
861 return 0;
862 }
863
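/* Add all non-KFD fences on every page directory's reservation object to
 * the sync object, so that subsequent VM updates wait for them.
 */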
864 static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
865 struct amdgpu_sync *sync)
866 {
867 struct amdgpu_vm *peer_vm;
868 int ret;
869
870 list_for_each_entry(peer_vm, &process_info->vm_list_head,
871 vm_list_node) {
872 struct amdgpu_bo *pd = peer_vm->root.base.bo;
873
874 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
875 AMDGPU_SYNC_NE_OWNER,
876 AMDGPU_FENCE_OWNER_KFD);
877 if (ret)
878 return ret;
879 }
880
881 return 0;
882 }
883
884 static int process_update_pds(struct amdkfd_process_info *process_info,
885 struct amdgpu_sync *sync)
886 {
887 struct amdgpu_vm *peer_vm;
888 int ret;
889
890 list_for_each_entry(peer_vm, &process_info->vm_list_head,
891 vm_list_node) {
892 ret = vm_update_pds(peer_vm, sync);
893 if (ret)
894 return ret;
895 }
896
897 return 0;
898 }
899
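/* Initialize the KFD part of a compute VM: create the per-process info and
 * eviction fence on first use, validate the page directory and attach the
 * eviction fence to it, then link the VM into the process' VM list.
 */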
900 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
901 struct dma_fence **ef)
902 {
903 struct amdkfd_process_info *info = NULL;
904 int ret;
905
906 if (!*process_info) {
907 info = kzalloc(sizeof(*info), GFP_KERNEL);
908 if (!info)
909 return -ENOMEM;
910
911 mutex_init(&info->lock);
912 INIT_LIST_HEAD(&info->vm_list_head);
913 INIT_LIST_HEAD(&info->kfd_bo_list);
914 INIT_LIST_HEAD(&info->userptr_valid_list);
915 INIT_LIST_HEAD(&info->userptr_inval_list);
916
917 info->eviction_fence =
918 amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
919 current->mm);
920 if (!info->eviction_fence) {
921 pr_err("Failed to create eviction fence\n");
922 ret = -ENOMEM;
923 goto create_evict_fence_fail;
924 }
925
926 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
927 atomic_set(&info->evicted_bos, 0);
928 INIT_DELAYED_WORK(&info->restore_userptr_work,
929 amdgpu_amdkfd_restore_userptr_worker);
930
931 *process_info = info;
932 *ef = dma_fence_get(&info->eviction_fence->base);
933 }
934
935 vm->process_info = *process_info;
936
937 /* Validate page directory and attach eviction fence */
938 ret = amdgpu_bo_reserve(vm->root.base.bo, true);
939 if (ret)
940 goto reserve_pd_fail;
941 ret = vm_validate_pt_pd_bos(vm);
942 if (ret) {
943 pr_err("validate_pt_pd_bos() failed\n");
944 goto validate_pd_fail;
945 }
946 ret = amdgpu_bo_sync_wait(vm->root.base.bo,
947 AMDGPU_FENCE_OWNER_KFD, false);
948 if (ret)
949 goto wait_pd_fail;
950 ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
951 if (ret)
952 goto reserve_shared_fail;
953 amdgpu_bo_fence(vm->root.base.bo,
954 &vm->process_info->eviction_fence->base, true);
955 amdgpu_bo_unreserve(vm->root.base.bo);
956
957 /* Update process info */
958 mutex_lock(&vm->process_info->lock);
959 list_add_tail(&vm->vm_list_node,
960 &(vm->process_info->vm_list_head));
961 vm->process_info->n_vms++;
962 mutex_unlock(&vm->process_info->lock);
963
964 return 0;
965
966 reserve_shared_fail:
967 wait_pd_fail:
968 validate_pd_fail:
969 amdgpu_bo_unreserve(vm->root.base.bo);
970 reserve_pd_fail:
971 vm->process_info = NULL;
972 if (info) {
973 /* Two fence references: one in info and one in *ef */
974 dma_fence_put(&info->eviction_fence->base);
975 dma_fence_put(*ef);
976 *ef = NULL;
977 *process_info = NULL;
978 put_pid(info->pid);
979 create_evict_fence_fail:
980 mutex_destroy(&info->lock);
981 kfree(info);
982 }
983 return ret;
984 }
985
986 int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, u32 pasid,
987 void **vm, void **process_info,
988 struct dma_fence **ef)
989 {
990 struct amdgpu_device *adev = get_amdgpu_device(kgd);
991 struct amdgpu_vm *new_vm;
992 int ret;
993
994 new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
995 if (!new_vm)
996 return -ENOMEM;
997
998 /* Initialize AMDGPU part of the VM */
999 ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
1000 if (ret) {
1001 pr_err("Failed init vm ret %d\n", ret);
1002 goto amdgpu_vm_init_fail;
1003 }
1004
1005 /* Initialize KFD part of the VM and process info */
1006 ret = init_kfd_vm(new_vm, process_info, ef);
1007 if (ret)
1008 goto init_kfd_vm_fail;
1009
1010 *vm = (void *) new_vm;
1011
1012 return 0;
1013
1014 init_kfd_vm_fail:
1015 amdgpu_vm_fini(adev, new_vm);
1016 amdgpu_vm_init_fail:
1017 kfree(new_vm);
1018 return ret;
1019 }
1020
1021 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
1022 struct file *filp, u32 pasid,
1023 void **vm, void **process_info,
1024 struct dma_fence **ef)
1025 {
1026 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1027 struct drm_file *drm_priv = filp->private_data;
1028 struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
1029 struct amdgpu_vm *avm = &drv_priv->vm;
1030 int ret;
1031
1032 /* Already a compute VM? */
1033 if (avm->process_info)
1034 return -EINVAL;
1035
1036 /* Convert VM into a compute VM */
1037 ret = amdgpu_vm_make_compute(adev, avm, pasid);
1038 if (ret)
1039 return ret;
1040
1041 /* Initialize KFD part of the VM and process info */
1042 ret = init_kfd_vm(avm, process_info, ef);
1043 if (ret)
1044 return ret;
1045
1046 *vm = (void *)avm;
1047
1048 return 0;
1049 }
1050
1051 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1052 struct amdgpu_vm *vm)
1053 {
1054 struct amdkfd_process_info *process_info = vm->process_info;
1055 struct amdgpu_bo *pd = vm->root.base.bo;
1056
1057 if (!process_info)
1058 return;
1059
1060 /* Release eviction fence from PD */
1061 amdgpu_bo_reserve(pd, false);
1062 amdgpu_bo_fence(pd, NULL, false);
1063 amdgpu_bo_unreserve(pd);
1064
1065 /* Update process info */
1066 mutex_lock(&process_info->lock);
1067 process_info->n_vms--;
1068 list_del(&vm->vm_list_node);
1069 mutex_unlock(&process_info->lock);
1070
1071 vm->process_info = NULL;
1072
1073 /* Release per-process resources when last compute VM is destroyed */
1074 if (!process_info->n_vms) {
1075 WARN_ON(!list_empty(&process_info->kfd_bo_list));
1076 WARN_ON(!list_empty(&process_info->userptr_valid_list));
1077 WARN_ON(!list_empty(&process_info->userptr_inval_list));
1078
1079 dma_fence_put(&process_info->eviction_fence->base);
1080 cancel_delayed_work_sync(&process_info->restore_userptr_work);
1081 put_pid(process_info->pid);
1082 mutex_destroy(&process_info->lock);
1083 kfree(process_info);
1084 }
1085 }
1086
1087 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
1088 {
1089 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1090 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1091
1092 if (WARN_ON(!kgd || !vm))
1093 return;
1094
1095 pr_debug("Destroying process vm %p\n", vm);
1096
1097 /* Release the VM context */
1098 amdgpu_vm_fini(adev, avm);
1099 kfree(vm);
1100 }
1101
1102 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
1103 {
1104 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1105 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1106
1107 if (WARN_ON(!kgd || !vm))
1108 return;
1109
1110 pr_debug("Releasing process vm %p\n", vm);
1111
1112 /* The original pasid of the amdgpu vm was already released
1113 * when the amdgpu vm was converted to a compute vm.
1114 * The current pasid is managed by kfd and will be
1115 * released on kfd process destroy. Set the amdgpu pasid
1116 * to 0 to avoid a duplicate release.
1117 */
1118 amdgpu_vm_release_compute(adev, avm);
1119 }
1120
1121 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
1122 {
1123 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1124 struct amdgpu_bo *pd = avm->root.base.bo;
1125 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1126
1127 if (adev->asic_type < CHIP_VEGA10)
1128 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1129 return avm->pd_phys_addr;
1130 }
1131
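/* Allocate a buffer object for a KFD process: pick the allocation domain
 * from the KFD_IOC_ALLOC_MEM_FLAGS_* bits (VRAM, GTT, userptr, doorbell or
 * MMIO remap), reserve it against the memory limits, create the BO and add
 * it to the process' BO list. Userptr BOs additionally get their user
 * pages initialized here.
 */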
1132 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1133 struct kgd_dev *kgd, uint64_t va, uint64_t size,
1134 void *vm, struct kgd_mem **mem,
1135 uint64_t *offset, uint32_t flags)
1136 {
1137 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1138 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1139 enum ttm_bo_type bo_type = ttm_bo_type_device;
1140 struct sg_table *sg = NULL;
1141 uint64_t user_addr = 0;
1142 struct amdgpu_bo *bo;
1143 struct amdgpu_bo_param bp;
1144 u32 domain, alloc_domain;
1145 u64 alloc_flags;
1146 int ret;
1147
1148 /*
1149 * Check on which domain to allocate BO
1150 */
1151 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1152 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1153 alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
1154 alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
1155 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
1156 AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
1157 } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
1158 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1159 alloc_flags = 0;
1160 } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1161 domain = AMDGPU_GEM_DOMAIN_GTT;
1162 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1163 alloc_flags = 0;
1164 if (!offset || !*offset)
1165 return -EINVAL;
1166 user_addr = untagged_addr(*offset);
1167 } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1168 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1169 domain = AMDGPU_GEM_DOMAIN_GTT;
1170 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1171 bo_type = ttm_bo_type_sg;
1172 alloc_flags = 0;
1173 if (size > UINT_MAX)
1174 return -EINVAL;
1175 sg = create_doorbell_sg(*offset, size);
1176 if (!sg)
1177 return -ENOMEM;
1178 } else {
1179 return -EINVAL;
1180 }
1181
1182 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1183 if (!*mem) {
1184 ret = -ENOMEM;
1185 goto err;
1186 }
1187 INIT_LIST_HEAD(&(*mem)->bo_va_list);
1188 mutex_init(&(*mem)->lock);
1189 (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1190
1191 /* Workaround for AQL queue wraparound bug. Map the same
1192 * memory twice. That means we only actually allocate half
1193 * the memory.
1194 */
1195 if ((*mem)->aql_queue)
1196 size = size >> 1;
1197
1198 (*mem)->alloc_flags = flags;
1199
1200 amdgpu_sync_create(&(*mem)->sync);
1201
1202 ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
1203 if (ret) {
1204 pr_debug("Insufficient system memory\n");
1205 goto err_reserve_limit;
1206 }
1207
1208 pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1209 va, size, domain_string(alloc_domain));
1210
1211 memset(&bp, 0, sizeof(bp));
1212 bp.size = size;
1213 bp.byte_align = 1;
1214 bp.domain = alloc_domain;
1215 bp.flags = alloc_flags;
1216 bp.type = bo_type;
1217 bp.resv = NULL;
1218 ret = amdgpu_bo_create(adev, &bp, &bo);
1219 if (ret) {
1220 pr_debug("Failed to create BO on domain %s. ret %d\n",
1221 domain_string(alloc_domain), ret);
1222 goto err_bo_create;
1223 }
1224 if (bo_type == ttm_bo_type_sg) {
1225 bo->tbo.sg = sg;
1226 bo->tbo.ttm->sg = sg;
1227 }
1228 bo->kfd_bo = *mem;
1229 (*mem)->bo = bo;
1230 if (user_addr)
1231 bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;
1232
1233 (*mem)->va = va;
1234 (*mem)->domain = domain;
1235 (*mem)->mapped_to_gpu_memory = 0;
1236 (*mem)->process_info = avm->process_info;
1237 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1238
1239 if (user_addr) {
1240 ret = init_user_pages(*mem, user_addr);
1241 if (ret)
1242 goto allocate_init_user_pages_failed;
1243 }
1244
1245 if (offset)
1246 *offset = amdgpu_bo_mmap_offset(bo);
1247
1248 return 0;
1249
1250 allocate_init_user_pages_failed:
1251 remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1252 amdgpu_bo_unref(&bo);
1253 /* Don't unreserve system mem limit twice */
1254 goto err_reserve_limit;
1255 err_bo_create:
1256 unreserve_mem_limit(adev, size, alloc_domain, !!sg);
1257 err_reserve_limit:
1258 mutex_destroy(&(*mem)->lock);
1259 kfree(*mem);
1260 err:
1261 if (sg) {
1262 sg_free_table(sg);
1263 kfree(sg);
1264 }
1265 return ret;
1266 }
1267
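/* Free a KFD buffer object. Fails with -EBUSY if the BO is still mapped on
 * any GPU; otherwise removes it from all VMs and process lists, removes the
 * eviction fence and releases the BO.
 */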
1268 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1269 struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size)
1270 {
1271 struct amdkfd_process_info *process_info = mem->process_info;
1272 unsigned long bo_size = mem->bo->tbo.mem.size;
1273 struct kfd_bo_va_list *entry, *tmp;
1274 struct bo_vm_reservation_context ctx;
1275 struct ttm_validate_buffer *bo_list_entry;
1276 unsigned int mapped_to_gpu_memory;
1277 int ret;
1278 bool is_imported = false;
1279
1280 mutex_lock(&mem->lock);
1281 mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
1282 is_imported = mem->is_imported;
1283 mutex_unlock(&mem->lock);
1284 /* lock is not needed after this, since mem is unused and will
1285 * be freed anyway
1286 */
1287
1288 if (mapped_to_gpu_memory > 0) {
1289 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1290 mem->va, bo_size);
1291 return -EBUSY;
1292 }
1293
1294 /* Make sure restore workers don't access the BO any more */
1295 bo_list_entry = &mem->validate_list;
1296 mutex_lock(&process_info->lock);
1297 list_del(&bo_list_entry->head);
1298 mutex_unlock(&process_info->lock);
1299
1300 /* No more MMU notifiers */
1301 amdgpu_mn_unregister(mem->bo);
1302
1303 ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1304 if (unlikely(ret))
1305 return ret;
1306
1307 /* The eviction fence should be removed by the last unmap.
1308 * TODO: Log an error condition if the bo still has the eviction fence
1309 * attached
1310 */
1311 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1312 process_info->eviction_fence);
1313 pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1314 mem->va + bo_size * (1 + mem->aql_queue));
1315
1316 /* Remove from VM internal data structures */
1317 list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
1318 remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
1319 entry, bo_size);
1320
1321 ret = unreserve_bo_and_vms(&ctx, false, false);
1322
1323 /* Free the sync object */
1324 amdgpu_sync_free(&mem->sync);
1325
1326 /* If the SG is not NULL, it's one we created for a doorbell or mmio
1327 * remap BO. We need to free it.
1328 */
1329 if (mem->bo->tbo.sg) {
1330 sg_free_table(mem->bo->tbo.sg);
1331 kfree(mem->bo->tbo.sg);
1332 }
1333
1334 /* Update the size of the BO being freed if it was allocated from
1335 * VRAM and is not imported.
1336 */
1337 if (size) {
1338 if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
1339 (!is_imported))
1340 *size = bo_size;
1341 else
1342 *size = 0;
1343 }
1344
1345 /* Free the BO */
1346 drm_gem_object_put(&mem->bo->tbo.base);
1347 mutex_destroy(&mem->lock);
1348 kfree(mem);
1349
1350 return ret;
1351 }
1352
1353 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1354 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1355 {
1356 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1357 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1358 int ret;
1359 struct amdgpu_bo *bo;
1360 uint32_t domain;
1361 struct kfd_bo_va_list *entry;
1362 struct bo_vm_reservation_context ctx;
1363 struct kfd_bo_va_list *bo_va_entry = NULL;
1364 struct kfd_bo_va_list *bo_va_entry_aql = NULL;
1365 unsigned long bo_size;
1366 bool is_invalid_userptr = false;
1367
1368 bo = mem->bo;
1369 if (!bo) {
1370 pr_err("Invalid BO when mapping memory to GPU\n");
1371 return -EINVAL;
1372 }
1373
1374 /* Make sure restore is not running concurrently. Since we
1375 * don't map invalid userptr BOs, we rely on the next restore
1376 * worker to do the mapping
1377 */
1378 mutex_lock(&mem->process_info->lock);
1379
1380 /* Lock mmap-sem. If we find an invalid userptr BO, we can be
1381 * sure that the MMU notifier is no longer running
1382 * concurrently and the queues are actually stopped
1383 */
1384 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1385 mmap_write_lock(current->mm);
1386 is_invalid_userptr = atomic_read(&mem->invalid);
1387 mmap_write_unlock(current->mm);
1388 }
1389
1390 mutex_lock(&mem->lock);
1391
1392 domain = mem->domain;
1393 bo_size = bo->tbo.mem.size;
1394
1395 pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1396 mem->va,
1397 mem->va + bo_size * (1 + mem->aql_queue),
1398 vm, domain_string(domain));
1399
1400 ret = reserve_bo_and_vm(mem, vm, &ctx);
1401 if (unlikely(ret))
1402 goto out;
1403
1404 /* Userptr can be marked as "not invalid", but not actually be
1405 * validated yet (still in the system domain). In that case
1406 * the queues are still stopped and we can leave mapping for
1407 * the next restore worker
1408 */
1409 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1410 bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
1411 is_invalid_userptr = true;
1412
1413 if (check_if_add_bo_to_vm(avm, mem)) {
1414 ret = add_bo_to_vm(adev, mem, avm, false,
1415 &bo_va_entry);
1416 if (ret)
1417 goto add_bo_to_vm_failed;
1418 if (mem->aql_queue) {
1419 ret = add_bo_to_vm(adev, mem, avm,
1420 true, &bo_va_entry_aql);
1421 if (ret)
1422 goto add_bo_to_vm_failed_aql;
1423 }
1424 } else {
1425 ret = vm_validate_pt_pd_bos(avm);
1426 if (unlikely(ret))
1427 goto add_bo_to_vm_failed;
1428 }
1429
1430 if (mem->mapped_to_gpu_memory == 0 &&
1431 !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1432 /* Validate BO only once. The eviction fence gets added to BO
1433 * the first time it is mapped. Validate will wait for all
1434 * background evictions to complete.
1435 */
1436 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1437 if (ret) {
1438 pr_debug("Validate failed\n");
1439 goto map_bo_to_gpuvm_failed;
1440 }
1441 }
1442
1443 list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1444 if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
1445 pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1446 entry->va, entry->va + bo_size,
1447 entry);
1448
1449 ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
1450 is_invalid_userptr);
1451 if (ret) {
1452 pr_err("Failed to map bo to gpuvm\n");
1453 goto map_bo_to_gpuvm_failed;
1454 }
1455
1456 ret = vm_update_pds(vm, ctx.sync);
1457 if (ret) {
1458 pr_err("Failed to update page directories\n");
1459 goto map_bo_to_gpuvm_failed;
1460 }
1461
1462 entry->is_mapped = true;
1463 mem->mapped_to_gpu_memory++;
1464 pr_debug("\t INC mapping count %d\n",
1465 mem->mapped_to_gpu_memory);
1466 }
1467 }
1468
1469 if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
1470 amdgpu_bo_fence(bo,
1471 &avm->process_info->eviction_fence->base,
1472 true);
1473 ret = unreserve_bo_and_vms(&ctx, false, false);
1474
1475 goto out;
1476
1477 map_bo_to_gpuvm_failed:
1478 if (bo_va_entry_aql)
1479 remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
1480 add_bo_to_vm_failed_aql:
1481 if (bo_va_entry)
1482 remove_bo_from_vm(adev, bo_va_entry, bo_size);
1483 add_bo_to_vm_failed:
1484 unreserve_bo_and_vms(&ctx, false, false);
1485 out:
1486 mutex_unlock(&mem->process_info->lock);
1487 mutex_unlock(&mem->lock);
1488 return ret;
1489 }
1490
1491 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1492 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1493 {
1494 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1495 struct amdkfd_process_info *process_info =
1496 ((struct amdgpu_vm *)vm)->process_info;
1497 unsigned long bo_size = mem->bo->tbo.mem.size;
1498 struct kfd_bo_va_list *entry;
1499 struct bo_vm_reservation_context ctx;
1500 int ret;
1501
1502 mutex_lock(&mem->lock);
1503
1504 ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
1505 if (unlikely(ret))
1506 goto out;
1507 /* If no VMs were reserved, it means the BO wasn't actually mapped */
1508 if (ctx.n_vms == 0) {
1509 ret = -EINVAL;
1510 goto unreserve_out;
1511 }
1512
1513 ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
1514 if (unlikely(ret))
1515 goto unreserve_out;
1516
1517 pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1518 mem->va,
1519 mem->va + bo_size * (1 + mem->aql_queue),
1520 vm);
1521
1522 list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1523 if (entry->bo_va->base.vm == vm && entry->is_mapped) {
1524 pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1525 entry->va,
1526 entry->va + bo_size,
1527 entry);
1528
1529 ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
1530 if (ret == 0) {
1531 entry->is_mapped = false;
1532 } else {
1533 pr_err("failed to unmap VA 0x%llx\n",
1534 mem->va);
1535 goto unreserve_out;
1536 }
1537
1538 mem->mapped_to_gpu_memory--;
1539 pr_debug("\t DEC mapping count %d\n",
1540 mem->mapped_to_gpu_memory);
1541 }
1542 }
1543
1544 /* If BO is unmapped from all VMs, unfence it. It can be evicted if
1545 * required.
1546 */
1547 if (mem->mapped_to_gpu_memory == 0 &&
1548 !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
1549 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1550 process_info->eviction_fence);
1551
1552 unreserve_out:
1553 unreserve_bo_and_vms(&ctx, false, false);
1554 out:
1555 mutex_unlock(&mem->lock);
1556 return ret;
1557 }
1558
1559 int amdgpu_amdkfd_gpuvm_sync_memory(
1560 struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1561 {
1562 struct amdgpu_sync sync;
1563 int ret;
1564
1565 amdgpu_sync_create(&sync);
1566
1567 mutex_lock(&mem->lock);
1568 amdgpu_sync_clone(&mem->sync, &sync);
1569 mutex_unlock(&mem->lock);
1570
1571 ret = amdgpu_sync_wait(&sync, intr);
1572 amdgpu_sync_free(&sync);
1573 return ret;
1574 }
1575
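/* Pin a GTT BO and map it into the kernel address space so the driver can
 * access it with the CPU. The BO is removed from the KFD BO list and its
 * eviction fence dropped so eviction/restore leave it alone.
 */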
1576 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1577 struct kgd_mem *mem, void **kptr, uint64_t *size)
1578 {
1579 int ret;
1580 struct amdgpu_bo *bo = mem->bo;
1581
1582 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1583 pr_err("userptr can't be mapped to kernel\n");
1584 return -EINVAL;
1585 }
1586
1587 /* Delete kgd_mem from the kfd_bo_list to avoid re-validating
1588 * this BO during restore after an eviction.
1589 */
1590 mutex_lock(&mem->process_info->lock);
1591
1592 ret = amdgpu_bo_reserve(bo, true);
1593 if (ret) {
1594 pr_err("Failed to reserve bo. ret %d\n", ret);
1595 goto bo_reserve_failed;
1596 }
1597
1598 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1599 if (ret) {
1600 pr_err("Failed to pin bo. ret %d\n", ret);
1601 goto pin_failed;
1602 }
1603
1604 ret = amdgpu_bo_kmap(bo, kptr);
1605 if (ret) {
1606 pr_err("Failed to map bo to kernel. ret %d\n", ret);
1607 goto kmap_failed;
1608 }
1609
1610 amdgpu_amdkfd_remove_eviction_fence(
1611 bo, mem->process_info->eviction_fence);
1612 list_del_init(&mem->validate_list.head);
1613
1614 if (size)
1615 *size = amdgpu_bo_size(bo);
1616
1617 amdgpu_bo_unreserve(bo);
1618
1619 mutex_unlock(&mem->process_info->lock);
1620 return 0;
1621
1622 kmap_failed:
1623 amdgpu_bo_unpin(bo);
1624 pin_failed:
1625 amdgpu_bo_unreserve(bo);
1626 bo_reserve_failed:
1627 mutex_unlock(&mem->process_info->lock);
1628
1629 return ret;
1630 }
1631
1632 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1633 struct kfd_vm_fault_info *mem)
1634 {
1635 struct amdgpu_device *adev;
1636
1637 adev = (struct amdgpu_device *)kgd;
1638 if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1639 *mem = *adev->gmc.vm_fault_info;
1640 mb();
1641 atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1642 }
1643 return 0;
1644 }
1645
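/* Import a DMA-buf exported by the amdgpu graphics driver as a KFD BO.
 * Only VRAM and GTT BOs from the same device are supported; the imported
 * BO shares the underlying memory rather than copying it.
 */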
1646 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
1647 struct dma_buf *dma_buf,
1648 uint64_t va, void *vm,
1649 struct kgd_mem **mem, uint64_t *size,
1650 uint64_t *mmap_offset)
1651 {
1652 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
1653 struct drm_gem_object *obj;
1654 struct amdgpu_bo *bo;
1655 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1656
1657 if (dma_buf->ops != &amdgpu_dmabuf_ops)
1658 /* Can't handle non-graphics buffers */
1659 return -EINVAL;
1660
1661 obj = dma_buf->priv;
1662 if (drm_to_adev(obj->dev) != adev)
1663 /* Can't handle buffers from other devices */
1664 return -EINVAL;
1665
1666 bo = gem_to_amdgpu_bo(obj);
1667 if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
1668 AMDGPU_GEM_DOMAIN_GTT)))
1669 /* Only VRAM and GTT BOs are supported */
1670 return -EINVAL;
1671
1672 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1673 if (!*mem)
1674 return -ENOMEM;
1675
1676 if (size)
1677 *size = amdgpu_bo_size(bo);
1678
1679 if (mmap_offset)
1680 *mmap_offset = amdgpu_bo_mmap_offset(bo);
1681
1682 INIT_LIST_HEAD(&(*mem)->bo_va_list);
1683 mutex_init(&(*mem)->lock);
1684
1685 (*mem)->alloc_flags =
1686 ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1687 KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
1688 | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
1689 | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
1690
1691 drm_gem_object_get(&bo->tbo.base);
1692 (*mem)->bo = bo;
1693 (*mem)->va = va;
1694 (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1695 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
1696 (*mem)->mapped_to_gpu_memory = 0;
1697 (*mem)->process_info = avm->process_info;
1698 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
1699 amdgpu_sync_create(&(*mem)->sync);
1700 (*mem)->is_imported = true;
1701
1702 return 0;
1703 }
1704
1705 /* Evict a userptr BO by stopping the queues if necessary
1706 *
1707 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1708 * cannot do any memory allocations, and cannot take any locks that
1709 * are held elsewhere while allocating memory. Therefore this is as
1710 * simple as possible, using atomic counters.
1711 *
1712 * It doesn't do anything to the BO itself. The real work happens in
1713 * restore, where we get updated page addresses. This function only
1714 * ensures that GPU access to the BO is stopped.
1715 */
1716 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1717 struct mm_struct *mm)
1718 {
1719 struct amdkfd_process_info *process_info = mem->process_info;
1720 int evicted_bos;
1721 int r = 0;
1722
1723 atomic_inc(&mem->invalid);
1724 evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1725 if (evicted_bos == 1) {
1726 /* First eviction, stop the queues */
1727 r = kgd2kfd_quiesce_mm(mm);
1728 if (r)
1729 pr_err("Failed to quiesce KFD\n");
1730 schedule_delayed_work(&process_info->restore_userptr_work,
1731 msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1732 }
1733
1734 return r;
1735 }
1736
1737 /* Update invalid userptr BOs
1738 *
1739 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1740 * userptr_inval_list and updates user pages for all BOs that have
1741 * been invalidated since their last update.
1742 */
1743 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1744 struct mm_struct *mm)
1745 {
1746 struct kgd_mem *mem, *tmp_mem;
1747 struct amdgpu_bo *bo;
1748 struct ttm_operation_ctx ctx = { false, false };
1749 int invalid, ret;
1750
1751 /* Move all invalidated BOs to the userptr_inval_list and
1752 * release their user pages by migration to the CPU domain
1753 */
1754 list_for_each_entry_safe(mem, tmp_mem,
1755 &process_info->userptr_valid_list,
1756 validate_list.head) {
1757 if (!atomic_read(&mem->invalid))
1758 continue; /* BO is still valid */
1759
1760 bo = mem->bo;
1761
1762 if (amdgpu_bo_reserve(bo, true))
1763 return -EAGAIN;
1764 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1765 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1766 amdgpu_bo_unreserve(bo);
1767 if (ret) {
1768 pr_err("%s: Failed to invalidate userptr BO\n",
1769 __func__);
1770 return -EAGAIN;
1771 }
1772
1773 list_move_tail(&mem->validate_list.head,
1774 &process_info->userptr_inval_list);
1775 }
1776
1777 if (list_empty(&process_info->userptr_inval_list))
1778 return 0; /* All evicted userptr BOs were freed */
1779
1780 /* Go through userptr_inval_list and update any invalid user_pages */
1781 list_for_each_entry(mem, &process_info->userptr_inval_list,
1782 validate_list.head) {
1783 invalid = atomic_read(&mem->invalid);
1784 if (!invalid)
1785 /* BO hasn't been invalidated since the last
1786 * revalidation attempt. Keep its BO list.
1787 */
1788 continue;
1789
1790 bo = mem->bo;
1791
1792 /* Get updated user pages */
1793 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
1794 if (ret) {
1795 pr_debug("%s: Failed to get user pages: %d\n",
1796 __func__, ret);
1797
1798 /* Return error -EBUSY or -ENOMEM, retry restore */
1799 return ret;
1800 }
1801
1802 /*
1803 * FIXME: Cannot ignore the return code, must hold
1804 * notifier_lock
1805 */
1806 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1807
1808 /* Mark the BO as valid unless it was invalidated
1809 * again concurrently.
1810 */
1811 if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
1812 return -EAGAIN;
1813 }
1814
1815 return 0;
1816 }
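
/*
 * The invalid-counter handshake used above, shown in isolation (a minimal
 * sketch, not a helper that exists in this file): read the current count,
 * do the work that may race with new MMU notifier invalidations, then clear
 * the count only if it is still the value we saw; otherwise return -EAGAIN
 * so the restore worker reschedules.
 *
 *	static int example_revalidate_one(struct kgd_mem *mem,
 *					  struct amdgpu_bo *bo)
 *	{
 *		int seen = atomic_read(&mem->invalid);
 *		int ret;
 *
 *		if (!seen)
 *			return 0;
 *
 *		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
 *		if (ret)
 *			return ret;
 *
 *		if (atomic_cmpxchg(&mem->invalid, seen, 0) != seen)
 *			return -EAGAIN;
 *		return 0;
 *	}
 */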
1817
1818 /* Validate invalid userptr BOs
1819 *
1820 * Validates BOs on the userptr_inval_list, and moves them back to the
1821 * userptr_valid_list. Also updates GPUVM page tables with new page
1822 * addresses and waits for the page table updates to complete.
1823 */
1824 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
1825 {
1826 struct amdgpu_bo_list_entry *pd_bo_list_entries;
1827 struct list_head resv_list, duplicates;
1828 struct ww_acquire_ctx ticket;
1829 struct amdgpu_sync sync;
1830
1831 struct amdgpu_vm *peer_vm;
1832 struct kgd_mem *mem, *tmp_mem;
1833 struct amdgpu_bo *bo;
1834 struct ttm_operation_ctx ctx = { false, false };
1835 int i, ret;
1836
1837 pd_bo_list_entries = kcalloc(process_info->n_vms,
1838 sizeof(struct amdgpu_bo_list_entry),
1839 GFP_KERNEL);
1840 if (!pd_bo_list_entries) {
1841 pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
1842 ret = -ENOMEM;
1843 goto out_no_mem;
1844 }
1845
1846 INIT_LIST_HEAD(&resv_list);
1847 INIT_LIST_HEAD(&duplicates);
1848
1849 /* Get all the page directory BOs that need to be reserved */
1850 i = 0;
1851 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1852 vm_list_node)
1853 amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
1854 &pd_bo_list_entries[i++]);
1855 /* Add the userptr_inval_list entries to resv_list */
1856 list_for_each_entry(mem, &process_info->userptr_inval_list,
1857 validate_list.head) {
1858 list_add_tail(&mem->resv_list.head, &resv_list);
1859 mem->resv_list.bo = mem->validate_list.bo;
1860 mem->resv_list.num_shared = mem->validate_list.num_shared;
1861 }
1862
1863 /* Reserve all BOs and page tables for validation */
1864 ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
1865 WARN(!list_empty(&duplicates), "Duplicates should be empty");
1866 if (ret)
1867 goto out_free;
1868
1869 amdgpu_sync_create(&sync);
1870
1871 ret = process_validate_vms(process_info);
1872 if (ret)
1873 goto unreserve_out;
1874
1875 /* Validate BOs and update GPUVM page tables */
1876 list_for_each_entry_safe(mem, tmp_mem,
1877 &process_info->userptr_inval_list,
1878 validate_list.head) {
1879 struct kfd_bo_va_list *bo_va_entry;
1880
1881 bo = mem->bo;
1882
1883 /* Validate the BO if we got user pages */
1884 if (bo->tbo.ttm->pages[0]) {
1885 amdgpu_bo_placement_from_domain(bo, mem->domain);
1886 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1887 if (ret) {
1888 pr_err("%s: failed to validate BO\n", __func__);
1889 goto unreserve_out;
1890 }
1891 }
1892
1893 list_move_tail(&mem->validate_list.head,
1894 &process_info->userptr_valid_list);
1895
1896 /* Update mapping. If the BO was not validated
1897 * (because we couldn't get user pages), this will
1898 * clear the page table entries, which will result in
1899 * VM faults if the GPU tries to access the invalid
1900 * memory.
1901 */
1902 list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
1903 if (!bo_va_entry->is_mapped)
1904 continue;
1905
1906 ret = update_gpuvm_pte((struct amdgpu_device *)
1907 bo_va_entry->kgd_dev,
1908 bo_va_entry, &sync);
1909 if (ret) {
1910 pr_err("%s: update PTE failed\n", __func__);
1911 /* make sure this gets validated again */
1912 atomic_inc(&mem->invalid);
1913 goto unreserve_out;
1914 }
1915 }
1916 }
1917
1918 /* Update page directories */
1919 ret = process_update_pds(process_info, &sync);
1920
1921 unreserve_out:
1922 ttm_eu_backoff_reservation(&ticket, &resv_list);
1923 amdgpu_sync_wait(&sync, false);
1924 amdgpu_sync_free(&sync);
1925 out_free:
1926 kfree(pd_bo_list_entries);
1927 out_no_mem:
1928
1929 return ret;
1930 }
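
/*
 * A minimal sketch of the reserve/validate/backoff pattern used above,
 * reduced to a single BO (illustrative only; the real code must also
 * reserve the page directories, as validate_invalid_user_pages() does):
 *
 *	struct ttm_operation_ctx ctx = { false, false };
 *	struct ww_acquire_ctx ticket;
 *	struct list_head list, dups;
 *	int ret;
 *
 *	INIT_LIST_HEAD(&list);
 *	INIT_LIST_HEAD(&dups);
 *	list_add_tail(&mem->resv_list.head, &list);
 *	mem->resv_list.bo = mem->validate_list.bo;
 *	mem->resv_list.num_shared = mem->validate_list.num_shared;
 *
 *	ret = ttm_eu_reserve_buffers(&ticket, &list, false, &dups);
 *	if (!ret) {
 *		amdgpu_bo_placement_from_domain(mem->bo, mem->domain);
 *		ret = ttm_bo_validate(&mem->bo->tbo, &mem->bo->placement, &ctx);
 *		ttm_eu_backoff_reservation(&ticket, &list);
 *	}
 */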
1931
1932 /* Worker callback to restore evicted userptr BOs
1933 *
1934 * Tries to update and validate all userptr BOs. If successful and no
1935 * concurrent evictions happened, the queues are restarted. Otherwise,
1936 * reschedule for another attempt later.
1937 */
1938 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
1939 {
1940 struct delayed_work *dwork = to_delayed_work(work);
1941 struct amdkfd_process_info *process_info =
1942 container_of(dwork, struct amdkfd_process_info,
1943 restore_userptr_work);
1944 struct task_struct *usertask;
1945 struct mm_struct *mm;
1946 int evicted_bos;
1947
1948 evicted_bos = atomic_read(&process_info->evicted_bos);
1949 if (!evicted_bos)
1950 return;
1951
1952 /* Reference task and mm in case of concurrent process termination */
1953 usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
1954 if (!usertask)
1955 return;
1956 mm = get_task_mm(usertask);
1957 if (!mm) {
1958 put_task_struct(usertask);
1959 return;
1960 }
1961
1962 mutex_lock(&process_info->lock);
1963
1964 if (update_invalid_user_pages(process_info, mm))
1965 goto unlock_out;
1966 /* userptr_inval_list can be empty if all evicted userptr BOs
1967 * have been freed. In that case there is nothing to validate
1968 * and we can just restart the queues.
1969 */
1970 if (!list_empty(&process_info->userptr_inval_list)) {
1971 if (atomic_read(&process_info->evicted_bos) != evicted_bos)
1972 goto unlock_out; /* Concurrent eviction, try again */
1973
1974 if (validate_invalid_user_pages(process_info))
1975 goto unlock_out;
1976 }
1977 	/* Final check for concurrent eviction and atomic update. If
1978 	 * another eviction happens after the successful update, it will
1979 	 * be treated as a first eviction that calls quiesce_mm. The
1980 	 * eviction reference counting inside KFD will handle this case.
1981 */
1982 if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
1983 evicted_bos)
1984 goto unlock_out;
1985 evicted_bos = 0;
1986 if (kgd2kfd_resume_mm(mm)) {
1987 pr_err("%s: Failed to resume KFD\n", __func__);
1988 /* No recovery from this failure. Probably the CP is
1989 * hanging. No point trying again.
1990 */
1991 }
1992
1993 unlock_out:
1994 mutex_unlock(&process_info->lock);
1995 mmput(mm);
1996 put_task_struct(usertask);
1997
1998 /* If validation failed, reschedule another attempt */
1999 if (evicted_bos)
2000 schedule_delayed_work(&process_info->restore_userptr_work,
2001 msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2002 }
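
/*
 * For reference (a sketch, not a verbatim copy): the worker above is a
 * delayed work item owned by the process info and armed from
 * amdgpu_amdkfd_evict_userptr(). When the process info is created the
 * setup looks roughly like this:
 *
 *	INIT_DELAYED_WORK(&process_info->restore_userptr_work,
 *			  amdgpu_amdkfd_restore_userptr_worker);
 *
 * and both the eviction path and the worker itself re-arm it with:
 *
 *	schedule_delayed_work(&process_info->restore_userptr_work,
 *		msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
 */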
2003
2004 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
2005 * KFD process identified by process_info
2006 *
2007 * @process_info: amdkfd_process_info of the KFD process
2008 *
2009 * After memory eviction, the restore thread calls this function. It must
2010 * be called while the process is still valid. BO restore involves:
2011 *
2012 * 1. Release the old eviction fence and create a new one
2013 * 2. Get two copies of the PD BO list from all the VMs. Keep one copy as pd_list.
2014 * 3. Use the second PD list and kfd_bo_list to create a list (ctx.list) of
2015 *    BOs that need to be reserved.
2016 * 4. Reserve all the BOs
2017 * 5. Validate PD and PT BOs.
2018 * 6. Validate all KFD BOs using kfd_bo_list, map them and add the new fence
2019 * 7. Add the fence to all PD and PT BOs.
2020 * 8. Unreserve all BOs
 *
 * See the illustrative caller sketch after the function body.
2021 */
2022 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
2023 {
2024 struct amdgpu_bo_list_entry *pd_bo_list;
2025 struct amdkfd_process_info *process_info = info;
2026 struct amdgpu_vm *peer_vm;
2027 struct kgd_mem *mem;
2028 struct bo_vm_reservation_context ctx;
2029 struct amdgpu_amdkfd_fence *new_fence;
2030 int ret = 0, i;
2031 struct list_head duplicate_save;
2032 struct amdgpu_sync sync_obj;
2033
2034 INIT_LIST_HEAD(&duplicate_save);
2035 INIT_LIST_HEAD(&ctx.list);
2036 INIT_LIST_HEAD(&ctx.duplicates);
2037
2038 pd_bo_list = kcalloc(process_info->n_vms,
2039 sizeof(struct amdgpu_bo_list_entry),
2040 GFP_KERNEL);
2041 if (!pd_bo_list)
2042 return -ENOMEM;
2043
2044 i = 0;
2045 mutex_lock(&process_info->lock);
2046 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2047 vm_list_node)
2048 amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2049
2050 /* Reserve all BOs and page tables/directory. Add all BOs from
2051 * kfd_bo_list to ctx.list
2052 */
2053 list_for_each_entry(mem, &process_info->kfd_bo_list,
2054 validate_list.head) {
2055
2056 list_add_tail(&mem->resv_list.head, &ctx.list);
2057 mem->resv_list.bo = mem->validate_list.bo;
2058 mem->resv_list.num_shared = mem->validate_list.num_shared;
2059 }
2060
2061 ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2062 false, &duplicate_save);
2063 if (ret) {
2064 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2065 goto ttm_reserve_fail;
2066 }
2067
2068 amdgpu_sync_create(&sync_obj);
2069
2070 /* Validate PDs and PTs */
2071 ret = process_validate_vms(process_info);
2072 if (ret)
2073 goto validate_map_fail;
2074
2075 ret = process_sync_pds_resv(process_info, &sync_obj);
2076 if (ret) {
2077 pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2078 goto validate_map_fail;
2079 }
2080
2081 /* Validate BOs and map them to GPUVM (update VM page tables). */
2082 list_for_each_entry(mem, &process_info->kfd_bo_list,
2083 validate_list.head) {
2084
2085 struct amdgpu_bo *bo = mem->bo;
2086 uint32_t domain = mem->domain;
2087 struct kfd_bo_va_list *bo_va_entry;
2088
2089 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2090 if (ret) {
2091 pr_debug("Memory eviction: Validate BOs failed. Try again\n");
2092 goto validate_map_fail;
2093 }
2094 ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
2095 if (ret) {
2096 pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2097 goto validate_map_fail;
2098 }
2099 list_for_each_entry(bo_va_entry, &mem->bo_va_list,
2100 bo_list) {
2101 ret = update_gpuvm_pte((struct amdgpu_device *)
2102 bo_va_entry->kgd_dev,
2103 bo_va_entry,
2104 &sync_obj);
2105 if (ret) {
2106 pr_debug("Memory eviction: update PTE failed. Try again\n");
2107 goto validate_map_fail;
2108 }
2109 }
2110 }
2111
2112 /* Update page directories */
2113 ret = process_update_pds(process_info, &sync_obj);
2114 if (ret) {
2115 pr_debug("Memory eviction: update PDs failed. Try again\n");
2116 goto validate_map_fail;
2117 }
2118
2119 /* Wait for validate and PT updates to finish */
2120 amdgpu_sync_wait(&sync_obj, false);
2121
2122 	/* Release the old eviction fence and create a new one, because a
2123 	 * fence only goes from unsignaled to signaled once and cannot be
2124 	 * reused. Use the context and mm from the old fence.
2125 */
2126 new_fence = amdgpu_amdkfd_fence_create(
2127 process_info->eviction_fence->base.context,
2128 process_info->eviction_fence->mm);
2129 if (!new_fence) {
2130 pr_err("Failed to create eviction fence\n");
2131 ret = -ENOMEM;
2132 goto validate_map_fail;
2133 }
2134 dma_fence_put(&process_info->eviction_fence->base);
2135 process_info->eviction_fence = new_fence;
2136 *ef = dma_fence_get(&new_fence->base);
2137
2138 /* Attach new eviction fence to all BOs */
2139 list_for_each_entry(mem, &process_info->kfd_bo_list,
2140 validate_list.head)
2141 amdgpu_bo_fence(mem->bo,
2142 &process_info->eviction_fence->base, true);
2143
2144 /* Attach eviction fence to PD / PT BOs */
2145 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2146 vm_list_node) {
2147 struct amdgpu_bo *bo = peer_vm->root.base.bo;
2148
2149 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2150 }
2151
2152 validate_map_fail:
2153 ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2154 amdgpu_sync_free(&sync_obj);
2155 ttm_reserve_fail:
2156 mutex_unlock(&process_info->lock);
2157 kfree(pd_bo_list);
2158 return ret;
2159 }
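
/*
 * Illustrative caller sketch (assumed, simplified): KFD's own restore
 * worker is expected to call this with the opaque process info it obtained
 * when acquiring the VM, and to keep the returned eviction fence. The
 * wrapper name below is a placeholder, not a real KFD function.
 *
 *	static int example_restore(void *kgd_process_info,
 *				   struct dma_fence **ef)
 *	{
 *		int ret;
 *
 *		ret = amdgpu_amdkfd_gpuvm_restore_process_bos(kgd_process_info,
 *							      ef);
 *		if (ret)
 *			pr_err("Failed to restore BOs, retry later\n");
 *		return ret;
 *	}
 */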
2160
2161 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2162 {
2163 struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2164 struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2165 int ret;
2166
2167 if (!info || !gws)
2168 return -EINVAL;
2169
2170 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2171 if (!*mem)
2172 return -ENOMEM;
2173
2174 mutex_init(&(*mem)->lock);
2175 INIT_LIST_HEAD(&(*mem)->bo_va_list);
2176 (*mem)->bo = amdgpu_bo_ref(gws_bo);
2177 (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2178 (*mem)->process_info = process_info;
2179 add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
2180 amdgpu_sync_create(&(*mem)->sync);
2181
2182
2183 	/* Validate the GWS BO the first time it is added to the process */
2184 mutex_lock(&(*mem)->process_info->lock);
2185 ret = amdgpu_bo_reserve(gws_bo, false);
2186 if (unlikely(ret)) {
2187 pr_err("Reserve gws bo failed %d\n", ret);
2188 goto bo_reservation_failure;
2189 }
2190
2191 ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2192 if (ret) {
2193 pr_err("GWS BO validate failed %d\n", ret);
2194 goto bo_validation_failure;
2195 }
2196 	/* The GWS resource is shared between amdgpu and amdkfd.
2197 	 * Add the process eviction fence to the BO so they can
2198 	 * evict each other.
2199 */
2200 ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
2201 if (ret)
2202 goto reserve_shared_fail;
2203 amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
2204 amdgpu_bo_unreserve(gws_bo);
2205 mutex_unlock(&(*mem)->process_info->lock);
2206
2207 return ret;
2208
2209 reserve_shared_fail:
2210 bo_validation_failure:
2211 amdgpu_bo_unreserve(gws_bo);
2212 bo_reservation_failure:
2213 mutex_unlock(&(*mem)->process_info->lock);
2214 amdgpu_sync_free(&(*mem)->sync);
2215 remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2216 amdgpu_bo_unref(&gws_bo);
2217 mutex_destroy(&(*mem)->lock);
2218 kfree(*mem);
2219 *mem = NULL;
2220 return ret;
2221 }
2222
2223 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2224 {
2225 int ret;
2226 struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2227 struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2228 struct amdgpu_bo *gws_bo = kgd_mem->bo;
2229
2230 /* Remove BO from process's validate list so restore worker won't touch
2231 * it anymore
2232 */
2233 remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2234
2235 ret = amdgpu_bo_reserve(gws_bo, false);
2236 if (unlikely(ret)) {
2237 pr_err("Reserve gws bo failed %d\n", ret);
2238 //TODO add BO back to validate_list?
2239 return ret;
2240 }
2241 amdgpu_amdkfd_remove_eviction_fence(gws_bo,
2242 process_info->eviction_fence);
2243 amdgpu_bo_unreserve(gws_bo);
2244 amdgpu_sync_free(&kgd_mem->sync);
2245 amdgpu_bo_unref(&gws_bo);
2246 mutex_destroy(&kgd_mem->lock);
2247 kfree(mem);
2248 return 0;
2249 }
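
/*
 * Illustrative pairing (a sketch with assumed variable names): KFD is
 * expected to call these two helpers symmetrically when a process starts
 * and stops using the GWS BO that amdgpu owns.
 *
 *	struct kgd_mem *gws_mem;
 *	int ret;
 *
 *	ret = amdgpu_amdkfd_add_gws_to_process(process_info, gws_bo, &gws_mem);
 *	if (ret)
 *		return ret;
 *	(... process uses GWS ...)
 *	ret = amdgpu_amdkfd_remove_gws_from_process(process_info, gws_mem);
 */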
2250
2251 /* Returns GPU-specific tiling mode information */
2252 int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
2253 struct tile_config *config)
2254 {
2255 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
2256
2257 config->gb_addr_config = adev->gfx.config.gb_addr_config;
2258 config->tile_config_ptr = adev->gfx.config.tile_mode_array;
2259 config->num_tile_configs =
2260 ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2261 config->macro_tile_config_ptr =
2262 adev->gfx.config.macrotile_mode_array;
2263 config->num_macro_tile_configs =
2264 ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2265
2266 	/* These values are not set from GFX9 onwards */
2267 config->num_banks = adev->gfx.config.num_banks;
2268 config->num_ranks = adev->gfx.config.num_ranks;
2269
2270 return 0;
2271 }
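
/*
 * Illustrative usage (a sketch, with an assumed kgd handle): callers pass
 * a zeroed struct tile_config and then read the fields that apply to their
 * ASIC generation.
 *
 *	struct tile_config cfg = {};
 *
 *	if (!amdgpu_amdkfd_get_tile_config(kgd, &cfg))
 *		pr_debug("gb_addr_config 0x%x, %u tile configs\n",
 *			 cfg.gb_addr_config, cfg.num_tile_configs);
 */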
2272