1 /*
2  * Copyright 2014-2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 #include <linux/dma-buf.h>
23 #include <linux/list.h>
24 #include <linux/pagemap.h>
25 #include <linux/sched/mm.h>
26 #include <linux/sched/task.h>
27 
28 #include "amdgpu_object.h"
29 #include "amdgpu_vm.h"
30 #include "amdgpu_amdkfd.h"
31 #include "amdgpu_dma_buf.h"
32 #include <uapi/linux/kfd_ioctl.h>
33 
34 /* BO flag to indicate a KFD userptr BO */
35 #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
36 
37 /* Userptr restore delay, just long enough to allow consecutive VM
38  * changes to accumulate
39  */
40 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
41 
42 /* Impose limit on how much memory KFD can use */
43 static struct {
44 	uint64_t max_system_mem_limit;
45 	uint64_t max_ttm_mem_limit;
46 	int64_t system_mem_used;
47 	int64_t ttm_mem_used;
48 	spinlock_t mem_limit_lock;
49 } kfd_mem_limit;
50 
51 static const char * const domain_bit_to_string[] = {
52 		"CPU",
53 		"GTT",
54 		"VRAM",
55 		"GDS",
56 		"GWS",
57 		"OA"
58 };
59 
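/* Map a single-bit GEM domain mask to its name in the array above;
 * ffs() returns the 1-based position of the lowest set bit.
 */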
60 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
61 
62 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
63 
64 
65 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
66 {
67 	return (struct amdgpu_device *)kgd;
68 }
69 
70 static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
71 		struct kgd_mem *mem)
72 {
73 	struct kfd_bo_va_list *entry;
74 
75 	list_for_each_entry(entry, &mem->bo_va_list, bo_list)
76 		if (entry->bo_va->base.vm == avm)
77 			return false;
78 
79 	return true;
80 }
81 
82 /* Set memory usage limits. Currently, the limits are:
83  *  System (TTM + userptr) memory - 15/16th System RAM
84  *  TTM memory - 3/8th System RAM
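 *  (e.g. with 64 GiB of low system RAM: ~60 GiB system limit, ~24 GiB TTM limit)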
85  */
86 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
87 {
88 	struct sysinfo si;
89 	uint64_t mem;
90 
91 	si_meminfo(&si);
92 	mem = si.totalram - si.totalhigh;
93 	mem *= si.mem_unit;
94 
95 	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
96 	kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
97 	kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
98 	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
99 		(kfd_mem_limit.max_system_mem_limit >> 20),
100 		(kfd_mem_limit.max_ttm_mem_limit >> 20));
101 }
102 
103 /* Estimate page table size needed to represent a given memory size
104  *
105  * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
106  * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
107  * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
108  * for 2MB pages for TLB efficiency. However, small allocations and
109  * fragmented system memory still need some 4KB pages. We choose a
110  * compromise that should work in most cases without reserving too
111  * much memory for page tables unnecessarily (factor 16K, >> 14).
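 * For example, 64 GiB of memory maps to 64 GiB >> 14 = 4 MiB of page tables.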
112  */
113 #define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
114 
115 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
116 		uint64_t size, u32 domain, bool sg)
117 {
118 	uint64_t reserved_for_pt =
119 		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
120 	size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
121 	int ret = 0;
122 
123 	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
124 				       sizeof(struct amdgpu_bo));
125 
126 	vram_needed = 0;
127 	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
128 		/* TTM GTT memory */
129 		system_mem_needed = acc_size + size;
130 		ttm_mem_needed = acc_size + size;
131 	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
132 		/* Userptr */
133 		system_mem_needed = acc_size + size;
134 		ttm_mem_needed = acc_size;
135 	} else {
136 		/* VRAM and SG */
137 		system_mem_needed = acc_size;
138 		ttm_mem_needed = acc_size;
139 		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
140 			vram_needed = size;
141 	}
142 
143 	spin_lock(&kfd_mem_limit.mem_limit_lock);
144 
145 	if (kfd_mem_limit.system_mem_used + system_mem_needed >
146 	    kfd_mem_limit.max_system_mem_limit)
147 		pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
148 
149 	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
150 	     kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
151 	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
152 	     kfd_mem_limit.max_ttm_mem_limit) ||
153 	    (adev->kfd.vram_used + vram_needed >
154 	     adev->gmc.real_vram_size - reserved_for_pt)) {
155 		ret = -ENOMEM;
156 	} else {
157 		kfd_mem_limit.system_mem_used += system_mem_needed;
158 		kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
159 		adev->kfd.vram_used += vram_needed;
160 	}
161 
162 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
163 	return ret;
164 }
165 
166 static void unreserve_mem_limit(struct amdgpu_device *adev,
167 		uint64_t size, u32 domain, bool sg)
168 {
169 	size_t acc_size;
170 
171 	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
172 				       sizeof(struct amdgpu_bo));
173 
174 	spin_lock(&kfd_mem_limit.mem_limit_lock);
175 	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
176 		kfd_mem_limit.system_mem_used -= (acc_size + size);
177 		kfd_mem_limit.ttm_mem_used -= (acc_size + size);
178 	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
179 		kfd_mem_limit.system_mem_used -= (acc_size + size);
180 		kfd_mem_limit.ttm_mem_used -= acc_size;
181 	} else {
182 		kfd_mem_limit.system_mem_used -= acc_size;
183 		kfd_mem_limit.ttm_mem_used -= acc_size;
184 		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
185 			adev->kfd.vram_used -= size;
186 			WARN_ONCE(adev->kfd.vram_used < 0,
187 				  "kfd VRAM memory accounting unbalanced");
188 		}
189 	}
190 	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
191 		  "kfd system memory accounting unbalanced");
192 	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
193 		  "kfd TTM memory accounting unbalanced");
194 
195 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
196 }
197 
198 void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
199 {
200 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
201 	u32 domain = bo->preferred_domains;
202 	bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
203 
204 	if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
205 		domain = AMDGPU_GEM_DOMAIN_CPU;
206 		sg = false;
207 	}
208 
209 	unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
210 }
211 
212 
213 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
214  *  reservation object.
215  *
216  * @bo: [IN] Remove eviction fence(s) from this BO
217  * @ef: [IN] This eviction fence is removed if it
218  *  is present in the shared list.
219  *
220  * NOTE: Must be called with the BO reserved, i.e. with bo->tbo.base.resv locked.
221  */
222 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
223 					struct amdgpu_amdkfd_fence *ef)
224 {
225 	struct dma_resv *resv = bo->tbo.base.resv;
226 	struct dma_resv_list *old, *new;
227 	unsigned int i, j, k;
228 
229 	if (!ef)
230 		return -EINVAL;
231 
232 	old = dma_resv_get_list(resv);
233 	if (!old)
234 		return 0;
235 
236 	new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
237 		      GFP_KERNEL);
238 	if (!new)
239 		return -ENOMEM;
240 
241 	/* Go through all the shared fences in the reservation object and sort
242 	 * the interesting ones to the end of the list.
243 	 */
244 	for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
245 		struct dma_fence *f;
246 
247 		f = rcu_dereference_protected(old->shared[i],
248 					      dma_resv_held(resv));
249 
250 		if (f->context == ef->base.context)
251 			RCU_INIT_POINTER(new->shared[--j], f);
252 		else
253 			RCU_INIT_POINTER(new->shared[k++], f);
254 	}
255 	new->shared_max = old->shared_max;
256 	new->shared_count = k;
257 
258 	/* Install the new fence list, seqcount provides the barriers */
259 	write_seqcount_begin(&resv->seq);
260 	RCU_INIT_POINTER(resv->fence, new);
261 	write_seqcount_end(&resv->seq);
262 
263 	/* Drop the references to the removed fences */
264 	for (i = j, k = 0; i < old->shared_count; ++i) {
265 		struct dma_fence *f;
266 
267 		f = rcu_dereference_protected(new->shared[i],
268 					      dma_resv_held(resv));
269 		dma_fence_put(f);
270 	}
271 	kfree_rcu(old, rcu);
272 
273 	return 0;
274 }
275 
276 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
277 {
278 	struct amdgpu_bo *root = bo;
279 	struct amdgpu_vm_bo_base *vm_bo;
280 	struct amdgpu_vm *vm;
281 	struct amdkfd_process_info *info;
282 	struct amdgpu_amdkfd_fence *ef;
283 	int ret;
284 
285 	/* We can always get vm_bo from the root PD BO. */
286 	while (root->parent)
287 		root = root->parent;
288 
289 	vm_bo = root->vm_bo;
290 	if (!vm_bo)
291 		return 0;
292 
293 	vm = vm_bo->vm;
294 	if (!vm)
295 		return 0;
296 
297 	info = vm->process_info;
298 	if (!info || !info->eviction_fence)
299 		return 0;
300 
301 	ef = container_of(dma_fence_get(&info->eviction_fence->base),
302 			struct amdgpu_amdkfd_fence, base);
303 
304 	BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
305 	ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
306 	dma_resv_unlock(bo->tbo.base.resv);
307 
308 	dma_fence_put(&ef->base);
309 	return ret;
310 }
311 
312 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
313 				     bool wait)
314 {
315 	struct ttm_operation_ctx ctx = { false, false };
316 	int ret;
317 
318 	if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
319 		 "Called with userptr BO"))
320 		return -EINVAL;
321 
322 	amdgpu_bo_placement_from_domain(bo, domain);
323 
324 	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
325 	if (ret)
326 		goto validate_fail;
327 	if (wait)
328 		amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
329 
330 validate_fail:
331 	return ret;
332 }
333 
334 static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
335 {
336 	return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
337 }
338 
339 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
340  *
341  * Page directories are not updated here because huge page handling
342  * during page table updates can invalidate page directory entries
343  * again. Page directories are only updated after updating page
344  * tables.
345  */
346 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
347 {
348 	struct amdgpu_bo *pd = vm->root.base.bo;
349 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
350 	int ret;
351 
352 	ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
353 	if (ret) {
354 		pr_err("failed to validate PT BOs\n");
355 		return ret;
356 	}
357 
358 	ret = amdgpu_amdkfd_validate_vm_bo(NULL, pd);
359 	if (ret) {
360 		pr_err("failed to validate PD\n");
361 		return ret;
362 	}
363 
364 	vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
365 
366 	if (vm->use_cpu_for_update) {
367 		ret = amdgpu_bo_kmap(pd, NULL);
368 		if (ret) {
369 			pr_err("failed to kmap PD, ret=%d\n", ret);
370 			return ret;
371 		}
372 	}
373 
374 	return 0;
375 }
376 
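/* vm_update_pds - Update the page directory entries of a VM and add the
 * resulting fence to the sync object so callers can wait for the update
 * to complete.
 */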
377 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
378 {
379 	struct amdgpu_bo *pd = vm->root.base.bo;
380 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
381 	int ret;
382 
383 	ret = amdgpu_vm_update_pdes(adev, vm, false);
384 	if (ret)
385 		return ret;
386 
387 	return amdgpu_sync_fence(sync, vm->last_update);
388 }
389 
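/* get_pte_flags - Determine the ASIC-specific PTE flags (access bits and
 * memory type) for mapping a BO, based on its allocation flags and whether
 * it is mapped on the GPU that owns it.
 */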
390 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
391 {
392 	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
393 	bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
394 	uint32_t mapping_flags;
395 
396 	mapping_flags = AMDGPU_VM_PAGE_READABLE;
397 	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
398 		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
399 	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
400 		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
401 
402 	switch (adev->asic_type) {
403 	case CHIP_ARCTURUS:
404 		if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
405 			if (bo_adev == adev)
406 				mapping_flags |= coherent ?
407 					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
408 			else
409 				mapping_flags |= AMDGPU_VM_MTYPE_UC;
410 		} else {
411 			mapping_flags |= coherent ?
412 				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
413 		}
414 		break;
415 	default:
416 		mapping_flags |= coherent ?
417 			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
418 	}
419 
420 	return amdgpu_gem_va_map_flags(adev, mapping_flags);
421 }
422 
423 /* add_bo_to_vm - Add a BO to a VM
424  *
425  * Everything that needs to be done only once when a BO is first added
426  * to a VM. It can later be mapped and unmapped many times without
427  * repeating these steps.
428  *
429  * 1. Allocate and initialize BO VA entry data structure
430  * 2. Add BO to the VM
431  * 3. Determine ASIC-specific PTE flags
432  * 4. Alloc page tables and directories if needed
433  * 4a.  Validate new page tables and directories
434  */
435 static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
436 		struct amdgpu_vm *vm, bool is_aql,
437 		struct kfd_bo_va_list **p_bo_va_entry)
438 {
439 	int ret;
440 	struct kfd_bo_va_list *bo_va_entry;
441 	struct amdgpu_bo *bo = mem->bo;
442 	uint64_t va = mem->va;
443 	struct list_head *list_bo_va = &mem->bo_va_list;
444 	unsigned long bo_size = bo->tbo.mem.size;
445 
446 	if (!va) {
447 		pr_err("Invalid VA when adding BO to VM\n");
448 		return -EINVAL;
449 	}
450 
451 	if (is_aql)
452 		va += bo_size;
453 
454 	bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
455 	if (!bo_va_entry)
456 		return -ENOMEM;
457 
458 	pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
459 			va + bo_size, vm);
460 
461 	/* Add BO to VM internal data structures */
462 	bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
463 	if (!bo_va_entry->bo_va) {
464 		ret = -EINVAL;
465 		pr_err("Failed to add BO object to VM. ret == %d\n",
466 				ret);
467 		goto err_vmadd;
468 	}
469 
470 	bo_va_entry->va = va;
471 	bo_va_entry->pte_flags = get_pte_flags(adev, mem);
472 	bo_va_entry->kgd_dev = (void *)adev;
473 	list_add(&bo_va_entry->bo_list, list_bo_va);
474 
475 	if (p_bo_va_entry)
476 		*p_bo_va_entry = bo_va_entry;
477 
478 	/* Allocate and validate page tables if needed */
479 	ret = vm_validate_pt_pd_bos(vm);
480 	if (ret) {
481 		pr_err("validate_pt_pd_bos() failed\n");
482 		goto err_alloc_pts;
483 	}
484 
485 	return 0;
486 
487 err_alloc_pts:
488 	amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
489 	list_del(&bo_va_entry->bo_list);
490 err_vmadd:
491 	kfree(bo_va_entry);
492 	return ret;
493 }
494 
495 static void remove_bo_from_vm(struct amdgpu_device *adev,
496 		struct kfd_bo_va_list *entry, unsigned long size)
497 {
498 	pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
499 			entry->va,
500 			entry->va + size, entry);
501 	amdgpu_vm_bo_rmv(adev, entry->bo_va);
502 	list_del(&entry->bo_list);
503 	kfree(entry);
504 }
505 
506 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
507 				struct amdkfd_process_info *process_info,
508 				bool userptr)
509 {
510 	struct ttm_validate_buffer *entry = &mem->validate_list;
511 	struct amdgpu_bo *bo = mem->bo;
512 
513 	INIT_LIST_HEAD(&entry->head);
514 	entry->num_shared = 1;
515 	entry->bo = &bo->tbo;
516 	mutex_lock(&process_info->lock);
517 	if (userptr)
518 		list_add_tail(&entry->head, &process_info->userptr_valid_list);
519 	else
520 		list_add_tail(&entry->head, &process_info->kfd_bo_list);
521 	mutex_unlock(&process_info->lock);
522 }
523 
524 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
525 		struct amdkfd_process_info *process_info)
526 {
527 	struct ttm_validate_buffer *bo_list_entry;
528 
529 	bo_list_entry = &mem->validate_list;
530 	mutex_lock(&process_info->lock);
531 	list_del(&bo_list_entry->head);
532 	mutex_unlock(&process_info->lock);
533 }
534 
535 /* Initializes user pages. It registers the MMU notifier and validates
536  * the userptr BO in the GTT domain.
537  *
538  * The BO must already be on the userptr_valid_list. Otherwise an
539  * eviction and restore may happen that leaves the new BO unmapped
540  * with the user mode queues running.
541  *
542  * Takes the process_info->lock to protect against concurrent restore
543  * workers.
544  *
545  * Returns 0 for success, negative errno for errors.
546  */
547 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
548 {
549 	struct amdkfd_process_info *process_info = mem->process_info;
550 	struct amdgpu_bo *bo = mem->bo;
551 	struct ttm_operation_ctx ctx = { true, false };
552 	int ret = 0;
553 
554 	mutex_lock(&process_info->lock);
555 
556 	ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
557 	if (ret) {
558 		pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
559 		goto out;
560 	}
561 
562 	ret = amdgpu_mn_register(bo, user_addr);
563 	if (ret) {
564 		pr_err("%s: Failed to register MMU notifier: %d\n",
565 		       __func__, ret);
566 		goto out;
567 	}
568 
569 	ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
570 	if (ret) {
571 		pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
572 		goto unregister_out;
573 	}
574 
575 	ret = amdgpu_bo_reserve(bo, true);
576 	if (ret) {
577 		pr_err("%s: Failed to reserve BO\n", __func__);
578 		goto release_out;
579 	}
580 	amdgpu_bo_placement_from_domain(bo, mem->domain);
581 	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
582 	if (ret)
583 		pr_err("%s: failed to validate BO\n", __func__);
584 	amdgpu_bo_unreserve(bo);
585 
586 release_out:
587 	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
588 unregister_out:
589 	if (ret)
590 		amdgpu_mn_unregister(bo);
591 out:
592 	mutex_unlock(&process_info->lock);
593 	return ret;
594 }
595 
596 /* Reserving a BO and its page table BOs must happen atomically to
597  * avoid deadlocks. Some operations update multiple VMs at once. Track
598  * all the reservation info in a context structure. Optionally a sync
599  * object can track VM updates.
600  */
601 struct bo_vm_reservation_context {
602 	struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
603 	unsigned int n_vms;		    /* Number of VMs reserved	    */
604 	struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
605 	struct ww_acquire_ctx ticket;	    /* Reservation ticket	    */
606 	struct list_head list, duplicates;  /* BO lists			    */
607 	struct amdgpu_sync *sync;	    /* Pointer to sync object	    */
608 	bool reserved;			    /* Whether BOs are reserved	    */
609 };
610 
611 enum bo_vm_match {
612 	BO_VM_NOT_MAPPED = 0,	/* Match VMs where a BO is not mapped */
613 	BO_VM_MAPPED,		/* Match VMs where a BO is mapped     */
614 	BO_VM_ALL,		/* Match all VMs a BO was added to    */
615 };
616 
617 /**
618  * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
619  * @mem: KFD BO structure.
620  * @vm: the VM to reserve.
621  * @ctx: the struct that will be used in unreserve_bo_and_vms().
622  */
623 static int reserve_bo_and_vm(struct kgd_mem *mem,
624 			      struct amdgpu_vm *vm,
625 			      struct bo_vm_reservation_context *ctx)
626 {
627 	struct amdgpu_bo *bo = mem->bo;
628 	int ret;
629 
630 	WARN_ON(!vm);
631 
632 	ctx->reserved = false;
633 	ctx->n_vms = 1;
634 	ctx->sync = &mem->sync;
635 
636 	INIT_LIST_HEAD(&ctx->list);
637 	INIT_LIST_HEAD(&ctx->duplicates);
638 
639 	ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
640 	if (!ctx->vm_pd)
641 		return -ENOMEM;
642 
643 	ctx->kfd_bo.priority = 0;
644 	ctx->kfd_bo.tv.bo = &bo->tbo;
645 	ctx->kfd_bo.tv.num_shared = 1;
646 	list_add(&ctx->kfd_bo.tv.head, &ctx->list);
647 
648 	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
649 
650 	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
651 				     false, &ctx->duplicates);
652 	if (ret) {
653 		pr_err("Failed to reserve buffers in ttm.\n");
654 		kfree(ctx->vm_pd);
655 		ctx->vm_pd = NULL;
656 		return ret;
657 	}
658 
659 	ctx->reserved = true;
660 	return 0;
661 }
662 
663 /**
664  * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
665  * @mem: KFD BO structure.
666  * @vm: the VM to reserve. If NULL, all VMs the BO was added to are
667  * used. Otherwise, only the given VM is considered.
668  * @map_type: the mapping status that will be used to filter the VMs.
669  * @ctx: the struct that will be used in unreserve_bo_and_vms().
670  *
671  * Returns 0 for success, negative for failure.
672  */
673 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
674 				struct amdgpu_vm *vm, enum bo_vm_match map_type,
675 				struct bo_vm_reservation_context *ctx)
676 {
677 	struct amdgpu_bo *bo = mem->bo;
678 	struct kfd_bo_va_list *entry;
679 	unsigned int i;
680 	int ret;
681 
682 	ctx->reserved = false;
683 	ctx->n_vms = 0;
684 	ctx->vm_pd = NULL;
685 	ctx->sync = &mem->sync;
686 
687 	INIT_LIST_HEAD(&ctx->list);
688 	INIT_LIST_HEAD(&ctx->duplicates);
689 
690 	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
691 		if ((vm && vm != entry->bo_va->base.vm) ||
692 			(entry->is_mapped != map_type
693 			&& map_type != BO_VM_ALL))
694 			continue;
695 
696 		ctx->n_vms++;
697 	}
698 
699 	if (ctx->n_vms != 0) {
700 		ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
701 				     GFP_KERNEL);
702 		if (!ctx->vm_pd)
703 			return -ENOMEM;
704 	}
705 
706 	ctx->kfd_bo.priority = 0;
707 	ctx->kfd_bo.tv.bo = &bo->tbo;
708 	ctx->kfd_bo.tv.num_shared = 1;
709 	list_add(&ctx->kfd_bo.tv.head, &ctx->list);
710 
711 	i = 0;
712 	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
713 		if ((vm && vm != entry->bo_va->base.vm) ||
714 			(entry->is_mapped != map_type
715 			&& map_type != BO_VM_ALL))
716 			continue;
717 
718 		amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
719 				&ctx->vm_pd[i]);
720 		i++;
721 	}
722 
723 	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
724 				     false, &ctx->duplicates);
725 	if (ret) {
726 		pr_err("Failed to reserve buffers in ttm.\n");
727 		kfree(ctx->vm_pd);
728 		ctx->vm_pd = NULL;
729 		return ret;
730 	}
731 
732 	ctx->reserved = true;
733 	return 0;
734 }
735 
736 /**
737  * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
738  * @ctx: Reservation context to unreserve
739  * @wait: Optionally wait for a sync object representing pending VM updates
740  * @intr: Whether the wait is interruptible
741  *
742  * Also frees any resources allocated in
743  * reserve_bo_and_(cond_)vm(s). Returns the status from
744  * amdgpu_sync_wait.
745  */
746 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
747 				 bool wait, bool intr)
748 {
749 	int ret = 0;
750 
751 	if (wait)
752 		ret = amdgpu_sync_wait(ctx->sync, intr);
753 
754 	if (ctx->reserved)
755 		ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
756 	kfree(ctx->vm_pd);
757 
758 	ctx->sync = NULL;
759 
760 	ctx->reserved = false;
761 	ctx->vm_pd = NULL;
762 
763 	return ret;
764 }
765 
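/* unmap_bo_from_gpuvm - Remove the mapping at entry->va from the VM, clear
 * the freed mappings and add the page table update fence to the sync object.
 */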
766 static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
767 				struct kfd_bo_va_list *entry,
768 				struct amdgpu_sync *sync)
769 {
770 	struct amdgpu_bo_va *bo_va = entry->bo_va;
771 	struct amdgpu_vm *vm = bo_va->base.vm;
772 
773 	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
774 
775 	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
776 
777 	amdgpu_sync_fence(sync, bo_va->last_pt_update);
778 
779 	return 0;
780 }
781 
782 static int update_gpuvm_pte(struct amdgpu_device *adev,
783 		struct kfd_bo_va_list *entry,
784 		struct amdgpu_sync *sync)
785 {
786 	int ret;
787 	struct amdgpu_bo_va *bo_va = entry->bo_va;
788 
789 	/* Update the page tables  */
790 	ret = amdgpu_vm_bo_update(adev, bo_va, false);
791 	if (ret) {
792 		pr_err("amdgpu_vm_bo_update failed\n");
793 		return ret;
794 	}
795 
796 	return amdgpu_sync_fence(sync, bo_va->last_pt_update);
797 }
798 
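/* map_bo_to_gpuvm - Map a BO at entry->va in the VM and, unless
 * no_update_pte is set, update the page tables and add the update fence
 * to the sync object.
 */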
799 static int map_bo_to_gpuvm(struct amdgpu_device *adev,
800 		struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
801 		bool no_update_pte)
802 {
803 	int ret;
804 
805 	/* Set virtual address for the allocation */
806 	ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
807 			       amdgpu_bo_size(entry->bo_va->base.bo),
808 			       entry->pte_flags);
809 	if (ret) {
810 		pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
811 				entry->va, ret);
812 		return ret;
813 	}
814 
815 	if (no_update_pte)
816 		return 0;
817 
818 	ret = update_gpuvm_pte(adev, entry, sync);
819 	if (ret) {
820 		pr_err("update_gpuvm_pte() failed\n");
821 		goto update_gpuvm_pte_failed;
822 	}
823 
824 	return 0;
825 
826 update_gpuvm_pte_failed:
827 	unmap_bo_from_gpuvm(adev, entry, sync);
828 	return ret;
829 }
830 
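/* create_doorbell_sg - Build a single-entry sg_table describing a doorbell
 * or MMIO page at the given DMA address, for wrapping in an SG BO.
 */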
831 static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
832 {
833 	struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
834 
835 	if (!sg)
836 		return NULL;
837 	if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
838 		kfree(sg);
839 		return NULL;
840 	}
841 	sg->sgl->dma_address = addr;
842 	sg->sgl->length = size;
843 #ifdef CONFIG_NEED_SG_DMA_LENGTH
844 	sg->sgl->dma_length = size;
845 #endif
846 	return sg;
847 }
848 
849 static int process_validate_vms(struct amdkfd_process_info *process_info)
850 {
851 	struct amdgpu_vm *peer_vm;
852 	int ret;
853 
854 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
855 			    vm_list_node) {
856 		ret = vm_validate_pt_pd_bos(peer_vm);
857 		if (ret)
858 			return ret;
859 	}
860 
861 	return 0;
862 }
863 
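/* process_sync_pds_resv - Add fences on the page-directory reservation
 * objects of all VMs in the process to the sync object, skipping fences
 * owned by KFD itself, so pending PD updates can be waited for.
 */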
864 static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
865 				 struct amdgpu_sync *sync)
866 {
867 	struct amdgpu_vm *peer_vm;
868 	int ret;
869 
870 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
871 			    vm_list_node) {
872 		struct amdgpu_bo *pd = peer_vm->root.base.bo;
873 
874 		ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
875 				       AMDGPU_SYNC_NE_OWNER,
876 				       AMDGPU_FENCE_OWNER_KFD);
877 		if (ret)
878 			return ret;
879 	}
880 
881 	return 0;
882 }
883 
884 static int process_update_pds(struct amdkfd_process_info *process_info,
885 			      struct amdgpu_sync *sync)
886 {
887 	struct amdgpu_vm *peer_vm;
888 	int ret;
889 
890 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
891 			    vm_list_node) {
892 		ret = vm_update_pds(peer_vm, sync);
893 		if (ret)
894 			return ret;
895 	}
896 
897 	return 0;
898 }
899 
900 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
901 		       struct dma_fence **ef)
902 {
903 	struct amdkfd_process_info *info = NULL;
904 	int ret;
905 
906 	if (!*process_info) {
907 		info = kzalloc(sizeof(*info), GFP_KERNEL);
908 		if (!info)
909 			return -ENOMEM;
910 
911 		mutex_init(&info->lock);
912 		INIT_LIST_HEAD(&info->vm_list_head);
913 		INIT_LIST_HEAD(&info->kfd_bo_list);
914 		INIT_LIST_HEAD(&info->userptr_valid_list);
915 		INIT_LIST_HEAD(&info->userptr_inval_list);
916 
917 		info->eviction_fence =
918 			amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
919 						   current->mm);
920 		if (!info->eviction_fence) {
921 			pr_err("Failed to create eviction fence\n");
922 			ret = -ENOMEM;
923 			goto create_evict_fence_fail;
924 		}
925 
926 		info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
927 		atomic_set(&info->evicted_bos, 0);
928 		INIT_DELAYED_WORK(&info->restore_userptr_work,
929 				  amdgpu_amdkfd_restore_userptr_worker);
930 
931 		*process_info = info;
932 		*ef = dma_fence_get(&info->eviction_fence->base);
933 	}
934 
935 	vm->process_info = *process_info;
936 
937 	/* Validate page directory and attach eviction fence */
938 	ret = amdgpu_bo_reserve(vm->root.base.bo, true);
939 	if (ret)
940 		goto reserve_pd_fail;
941 	ret = vm_validate_pt_pd_bos(vm);
942 	if (ret) {
943 		pr_err("validate_pt_pd_bos() failed\n");
944 		goto validate_pd_fail;
945 	}
946 	ret = amdgpu_bo_sync_wait(vm->root.base.bo,
947 				  AMDGPU_FENCE_OWNER_KFD, false);
948 	if (ret)
949 		goto wait_pd_fail;
950 	ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
951 	if (ret)
952 		goto reserve_shared_fail;
953 	amdgpu_bo_fence(vm->root.base.bo,
954 			&vm->process_info->eviction_fence->base, true);
955 	amdgpu_bo_unreserve(vm->root.base.bo);
956 
957 	/* Update process info */
958 	mutex_lock(&vm->process_info->lock);
959 	list_add_tail(&vm->vm_list_node,
960 			&(vm->process_info->vm_list_head));
961 	vm->process_info->n_vms++;
962 	mutex_unlock(&vm->process_info->lock);
963 
964 	return 0;
965 
966 reserve_shared_fail:
967 wait_pd_fail:
968 validate_pd_fail:
969 	amdgpu_bo_unreserve(vm->root.base.bo);
970 reserve_pd_fail:
971 	vm->process_info = NULL;
972 	if (info) {
973 		/* Two fence references: one in info and one in *ef */
974 		dma_fence_put(&info->eviction_fence->base);
975 		dma_fence_put(*ef);
976 		*ef = NULL;
977 		*process_info = NULL;
978 		put_pid(info->pid);
979 create_evict_fence_fail:
980 		mutex_destroy(&info->lock);
981 		kfree(info);
982 	}
983 	return ret;
984 }
985 
986 int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, u32 pasid,
987 					  void **vm, void **process_info,
988 					  struct dma_fence **ef)
989 {
990 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
991 	struct amdgpu_vm *new_vm;
992 	int ret;
993 
994 	new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
995 	if (!new_vm)
996 		return -ENOMEM;
997 
998 	/* Initialize AMDGPU part of the VM */
999 	ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
1000 	if (ret) {
1001 		pr_err("Failed init vm ret %d\n", ret);
1002 		goto amdgpu_vm_init_fail;
1003 	}
1004 
1005 	/* Initialize KFD part of the VM and process info */
1006 	ret = init_kfd_vm(new_vm, process_info, ef);
1007 	if (ret)
1008 		goto init_kfd_vm_fail;
1009 
1010 	*vm = (void *) new_vm;
1011 
1012 	return 0;
1013 
1014 init_kfd_vm_fail:
1015 	amdgpu_vm_fini(adev, new_vm);
1016 amdgpu_vm_init_fail:
1017 	kfree(new_vm);
1018 	return ret;
1019 }
1020 
1021 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
1022 					   struct file *filp, u32 pasid,
1023 					   void **vm, void **process_info,
1024 					   struct dma_fence **ef)
1025 {
1026 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1027 	struct amdgpu_fpriv *drv_priv;
1028 	struct amdgpu_vm *avm;
1029 	int ret;
1030 
1031 	ret = amdgpu_file_to_fpriv(filp, &drv_priv);
1032 	if (ret)
1033 		return ret;
1034 	avm = &drv_priv->vm;
1035 
1036 	/* Already a compute VM? */
1037 	if (avm->process_info)
1038 		return -EINVAL;
1039 
1040 	/* Convert VM into a compute VM */
1041 	ret = amdgpu_vm_make_compute(adev, avm, pasid);
1042 	if (ret)
1043 		return ret;
1044 
1045 	/* Initialize KFD part of the VM and process info */
1046 	ret = init_kfd_vm(avm, process_info, ef);
1047 	if (ret)
1048 		return ret;
1049 
1050 	*vm = (void *)avm;
1051 
1052 	return 0;
1053 }
1054 
1055 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1056 				    struct amdgpu_vm *vm)
1057 {
1058 	struct amdkfd_process_info *process_info = vm->process_info;
1059 	struct amdgpu_bo *pd = vm->root.base.bo;
1060 
1061 	if (!process_info)
1062 		return;
1063 
1064 	/* Release eviction fence from PD */
1065 	amdgpu_bo_reserve(pd, false);
1066 	amdgpu_bo_fence(pd, NULL, false);
1067 	amdgpu_bo_unreserve(pd);
1068 
1069 	/* Update process info */
1070 	mutex_lock(&process_info->lock);
1071 	process_info->n_vms--;
1072 	list_del(&vm->vm_list_node);
1073 	mutex_unlock(&process_info->lock);
1074 
1075 	vm->process_info = NULL;
1076 
1077 	/* Release per-process resources when last compute VM is destroyed */
1078 	if (!process_info->n_vms) {
1079 		WARN_ON(!list_empty(&process_info->kfd_bo_list));
1080 		WARN_ON(!list_empty(&process_info->userptr_valid_list));
1081 		WARN_ON(!list_empty(&process_info->userptr_inval_list));
1082 
1083 		dma_fence_put(&process_info->eviction_fence->base);
1084 		cancel_delayed_work_sync(&process_info->restore_userptr_work);
1085 		put_pid(process_info->pid);
1086 		mutex_destroy(&process_info->lock);
1087 		kfree(process_info);
1088 	}
1089 }
1090 
1091 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
1092 {
1093 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1094 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1095 
1096 	if (WARN_ON(!kgd || !vm))
1097 		return;
1098 
1099 	pr_debug("Destroying process vm %p\n", vm);
1100 
1101 	/* Release the VM context */
1102 	amdgpu_vm_fini(adev, avm);
1103 	kfree(vm);
1104 }
1105 
1106 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
1107 {
1108 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1109 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1110 
1111 	if (WARN_ON(!kgd || !vm))
1112 		return;
1113 
1114 	pr_debug("Releasing process vm %p\n", vm);
1115 
1116 	/* The original pasid of the amdgpu vm has already been
1117 	 * released while converting the amdgpu vm to a compute vm.
1118 	 * The current pasid is managed by KFD and will be released
1119 	 * on KFD process destroy. Set the amdgpu pasid to 0 to
1120 	 * avoid a duplicate release.
1121 	 */
1122 	amdgpu_vm_release_compute(adev, avm);
1123 }
1124 
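/* Return the page directory address of the process VM. On ASICs older than
 * Vega10 it is returned as a page frame number (shifted by
 * AMDGPU_GPU_PAGE_SHIFT), on newer ASICs as the full physical address.
 */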
1125 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
1126 {
1127 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1128 	struct amdgpu_bo *pd = avm->root.base.bo;
1129 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1130 
1131 	if (adev->asic_type < CHIP_VEGA10)
1132 		return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1133 	return avm->pd_phys_addr;
1134 }
1135 
1136 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1137 		struct kgd_dev *kgd, uint64_t va, uint64_t size,
1138 		void *vm, struct kgd_mem **mem,
1139 		uint64_t *offset, uint32_t flags)
1140 {
1141 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1142 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1143 	enum ttm_bo_type bo_type = ttm_bo_type_device;
1144 	struct sg_table *sg = NULL;
1145 	uint64_t user_addr = 0;
1146 	struct amdgpu_bo *bo;
1147 	struct amdgpu_bo_param bp;
1148 	u32 domain, alloc_domain;
1149 	u64 alloc_flags;
1150 	int ret;
1151 
1152 	/*
1153 	 * Check on which domain to allocate BO
1154 	 */
1155 	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1156 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1157 		alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
1158 		alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
1159 			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
1160 			AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
1161 	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
1162 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1163 		alloc_flags = 0;
1164 	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1165 		domain = AMDGPU_GEM_DOMAIN_GTT;
1166 		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1167 		alloc_flags = 0;
1168 		if (!offset || !*offset)
1169 			return -EINVAL;
1170 		user_addr = untagged_addr(*offset);
1171 	} else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1172 			KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1173 		domain = AMDGPU_GEM_DOMAIN_GTT;
1174 		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1175 		bo_type = ttm_bo_type_sg;
1176 		alloc_flags = 0;
1177 		if (size > UINT_MAX)
1178 			return -EINVAL;
1179 		sg = create_doorbell_sg(*offset, size);
1180 		if (!sg)
1181 			return -ENOMEM;
1182 	} else {
1183 		return -EINVAL;
1184 	}
1185 
1186 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1187 	if (!*mem) {
1188 		ret = -ENOMEM;
1189 		goto err;
1190 	}
1191 	INIT_LIST_HEAD(&(*mem)->bo_va_list);
1192 	mutex_init(&(*mem)->lock);
1193 	(*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1194 
1195 	/* Workaround for AQL queue wraparound bug. Map the same
1196 	 * memory twice. That means we only actually allocate half
1197 	 * the memory.
1198 	 */
1199 	if ((*mem)->aql_queue)
1200 		size = size >> 1;
1201 
1202 	(*mem)->alloc_flags = flags;
1203 
1204 	amdgpu_sync_create(&(*mem)->sync);
1205 
1206 	ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
1207 	if (ret) {
1208 		pr_debug("Insufficient system memory\n");
1209 		goto err_reserve_limit;
1210 	}
1211 
1212 	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1213 			va, size, domain_string(alloc_domain));
1214 
1215 	memset(&bp, 0, sizeof(bp));
1216 	bp.size = size;
1217 	bp.byte_align = 1;
1218 	bp.domain = alloc_domain;
1219 	bp.flags = alloc_flags;
1220 	bp.type = bo_type;
1221 	bp.resv = NULL;
1222 	ret = amdgpu_bo_create(adev, &bp, &bo);
1223 	if (ret) {
1224 		pr_debug("Failed to create BO on domain %s. ret %d\n",
1225 				domain_string(alloc_domain), ret);
1226 		goto err_bo_create;
1227 	}
1228 	if (bo_type == ttm_bo_type_sg) {
1229 		bo->tbo.sg = sg;
1230 		bo->tbo.ttm->sg = sg;
1231 	}
1232 	bo->kfd_bo = *mem;
1233 	(*mem)->bo = bo;
1234 	if (user_addr)
1235 		bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;
1236 
1237 	(*mem)->va = va;
1238 	(*mem)->domain = domain;
1239 	(*mem)->mapped_to_gpu_memory = 0;
1240 	(*mem)->process_info = avm->process_info;
1241 	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1242 
1243 	if (user_addr) {
1244 		ret = init_user_pages(*mem, user_addr);
1245 		if (ret)
1246 			goto allocate_init_user_pages_failed;
1247 	}
1248 
1249 	if (offset)
1250 		*offset = amdgpu_bo_mmap_offset(bo);
1251 
1252 	return 0;
1253 
1254 allocate_init_user_pages_failed:
1255 	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1256 	amdgpu_bo_unref(&bo);
1257 	/* Don't unreserve system mem limit twice */
1258 	goto err_reserve_limit;
1259 err_bo_create:
1260 	unreserve_mem_limit(adev, size, alloc_domain, !!sg);
1261 err_reserve_limit:
1262 	mutex_destroy(&(*mem)->lock);
1263 	kfree(*mem);
1264 err:
1265 	if (sg) {
1266 		sg_free_table(sg);
1267 		kfree(sg);
1268 	}
1269 	return ret;
1270 }
1271 
1272 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1273 		struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size)
1274 {
1275 	struct amdkfd_process_info *process_info = mem->process_info;
1276 	unsigned long bo_size = mem->bo->tbo.mem.size;
1277 	struct kfd_bo_va_list *entry, *tmp;
1278 	struct bo_vm_reservation_context ctx;
1279 	struct ttm_validate_buffer *bo_list_entry;
1280 	unsigned int mapped_to_gpu_memory;
1281 	int ret;
1282 	bool is_imported = false;
1283 
1284 	mutex_lock(&mem->lock);
1285 	mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
1286 	is_imported = mem->is_imported;
1287 	mutex_unlock(&mem->lock);
1288 	/* lock is not needed after this, since mem is unused and will
1289 	 * be freed anyway
1290 	 */
1291 
1292 	if (mapped_to_gpu_memory > 0) {
1293 		pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1294 				mem->va, bo_size);
1295 		return -EBUSY;
1296 	}
1297 
1298 	/* Make sure restore workers don't access the BO any more */
1299 	bo_list_entry = &mem->validate_list;
1300 	mutex_lock(&process_info->lock);
1301 	list_del(&bo_list_entry->head);
1302 	mutex_unlock(&process_info->lock);
1303 
1304 	/* No more MMU notifiers */
1305 	amdgpu_mn_unregister(mem->bo);
1306 
1307 	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1308 	if (unlikely(ret))
1309 		return ret;
1310 
1311 	/* The eviction fence should be removed by the last unmap.
1312 	 * TODO: Log an error condition if the bo still has the eviction fence
1313 	 * attached
1314 	 */
1315 	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1316 					process_info->eviction_fence);
1317 	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1318 		mem->va + bo_size * (1 + mem->aql_queue));
1319 
1320 	/* Remove from VM internal data structures */
1321 	list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
1322 		remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
1323 				entry, bo_size);
1324 
1325 	ret = unreserve_bo_and_vms(&ctx, false, false);
1326 
1327 	/* Free the sync object */
1328 	amdgpu_sync_free(&mem->sync);
1329 
1330 	/* If the SG is not NULL, it's one we created for a doorbell or mmio
1331 	 * remap BO. We need to free it.
1332 	 */
1333 	if (mem->bo->tbo.sg) {
1334 		sg_free_table(mem->bo->tbo.sg);
1335 		kfree(mem->bo->tbo.sg);
1336 	}
1337 
1338 	/* Update the size of the BO being freed if it was allocated from
1339 	 * VRAM and is not imported.
1340 	 */
1341 	if (size) {
1342 		if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
1343 		    (!is_imported))
1344 			*size = bo_size;
1345 		else
1346 			*size = 0;
1347 	}
1348 
1349 	/* Free the BO */
1350 	drm_gem_object_put(&mem->bo->tbo.base);
1351 	mutex_destroy(&mem->lock);
1352 	kfree(mem);
1353 
1354 	return ret;
1355 }
1356 
1357 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1358 		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1359 {
1360 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1361 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1362 	int ret;
1363 	struct amdgpu_bo *bo;
1364 	uint32_t domain;
1365 	struct kfd_bo_va_list *entry;
1366 	struct bo_vm_reservation_context ctx;
1367 	struct kfd_bo_va_list *bo_va_entry = NULL;
1368 	struct kfd_bo_va_list *bo_va_entry_aql = NULL;
1369 	unsigned long bo_size;
1370 	bool is_invalid_userptr = false;
1371 
1372 	bo = mem->bo;
1373 	if (!bo) {
1374 		pr_err("Invalid BO when mapping memory to GPU\n");
1375 		return -EINVAL;
1376 	}
1377 
1378 	/* Make sure restore is not running concurrently. Since we
1379 	 * don't map invalid userptr BOs, we rely on the next restore
1380 	 * worker to do the mapping
1381 	 */
1382 	mutex_lock(&mem->process_info->lock);
1383 
1384 	/* Take the mmap lock. If we find an invalid userptr BO, we can be
1385 	 * sure that the MMU notifier is no longer running
1386 	 * concurrently and the queues are actually stopped
1387 	 */
1388 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1389 		mmap_write_lock(current->mm);
1390 		is_invalid_userptr = atomic_read(&mem->invalid);
1391 		mmap_write_unlock(current->mm);
1392 	}
1393 
1394 	mutex_lock(&mem->lock);
1395 
1396 	domain = mem->domain;
1397 	bo_size = bo->tbo.mem.size;
1398 
1399 	pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1400 			mem->va,
1401 			mem->va + bo_size * (1 + mem->aql_queue),
1402 			vm, domain_string(domain));
1403 
1404 	ret = reserve_bo_and_vm(mem, vm, &ctx);
1405 	if (unlikely(ret))
1406 		goto out;
1407 
1408 	/* Userptr can be marked as "not invalid", but not actually be
1409 	 * validated yet (still in the system domain). In that case
1410 	 * the queues are still stopped and we can leave mapping for
1411 	 * the next restore worker
1412 	 */
1413 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1414 	    bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
1415 		is_invalid_userptr = true;
1416 
1417 	if (check_if_add_bo_to_vm(avm, mem)) {
1418 		ret = add_bo_to_vm(adev, mem, avm, false,
1419 				&bo_va_entry);
1420 		if (ret)
1421 			goto add_bo_to_vm_failed;
1422 		if (mem->aql_queue) {
1423 			ret = add_bo_to_vm(adev, mem, avm,
1424 					true, &bo_va_entry_aql);
1425 			if (ret)
1426 				goto add_bo_to_vm_failed_aql;
1427 		}
1428 	} else {
1429 		ret = vm_validate_pt_pd_bos(avm);
1430 		if (unlikely(ret))
1431 			goto add_bo_to_vm_failed;
1432 	}
1433 
1434 	if (mem->mapped_to_gpu_memory == 0 &&
1435 	    !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1436 		/* Validate BO only once. The eviction fence gets added to BO
1437 		 * the first time it is mapped. Validate will wait for all
1438 		 * background evictions to complete.
1439 		 */
1440 		ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1441 		if (ret) {
1442 			pr_debug("Validate failed\n");
1443 			goto map_bo_to_gpuvm_failed;
1444 		}
1445 	}
1446 
1447 	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1448 		if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
1449 			pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1450 					entry->va, entry->va + bo_size,
1451 					entry);
1452 
1453 			ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
1454 					      is_invalid_userptr);
1455 			if (ret) {
1456 				pr_err("Failed to map bo to gpuvm\n");
1457 				goto map_bo_to_gpuvm_failed;
1458 			}
1459 
1460 			ret = vm_update_pds(vm, ctx.sync);
1461 			if (ret) {
1462 				pr_err("Failed to update page directories\n");
1463 				goto map_bo_to_gpuvm_failed;
1464 			}
1465 
1466 			entry->is_mapped = true;
1467 			mem->mapped_to_gpu_memory++;
1468 			pr_debug("\t INC mapping count %d\n",
1469 					mem->mapped_to_gpu_memory);
1470 		}
1471 	}
1472 
1473 	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
1474 		amdgpu_bo_fence(bo,
1475 				&avm->process_info->eviction_fence->base,
1476 				true);
1477 	ret = unreserve_bo_and_vms(&ctx, false, false);
1478 
1479 	goto out;
1480 
1481 map_bo_to_gpuvm_failed:
1482 	if (bo_va_entry_aql)
1483 		remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
1484 add_bo_to_vm_failed_aql:
1485 	if (bo_va_entry)
1486 		remove_bo_from_vm(adev, bo_va_entry, bo_size);
1487 add_bo_to_vm_failed:
1488 	unreserve_bo_and_vms(&ctx, false, false);
1489 out:
1490 	mutex_unlock(&mem->process_info->lock);
1491 	mutex_unlock(&mem->lock);
1492 	return ret;
1493 }
1494 
1495 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1496 		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1497 {
1498 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1499 	struct amdkfd_process_info *process_info =
1500 		((struct amdgpu_vm *)vm)->process_info;
1501 	unsigned long bo_size = mem->bo->tbo.mem.size;
1502 	struct kfd_bo_va_list *entry;
1503 	struct bo_vm_reservation_context ctx;
1504 	int ret;
1505 
1506 	mutex_lock(&mem->lock);
1507 
1508 	ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
1509 	if (unlikely(ret))
1510 		goto out;
1511 	/* If no VMs were reserved, it means the BO wasn't actually mapped */
1512 	if (ctx.n_vms == 0) {
1513 		ret = -EINVAL;
1514 		goto unreserve_out;
1515 	}
1516 
1517 	ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
1518 	if (unlikely(ret))
1519 		goto unreserve_out;
1520 
1521 	pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1522 		mem->va,
1523 		mem->va + bo_size * (1 + mem->aql_queue),
1524 		vm);
1525 
1526 	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1527 		if (entry->bo_va->base.vm == vm && entry->is_mapped) {
1528 			pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1529 					entry->va,
1530 					entry->va + bo_size,
1531 					entry);
1532 
1533 			ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
1534 			if (ret == 0) {
1535 				entry->is_mapped = false;
1536 			} else {
1537 				pr_err("failed to unmap VA 0x%llx\n",
1538 						mem->va);
1539 				goto unreserve_out;
1540 			}
1541 
1542 			mem->mapped_to_gpu_memory--;
1543 			pr_debug("\t DEC mapping count %d\n",
1544 					mem->mapped_to_gpu_memory);
1545 		}
1546 	}
1547 
1548 	/* If BO is unmapped from all VMs, unfence it. It can be evicted if
1549 	 * required.
1550 	 */
1551 	if (mem->mapped_to_gpu_memory == 0 &&
1552 	    !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
1553 		amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1554 						process_info->eviction_fence);
1555 
1556 unreserve_out:
1557 	unreserve_bo_and_vms(&ctx, false, false);
1558 out:
1559 	mutex_unlock(&mem->lock);
1560 	return ret;
1561 }
1562 
1563 int amdgpu_amdkfd_gpuvm_sync_memory(
1564 		struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1565 {
1566 	struct amdgpu_sync sync;
1567 	int ret;
1568 
1569 	amdgpu_sync_create(&sync);
1570 
1571 	mutex_lock(&mem->lock);
1572 	amdgpu_sync_clone(&mem->sync, &sync);
1573 	mutex_unlock(&mem->lock);
1574 
1575 	ret = amdgpu_sync_wait(&sync, intr);
1576 	amdgpu_sync_free(&sync);
1577 	return ret;
1578 }
1579 
1580 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1581 		struct kgd_mem *mem, void **kptr, uint64_t *size)
1582 {
1583 	int ret;
1584 	struct amdgpu_bo *bo = mem->bo;
1585 
1586 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1587 		pr_err("userptr can't be mapped to kernel\n");
1588 		return -EINVAL;
1589 	}
1590 
1591 	/* Remove kgd_mem from the kfd_bo_list so this BO is not re-validated
1592 	 * when BOs are restored after an eviction.
1593 	 */
1594 	mutex_lock(&mem->process_info->lock);
1595 
1596 	ret = amdgpu_bo_reserve(bo, true);
1597 	if (ret) {
1598 		pr_err("Failed to reserve bo. ret %d\n", ret);
1599 		goto bo_reserve_failed;
1600 	}
1601 
1602 	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1603 	if (ret) {
1604 		pr_err("Failed to pin bo. ret %d\n", ret);
1605 		goto pin_failed;
1606 	}
1607 
1608 	ret = amdgpu_bo_kmap(bo, kptr);
1609 	if (ret) {
1610 		pr_err("Failed to map bo to kernel. ret %d\n", ret);
1611 		goto kmap_failed;
1612 	}
1613 
1614 	amdgpu_amdkfd_remove_eviction_fence(
1615 		bo, mem->process_info->eviction_fence);
1616 	list_del_init(&mem->validate_list.head);
1617 
1618 	if (size)
1619 		*size = amdgpu_bo_size(bo);
1620 
1621 	amdgpu_bo_unreserve(bo);
1622 
1623 	mutex_unlock(&mem->process_info->lock);
1624 	return 0;
1625 
1626 kmap_failed:
1627 	amdgpu_bo_unpin(bo);
1628 pin_failed:
1629 	amdgpu_bo_unreserve(bo);
1630 bo_reserve_failed:
1631 	mutex_unlock(&mem->process_info->lock);
1632 
1633 	return ret;
1634 }
1635 
1636 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1637 					      struct kfd_vm_fault_info *mem)
1638 {
1639 	struct amdgpu_device *adev;
1640 
1641 	adev = (struct amdgpu_device *)kgd;
1642 	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1643 		*mem = *adev->gmc.vm_fault_info;
1644 		mb();
1645 		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1646 	}
1647 	return 0;
1648 }
1649 
1650 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
1651 				      struct dma_buf *dma_buf,
1652 				      uint64_t va, void *vm,
1653 				      struct kgd_mem **mem, uint64_t *size,
1654 				      uint64_t *mmap_offset)
1655 {
1656 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
1657 	struct drm_gem_object *obj;
1658 	struct amdgpu_bo *bo;
1659 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1660 
1661 	if (dma_buf->ops != &amdgpu_dmabuf_ops)
1662 		/* Can't handle non-graphics buffers */
1663 		return -EINVAL;
1664 
1665 	obj = dma_buf->priv;
1666 	if (drm_to_adev(obj->dev) != adev)
1667 		/* Can't handle buffers from other devices */
1668 		return -EINVAL;
1669 
1670 	bo = gem_to_amdgpu_bo(obj);
1671 	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
1672 				    AMDGPU_GEM_DOMAIN_GTT)))
1673 		/* Only VRAM and GTT BOs are supported */
1674 		return -EINVAL;
1675 
1676 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1677 	if (!*mem)
1678 		return -ENOMEM;
1679 
1680 	if (size)
1681 		*size = amdgpu_bo_size(bo);
1682 
1683 	if (mmap_offset)
1684 		*mmap_offset = amdgpu_bo_mmap_offset(bo);
1685 
1686 	INIT_LIST_HEAD(&(*mem)->bo_va_list);
1687 	mutex_init(&(*mem)->lock);
1688 
1689 	(*mem)->alloc_flags =
1690 		((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1691 		KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
1692 		| KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
1693 		| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
1694 
1695 	drm_gem_object_get(&bo->tbo.base);
1696 	(*mem)->bo = bo;
1697 	(*mem)->va = va;
1698 	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1699 		AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
1700 	(*mem)->mapped_to_gpu_memory = 0;
1701 	(*mem)->process_info = avm->process_info;
1702 	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
1703 	amdgpu_sync_create(&(*mem)->sync);
1704 	(*mem)->is_imported = true;
1705 
1706 	return 0;
1707 }
1708 
1709 /* Evict a userptr BO by stopping the queues if necessary
1710  *
1711  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1712  * cannot do any memory allocations, and cannot take any locks that
1713  * are held elsewhere while allocating memory. Therefore this is as
1714  * simple as possible, using atomic counters.
1715  *
1716  * It doesn't do anything to the BO itself. The real work happens in
1717  * restore, where we get updated page addresses. This function only
1718  * ensures that GPU access to the BO is stopped.
1719  */
1720 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1721 				struct mm_struct *mm)
1722 {
1723 	struct amdkfd_process_info *process_info = mem->process_info;
1724 	int evicted_bos;
1725 	int r = 0;
1726 
1727 	atomic_inc(&mem->invalid);
1728 	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1729 	if (evicted_bos == 1) {
1730 		/* First eviction, stop the queues */
1731 		r = kgd2kfd_quiesce_mm(mm);
1732 		if (r)
1733 			pr_err("Failed to quiesce KFD\n");
1734 		schedule_delayed_work(&process_info->restore_userptr_work,
1735 			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1736 	}
1737 
1738 	return r;
1739 }
1740 
1741 /* Update invalid userptr BOs
1742  *
1743  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1744  * userptr_inval_list and updates user pages for all BOs that have
1745  * been invalidated since their last update.
1746  */
1747 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1748 				     struct mm_struct *mm)
1749 {
1750 	struct kgd_mem *mem, *tmp_mem;
1751 	struct amdgpu_bo *bo;
1752 	struct ttm_operation_ctx ctx = { false, false };
1753 	int invalid, ret;
1754 
1755 	/* Move all invalidated BOs to the userptr_inval_list and
1756 	 * release their user pages by migration to the CPU domain
1757 	 */
1758 	list_for_each_entry_safe(mem, tmp_mem,
1759 				 &process_info->userptr_valid_list,
1760 				 validate_list.head) {
1761 		if (!atomic_read(&mem->invalid))
1762 			continue; /* BO is still valid */
1763 
1764 		bo = mem->bo;
1765 
1766 		if (amdgpu_bo_reserve(bo, true))
1767 			return -EAGAIN;
1768 		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1769 		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1770 		amdgpu_bo_unreserve(bo);
1771 		if (ret) {
1772 			pr_err("%s: Failed to invalidate userptr BO\n",
1773 			       __func__);
1774 			return -EAGAIN;
1775 		}
1776 
1777 		list_move_tail(&mem->validate_list.head,
1778 			       &process_info->userptr_inval_list);
1779 	}
1780 
1781 	if (list_empty(&process_info->userptr_inval_list))
1782 		return 0; /* All evicted userptr BOs were freed */
1783 
1784 	/* Go through userptr_inval_list and update any invalid user_pages */
1785 	list_for_each_entry(mem, &process_info->userptr_inval_list,
1786 			    validate_list.head) {
1787 		invalid = atomic_read(&mem->invalid);
1788 		if (!invalid)
1789 			/* BO hasn't been invalidated since the last
1790 			 * revalidation attempt. Keep its BO list.
1791 			 */
1792 			continue;
1793 
1794 		bo = mem->bo;
1795 
1796 		/* Get updated user pages */
1797 		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
1798 		if (ret) {
1799 			pr_debug("%s: Failed to get user pages: %d\n",
1800 				__func__, ret);
1801 
1802 			/* Return error -EBUSY or -ENOMEM, retry restore */
1803 			return ret;
1804 		}
1805 
1806 		/*
1807 		 * FIXME: Cannot ignore the return code, must hold
1808 		 * notifier_lock
1809 		 */
1810 		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1811 
1812 		/* Mark the BO as valid unless it was invalidated
1813 		 * again concurrently.
1814 		 */
1815 		if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
1816 			return -EAGAIN;
1817 	}
1818 
1819 	return 0;
1820 }
1821 
1822 /* Validate invalid userptr BOs
1823  *
1824  * Validates BOs on the userptr_inval_list, and moves them back to the
1825  * userptr_valid_list. Also updates GPUVM page tables with new page
1826  * addresses and waits for the page table updates to complete.
1827  */
1828 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
1829 {
1830 	struct amdgpu_bo_list_entry *pd_bo_list_entries;
1831 	struct list_head resv_list, duplicates;
1832 	struct ww_acquire_ctx ticket;
1833 	struct amdgpu_sync sync;
1834 
1835 	struct amdgpu_vm *peer_vm;
1836 	struct kgd_mem *mem, *tmp_mem;
1837 	struct amdgpu_bo *bo;
1838 	struct ttm_operation_ctx ctx = { false, false };
1839 	int i, ret;
1840 
1841 	pd_bo_list_entries = kcalloc(process_info->n_vms,
1842 				     sizeof(struct amdgpu_bo_list_entry),
1843 				     GFP_KERNEL);
1844 	if (!pd_bo_list_entries) {
1845 		pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
1846 		ret = -ENOMEM;
1847 		goto out_no_mem;
1848 	}
1849 
1850 	INIT_LIST_HEAD(&resv_list);
1851 	INIT_LIST_HEAD(&duplicates);
1852 
1853 	/* Get all the page directory BOs that need to be reserved */
1854 	i = 0;
1855 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
1856 			    vm_list_node)
1857 		amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
1858 				    &pd_bo_list_entries[i++]);
1859 	/* Add the userptr_inval_list entries to resv_list */
1860 	list_for_each_entry(mem, &process_info->userptr_inval_list,
1861 			    validate_list.head) {
1862 		list_add_tail(&mem->resv_list.head, &resv_list);
1863 		mem->resv_list.bo = mem->validate_list.bo;
1864 		mem->resv_list.num_shared = mem->validate_list.num_shared;
1865 	}
1866 
1867 	/* Reserve all BOs and page tables for validation */
1868 	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
1869 	WARN(!list_empty(&duplicates), "Duplicates should be empty");
1870 	if (ret)
1871 		goto out_free;
1872 
1873 	amdgpu_sync_create(&sync);
1874 
1875 	ret = process_validate_vms(process_info);
1876 	if (ret)
1877 		goto unreserve_out;
1878 
1879 	/* Validate BOs and update GPUVM page tables */
1880 	list_for_each_entry_safe(mem, tmp_mem,
1881 				 &process_info->userptr_inval_list,
1882 				 validate_list.head) {
1883 		struct kfd_bo_va_list *bo_va_entry;
1884 
1885 		bo = mem->bo;
1886 
1887 		/* Validate the BO if we got user pages */
1888 		if (bo->tbo.ttm->pages[0]) {
1889 			amdgpu_bo_placement_from_domain(bo, mem->domain);
1890 			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1891 			if (ret) {
1892 				pr_err("%s: failed to validate BO\n", __func__);
1893 				goto unreserve_out;
1894 			}
1895 		}
1896 
1897 		list_move_tail(&mem->validate_list.head,
1898 			       &process_info->userptr_valid_list);
1899 
1900 		/* Update mapping. If the BO was not validated
1901 		 * (because we couldn't get user pages), this will
1902 		 * clear the page table entries, which will result in
1903 		 * VM faults if the GPU tries to access the invalid
1904 		 * memory.
1905 		 */
1906 		list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
1907 			if (!bo_va_entry->is_mapped)
1908 				continue;
1909 
1910 			ret = update_gpuvm_pte((struct amdgpu_device *)
1911 					       bo_va_entry->kgd_dev,
1912 					       bo_va_entry, &sync);
1913 			if (ret) {
1914 				pr_err("%s: update PTE failed\n", __func__);
1915 				/* make sure this gets validated again */
1916 				atomic_inc(&mem->invalid);
1917 				goto unreserve_out;
1918 			}
1919 		}
1920 	}
1921 
1922 	/* Update page directories */
1923 	ret = process_update_pds(process_info, &sync);
1924 
1925 unreserve_out:
1926 	ttm_eu_backoff_reservation(&ticket, &resv_list);
1927 	amdgpu_sync_wait(&sync, false);
1928 	amdgpu_sync_free(&sync);
1929 out_free:
1930 	kfree(pd_bo_list_entries);
1931 out_no_mem:
1932 
1933 	return ret;
1934 }
1935 
1936 /* Worker callback to restore evicted userptr BOs
1937  *
1938  * Tries to update and validate all userptr BOs. If successful and no
1939  * concurrent evictions happened, the queues are restarted. Otherwise,
1940  * another attempt is scheduled later.
1941  */
1942 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
1943 {
1944 	struct delayed_work *dwork = to_delayed_work(work);
1945 	struct amdkfd_process_info *process_info =
1946 		container_of(dwork, struct amdkfd_process_info,
1947 			     restore_userptr_work);
1948 	struct task_struct *usertask;
1949 	struct mm_struct *mm;
1950 	int evicted_bos;
1951 
1952 	evicted_bos = atomic_read(&process_info->evicted_bos);
1953 	if (!evicted_bos)
1954 		return;
1955 
1956 	/* Reference task and mm in case of concurrent process termination */
1957 	usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
1958 	if (!usertask)
1959 		return;
1960 	mm = get_task_mm(usertask);
1961 	if (!mm) {
1962 		put_task_struct(usertask);
1963 		return;
1964 	}
1965 
1966 	mutex_lock(&process_info->lock);
1967 
1968 	if (update_invalid_user_pages(process_info, mm))
1969 		goto unlock_out;
1970 	/* userptr_inval_list can be empty if all evicted userptr BOs
1971 	 * have been freed. In that case there is nothing to validate
1972 	 * and we can just restart the queues.
1973 	 */
1974 	if (!list_empty(&process_info->userptr_inval_list)) {
1975 		if (atomic_read(&process_info->evicted_bos) != evicted_bos)
1976 			goto unlock_out; /* Concurrent eviction, try again */
1977 
1978 		if (validate_invalid_user_pages(process_info))
1979 			goto unlock_out;
1980 	}
1981 	/* Final check for concurrent eviction and atomic update. If
1982 	 * another eviction happens after successful update, it will
1983 	 * be a first eviction that calls quiesce_mm. The eviction
1984 	 * reference counting inside KFD will handle this case.
1985 	 */
1986 	if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
1987 	    evicted_bos)
1988 		goto unlock_out;
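	/* All evictions handled; clear the local count so no reschedule happens below */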
1989 	evicted_bos = 0;
1990 	if (kgd2kfd_resume_mm(mm)) {
1991 		pr_err("%s: Failed to resume KFD\n", __func__);
1992 		/* No recovery from this failure. Probably the CP is
1993 		 * hanging. No point trying again.
1994 		 */
1995 	}
1996 
1997 unlock_out:
1998 	mutex_unlock(&process_info->lock);
1999 	mmput(mm);
2000 	put_task_struct(usertask);
2001 
2002 	/* If validation failed, reschedule another attempt */
2003 	if (evicted_bos)
2004 		schedule_delayed_work(&process_info->restore_userptr_work,
2005 			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2006 }
2007 
2008 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
2009  *   KFD process identified by process_info
2010  *
2011  * @process_info: amdkfd_process_info of the KFD process
2012  *
2013  * After memory eviction, the restore thread calls this function. The function
2014  * should be called while the process is still valid. BO restore involves:
2015  *
2016  * 1.  Release the old eviction fence and create a new one
2017  * 2.  Get two copies of the PD BO list from all the VMs. Keep one copy as pd_list.
2018  * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
2019  *     BOs that need to be reserved.
2020  * 4.  Reserve all the BOs
2021  * 5.  Validate PD and PT BOs.
2022  * 6.  Validate all KFD BOs using kfd_bo_list, map them and add the new fence
2023  * 7.  Add fence to all PD and PT BOs.
2024  * 8.  Unreserve all BOs
2025  */
2026 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
2027 {
2028 	struct amdgpu_bo_list_entry *pd_bo_list;
2029 	struct amdkfd_process_info *process_info = info;
2030 	struct amdgpu_vm *peer_vm;
2031 	struct kgd_mem *mem;
2032 	struct bo_vm_reservation_context ctx;
2033 	struct amdgpu_amdkfd_fence *new_fence;
2034 	int ret = 0, i;
2035 	struct list_head duplicate_save;
2036 	struct amdgpu_sync sync_obj;
2037 
2038 	INIT_LIST_HEAD(&duplicate_save);
2039 	INIT_LIST_HEAD(&ctx.list);
2040 	INIT_LIST_HEAD(&ctx.duplicates);
2041 
2042 	pd_bo_list = kcalloc(process_info->n_vms,
2043 			     sizeof(struct amdgpu_bo_list_entry),
2044 			     GFP_KERNEL);
2045 	if (!pd_bo_list)
2046 		return -ENOMEM;
2047 
2048 	i = 0;
2049 	mutex_lock(&process_info->lock);
2050 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
2051 			vm_list_node)
2052 		amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2053 
2054 	/* Reserve all BOs and page tables/directory. Add all BOs from
2055 	 * kfd_bo_list to ctx.list
2056 	 */
2057 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2058 			    validate_list.head) {
2059 
2060 		list_add_tail(&mem->resv_list.head, &ctx.list);
2061 		mem->resv_list.bo = mem->validate_list.bo;
2062 		mem->resv_list.num_shared = mem->validate_list.num_shared;
2063 	}
2064 
2065 	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2066 				     false, &duplicate_save);
2067 	if (ret) {
2068 		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2069 		goto ttm_reserve_fail;
2070 	}
2071 
2072 	amdgpu_sync_create(&sync_obj);
2073 
2074 	/* Validate PDs and PTs */
2075 	ret = process_validate_vms(process_info);
2076 	if (ret)
2077 		goto validate_map_fail;
2078 
2079 	ret = process_sync_pds_resv(process_info, &sync_obj);
2080 	if (ret) {
2081 		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2082 		goto validate_map_fail;
2083 	}
2084 
2085 	/* Validate BOs and map them to GPUVM (update VM page tables). */
2086 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2087 			    validate_list.head) {
2088 
2089 		struct amdgpu_bo *bo = mem->bo;
2090 		uint32_t domain = mem->domain;
2091 		struct kfd_bo_va_list *bo_va_entry;
2092 
2093 		ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2094 		if (ret) {
2095 			pr_debug("Memory eviction: Validate BOs failed. Try again\n");
2096 			goto validate_map_fail;
2097 		}
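		/* Add the BO's move fence (if any) so the later sync wait covers pending migrations */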
2098 		ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
2099 		if (ret) {
2100 			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2101 			goto validate_map_fail;
2102 		}
2103 		list_for_each_entry(bo_va_entry, &mem->bo_va_list,
2104 				    bo_list) {
2105 			ret = update_gpuvm_pte((struct amdgpu_device *)
2106 					      bo_va_entry->kgd_dev,
2107 					      bo_va_entry,
2108 					      &sync_obj);
2109 			if (ret) {
2110 				pr_debug("Memory eviction: update PTE failed. Try again\n");
2111 				goto validate_map_fail;
2112 			}
2113 		}
2114 	}
2115 
2116 	/* Update page directories */
2117 	ret = process_update_pds(process_info, &sync_obj);
2118 	if (ret) {
2119 		pr_debug("Memory eviction: update PDs failed. Try again\n");
2120 		goto validate_map_fail;
2121 	}
2122 
2123 	/* Wait for validate and PT updates to finish */
2124 	amdgpu_sync_wait(&sync_obj, false);
2125 
2126 	/* Release the old eviction fence and create a new one, because a fence
2127 	 * only goes from unsignaled to signaled and cannot be reused.
2128 	 * Use the context and mm from the old fence.
2129 	 */
2130 	new_fence = amdgpu_amdkfd_fence_create(
2131 				process_info->eviction_fence->base.context,
2132 				process_info->eviction_fence->mm);
2133 	if (!new_fence) {
2134 		pr_err("Failed to create eviction fence\n");
2135 		ret = -ENOMEM;
2136 		goto validate_map_fail;
2137 	}
2138 	dma_fence_put(&process_info->eviction_fence->base);
2139 	process_info->eviction_fence = new_fence;
2140 	*ef = dma_fence_get(&new_fence->base);
2141 
2142 	/* Attach new eviction fence to all BOs */
2143 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2144 		validate_list.head)
2145 		amdgpu_bo_fence(mem->bo,
2146 			&process_info->eviction_fence->base, true);
2147 
2148 	/* Attach eviction fence to PD / PT BOs */
2149 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
2150 			    vm_list_node) {
2151 		struct amdgpu_bo *bo = peer_vm->root.base.bo;
2152 
2153 		amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2154 	}
2155 
2156 validate_map_fail:
2157 	ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2158 	amdgpu_sync_free(&sync_obj);
2159 ttm_reserve_fail:
2160 	mutex_unlock(&process_info->lock);
2161 	kfree(pd_bo_list);
2162 	return ret;
2163 }
2164 
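/* Wrap the GWS BO in a kgd_mem for this process: add it to the process
 * BO list, validate it in the GWS domain and attach the process eviction
 * fence so amdgpu and amdkfd can evict each other.
 */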
2165 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2166 {
2167 	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2168 	struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2169 	int ret;
2170 
2171 	if (!info || !gws)
2172 		return -EINVAL;
2173 
2174 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2175 	if (!*mem)
2176 		return -ENOMEM;
2177 
2178 	mutex_init(&(*mem)->lock);
2179 	INIT_LIST_HEAD(&(*mem)->bo_va_list);
2180 	(*mem)->bo = amdgpu_bo_ref(gws_bo);
2181 	(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2182 	(*mem)->process_info = process_info;
2183 	add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
2184 	amdgpu_sync_create(&(*mem)->sync);
2185 
2186 
2187 	/* Validate the GWS BO the first time it is added to the process */
2188 	mutex_lock(&(*mem)->process_info->lock);
2189 	ret = amdgpu_bo_reserve(gws_bo, false);
2190 	if (unlikely(ret)) {
2191 		pr_err("Reserve gws bo failed %d\n", ret);
2192 		goto bo_reservation_failure;
2193 	}
2194 
2195 	ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2196 	if (ret) {
2197 		pr_err("GWS BO validate failed %d\n", ret);
2198 		goto bo_validation_failure;
2199 	}
2200 	/* The GWS resource is shared between amdgpu and amdkfd.
2201 	 * Add the process eviction fence to the BO so they can
2202 	 * evict each other.
2203 	 */
2204 	ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
2205 	if (ret)
2206 		goto reserve_shared_fail;
2207 	amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
2208 	amdgpu_bo_unreserve(gws_bo);
2209 	mutex_unlock(&(*mem)->process_info->lock);
2210 
2211 	return ret;
2212 
2213 reserve_shared_fail:
2214 bo_validation_failure:
2215 	amdgpu_bo_unreserve(gws_bo);
2216 bo_reservation_failure:
2217 	mutex_unlock(&(*mem)->process_info->lock);
2218 	amdgpu_sync_free(&(*mem)->sync);
2219 	remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2220 	amdgpu_bo_unref(&gws_bo);
2221 	mutex_destroy(&(*mem)->lock);
2222 	kfree(*mem);
2223 	*mem = NULL;
2224 	return ret;
2225 }
2226 
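/* Undo amdgpu_amdkfd_add_gws_to_process: remove the BO from the process
 * validate list, detach the eviction fence and drop the references taken
 * when it was added.
 */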
2227 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2228 {
2229 	int ret;
2230 	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2231 	struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2232 	struct amdgpu_bo *gws_bo = kgd_mem->bo;
2233 
2234 	/* Remove the BO from the process's validate list so the restore worker
2235 	 * won't touch it anymore
2236 	 */
2237 	remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2238 
2239 	ret = amdgpu_bo_reserve(gws_bo, false);
2240 	if (unlikely(ret)) {
2241 		pr_err("Reserve gws bo failed %d\n", ret);
2242 		//TODO add BO back to validate_list?
2243 		return ret;
2244 	}
2245 	amdgpu_amdkfd_remove_eviction_fence(gws_bo,
2246 			process_info->eviction_fence);
2247 	amdgpu_bo_unreserve(gws_bo);
2248 	amdgpu_sync_free(&kgd_mem->sync);
2249 	amdgpu_bo_unref(&gws_bo);
2250 	mutex_destroy(&kgd_mem->lock);
2251 	kfree(mem);
2252 	return 0;
2253 }
2254 
2255 /* Returns GPU-specific tiling mode information */
2256 int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
2257 				struct tile_config *config)
2258 {
2259 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
2260 
2261 	config->gb_addr_config = adev->gfx.config.gb_addr_config;
2262 	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
2263 	config->num_tile_configs =
2264 			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2265 	config->macro_tile_config_ptr =
2266 			adev->gfx.config.macrotile_mode_array;
2267 	config->num_macro_tile_configs =
2268 			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2269 
2270 	/* Those values are not set from GFX9 onwards */
2271 	config->num_banks = adev->gfx.config.num_banks;
2272 	config->num_ranks = adev->gfx.config.num_ranks;
2273 
2274 	return 0;
2275 }
2276