1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3  * Copyright 2020-2021 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/types.h>
25 #include <linux/sched/task.h>
26 #include "amdgpu_sync.h"
27 #include "amdgpu_object.h"
28 #include "amdgpu_vm.h"
29 #include "amdgpu_mn.h"
30 #include "amdgpu.h"
31 #include "amdgpu_xgmi.h"
32 #include "kfd_priv.h"
33 #include "kfd_svm.h"
34 #include "kfd_migrate.h"
35 
36 #define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1
37 
38 /* Long enough to ensure no retry fault comes after svm range is restored and
39  * page table is updated.
40  */
41 #define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING	2000
42 
43 static void svm_range_evict_svm_bo_worker(struct work_struct *work);
44 static bool
45 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
46 				    const struct mmu_notifier_range *range,
47 				    unsigned long cur_seq);
48 
49 static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
50 	.invalidate = svm_range_cpu_invalidate_pagetables,
51 };
52 
53 /**
54  * svm_range_unlink - unlink svm_range from lists and interval tree
55  * @prange: svm range structure to be removed
56  *
57  * Remove the svm_range from the svms and svm_bo lists and the svms
58  * interval tree.
59  *
60  * Context: The caller must hold svms->lock
61  */
62 static void svm_range_unlink(struct svm_range *prange)
63 {
64 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
65 		 prange, prange->start, prange->last);
66 
67 	if (prange->svm_bo) {
68 		spin_lock(&prange->svm_bo->list_lock);
69 		list_del(&prange->svm_bo_list);
70 		spin_unlock(&prange->svm_bo->list_lock);
71 	}
72 
73 	list_del(&prange->list);
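	/* it_node.start/last are only set once the range has been inserted
	 * into the interval tree by svm_range_add_to_svms; skip removal if
	 * the range was never inserted.
	 */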
74 	if (prange->it_node.start != 0 && prange->it_node.last != 0)
75 		interval_tree_remove(&prange->it_node, &prange->svms->objects);
76 }
77 
78 static void
79 svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
80 {
81 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
82 		 prange, prange->start, prange->last);
83 
84 	mmu_interval_notifier_insert_locked(&prange->notifier, mm,
85 				     prange->start << PAGE_SHIFT,
86 				     prange->npages << PAGE_SHIFT,
87 				     &svm_range_mn_ops);
88 }
89 
90 /**
91  * svm_range_add_to_svms - add svm range to svms
92  * @prange: svm range structure to be added
93  *
94  * Add the svm range to the svms interval tree and linked list
95  *
96  * Context: The caller must hold svms->lock
97  */
98 static void svm_range_add_to_svms(struct svm_range *prange)
99 {
100 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
101 		 prange, prange->start, prange->last);
102 
103 	list_add_tail(&prange->list, &prange->svms->list);
104 	prange->it_node.start = prange->start;
105 	prange->it_node.last = prange->last;
106 	interval_tree_insert(&prange->it_node, &prange->svms->objects);
107 }
108 
109 static void svm_range_remove_notifier(struct svm_range *prange)
110 {
111 	pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
112 		 prange->svms, prange,
113 		 prange->notifier.interval_tree.start >> PAGE_SHIFT,
114 		 prange->notifier.interval_tree.last >> PAGE_SHIFT);
115 
116 	if (prange->notifier.interval_tree.start != 0 &&
117 	    prange->notifier.interval_tree.last != 0)
118 		mmu_interval_notifier_remove(&prange->notifier);
119 }
120 
121 static bool
122 svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
123 {
124 	return dma_addr && !dma_mapping_error(dev, dma_addr) &&
125 	       !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
126 }
127 
128 static int
129 svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
130 		      unsigned long offset, unsigned long npages,
131 		      unsigned long *hmm_pfns, uint32_t gpuidx)
132 {
133 	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
134 	dma_addr_t *addr = prange->dma_addr[gpuidx];
135 	struct device *dev = adev->dev;
136 	struct page *page;
137 	int i, r;
138 
139 	if (!addr) {
140 		addr = kvmalloc_array(prange->npages, sizeof(*addr),
141 				      GFP_KERNEL | __GFP_ZERO);
142 		if (!addr)
143 			return -ENOMEM;
144 		prange->dma_addr[gpuidx] = addr;
145 	}
146 
147 	addr += offset;
148 	for (i = 0; i < npages; i++) {
149 		if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
150 			dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
151 
152 		page = hmm_pfn_to_page(hmm_pfns[i]);
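		/* Device private (VRAM) pages are not DMA-mapped; store the
		 * device-relative VRAM address tagged with
		 * SVM_RANGE_VRAM_DOMAIN instead.
		 */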
153 		if (is_zone_device_page(page)) {
154 			struct amdgpu_device *bo_adev =
155 					amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
156 
157 			addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
158 				   bo_adev->vm_manager.vram_base_offset -
159 				   bo_adev->kfd.dev->pgmap.range.start;
160 			addr[i] |= SVM_RANGE_VRAM_DOMAIN;
161 			pr_debug("vram address detected: 0x%llx\n", addr[i]);
162 			continue;
163 		}
164 		addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
165 		r = dma_mapping_error(dev, addr[i]);
166 		if (r) {
167 			pr_debug("failed %d dma_map_page\n", r);
168 			return r;
169 		}
170 		pr_debug("dma mapping 0x%llx for page addr 0x%lx\n",
171 			 addr[i] >> PAGE_SHIFT, page_to_pfn(page));
172 	}
173 	return 0;
174 }
175 
176 static int
177 svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
178 		  unsigned long offset, unsigned long npages,
179 		  unsigned long *hmm_pfns)
180 {
181 	struct kfd_process *p;
182 	uint32_t gpuidx;
183 	int r = 0;
184 
185 	p = container_of(prange->svms, struct kfd_process, svms);
186 
187 	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
188 		struct kfd_process_device *pdd;
189 		struct amdgpu_device *adev;
190 
191 		pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
192 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
193 		if (!pdd) {
194 			pr_debug("failed to find device idx %d\n", gpuidx);
195 			return -EINVAL;
196 		}
197 		adev = (struct amdgpu_device *)pdd->dev->kgd;
198 
199 		r = svm_range_dma_map_dev(adev, prange, offset, npages,
200 					  hmm_pfns, gpuidx);
201 		if (r)
202 			break;
203 	}
204 
205 	return r;
206 }
207 
208 void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
209 			 unsigned long offset, unsigned long npages)
210 {
211 	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
212 	int i;
213 
214 	if (!dma_addr)
215 		return;
216 
217 	for (i = offset; i < offset + npages; i++) {
218 		if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
219 			continue;
220 		pr_debug("dma unmapping 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
221 		dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
222 		dma_addr[i] = 0;
223 	}
224 }
225 
226 void svm_range_free_dma_mappings(struct svm_range *prange)
227 {
228 	struct kfd_process_device *pdd;
229 	dma_addr_t *dma_addr;
230 	struct device *dev;
231 	struct kfd_process *p;
232 	uint32_t gpuidx;
233 
234 	p = container_of(prange->svms, struct kfd_process, svms);
235 
236 	for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
237 		dma_addr = prange->dma_addr[gpuidx];
238 		if (!dma_addr)
239 			continue;
240 
241 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
242 		if (!pdd) {
243 			pr_debug("failed to find device idx %d\n", gpuidx);
244 			continue;
245 		}
246 		dev = &pdd->dev->pdev->dev;
247 		svm_range_dma_unmap(dev, dma_addr, 0, prange->npages);
248 		kvfree(dma_addr);
249 		prange->dma_addr[gpuidx] = NULL;
250 	}
251 }
252 
253 static void svm_range_free(struct svm_range *prange)
254 {
255 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
256 		 prange->start, prange->last);
257 
258 	svm_range_vram_node_free(prange);
259 	svm_range_free_dma_mappings(prange);
260 	mutex_destroy(&prange->lock);
261 	mutex_destroy(&prange->migrate_mutex);
262 	kfree(prange);
263 }
264 
265 static void
266 svm_range_set_default_attributes(int32_t *location, int32_t *prefetch_loc,
267 				 uint8_t *granularity, uint32_t *flags)
268 {
269 	*location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
270 	*prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
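	/* Default granularity: 2^9 = 512 pages, i.e. 2MB with 4KB pages */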
271 	*granularity = 9;
272 	*flags =
273 		KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT;
274 }
275 
276 static struct
277 svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
278 			 uint64_t last)
279 {
280 	uint64_t size = last - start + 1;
281 	struct svm_range *prange;
282 	struct kfd_process *p;
283 
284 	prange = kzalloc(sizeof(*prange), GFP_KERNEL);
285 	if (!prange)
286 		return NULL;
287 	prange->npages = size;
288 	prange->svms = svms;
289 	prange->start = start;
290 	prange->last = last;
291 	INIT_LIST_HEAD(&prange->list);
292 	INIT_LIST_HEAD(&prange->update_list);
293 	INIT_LIST_HEAD(&prange->remove_list);
294 	INIT_LIST_HEAD(&prange->insert_list);
295 	INIT_LIST_HEAD(&prange->svm_bo_list);
296 	INIT_LIST_HEAD(&prange->deferred_list);
297 	INIT_LIST_HEAD(&prange->child_list);
298 	atomic_set(&prange->invalid, 0);
299 	prange->validate_timestamp = 0;
300 	mutex_init(&prange->migrate_mutex);
301 	mutex_init(&prange->lock);
302 
303 	p = container_of(svms, struct kfd_process, svms);
304 	if (p->xnack_enabled)
305 		bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
306 			    MAX_GPU_INSTANCE);
307 
308 	svm_range_set_default_attributes(&prange->preferred_loc,
309 					 &prange->prefetch_loc,
310 					 &prange->granularity, &prange->flags);
311 
312 	pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last);
313 
314 	return prange;
315 }
316 
317 static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo)
318 {
319 	if (!svm_bo || !kref_get_unless_zero(&svm_bo->kref))
320 		return false;
321 
322 	return true;
323 }
324 
325 static void svm_range_bo_release(struct kref *kref)
326 {
327 	struct svm_range_bo *svm_bo;
328 
329 	svm_bo = container_of(kref, struct svm_range_bo, kref);
330 	spin_lock(&svm_bo->list_lock);
331 	while (!list_empty(&svm_bo->range_list)) {
332 		struct svm_range *prange =
333 				list_first_entry(&svm_bo->range_list,
334 						struct svm_range, svm_bo_list);
335 		/* list_del_init tells a concurrent svm_range_vram_node_new when
336 		 * it's safe to reuse the svm_bo pointer and svm_bo_list head.
337 		 */
338 		list_del_init(&prange->svm_bo_list);
339 		spin_unlock(&svm_bo->list_lock);
340 
341 		pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
342 			 prange->start, prange->last);
343 		mutex_lock(&prange->lock);
344 		prange->svm_bo = NULL;
345 		mutex_unlock(&prange->lock);
346 
347 		spin_lock(&svm_bo->list_lock);
348 	}
349 	spin_unlock(&svm_bo->list_lock);
350 	if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base))
351 		/* We're not in the eviction worker. Signal the fence. */
352 		dma_fence_signal(&svm_bo->eviction_fence->base);
353 	dma_fence_put(&svm_bo->eviction_fence->base);
354 	amdgpu_bo_unref(&svm_bo->bo);
355 	kfree(svm_bo);
356 }
357 
358 void svm_range_bo_unref(struct svm_range_bo *svm_bo)
359 {
360 	if (!svm_bo)
361 		return;
362 
363 	kref_put(&svm_bo->kref, svm_range_bo_release);
364 }
365 
366 static bool
367 svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange)
368 {
369 	struct amdgpu_device *bo_adev;
370 
371 	mutex_lock(&prange->lock);
372 	if (!prange->svm_bo) {
373 		mutex_unlock(&prange->lock);
374 		return false;
375 	}
376 	if (prange->ttm_res) {
377 		/* We still have a reference, all is well */
378 		mutex_unlock(&prange->lock);
379 		return true;
380 	}
381 	if (svm_bo_ref_unless_zero(prange->svm_bo)) {
382 		/*
383 		 * Migrate from GPU to GPU, remove range from source bo_adev
384 		 * svm_bo range list, and return false to allocate svm_bo from
385 		 * destination adev.
386 		 */
387 		bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
388 		if (bo_adev != adev) {
389 			mutex_unlock(&prange->lock);
390 
391 			spin_lock(&prange->svm_bo->list_lock);
392 			list_del_init(&prange->svm_bo_list);
393 			spin_unlock(&prange->svm_bo->list_lock);
394 
395 			svm_range_bo_unref(prange->svm_bo);
396 			return false;
397 		}
398 		if (READ_ONCE(prange->svm_bo->evicting)) {
399 			struct dma_fence *f;
400 			struct svm_range_bo *svm_bo;
401 			/* The BO is getting evicted,
402 			 * we need to get a new one
403 			 */
404 			mutex_unlock(&prange->lock);
405 			svm_bo = prange->svm_bo;
406 			f = dma_fence_get(&svm_bo->eviction_fence->base);
407 			svm_range_bo_unref(prange->svm_bo);
408 			/* wait for the fence to avoid long spin-loop
409 			 * at list_empty_careful
410 			 */
411 			dma_fence_wait(f, false);
412 			dma_fence_put(f);
413 		} else {
414 			/* The BO was still around and we got
415 			 * a new reference to it
416 			 */
417 			mutex_unlock(&prange->lock);
418 			pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
419 				 prange->svms, prange->start, prange->last);
420 
421 			prange->ttm_res = prange->svm_bo->bo->tbo.resource;
422 			return true;
423 		}
424 
425 	} else {
426 		mutex_unlock(&prange->lock);
427 	}
428 
429 	/* We need a new svm_bo. Spin-loop to wait for concurrent
430 	 * svm_range_bo_release to finish removing this range from
431 	 * its range list and set prange->svm_bo to null. After this,
432 	 * it is safe to reuse the svm_bo pointer and svm_bo_list head.
433 	 */
434 	while (!list_empty_careful(&prange->svm_bo_list) || prange->svm_bo)
435 		cond_resched();
436 
437 	return false;
438 }
439 
440 static struct svm_range_bo *svm_range_bo_new(void)
441 {
442 	struct svm_range_bo *svm_bo;
443 
444 	svm_bo = kzalloc(sizeof(*svm_bo), GFP_KERNEL);
445 	if (!svm_bo)
446 		return NULL;
447 
448 	kref_init(&svm_bo->kref);
449 	INIT_LIST_HEAD(&svm_bo->range_list);
450 	spin_lock_init(&svm_bo->list_lock);
451 
452 	return svm_bo;
453 }
454 
455 int
456 svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
457 			bool clear)
458 {
459 	struct amdgpu_bo_param bp;
460 	struct svm_range_bo *svm_bo;
461 	struct amdgpu_bo_user *ubo;
462 	struct amdgpu_bo *bo;
463 	struct kfd_process *p;
464 	struct mm_struct *mm;
465 	int r;
466 
467 	p = container_of(prange->svms, struct kfd_process, svms);
468 	pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
469 		 prange->start, prange->last);
470 
471 	if (svm_range_validate_svm_bo(adev, prange))
472 		return 0;
473 
474 	svm_bo = svm_range_bo_new();
475 	if (!svm_bo) {
476 		pr_debug("failed to alloc svm bo\n");
477 		return -ENOMEM;
478 	}
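	/* The eviction fence is tied to the process mm; hold a temporary mm
	 * reference across amdgpu_amdkfd_fence_create.
	 */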
479 	mm = get_task_mm(p->lead_thread);
480 	if (!mm) {
481 		pr_debug("failed to get mm\n");
482 		kfree(svm_bo);
483 		return -ESRCH;
484 	}
485 	svm_bo->svms = prange->svms;
486 	svm_bo->eviction_fence =
487 		amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
488 					   mm,
489 					   svm_bo);
490 	mmput(mm);
491 	INIT_WORK(&svm_bo->eviction_work, svm_range_evict_svm_bo_worker);
492 	svm_bo->evicting = 0;
493 	memset(&bp, 0, sizeof(bp));
494 	bp.size = prange->npages * PAGE_SIZE;
495 	bp.byte_align = PAGE_SIZE;
496 	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
497 	bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
498 	bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0;
499 	bp.flags |= AMDGPU_AMDKFD_CREATE_SVM_BO;
500 	bp.type = ttm_bo_type_device;
501 	bp.resv = NULL;
502 
503 	r = amdgpu_bo_create_user(adev, &bp, &ubo);
504 	if (r) {
505 		pr_debug("failed %d to create bo\n", r);
506 		goto create_bo_failed;
507 	}
508 	bo = &ubo->bo;
509 	r = amdgpu_bo_reserve(bo, true);
510 	if (r) {
511 		pr_debug("failed %d to reserve bo\n", r);
512 		goto reserve_bo_failed;
513 	}
514 
515 	r = dma_resv_reserve_shared(bo->tbo.base.resv, 1);
516 	if (r) {
517 		pr_debug("failed %d to reserve bo\n", r);
518 		amdgpu_bo_unreserve(bo);
519 		goto reserve_bo_failed;
520 	}
521 	amdgpu_bo_fence(bo, &svm_bo->eviction_fence->base, true);
522 
523 	amdgpu_bo_unreserve(bo);
524 
525 	svm_bo->bo = bo;
526 	prange->svm_bo = svm_bo;
527 	prange->ttm_res = bo->tbo.resource;
528 	prange->offset = 0;
529 
530 	spin_lock(&svm_bo->list_lock);
531 	list_add(&prange->svm_bo_list, &svm_bo->range_list);
532 	spin_unlock(&svm_bo->list_lock);
533 
534 	return 0;
535 
536 reserve_bo_failed:
537 	amdgpu_bo_unref(&bo);
538 create_bo_failed:
539 	dma_fence_put(&svm_bo->eviction_fence->base);
540 	kfree(svm_bo);
541 	prange->ttm_res = NULL;
542 
543 	return r;
544 }
545 
546 void svm_range_vram_node_free(struct svm_range *prange)
547 {
548 	/* serialize prange->svm_bo unref */
549 	mutex_lock(&prange->lock);
550 	/* prange->svm_bo reference has not been dropped yet */
551 	if (prange->ttm_res) {
552 		prange->ttm_res = NULL;
553 		mutex_unlock(&prange->lock);
554 		svm_range_bo_unref(prange->svm_bo);
555 	} else
556 		mutex_unlock(&prange->lock);
557 }
558 
559 struct amdgpu_device *
560 svm_range_get_adev_by_id(struct svm_range *prange, uint32_t gpu_id)
561 {
562 	struct kfd_process_device *pdd;
563 	struct kfd_process *p;
564 	int32_t gpu_idx;
565 
566 	p = container_of(prange->svms, struct kfd_process, svms);
567 
568 	gpu_idx = kfd_process_gpuidx_from_gpuid(p, gpu_id);
569 	if (gpu_idx < 0) {
570 		pr_debug("failed to get device by id 0x%x\n", gpu_id);
571 		return NULL;
572 	}
573 	pdd = kfd_process_device_from_gpuidx(p, gpu_idx);
574 	if (!pdd) {
575 		pr_debug("failed to get device by idx 0x%x\n", gpu_idx);
576 		return NULL;
577 	}
578 
579 	return (struct amdgpu_device *)pdd->dev->kgd;
580 }
581 
582 struct kfd_process_device *
583 svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev)
584 {
585 	struct kfd_process *p;
586 	int32_t gpu_idx, gpuid;
587 	int r;
588 
589 	p = container_of(prange->svms, struct kfd_process, svms);
590 
591 	r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpu_idx);
592 	if (r) {
593 		pr_debug("failed to get device id by adev %p\n", adev);
594 		return NULL;
595 	}
596 
597 	return kfd_process_device_from_gpuidx(p, gpu_idx);
598 }
599 
600 static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
601 {
602 	struct ttm_operation_ctx ctx = { false, false };
603 
604 	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
605 
606 	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
607 }
608 
609 static int
610 svm_range_check_attr(struct kfd_process *p,
611 		     uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
612 {
613 	uint32_t i;
614 
615 	for (i = 0; i < nattr; i++) {
616 		uint32_t val = attrs[i].value;
617 		int gpuidx = MAX_GPU_INSTANCE;
618 
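		/* gpuidx stays at MAX_GPU_INSTANCE for attributes that do not
		 * name a GPU; only values below MAX_GPU_INSTANCE are checked
		 * against the supported-GPU bitmap below.
		 */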
619 		switch (attrs[i].type) {
620 		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
621 			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
622 			    val != KFD_IOCTL_SVM_LOCATION_UNDEFINED)
623 				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
624 			break;
625 		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
626 			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM)
627 				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
628 			break;
629 		case KFD_IOCTL_SVM_ATTR_ACCESS:
630 		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
631 		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
632 			gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
633 			break;
634 		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
635 			break;
636 		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
637 			break;
638 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
639 			break;
640 		default:
641 			pr_debug("unknown attr type 0x%x\n", attrs[i].type);
642 			return -EINVAL;
643 		}
644 
645 		if (gpuidx < 0) {
646 			pr_debug("no GPU 0x%x found\n", val);
647 			return -EINVAL;
648 		} else if (gpuidx < MAX_GPU_INSTANCE &&
649 			   !test_bit(gpuidx, p->svms.bitmap_supported)) {
650 			pr_debug("GPU 0x%x not supported\n", val);
651 			return -EINVAL;
652 		}
653 	}
654 
655 	return 0;
656 }
657 
658 static void
659 svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
660 		      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
661 {
662 	uint32_t i;
663 	int gpuidx;
664 
665 	for (i = 0; i < nattr; i++) {
666 		switch (attrs[i].type) {
667 		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
668 			prange->preferred_loc = attrs[i].value;
669 			break;
670 		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
671 			prange->prefetch_loc = attrs[i].value;
672 			break;
673 		case KFD_IOCTL_SVM_ATTR_ACCESS:
674 		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
675 		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
676 			gpuidx = kfd_process_gpuidx_from_gpuid(p,
677 							       attrs[i].value);
678 			if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
679 				bitmap_clear(prange->bitmap_access, gpuidx, 1);
680 				bitmap_clear(prange->bitmap_aip, gpuidx, 1);
681 			} else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
682 				bitmap_set(prange->bitmap_access, gpuidx, 1);
683 				bitmap_clear(prange->bitmap_aip, gpuidx, 1);
684 			} else {
685 				bitmap_clear(prange->bitmap_access, gpuidx, 1);
686 				bitmap_set(prange->bitmap_aip, gpuidx, 1);
687 			}
688 			break;
689 		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
690 			prange->flags |= attrs[i].value;
691 			break;
692 		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
693 			prange->flags &= ~attrs[i].value;
694 			break;
695 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
696 			prange->granularity = min_t(uint32_t, attrs[i].value, 0x3F);
697 			break;
698 		default:
699 			WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
700 		}
701 	}
702 }
703 
704 /**
705  * svm_range_debug_dump - print all range information from svms
706  * @svms: svm range list header
707  *
706  * Print each svm range's start, number of pages, last page and actual
707  * location from the svms linked list and interval tree.
710  *
711  * Context: The caller must hold svms->lock
712  */
713 static void svm_range_debug_dump(struct svm_range_list *svms)
714 {
715 	struct interval_tree_node *node;
716 	struct svm_range *prange;
717 
718 	pr_debug("dump svms 0x%p list\n", svms);
719 	pr_debug("range\tstart\tpage\tend\t\tlocation\n");
720 
721 	list_for_each_entry(prange, &svms->list, list) {
722 		pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
723 			 prange, prange->start, prange->npages,
724 			 prange->start + prange->npages - 1,
725 			 prange->actual_loc);
726 	}
727 
728 	pr_debug("dump svms 0x%p interval tree\n", svms);
729 	pr_debug("range\tstart\tpage\tend\t\tlocation\n");
730 	node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
731 	while (node) {
732 		prange = container_of(node, struct svm_range, it_node);
733 		pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
734 			 prange, prange->start, prange->npages,
735 			 prange->start + prange->npages - 1,
736 			 prange->actual_loc);
737 		node = interval_tree_iter_next(node, 0, ~0ULL);
738 	}
739 }
740 
741 static bool
742 svm_range_is_same_attrs(struct svm_range *old, struct svm_range *new)
743 {
744 	return (old->prefetch_loc == new->prefetch_loc &&
745 		old->flags == new->flags &&
746 		old->granularity == new->granularity);
747 }
748 
749 static int
750 svm_range_split_array(void *ppnew, void *ppold, size_t size,
751 		      uint64_t old_start, uint64_t old_n,
752 		      uint64_t new_start, uint64_t new_n)
753 {
754 	unsigned char *new, *old, *pold;
755 	uint64_t d;
756 
757 	if (!ppold)
758 		return 0;
759 	pold = *(unsigned char **)ppold;
760 	if (!pold)
761 		return 0;
762 
763 	new = kvmalloc_array(new_n, size, GFP_KERNEL);
764 	if (!new)
765 		return -ENOMEM;
766 
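	/* Elements belonging to the new range start at offset
	 * (new_start - old_start) within the old array.
	 */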
767 	d = (new_start - old_start) * size;
768 	memcpy(new, pold + d, new_n * size);
769 
770 	old = kvmalloc_array(old_n, size, GFP_KERNEL);
771 	if (!old) {
772 		kvfree(new);
773 		return -ENOMEM;
774 	}
775 
776 	d = (new_start == old_start) ? new_n * size : 0;
777 	memcpy(old, pold + d, old_n * size);
778 
779 	kvfree(pold);
780 	*(void **)ppold = old;
781 	*(void **)ppnew = new;
782 
783 	return 0;
784 }
785 
786 static int
787 svm_range_split_pages(struct svm_range *new, struct svm_range *old,
788 		      uint64_t start, uint64_t last)
789 {
790 	uint64_t npages = last - start + 1;
791 	int i, r;
792 
793 	for (i = 0; i < MAX_GPU_INSTANCE; i++) {
794 		r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
795 					  sizeof(*old->dma_addr[i]), old->start,
796 					  npages, new->start, new->npages);
797 		if (r)
798 			return r;
799 	}
800 
801 	return 0;
802 }
803 
804 static int
805 svm_range_split_nodes(struct svm_range *new, struct svm_range *old,
806 		      uint64_t start, uint64_t last)
807 {
808 	uint64_t npages = last - start + 1;
809 
810 	pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n",
811 		 new->svms, new, new->start, start, last);
812 
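	/* Both ranges keep sharing the old svm_bo; only the page offset into
	 * the BO differs depending on which side was split off.
	 */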
813 	if (new->start == old->start) {
814 		new->offset = old->offset;
815 		old->offset += new->npages;
816 	} else {
817 		new->offset = old->offset + npages;
818 	}
819 
820 	new->svm_bo = svm_range_bo_ref(old->svm_bo);
821 	new->ttm_res = old->ttm_res;
822 
823 	spin_lock(&new->svm_bo->list_lock);
824 	list_add(&new->svm_bo_list, &new->svm_bo->range_list);
825 	spin_unlock(&new->svm_bo->list_lock);
826 
827 	return 0;
828 }
829 
830 /**
831  * svm_range_split_adjust - split range and adjust
832  *
833  * @new: new range
834  * @old: the old range
835  * @start: the start address in pages the old range is adjusted to
836  * @last: the last address in pages the old range is adjusted to
837  *
838  * Copy the system memory dma_addr or VRAM ttm_res of the old range to the
839  * new range, covering new->start up to new->npages pages; the remaining old
840  * range then spans start to last.
841  *
842  * Return:
843  * 0 - OK, -ENOMEM - out of memory
844  */
845 static int
846 svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
847 		      uint64_t start, uint64_t last)
848 {
849 	int r;
850 
851 	pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n",
852 		 new->svms, new->start, old->start, old->last, start, last);
853 
854 	if (new->start < old->start ||
855 	    new->last > old->last) {
856 		WARN_ONCE(1, "invalid new range start or last\n");
857 		return -EINVAL;
858 	}
859 
860 	r = svm_range_split_pages(new, old, start, last);
861 	if (r)
862 		return r;
863 
864 	if (old->actual_loc && old->ttm_res) {
865 		r = svm_range_split_nodes(new, old, start, last);
866 		if (r)
867 			return r;
868 	}
869 
870 	old->npages = last - start + 1;
871 	old->start = start;
872 	old->last = last;
873 	new->flags = old->flags;
874 	new->preferred_loc = old->preferred_loc;
875 	new->prefetch_loc = old->prefetch_loc;
876 	new->actual_loc = old->actual_loc;
877 	new->granularity = old->granularity;
878 	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
879 	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
880 
881 	return 0;
882 }
883 
884 /**
885  * svm_range_split - split a range into two ranges
886  *
887  * @prange: the svm range to split
888  * @start: the remaining range start address in pages
889  * @last: the remaining range last address in pages
890  * @new: the result new range generated
891  *
892  * Two cases only:
893  * case 1: if start == prange->start
894  *         prange ==> prange[start, last]
895  *         new range [last + 1, prange->last]
896  *
897  * case 2: if last == prange->last
898  *         prange ==> prange[start, last]
899  *         new range [prange->start, start - 1]
900  *
901  * Return:
902  * 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last
903  */
904 static int
905 svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
906 		struct svm_range **new)
907 {
908 	uint64_t old_start = prange->start;
909 	uint64_t old_last = prange->last;
910 	struct svm_range_list *svms;
911 	int r = 0;
912 
913 	pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
914 		 old_start, old_last, start, last);
915 
916 	if (old_start != start && old_last != last)
917 		return -EINVAL;
918 	if (start < old_start || last > old_last)
919 		return -EINVAL;
920 
921 	svms = prange->svms;
922 	if (old_start == start)
923 		*new = svm_range_new(svms, last + 1, old_last);
924 	else
925 		*new = svm_range_new(svms, old_start, start - 1);
926 	if (!*new)
927 		return -ENOMEM;
928 
929 	r = svm_range_split_adjust(*new, prange, start, last);
930 	if (r) {
931 		pr_debug("failed %d split [0x%llx 0x%llx] to [0x%llx 0x%llx]\n",
932 			 r, old_start, old_last, start, last);
933 		svm_range_free(*new);
934 		*new = NULL;
935 	}
936 
937 	return r;
938 }
939 
940 static int
941 svm_range_split_tail(struct svm_range *prange,
942 		     uint64_t new_last, struct list_head *insert_list)
943 {
944 	struct svm_range *tail;
945 	int r = svm_range_split(prange, prange->start, new_last, &tail);
946 
947 	if (!r)
948 		list_add(&tail->insert_list, insert_list);
949 	return r;
950 }
951 
952 static int
953 svm_range_split_head(struct svm_range *prange,
954 		     uint64_t new_start, struct list_head *insert_list)
955 {
956 	struct svm_range *head;
957 	int r = svm_range_split(prange, new_start, prange->last, &head);
958 
959 	if (!r)
960 		list_add(&head->insert_list, insert_list);
961 	return r;
962 }
963 
964 static void
965 svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
966 		    struct svm_range *pchild, enum svm_work_list_ops op)
967 {
968 	pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
969 		 pchild, pchild->start, pchild->last, prange, op);
970 
971 	pchild->work_item.mm = mm;
972 	pchild->work_item.op = op;
973 	list_add_tail(&pchild->child_list, &prange->child_list);
974 }
975 
976 /**
977  * svm_range_split_by_granularity - collect ranges within granularity boundary
978  *
979  * @p: the process with svms list
980  * @mm: mm structure
981  * @addr: the vm fault address in pages, to split the prange
982  * @parent: parent range if prange is from child list
983  * @prange: prange to split
984  *
985  * Trims @prange to be a single aligned block of prange->granularity if
986  * possible. The head and tail are added to the child_list in @parent.
987  *
988  * Context: caller must hold mmap_read_lock and prange->lock
989  *
990  * Return:
991  * 0 - OK, otherwise error code
992  */
993 int
994 svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
995 			       unsigned long addr, struct svm_range *parent,
996 			       struct svm_range *prange)
997 {
998 	struct svm_range *head, *tail;
999 	unsigned long start, last, size;
1000 	int r;
1001 
1002 	/* Align the split range start and size to the granularity size, so a
1003 	 * single PTE can map the whole range; this reduces the number of PTEs
1004 	 * updated and the L1 TLB space used for translation.
1005 	 */
1006 	size = 1UL << prange->granularity;
1007 	start = ALIGN_DOWN(addr, size);
1008 	last = ALIGN(addr + 1, size) - 1;
1009 
1010 	pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n",
1011 		 prange->svms, prange->start, prange->last, start, last, size);
1012 
1013 	if (start > prange->start) {
1014 		r = svm_range_split(prange, start, prange->last, &head);
1015 		if (r)
1016 			return r;
1017 		svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE);
1018 	}
1019 
1020 	if (last < prange->last) {
1021 		r = svm_range_split(prange, prange->start, last, &tail);
1022 		if (r)
1023 			return r;
1024 		svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
1025 	}
1026 
1027 	/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
1028 	if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) {
1029 		prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
1030 		pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n",
1031 			 prange, prange->start, prange->last,
1032 			 SVM_OP_ADD_RANGE_AND_MAP);
1033 	}
1034 	return 0;
1035 }
1036 
1037 static uint64_t
1038 svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
1039 			int domain)
1040 {
1041 	struct amdgpu_device *bo_adev;
1042 	uint32_t flags = prange->flags;
1043 	uint32_t mapping_flags = 0;
1044 	uint64_t pte_flags;
1045 	bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
1046 	bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT;
1047 
1048 	if (domain == SVM_RANGE_VRAM_DOMAIN)
1049 		bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
1050 
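	/* Choose the MTYPE based on whether the mapping targets local VRAM,
	 * remote VRAM over XGMI, or system memory, and on the coherence flag.
	 */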
1051 	switch (adev->asic_type) {
1052 	case CHIP_ARCTURUS:
1053 		if (domain == SVM_RANGE_VRAM_DOMAIN) {
1054 			if (bo_adev == adev) {
1055 				mapping_flags |= coherent ?
1056 					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1057 			} else {
1058 				mapping_flags |= coherent ?
1059 					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1060 				if (amdgpu_xgmi_same_hive(adev, bo_adev))
1061 					snoop = true;
1062 			}
1063 		} else {
1064 			mapping_flags |= coherent ?
1065 				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1066 		}
1067 		break;
1068 	case CHIP_ALDEBARAN:
1069 		if (domain == SVM_RANGE_VRAM_DOMAIN) {
1070 			if (bo_adev == adev) {
1071 				mapping_flags |= coherent ?
1072 					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1073 				if (adev->gmc.xgmi.connected_to_cpu)
1074 					snoop = true;
1075 			} else {
1076 				mapping_flags |= coherent ?
1077 					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1078 				if (amdgpu_xgmi_same_hive(adev, bo_adev))
1079 					snoop = true;
1080 			}
1081 		} else {
1082 			mapping_flags |= coherent ?
1083 				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1084 		}
1085 		break;
1086 	default:
1087 		mapping_flags |= coherent ?
1088 			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1089 	}
1090 
1091 	mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
1092 
1093 	if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO)
1094 		mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE;
1095 	if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
1096 		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
1097 
1098 	pte_flags = AMDGPU_PTE_VALID;
1099 	pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
1100 	pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
1101 
1102 	pte_flags |= amdgpu_gem_va_map_flags(adev, mapping_flags);
1103 	return pte_flags;
1104 }
1105 
1106 static int
1107 svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1108 			 uint64_t start, uint64_t last,
1109 			 struct dma_fence **fence)
1110 {
1111 	uint64_t init_pte_value = 0;
1112 
1113 	pr_debug("[0x%llx 0x%llx]\n", start, last);
1114 
1115 	return amdgpu_vm_bo_update_mapping(adev, adev, vm, false, true, NULL,
1116 					   start, last, init_pte_value, 0,
1117 					   NULL, NULL, fence, NULL);
1118 }
1119 
1120 static int
1121 svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
1122 			  unsigned long last)
1123 {
1124 	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
1125 	struct kfd_process_device *pdd;
1126 	struct dma_fence *fence = NULL;
1127 	struct amdgpu_device *adev;
1128 	struct kfd_process *p;
1129 	uint32_t gpuidx;
1130 	int r = 0;
1131 
1132 	bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
1133 		  MAX_GPU_INSTANCE);
1134 	p = container_of(prange->svms, struct kfd_process, svms);
1135 
1136 	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
1137 		pr_debug("unmap from gpu idx 0x%x\n", gpuidx);
1138 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1139 		if (!pdd) {
1140 			pr_debug("failed to find device idx %d\n", gpuidx);
1141 			return -EINVAL;
1142 		}
1143 		adev = (struct amdgpu_device *)pdd->dev->kgd;
1144 
1145 		r = svm_range_unmap_from_gpu(adev, drm_priv_to_vm(pdd->drm_priv),
1146 					     start, last, &fence);
1147 		if (r)
1148 			break;
1149 
1150 		if (fence) {
1151 			r = dma_fence_wait(fence, false);
1152 			dma_fence_put(fence);
1153 			fence = NULL;
1154 			if (r)
1155 				break;
1156 		}
1157 		amdgpu_amdkfd_flush_gpu_tlb_pasid((struct kgd_dev *)adev,
1158 					p->pasid, TLB_FLUSH_HEAVYWEIGHT);
1159 	}
1160 
1161 	return r;
1162 }
1163 
1164 static int
1165 svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1166 		     struct svm_range *prange, unsigned long offset,
1167 		     unsigned long npages, bool readonly, dma_addr_t *dma_addr,
1168 		     struct amdgpu_device *bo_adev, struct dma_fence **fence)
1169 {
1170 	struct amdgpu_bo_va bo_va;
1171 	bool table_freed = false;
1172 	uint64_t pte_flags;
1173 	unsigned long last_start;
1174 	int last_domain;
1175 	int r = 0;
1176 	int64_t i, j;
1177 
1178 	last_start = prange->start + offset;
1179 
1180 	pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
1181 		 last_start, last_start + npages - 1, readonly);
1182 
1183 	if (prange->svm_bo && prange->ttm_res)
1184 		bo_va.is_xgmi = amdgpu_xgmi_same_hive(adev, bo_adev);
1185 
1186 	for (i = offset; i < offset + npages; i++) {
1187 		last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
1188 		dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
1189 
1190 		/* Collect all pages in the same address range and memory domain
1191 		 * that can be mapped with a single call to update mapping.
1192 		 */
1193 		if (i < offset + npages - 1 &&
1194 		    last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
1195 			continue;
1196 
1197 		pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
1198 			 last_start, prange->start + i, last_domain ? "GPU" : "CPU");
1199 
1200 		pte_flags = svm_range_get_pte_flags(adev, prange, last_domain);
1201 		if (readonly)
1202 			pte_flags &= ~AMDGPU_PTE_WRITEABLE;
1203 
1204 		pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n",
1205 			 prange->svms, last_start, prange->start + i,
1206 			 (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
1207 			 pte_flags);
1208 
1209 		r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false,
1210 						NULL, last_start,
1211 						prange->start + i, pte_flags,
1212 						last_start - prange->start,
1213 						NULL, dma_addr,
1214 						&vm->last_update,
1215 						&table_freed);
1216 
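		/* Restore the domain tag that was stripped above so later
		 * unmapping can tell VRAM addresses from DMA addresses.
		 */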
1217 		for (j = last_start - prange->start; j <= i; j++)
1218 			dma_addr[j] |= last_domain;
1219 
1220 		if (r) {
1221 			pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
1222 			goto out;
1223 		}
1224 		last_start = prange->start + i + 1;
1225 	}
1226 
1227 	r = amdgpu_vm_update_pdes(adev, vm, false);
1228 	if (r) {
1229 		pr_debug("failed %d to update directories 0x%lx\n", r,
1230 			 prange->start);
1231 		goto out;
1232 	}
1233 
1234 	if (fence)
1235 		*fence = dma_fence_get(vm->last_update);
1236 
1237 	if (table_freed) {
1238 		struct kfd_process *p;
1239 
1240 		p = container_of(prange->svms, struct kfd_process, svms);
1241 		amdgpu_amdkfd_flush_gpu_tlb_pasid((struct kgd_dev *)adev,
1242 						p->pasid, TLB_FLUSH_LEGACY);
1243 	}
1244 out:
1245 	return r;
1246 }
1247 
1248 static int
1249 svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
1250 		      unsigned long npages, bool readonly,
1251 		      unsigned long *bitmap, bool wait)
1252 {
1253 	struct kfd_process_device *pdd;
1254 	struct amdgpu_device *bo_adev;
1255 	struct amdgpu_device *adev;
1256 	struct kfd_process *p;
1257 	struct dma_fence *fence = NULL;
1258 	uint32_t gpuidx;
1259 	int r = 0;
1260 
1261 	if (prange->svm_bo && prange->ttm_res)
1262 		bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
1263 	else
1264 		bo_adev = NULL;
1265 
1266 	p = container_of(prange->svms, struct kfd_process, svms);
1267 	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
1268 		pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
1269 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1270 		if (!pdd) {
1271 			pr_debug("failed to find device idx %d\n", gpuidx);
1272 			return -EINVAL;
1273 		}
1274 		adev = (struct amdgpu_device *)pdd->dev->kgd;
1275 
1276 		pdd = kfd_bind_process_to_device(pdd->dev, p);
1277 		if (IS_ERR(pdd))
1278 			return -EINVAL;
1279 
1280 		if (bo_adev && adev != bo_adev &&
1281 		    !amdgpu_xgmi_same_hive(adev, bo_adev)) {
1282 			pr_debug("cannot map to device idx %d\n", gpuidx);
1283 			continue;
1284 		}
1285 
1286 		r = svm_range_map_to_gpu(adev, drm_priv_to_vm(pdd->drm_priv),
1287 					 prange, offset, npages, readonly,
1288 					 prange->dma_addr[gpuidx],
1289 					 bo_adev, wait ? &fence : NULL);
1290 		if (r)
1291 			break;
1292 
1293 		if (fence) {
1294 			r = dma_fence_wait(fence, false);
1295 			dma_fence_put(fence);
1296 			fence = NULL;
1297 			if (r) {
1298 				pr_debug("failed %d to dma fence wait\n", r);
1299 				break;
1300 			}
1301 		}
1302 	}
1303 
1304 	return r;
1305 }
1306 
1307 struct svm_validate_context {
1308 	struct kfd_process *process;
1309 	struct svm_range *prange;
1310 	bool intr;
1311 	unsigned long bitmap[MAX_GPU_INSTANCE];
1312 	struct ttm_validate_buffer tv[MAX_GPU_INSTANCE];
1313 	struct list_head validate_list;
1314 	struct ww_acquire_ctx ticket;
1315 };
1316 
1317 static int svm_range_reserve_bos(struct svm_validate_context *ctx)
1318 {
1319 	struct kfd_process_device *pdd;
1320 	struct amdgpu_device *adev;
1321 	struct amdgpu_vm *vm;
1322 	uint32_t gpuidx;
1323 	int r;
1324 
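	/* Reserve the page-table root BO of every GPU being mapped so the page
	 * tables can be validated and updated safely.
	 */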
1325 	INIT_LIST_HEAD(&ctx->validate_list);
1326 	for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1327 		pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1328 		if (!pdd) {
1329 			pr_debug("failed to find device idx %d\n", gpuidx);
1330 			return -EINVAL;
1331 		}
1332 		adev = (struct amdgpu_device *)pdd->dev->kgd;
1333 		vm = drm_priv_to_vm(pdd->drm_priv);
1334 
1335 		ctx->tv[gpuidx].bo = &vm->root.bo->tbo;
1336 		ctx->tv[gpuidx].num_shared = 4;
1337 		list_add(&ctx->tv[gpuidx].head, &ctx->validate_list);
1338 	}
1339 
1340 	r = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->validate_list,
1341 				   ctx->intr, NULL);
1342 	if (r) {
1343 		pr_debug("failed %d to reserve bo\n", r);
1344 		return r;
1345 	}
1346 
1347 	for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1348 		pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1349 		if (!pdd) {
1350 			pr_debug("failed to find device idx %d\n", gpuidx);
1351 			r = -EINVAL;
1352 			goto unreserve_out;
1353 		}
1354 		adev = (struct amdgpu_device *)pdd->dev->kgd;
1355 
1356 		r = amdgpu_vm_validate_pt_bos(adev, drm_priv_to_vm(pdd->drm_priv),
1357 					      svm_range_bo_validate, NULL);
1358 		if (r) {
1359 			pr_debug("failed %d validate pt bos\n", r);
1360 			goto unreserve_out;
1361 		}
1362 	}
1363 
1364 	return 0;
1365 
1366 unreserve_out:
1367 	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
1368 	return r;
1369 }
1370 
1371 static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
1372 {
1373 	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
1374 }
1375 
1376 static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
1377 {
1378 	struct kfd_process_device *pdd;
1379 	struct amdgpu_device *adev;
1380 
1381 	pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1382 	adev = (struct amdgpu_device *)pdd->dev->kgd;
1383 
1384 	return SVM_ADEV_PGMAP_OWNER(adev);
1385 }
1386 
1387 /*
1388  * Validation+GPU mapping with concurrent invalidation (MMU notifiers)
1389  *
1390  * To prevent concurrent destruction or change of range attributes, the
1391  * svm_read_lock must be held. The caller must not hold the svm_write_lock
1392  * because that would block concurrent evictions and lead to deadlocks. To
1393  * serialize concurrent migrations or validations of the same range, the
1394  * prange->migrate_mutex must be held.
1395  *
1396  * For VRAM ranges, the SVM BO must be allocated and valid (protected by its
1397  * eviction fence).
1398  *
1399  * The following sequence ensures race-free validation and GPU mapping:
1400  *
1401  * 1. Reserve page table (and SVM BO if range is in VRAM)
1402  * 2. hmm_range_fault to get page addresses (if system memory)
1403  * 3. DMA-map pages (if system memory)
1404  * 4-a. Take notifier lock
1405  * 4-b. Check that pages still valid (mmu_interval_read_retry)
1406  * 4-c. Check that the range was not split or otherwise invalidated
1407  * 4-d. Update GPU page table
1408  * 4-e. Release notifier lock
1409  * 5. Release page table (and SVM BO) reservation
1410  */
1411 static int svm_range_validate_and_map(struct mm_struct *mm,
1412 				      struct svm_range *prange,
1413 				      int32_t gpuidx, bool intr, bool wait)
1414 {
1415 	struct svm_validate_context ctx;
1416 	unsigned long start, end, addr;
1417 	struct kfd_process *p;
1418 	void *owner;
1419 	int32_t idx;
1420 	int r = 0;
1421 
1422 	ctx.process = container_of(prange->svms, struct kfd_process, svms);
1423 	ctx.prange = prange;
1424 	ctx.intr = intr;
1425 
1426 	if (gpuidx < MAX_GPU_INSTANCE) {
1427 		bitmap_zero(ctx.bitmap, MAX_GPU_INSTANCE);
1428 		bitmap_set(ctx.bitmap, gpuidx, 1);
1429 	} else if (ctx.process->xnack_enabled) {
1430 		bitmap_copy(ctx.bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
1431 
1432 		/* If the range was prefetched to a GPU, or migrated to a GPU by
1433 		 * a retry fault, and that GPU has the ACCESS attribute for the
1434 		 * range, create the mapping on that GPU.
1435 		 */
1436 		if (prange->actual_loc) {
1437 			gpuidx = kfd_process_gpuidx_from_gpuid(ctx.process,
1438 							prange->actual_loc);
1439 			if (gpuidx < 0) {
1440 				WARN_ONCE(1, "failed get device by id 0x%x\n",
1441 					 prange->actual_loc);
1442 				return -EINVAL;
1443 			}
1444 			if (test_bit(gpuidx, prange->bitmap_access))
1445 				bitmap_set(ctx.bitmap, gpuidx, 1);
1446 		}
1447 	} else {
1448 		bitmap_or(ctx.bitmap, prange->bitmap_access,
1449 			  prange->bitmap_aip, MAX_GPU_INSTANCE);
1450 	}
1451 
1452 	if (bitmap_empty(ctx.bitmap, MAX_GPU_INSTANCE))
1453 		return 0;
1454 
1455 	if (prange->actual_loc && !prange->ttm_res) {
1456 		/* This should never happen. actual_loc gets set by
1457 		 * svm_migrate_ram_to_vram after allocating a BO.
1458 		 */
1459 		WARN(1, "VRAM BO missing during validation\n");
1460 		return -EINVAL;
1461 	}
1462 
1463 	svm_range_reserve_bos(&ctx);
1464 
1465 	p = container_of(prange->svms, struct kfd_process, svms);
1466 	owner = kfd_svm_page_owner(p, find_first_bit(ctx.bitmap,
1467 						MAX_GPU_INSTANCE));
1468 	for_each_set_bit(idx, ctx.bitmap, MAX_GPU_INSTANCE) {
1469 		if (kfd_svm_page_owner(p, idx) != owner) {
1470 			owner = NULL;
1471 			break;
1472 		}
1473 	}
1474 
1475 	start = prange->start << PAGE_SHIFT;
1476 	end = (prange->last + 1) << PAGE_SHIFT;
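	/* The range may span several VMAs; fault, DMA-map and GPU-map it one
	 * VMA-sized chunk at a time.
	 */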
1477 	for (addr = start; addr < end && !r; ) {
1478 		struct hmm_range *hmm_range;
1479 		struct vm_area_struct *vma;
1480 		unsigned long next;
1481 		unsigned long offset;
1482 		unsigned long npages;
1483 		bool readonly;
1484 
1485 		vma = find_vma(mm, addr);
1486 		if (!vma || addr < vma->vm_start) {
1487 			r = -EFAULT;
1488 			goto unreserve_out;
1489 		}
1490 		readonly = !(vma->vm_flags & VM_WRITE);
1491 
1492 		next = min(vma->vm_end, end);
1493 		npages = (next - addr) >> PAGE_SHIFT;
1494 		WRITE_ONCE(p->svms.faulting_task, current);
1495 		r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
1496 					       addr, npages, &hmm_range,
1497 					       readonly, true, owner);
1498 		WRITE_ONCE(p->svms.faulting_task, NULL);
1499 		if (r) {
1500 			pr_debug("failed %d to get svm range pages\n", r);
1501 			goto unreserve_out;
1502 		}
1503 
1504 		offset = (addr - start) >> PAGE_SHIFT;
1505 		r = svm_range_dma_map(prange, ctx.bitmap, offset, npages,
1506 				      hmm_range->hmm_pfns);
1507 		if (r) {
1508 			pr_debug("failed %d to dma map range\n", r);
1509 			goto unreserve_out;
1510 		}
1511 
1512 		svm_range_lock(prange);
1513 		if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
1514 			pr_debug("hmm update the range, need validate again\n");
1515 			r = -EAGAIN;
1516 			goto unlock_out;
1517 		}
1518 		if (!list_empty(&prange->child_list)) {
1519 			pr_debug("range split by unmap in parallel, validate again\n");
1520 			r = -EAGAIN;
1521 			goto unlock_out;
1522 		}
1523 
1524 		r = svm_range_map_to_gpus(prange, offset, npages, readonly,
1525 					  ctx.bitmap, wait);
1526 
1527 unlock_out:
1528 		svm_range_unlock(prange);
1529 
1530 		addr = next;
1531 	}
1532 
1533 	if (addr == end)
1534 		prange->validated_once = true;
1535 
1536 unreserve_out:
1537 	svm_range_unreserve_bos(&ctx);
1538 
1539 	if (!r)
1540 		prange->validate_timestamp = ktime_to_us(ktime_get());
1541 
1542 	return r;
1543 }
1544 
1545 /**
1546  * svm_range_list_lock_and_flush_work - flush pending deferred work
1547  *
1548  * @svms: the svm range list
1549  * @mm: the mm structure
1550  *
1551  * Context: Returns with mmap write lock held, pending deferred work flushed
1552  *
1553  */
1554 static void
1555 svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
1556 				   struct mm_struct *mm)
1557 {
1558 retry_flush_work:
1559 	flush_work(&svms->deferred_list_work);
1560 	mmap_write_lock(mm);
1561 
1562 	if (list_empty(&svms->deferred_range_list))
1563 		return;
1564 	mmap_write_unlock(mm);
1565 	pr_debug("retry flush\n");
1566 	goto retry_flush_work;
1567 }
1568 
1569 static void svm_range_restore_work(struct work_struct *work)
1570 {
1571 	struct delayed_work *dwork = to_delayed_work(work);
1572 	struct svm_range_list *svms;
1573 	struct svm_range *prange;
1574 	struct kfd_process *p;
1575 	struct mm_struct *mm;
1576 	int evicted_ranges;
1577 	int invalid;
1578 	int r;
1579 
1580 	svms = container_of(dwork, struct svm_range_list, restore_work);
1581 	evicted_ranges = atomic_read(&svms->evicted_ranges);
1582 	if (!evicted_ranges)
1583 		return;
1584 
1585 	pr_debug("restore svm ranges\n");
1586 
1587 	/* kfd_process_notifier_release destroys this worker thread. So during
1588 	 * the lifetime of this thread, kfd_process and mm will be valid.
1589 	 */
1590 	p = container_of(svms, struct kfd_process, svms);
1591 	mm = p->mm;
1592 	if (!mm)
1593 		return;
1594 
1595 	svm_range_list_lock_and_flush_work(svms, mm);
1596 	mutex_lock(&svms->lock);
1597 
1598 	evicted_ranges = atomic_read(&svms->evicted_ranges);
1599 
1600 	list_for_each_entry(prange, &svms->list, list) {
1601 		invalid = atomic_read(&prange->invalid);
1602 		if (!invalid)
1603 			continue;
1604 
1605 		pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n",
1606 			 prange->svms, prange, prange->start, prange->last,
1607 			 invalid);
1608 
1609 		/*
1610 		 * If the range is migrating, wait until the migration is done.
1611 		 */
1612 		mutex_lock(&prange->migrate_mutex);
1613 
1614 		r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
1615 					       false, true);
1616 		if (r)
1617 			pr_debug("failed %d to map 0x%lx to gpus\n", r,
1618 				 prange->start);
1619 
1620 		mutex_unlock(&prange->migrate_mutex);
1621 		if (r)
1622 			goto out_reschedule;
1623 
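		/* Retry if the range was invalidated again while it was being
		 * restored.
		 */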
1624 		if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid)
1625 			goto out_reschedule;
1626 	}
1627 
1628 	if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
1629 	    evicted_ranges)
1630 		goto out_reschedule;
1631 
1632 	evicted_ranges = 0;
1633 
1634 	r = kgd2kfd_resume_mm(mm);
1635 	if (r) {
1636 		/* No recovery from this failure. Probably the CP is
1637 		 * hanging. No point trying again.
1638 		 */
1639 		pr_debug("failed %d to resume KFD\n", r);
1640 	}
1641 
1642 	pr_debug("restore svm ranges successfully\n");
1643 
1644 out_reschedule:
1645 	mutex_unlock(&svms->lock);
1646 	mmap_write_unlock(mm);
1647 
1648 	/* If validation failed, reschedule another attempt */
1649 	if (evicted_ranges) {
1650 		pr_debug("reschedule to restore svm range\n");
1651 		schedule_delayed_work(&svms->restore_work,
1652 			msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
1653 	}
1654 }
1655 
1656 /**
1657  * svm_range_evict - evict svm range
1658  *
1659  * Stop all queues of the process to ensure the GPU doesn't access the memory,
1660  * then return to let the CPU evict the buffer and proceed with the CPU page
1661  * table update.
1662  *
1663  * No lock is needed to sync CPU page table invalidation with GPU execution.
1664  * If invalidation happens while the restore work is running, the restore work
1665  * restarts to pick up the latest CPU page mapping, then starts the queues.
1666  */
1667 static int
1668 svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
1669 		unsigned long start, unsigned long last)
1670 {
1671 	struct svm_range_list *svms = prange->svms;
1672 	struct svm_range *pchild;
1673 	struct kfd_process *p;
1674 	int r = 0;
1675 
1676 	p = container_of(svms, struct kfd_process, svms);
1677 
1678 	pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
1679 		 svms, prange->start, prange->last, start, last);
1680 
1681 	if (!p->xnack_enabled) {
1682 		int evicted_ranges;
1683 
1684 		list_for_each_entry(pchild, &prange->child_list, child_list) {
1685 			mutex_lock_nested(&pchild->lock, 1);
1686 			if (pchild->start <= last && pchild->last >= start) {
1687 				pr_debug("increment pchild invalid [0x%lx 0x%lx]\n",
1688 					 pchild->start, pchild->last);
1689 				atomic_inc(&pchild->invalid);
1690 			}
1691 			mutex_unlock(&pchild->lock);
1692 		}
1693 
1694 		if (prange->start <= last && prange->last >= start)
1695 			atomic_inc(&prange->invalid);
1696 
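		/* Only the first eviction needs to quiesce the queues and
		 * schedule the restore work; later evictions just increment
		 * the counter while a restore is already pending.
		 */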
1697 		evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
1698 		if (evicted_ranges != 1)
1699 			return r;
1700 
1701 		pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n",
1702 			 prange->svms, prange->start, prange->last);
1703 
1704 		/* First eviction, stop the queues */
1705 		r = kgd2kfd_quiesce_mm(mm);
1706 		if (r)
1707 			pr_debug("failed to quiesce KFD\n");
1708 
1709 		pr_debug("schedule to restore svm %p ranges\n", svms);
1710 		schedule_delayed_work(&svms->restore_work,
1711 			msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
1712 	} else {
1713 		unsigned long s, l;
1714 
1715 		pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
1716 			 prange->svms, start, last);
1717 		list_for_each_entry(pchild, &prange->child_list, child_list) {
1718 			mutex_lock_nested(&pchild->lock, 1);
1719 			s = max(start, pchild->start);
1720 			l = min(last, pchild->last);
1721 			if (l >= s)
1722 				svm_range_unmap_from_gpus(pchild, s, l);
1723 			mutex_unlock(&pchild->lock);
1724 		}
1725 		s = max(start, prange->start);
1726 		l = min(last, prange->last);
1727 		if (l >= s)
1728 			svm_range_unmap_from_gpus(prange, s, l);
1729 	}
1730 
1731 	return r;
1732 }
1733 
1734 static struct svm_range *svm_range_clone(struct svm_range *old)
1735 {
1736 	struct svm_range *new;
1737 
1738 	new = svm_range_new(old->svms, old->start, old->last);
1739 	if (!new)
1740 		return NULL;
1741 
1742 	if (old->svm_bo) {
1743 		new->ttm_res = old->ttm_res;
1744 		new->offset = old->offset;
1745 		new->svm_bo = svm_range_bo_ref(old->svm_bo);
1746 		spin_lock(&new->svm_bo->list_lock);
1747 		list_add(&new->svm_bo_list, &new->svm_bo->range_list);
1748 		spin_unlock(&new->svm_bo->list_lock);
1749 	}
1750 	new->flags = old->flags;
1751 	new->preferred_loc = old->preferred_loc;
1752 	new->prefetch_loc = old->prefetch_loc;
1753 	new->actual_loc = old->actual_loc;
1754 	new->granularity = old->granularity;
1755 	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
1756 	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
1757 
1758 	return new;
1759 }
1760 
1761 /**
1762  * svm_range_add - add svm range and handle overlap
1763  * @p: the process whose svms the range is added to
1764  * @start: range start address, in pages
1765  * @size: range size, in pages
1766  * @nattr: number of attributes
1767  * @attrs: array of attributes
1768  * @update_list: output, the ranges that need validation and GPU mapping update
1769  * @insert_list: output, the ranges that need to be inserted into svms
1770  * @remove_list: output, the replaced ranges that need to be removed from svms
1771  *
1772  * Check if the virtual address range overlaps any existing ranges, split
1773  * partly overlapping ranges and add new ranges in the gaps. All changes
1774  * should be applied to the range_list and interval tree transactionally. If
1775  * any range split or allocation fails, the entire update fails. Therefore any
1776  * existing overlapping svm_ranges are cloned and the original svm_ranges are
1777  * left unchanged.
1778  *
1779  * If the transaction succeeds, the caller can update and insert clones and
1780  * new ranges, then free the originals.
1781  *
1782  * Otherwise the caller can free the clones and new ranges, while the old
1783  * svm_ranges remain unchanged.
1784  *
1785  * Context: Process context, caller must hold svms->lock
1786  *
1787  * Return:
1788  * 0 - OK, otherwise error code
1789  */
1790 static int
1791 svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
1792 	      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
1793 	      struct list_head *update_list, struct list_head *insert_list,
1794 	      struct list_head *remove_list)
1795 {
1796 	unsigned long last = start + size - 1UL;
1797 	struct svm_range_list *svms = &p->svms;
1798 	struct interval_tree_node *node;
1799 	struct svm_range new = {0};
1800 	struct svm_range *prange;
1801 	struct svm_range *tmp;
1802 	int r = 0;
1803 
1804 	pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last);
1805 
1806 	INIT_LIST_HEAD(update_list);
1807 	INIT_LIST_HEAD(insert_list);
1808 	INIT_LIST_HEAD(remove_list);
1809 	svm_range_apply_attrs(p, &new, nattr, attrs);
1810 
1811 	node = interval_tree_iter_first(&svms->objects, start, last);
1812 	while (node) {
1813 		struct interval_tree_node *next;
1814 		struct svm_range *old;
1815 		unsigned long next_start;
1816 
1817 		pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start,
1818 			 node->last);
1819 
1820 		old = container_of(node, struct svm_range, it_node);
1821 		next = interval_tree_iter_next(node, start, last);
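		/* next_start is where the scan resumes after this node: just
		 * past the overlap, clamped to the end of the requested range
		 */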
1822 		next_start = min(node->last, last) + 1;
1823 
1824 		if (node->start < start || node->last > last) {
1825 			/* node intersects the updated range, clone+split it */
1826 			prange = svm_range_clone(old);
1827 			if (!prange) {
1828 				r = -ENOMEM;
1829 				goto out;
1830 			}
1831 
1832 			list_add(&old->remove_list, remove_list);
1833 			list_add(&prange->insert_list, insert_list);
1834 
1835 			if (node->start < start) {
1836 				pr_debug("change old range start\n");
1837 				r = svm_range_split_head(prange, start,
1838 							 insert_list);
1839 				if (r)
1840 					goto out;
1841 			}
1842 			if (node->last > last) {
1843 				pr_debug("change old range last\n");
1844 				r = svm_range_split_tail(prange, last,
1845 							 insert_list);
1846 				if (r)
1847 					goto out;
1848 			}
1849 		} else {
1850 			/* The node is contained within start..last,
1851 			 * just update it
1852 			 */
1853 			prange = old;
1854 		}
1855 
1856 		if (!svm_range_is_same_attrs(prange, &new))
1857 			list_add(&prange->update_list, update_list);
1858 
1859 		/* insert a new node if needed */
1860 		if (node->start > start) {
1861 			prange = svm_range_new(prange->svms, start,
1862 					       node->start - 1);
1863 			if (!prange) {
1864 				r = -ENOMEM;
1865 				goto out;
1866 			}
1867 
1868 			list_add(&prange->insert_list, insert_list);
1869 			list_add(&prange->update_list, update_list);
1870 		}
1871 
1872 		node = next;
1873 		start = next_start;
1874 	}
1875 
1876 	/* add a final range at the end if needed */
1877 	if (start <= last) {
1878 		prange = svm_range_new(svms, start, last);
1879 		if (!prange) {
1880 			r = -ENOMEM;
1881 			goto out;
1882 		}
1883 		list_add(&prange->insert_list, insert_list);
1884 		list_add(&prange->update_list, update_list);
1885 	}
1886 
1887 out:
1888 	if (r)
1889 		list_for_each_entry_safe(prange, tmp, insert_list, insert_list)
1890 			svm_range_free(prange);
1891 
1892 	return r;
1893 }
1894 
1895 static void
1896 svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
1897 					    struct svm_range *prange)
1898 {
1899 	unsigned long start;
1900 	unsigned long last;
1901 
1902 	start = prange->notifier.interval_tree.start >> PAGE_SHIFT;
1903 	last = prange->notifier.interval_tree.last >> PAGE_SHIFT;
1904 
1905 	if (prange->start == start && prange->last == last)
1906 		return;
1907 
1908 	pr_debug("up notifier 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
1909 		  prange->svms, prange, start, last, prange->start,
1910 		  prange->last);
1911 
1912 	if (start != 0 && last != 0) {
1913 		interval_tree_remove(&prange->it_node, &prange->svms->objects);
1914 		svm_range_remove_notifier(prange);
1915 	}
1916 	prange->it_node.start = prange->start;
1917 	prange->it_node.last = prange->last;
1918 
1919 	interval_tree_insert(&prange->it_node, &prange->svms->objects);
1920 	svm_range_add_notifier_locked(mm, prange);
1921 }
1922 
1923 static void
1924 svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange)
1925 {
1926 	struct mm_struct *mm = prange->work_item.mm;
1927 
1928 	switch (prange->work_item.op) {
1929 	case SVM_OP_NULL:
1930 		pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n",
1931 			 svms, prange, prange->start, prange->last);
1932 		break;
1933 	case SVM_OP_UNMAP_RANGE:
1934 		pr_debug("remove 0x%p prange 0x%p [0x%lx 0x%lx]\n",
1935 			 svms, prange, prange->start, prange->last);
1936 		svm_range_unlink(prange);
1937 		svm_range_remove_notifier(prange);
1938 		svm_range_free(prange);
1939 		break;
1940 	case SVM_OP_UPDATE_RANGE_NOTIFIER:
1941 		pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n",
1942 			 svms, prange, prange->start, prange->last);
1943 		svm_range_update_notifier_and_interval_tree(mm, prange);
1944 		break;
1945 	case SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP:
1946 		pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n",
1947 			 svms, prange, prange->start, prange->last);
1948 		svm_range_update_notifier_and_interval_tree(mm, prange);
1949 		/* TODO: implement deferred validation and mapping */
1950 		break;
1951 	case SVM_OP_ADD_RANGE:
1952 		pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange,
1953 			 prange->start, prange->last);
1954 		svm_range_add_to_svms(prange);
1955 		svm_range_add_notifier_locked(mm, prange);
1956 		break;
1957 	case SVM_OP_ADD_RANGE_AND_MAP:
1958 		pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms,
1959 			 prange, prange->start, prange->last);
1960 		svm_range_add_to_svms(prange);
1961 		svm_range_add_notifier_locked(mm, prange);
1962 		/* TODO: implement deferred validation and mapping */
1963 		break;
1964 	default:
1965 		WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange,
1966 			 prange->work_item.op);
1967 	}
1968 }
1969 
1970 static void svm_range_drain_retry_fault(struct svm_range_list *svms)
1971 {
1972 	struct kfd_process_device *pdd;
1973 	struct amdgpu_device *adev;
1974 	struct kfd_process *p;
1975 	uint32_t i;
1976 
1977 	p = container_of(svms, struct kfd_process, svms);
1978 
1979 	for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
1980 		pdd = p->pdds[i];
1981 		if (!pdd)
1982 			continue;
1983 
1984 		pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
1985 		adev = (struct amdgpu_device *)pdd->dev->kgd;
1986 
1987 		amdgpu_ih_wait_on_checkpoint_process(adev, &adev->irq.ih1);
1988 		pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
1989 	}
1990 }
1991 
1992 static void svm_range_deferred_list_work(struct work_struct *work)
1993 {
1994 	struct svm_range_list *svms;
1995 	struct svm_range *prange;
1996 	struct mm_struct *mm;
1997 
1998 	svms = container_of(work, struct svm_range_list, deferred_list_work);
1999 	pr_debug("enter svms 0x%p\n", svms);
2000 
2001 	spin_lock(&svms->deferred_list_lock);
2002 	while (!list_empty(&svms->deferred_range_list)) {
2003 		prange = list_first_entry(&svms->deferred_range_list,
2004 					  struct svm_range, deferred_list);
2005 		spin_unlock(&svms->deferred_list_lock);
2006 		pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
2007 			 prange->start, prange->last, prange->work_item.op);
2008 
2009 		/* Make sure no stale retry fault comes in after the range is freed */
2010 		if (prange->work_item.op == SVM_OP_UNMAP_RANGE)
2011 			svm_range_drain_retry_fault(prange->svms);
2012 
2013 		mm = prange->work_item.mm;
2014 		mmap_write_lock(mm);
2015 		mutex_lock(&svms->lock);
2016 
2017 		/* Removal from the deferred_list must happen under the mmap
2018 		 * write lock; otherwise svm_range_list_lock_and_flush_work may
2019 		 * take the mmap write lock and proceed because deferred_list
2020 		 * looks empty, while this handling is still blocked on the lock.
2021 		 */
2022 		spin_lock(&svms->deferred_list_lock);
2023 		list_del_init(&prange->deferred_list);
2024 		spin_unlock(&svms->deferred_list_lock);
2025 
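		/* Handle child ranges created by splits first, under the
		 * parent's migrate_mutex, before handling the parent's own op.
		 */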
2026 		mutex_lock(&prange->migrate_mutex);
2027 		while (!list_empty(&prange->child_list)) {
2028 			struct svm_range *pchild;
2029 
2030 			pchild = list_first_entry(&prange->child_list,
2031 						struct svm_range, child_list);
2032 			pr_debug("child prange 0x%p op %d\n", pchild,
2033 				 pchild->work_item.op);
2034 			list_del_init(&pchild->child_list);
2035 			svm_range_handle_list_op(svms, pchild);
2036 		}
2037 		mutex_unlock(&prange->migrate_mutex);
2038 
2039 		svm_range_handle_list_op(svms, prange);
2040 		mutex_unlock(&svms->lock);
2041 		mmap_write_unlock(mm);
2042 
2043 		spin_lock(&svms->deferred_list_lock);
2044 	}
2045 	spin_unlock(&svms->deferred_list_lock);
2046 
2047 	pr_debug("exit svms 0x%p\n", svms);
2048 }
2049 
2050 void
2051 svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
2052 			struct mm_struct *mm, enum svm_work_list_ops op)
2053 {
2054 	spin_lock(&svms->deferred_list_lock);
2055 	/* if prange is on the deferred list */
2056 	if (!list_empty(&prange->deferred_list)) {
2057 		pr_debug("update exist prange 0x%p work op %d\n", prange, op);
2058 		WARN_ONCE(prange->work_item.mm != mm, "unmatch mm\n");
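		/* A pending SVM_OP_UNMAP_RANGE always wins; otherwise a
		 * non-NULL new op replaces the pending one.
		 */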
2059 		if (op != SVM_OP_NULL &&
2060 		    prange->work_item.op != SVM_OP_UNMAP_RANGE)
2061 			prange->work_item.op = op;
2062 	} else {
2063 		prange->work_item.op = op;
2064 		prange->work_item.mm = mm;
2065 		list_add_tail(&prange->deferred_list,
2066 			      &prange->svms->deferred_range_list);
2067 		pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
2068 			 prange, prange->start, prange->last, op);
2069 	}
2070 	spin_unlock(&svms->deferred_list_lock);
2071 }
2072 
2073 void schedule_deferred_list_work(struct svm_range_list *svms)
2074 {
2075 	spin_lock(&svms->deferred_list_lock);
2076 	if (!list_empty(&svms->deferred_range_list))
2077 		schedule_work(&svms->deferred_list_work);
2078 	spin_unlock(&svms->deferred_list_lock);
2079 }
2080 
2081 static void
2082 svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
2083 		      struct svm_range *prange, unsigned long start,
2084 		      unsigned long last)
2085 {
2086 	struct svm_range *head;
2087 	struct svm_range *tail;
2088 
2089 	if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2090 		pr_debug("prange 0x%p [0x%lx 0x%lx] is already freed\n", prange,
2091 			 prange->start, prange->last);
2092 		return;
2093 	}
2094 	if (start > prange->last || last < prange->start)
2095 		return;
2096 
2097 	head = tail = prange;
2098 	if (start > prange->start)
2099 		svm_range_split(prange, prange->start, start - 1, &tail);
2100 	if (last < tail->last)
2101 		svm_range_split(tail, last + 1, tail->last, &head);
2102 
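	/* Queue split-off pieces as child work items: a piece entirely inside
	 * [start, last] is unmapped (SVM_OP_UNMAP_RANGE), a remaining piece
	 * beyond it is re-added (SVM_OP_ADD_RANGE).
	 */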
2103 	if (head != prange && tail != prange) {
2104 		svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2105 		svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
2106 	} else if (tail != prange) {
2107 		svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE);
2108 	} else if (head != prange) {
2109 		svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2110 	} else if (parent != prange) {
2111 		prange->work_item.op = SVM_OP_UNMAP_RANGE;
2112 	}
2113 }
2114 
2115 static void
2116 svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
2117 			 unsigned long start, unsigned long last)
2118 {
2119 	struct svm_range_list *svms;
2120 	struct svm_range *pchild;
2121 	struct kfd_process *p;
2122 	unsigned long s, l;
2123 	bool unmap_parent;
2124 
2125 	p = kfd_lookup_process_by_mm(mm);
2126 	if (!p)
2127 		return;
2128 	svms = &p->svms;
2129 
2130 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
2131 		 prange, prange->start, prange->last, start, last);
2132 
2133 	unmap_parent = start <= prange->start && last >= prange->last;
2134 
2135 	list_for_each_entry(pchild, &prange->child_list, child_list) {
2136 		mutex_lock_nested(&pchild->lock, 1);
2137 		s = max(start, pchild->start);
2138 		l = min(last, pchild->last);
2139 		if (l >= s)
2140 			svm_range_unmap_from_gpus(pchild, s, l);
2141 		svm_range_unmap_split(mm, prange, pchild, start, last);
2142 		mutex_unlock(&pchild->lock);
2143 	}
2144 	s = max(start, prange->start);
2145 	l = min(last, prange->last);
2146 	if (l >= s)
2147 		svm_range_unmap_from_gpus(prange, s, l);
2148 	svm_range_unmap_split(mm, prange, prange, start, last);
2149 
2150 	if (unmap_parent)
2151 		svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
2152 	else
2153 		svm_range_add_list_work(svms, prange, mm,
2154 					SVM_OP_UPDATE_RANGE_NOTIFIER);
2155 	schedule_deferred_list_work(svms);
2156 
2157 	kfd_unref_process(p);
2158 }
2159 
2160 /**
2161  * svm_range_cpu_invalidate_pagetables - interval notifier callback
2162  *
2163  * If the event is MMU_NOTIFY_UNMAP, this is a CPU unmap of the range;
2164  * otherwise it comes from migration or a CPU page invalidation callback.
2165  *
2166  * For an unmap event, unmap the range from the GPUs, remove the prange
2167  * from svms in a delayed work thread, and split it if only partly unmapped.
2168  *
2169  * For an invalidation event, if GPU retry faults are not enabled, evict the
2170  * queues, then schedule svm_range_restore_work to update the GPU mapping and
2171  * resume the queues. If GPU retry faults are enabled, unmap the svm range from
2172  * the GPU; the retry fault will update the GPU mapping to recover.
2173  *
2174  * Context: mmap lock, notifier_invalidate_start lock are held
2175  *          for invalidate event, prange lock is held if this is from migration
2176  */
2177 static bool
2178 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
2179 				    const struct mmu_notifier_range *range,
2180 				    unsigned long cur_seq)
2181 {
2182 	struct svm_range *prange;
2183 	unsigned long start;
2184 	unsigned long last;
2185 
2186 	if (range->event == MMU_NOTIFY_RELEASE)
2187 		return true;
2188 	if (!mmget_not_zero(mni->mm))
2189 		return true;
2190 
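	/* Clamp the invalidated region to the part covered by this notifier
	 * and convert the addresses to page numbers.
	 */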
2191 	start = mni->interval_tree.start;
2192 	last = mni->interval_tree.last;
2193 	start = (start > range->start ? start : range->start) >> PAGE_SHIFT;
2194 	last = (last < (range->end - 1) ? last : range->end - 1) >> PAGE_SHIFT;
2195 	pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n",
2196 		 start, last, range->start >> PAGE_SHIFT,
2197 		 (range->end - 1) >> PAGE_SHIFT,
2198 		 mni->interval_tree.start >> PAGE_SHIFT,
2199 		 mni->interval_tree.last >> PAGE_SHIFT, range->event);
2200 
2201 	prange = container_of(mni, struct svm_range, notifier);
2202 
2203 	svm_range_lock(prange);
2204 	mmu_interval_set_seq(mni, cur_seq);
2205 
2206 	switch (range->event) {
2207 	case MMU_NOTIFY_UNMAP:
2208 		svm_range_unmap_from_cpu(mni->mm, prange, start, last);
2209 		break;
2210 	default:
2211 		svm_range_evict(prange, mni->mm, start, last);
2212 		break;
2213 	}
2214 
2215 	svm_range_unlock(prange);
2216 	mmput(mni->mm);
2217 
2218 	return true;
2219 }
2220 
2221 /**
2222  * svm_range_from_addr - find svm range from fault address
2223  * @svms: svm range list header
2224  * @addr: address to search range interval tree, in pages
2225  * @parent: parent range if range is on child list
2226  *
2227  * Context: The caller must hold svms->lock
2228  *
2229  * Return: the svm_range found or NULL
2230  */
2231 struct svm_range *
2232 svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
2233 		    struct svm_range **parent)
2234 {
2235 	struct interval_tree_node *node;
2236 	struct svm_range *prange;
2237 	struct svm_range *pchild;
2238 
2239 	node = interval_tree_iter_first(&svms->objects, addr, addr);
2240 	if (!node)
2241 		return NULL;
2242 
2243 	prange = container_of(node, struct svm_range, it_node);
2244 	pr_debug("address 0x%lx prange [0x%lx 0x%lx] node [0x%lx 0x%lx]\n",
2245 		 addr, prange->start, prange->last, node->start, node->last);
2246 
2247 	if (addr >= prange->start && addr <= prange->last) {
2248 		if (parent)
2249 			*parent = prange;
2250 		return prange;
2251 	}
2252 	list_for_each_entry(pchild, &prange->child_list, child_list)
2253 		if (addr >= pchild->start && addr <= pchild->last) {
2254 			pr_debug("found address 0x%lx pchild [0x%lx 0x%lx]\n",
2255 				 addr, pchild->start, pchild->last);
2256 			if (parent)
2257 				*parent = prange;
2258 			return pchild;
2259 		}
2260 
2261 	return NULL;
2262 }
2263 
2264 /* svm_range_best_restore_location - decide the best fault restore location
2265  * @prange: svm range structure
2266  * @adev: the GPU on which vm fault happened
2267  *
2268  * This is only called when xnack is on, to decide the best location to restore
2269  * the range mapping after a GPU vm fault. The caller uses the best location to
2270  * migrate the range if the actual location is not the best location, then
2271  * updates the GPU page table mapping to the best location.
2272  *
2273  * If the vm fault gpu is the range preferred loc, best_loc is the preferred loc.
2274  * If the vm fault gpu idx is in the range ACCESSIBLE bitmap, best_loc is the
2275  *    vm fault gpu.
2276  * If the vm fault gpu idx is in the range ACCESSIBLE_IN_PLACE bitmap, then
2277  *    if the range actual loc is cpu, best_loc is cpu; if the vm fault gpu is in
2278  *    the same xgmi hive as the range actual loc gpu, best_loc is the actual loc.
2279  * Otherwise the GPU has no access and best_loc is -1.
2280  *
2281  * Return:
2282  * -1 if the faulting GPU has no access to the range,
2283  * 0 for CPU, or a GPU id otherwise
2284  */
2285 static int32_t
2286 svm_range_best_restore_location(struct svm_range *prange,
2287 				struct amdgpu_device *adev,
2288 				int32_t *gpuidx)
2289 {
2290 	struct amdgpu_device *bo_adev;
2291 	struct kfd_process *p;
2292 	uint32_t gpuid;
2293 	int r;
2294 
2295 	p = container_of(prange->svms, struct kfd_process, svms);
2296 
2297 	r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, gpuidx);
2298 	if (r < 0) {
2299 		pr_debug("failed to get gpuid from kgd\n");
2300 		return -1;
2301 	}
2302 
2303 	if (prange->preferred_loc == gpuid)
2304 		return prange->preferred_loc;
2305 
2306 	if (test_bit(*gpuidx, prange->bitmap_access))
2307 		return gpuid;
2308 
2309 	if (test_bit(*gpuidx, prange->bitmap_aip)) {
2310 		if (!prange->actual_loc)
2311 			return 0;
2312 
2313 		bo_adev = svm_range_get_adev_by_id(prange, prange->actual_loc);
2314 		if (amdgpu_xgmi_same_hive(adev, bo_adev))
2315 			return prange->actual_loc;
2316 		else
2317 			return 0;
2318 	}
2319 
2320 	return -1;
2321 }
2322 static int
2323 svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
2324 				unsigned long *start, unsigned long *last)
2325 {
2326 	struct vm_area_struct *vma;
2327 	struct interval_tree_node *node;
2328 	struct rb_node *rb_node;
2329 	unsigned long start_limit, end_limit;
2330 
2331 	vma = find_vma(p->mm, addr << PAGE_SHIFT);
2332 	if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
2333 		pr_debug("VMA does not exist in address [0x%llx]\n", addr);
2334 		return -EFAULT;
2335 	}
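	/* Confine the new range to the containing VMA, within a 512-page
	 * (2MB with 4K pages) aligned window around the fault address.
	 */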
2336 	start_limit = max(vma->vm_start >> PAGE_SHIFT,
2337 		      (unsigned long)ALIGN_DOWN(addr, 2UL << 8));
2338 	end_limit = min(vma->vm_end >> PAGE_SHIFT,
2339 		    (unsigned long)ALIGN(addr + 1, 2UL << 8));
2340 	/* First range that starts after the fault address */
2341 	node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
2342 	if (node) {
2343 		end_limit = min(end_limit, node->start);
2344 		/* Last range that ends before the fault address */
2345 		rb_node = rb_prev(&node->rb);
2346 	} else {
2347 		/* Last range must end before addr because
2348 		 * there was no range after addr
2349 		 */
2350 		rb_node = rb_last(&p->svms.objects.rb_root);
2351 	}
2352 	if (rb_node) {
2353 		node = container_of(rb_node, struct interval_tree_node, rb);
2354 		if (node->last >= addr) {
2355 			WARN(1, "Overlap with prev node and page fault addr\n");
2356 			return -EFAULT;
2357 		}
2358 		start_limit = max(start_limit, node->last + 1);
2359 	}
2360 
2361 	*start = start_limit;
2362 	*last = end_limit - 1;
2363 
2364 	pr_debug("vma start: 0x%lx start: 0x%lx vma end: 0x%lx last: 0x%lx\n",
2365 		  vma->vm_start >> PAGE_SHIFT, *start,
2366 		  vma->vm_end >> PAGE_SHIFT, *last);
2367 
2368 	return 0;
2369 
2370 }
2371 static struct
2372 svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
2373 						struct kfd_process *p,
2374 						struct mm_struct *mm,
2375 						int64_t addr)
2376 {
2377 	struct svm_range *prange = NULL;
2378 	unsigned long start, last;
2379 	uint32_t gpuid, gpuidx;
2380 
2381 	if (svm_range_get_range_boundaries(p, addr, &start, &last))
2382 		return NULL;
2383 
2384 	prange = svm_range_new(&p->svms, start, last);
2385 	if (!prange) {
2386 		pr_debug("Failed to create prange in address [0x%llx]\n", addr);
2387 		return NULL;
2388 	}
2389 	if (kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx)) {
2390 		pr_debug("failed to get gpuid from kgd\n");
2391 		svm_range_free(prange);
2392 		return NULL;
2393 	}
2394 
2395 	svm_range_add_to_svms(prange);
2396 	svm_range_add_notifier_locked(mm, prange);
2397 
2398 	return prange;
2399 }
2400 
2401 /* svm_range_skip_recover - decide if prange can be recovered
2402  * @prange: svm range structure
2403  *
2404  * The GPU vm retry fault handler skips recovering the range in these cases:
2405  * 1. prange is on the deferred list to be removed after unmap; this is a stale
2406  *    fault, and the deferred list work drains stale faults before freeing it.
2407  * 2. prange is on the deferred list to add its interval notifier after a split, or
2408  * 3. prange is a child range split from a parent prange; it is recovered later,
2409  *    after its interval notifier is added.
2410  *
2411  * Return: true to skip recover, false to recover
2412  */
2413 static bool svm_range_skip_recover(struct svm_range *prange)
2414 {
2415 	struct svm_range_list *svms = prange->svms;
2416 
2417 	spin_lock(&svms->deferred_list_lock);
2418 	if (list_empty(&prange->deferred_list) &&
2419 	    list_empty(&prange->child_list)) {
2420 		spin_unlock(&svms->deferred_list_lock);
2421 		return false;
2422 	}
2423 	spin_unlock(&svms->deferred_list_lock);
2424 
2425 	if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2426 		pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n",
2427 			 svms, prange, prange->start, prange->last);
2428 		return true;
2429 	}
2430 	if (prange->work_item.op == SVM_OP_ADD_RANGE_AND_MAP ||
2431 	    prange->work_item.op == SVM_OP_ADD_RANGE) {
2432 		pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n",
2433 			 svms, prange, prange->start, prange->last);
2434 		return true;
2435 	}
2436 	return false;
2437 }
2438 
2439 static void
2440 svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
2441 		      int32_t gpuidx)
2442 {
2443 	struct kfd_process_device *pdd;
2444 
2445 	/* fault is on different page of same range
2446 	 * or fault is skipped to recover later
2447 	 * or fault is on invalid virtual address
2448 	 */
2449 	if (gpuidx == MAX_GPU_INSTANCE) {
2450 		uint32_t gpuid;
2451 		int r;
2452 
2453 		r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx);
2454 		if (r < 0)
2455 			return;
2456 	}
2457 
2458 	/* fault is recovered
2459 	 * or fault cannot be recovered because the GPU has no access to the range
2460 	 */
2461 	pdd = kfd_process_device_from_gpuidx(p, gpuidx);
2462 	if (pdd)
2463 		WRITE_ONCE(pdd->faults, pdd->faults + 1);
2464 }
2465 
2466 static bool
2467 svm_fault_allowed(struct mm_struct *mm, uint64_t addr, bool write_fault)
2468 {
2469 	unsigned long requested = VM_READ;
2470 	struct vm_area_struct *vma;
2471 
2472 	if (write_fault)
2473 		requested |= VM_WRITE;
2474 
2475 	vma = find_vma(mm, addr << PAGE_SHIFT);
2476 	if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
2477 		pr_debug("address 0x%llx VMA is removed\n", addr);
2478 		return true;
2479 	}
2480 
2481 	pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
2482 		vma->vm_flags);
2483 	return (vma->vm_flags & requested) == requested;
2484 }
2485 
2486 int
2487 svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
2488 			uint64_t addr, bool write_fault)
2489 {
2490 	struct mm_struct *mm = NULL;
2491 	struct svm_range_list *svms;
2492 	struct svm_range *prange;
2493 	struct kfd_process *p;
2494 	uint64_t timestamp;
2495 	int32_t best_loc;
2496 	int32_t gpuidx = MAX_GPU_INSTANCE;
2497 	bool write_locked = false;
2498 	int r = 0;
2499 
2500 	if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) {
2501 		pr_debug("device does not support SVM\n");
2502 		return -EFAULT;
2503 	}
2504 
2505 	p = kfd_lookup_process_by_pasid(pasid);
2506 	if (!p) {
2507 		pr_debug("kfd process not found, pasid 0x%x\n", pasid);
2508 		return -ESRCH;
2509 	}
2510 	if (!p->xnack_enabled) {
2511 		pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
2512 		r = -EFAULT;
2513 		goto out;
2514 	}
2515 	svms = &p->svms;
2516 
2517 	pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
2518 
2519 	mm = get_task_mm(p->lead_thread);
2520 	if (!mm) {
2521 		pr_debug("svms 0x%p failed to get mm\n", svms);
2522 		r = -ESRCH;
2523 		goto out;
2524 	}
2525 
2526 	mmap_read_lock(mm);
2527 retry_write_locked:
2528 	mutex_lock(&svms->lock);
2529 	prange = svm_range_from_addr(svms, addr, NULL);
2530 	if (!prange) {
2531 		pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
2532 			 svms, addr);
2533 		if (!write_locked) {
2534 			/* Need the write lock to create new range with MMU notifier.
2535 			 * Also flush pending deferred work to make sure the interval
2536 			 * tree is up to date before we add a new range
2537 			 */
2538 			mutex_unlock(&svms->lock);
2539 			mmap_read_unlock(mm);
2540 			mmap_write_lock(mm);
2541 			write_locked = true;
2542 			goto retry_write_locked;
2543 		}
2544 		prange = svm_range_create_unregistered_range(adev, p, mm, addr);
2545 		if (!prange) {
2546 			pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
2547 				 svms, addr);
2548 			mmap_write_downgrade(mm);
2549 			r = -EFAULT;
2550 			goto out_unlock_svms;
2551 		}
2552 	}
2553 	if (write_locked)
2554 		mmap_write_downgrade(mm);
2555 
2556 	mutex_lock(&prange->migrate_mutex);
2557 
2558 	if (svm_range_skip_recover(prange)) {
2559 		amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
2560 		goto out_unlock_range;
2561 	}
2562 
2563 	timestamp = ktime_to_us(ktime_get()) - prange->validate_timestamp;
2564 	/* skip duplicate vm fault on different pages of same range */
2565 	if (timestamp < AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING) {
2566 		pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
2567 			 svms, prange->start, prange->last);
2568 		goto out_unlock_range;
2569 	}
2570 
2571 	if (!svm_fault_allowed(mm, addr, write_fault)) {
2572 		pr_debug("fault addr 0x%llx no %s permission\n", addr,
2573 			write_fault ? "write" : "read");
2574 		r = -EPERM;
2575 		goto out_unlock_range;
2576 	}
2577 
2578 	best_loc = svm_range_best_restore_location(prange, adev, &gpuidx);
2579 	if (best_loc == -1) {
2580 		pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n",
2581 			 svms, prange->start, prange->last);
2582 		r = -EACCES;
2583 		goto out_unlock_range;
2584 	}
2585 
2586 	pr_debug("svms %p [0x%lx 0x%lx] best restore 0x%x, actual loc 0x%x\n",
2587 		 svms, prange->start, prange->last, best_loc,
2588 		 prange->actual_loc);
2589 
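	/* Migrate the range to the best restore location first if it is not
	 * already there, then map it to the faulting GPU below.
	 */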
2590 	if (prange->actual_loc != best_loc) {
2591 		if (best_loc) {
2592 			r = svm_migrate_to_vram(prange, best_loc, mm);
2593 			if (r) {
2594 				pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
2595 					 r, addr);
2596 				/* Fallback to system memory if migration to
2597 				 * VRAM failed
2598 				 */
2599 				if (prange->actual_loc)
2600 					r = svm_migrate_vram_to_ram(prange, mm);
2601 				else
2602 					r = 0;
2603 			}
2604 		} else {
2605 			r = svm_migrate_vram_to_ram(prange, mm);
2606 		}
2607 		if (r) {
2608 			pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
2609 				 r, svms, prange->start, prange->last);
2610 			goto out_unlock_range;
2611 		}
2612 	}
2613 
2614 	r = svm_range_validate_and_map(mm, prange, gpuidx, false, false);
2615 	if (r)
2616 		pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
2617 			 r, svms, prange->start, prange->last);
2618 
2619 out_unlock_range:
2620 	mutex_unlock(&prange->migrate_mutex);
2621 out_unlock_svms:
2622 	mutex_unlock(&svms->lock);
2623 	mmap_read_unlock(mm);
2624 
2625 	svm_range_count_fault(adev, p, gpuidx);
2626 
2627 	mmput(mm);
2628 out:
2629 	kfd_unref_process(p);
2630 
2631 	if (r == -EAGAIN) {
2632 		pr_debug("recover vm fault later\n");
2633 		amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
2634 		r = 0;
2635 	}
2636 	return r;
2637 }
2638 
2639 void svm_range_list_fini(struct kfd_process *p)
2640 {
2641 	struct svm_range *prange;
2642 	struct svm_range *next;
2643 
2644 	pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);
2645 
2646 	/* Ensure list work is finished before process is destroyed */
2647 	flush_work(&p->svms.deferred_list_work);
2648 
2649 	list_for_each_entry_safe(prange, next, &p->svms.list, list) {
2650 		svm_range_unlink(prange);
2651 		svm_range_remove_notifier(prange);
2652 		svm_range_free(prange);
2653 	}
2654 
2655 	mutex_destroy(&p->svms.lock);
2656 
2657 	pr_debug("pasid 0x%x svms 0x%p done\n", p->pasid, &p->svms);
2658 }
2659 
2660 int svm_range_list_init(struct kfd_process *p)
2661 {
2662 	struct svm_range_list *svms = &p->svms;
2663 	int i;
2664 
2665 	svms->objects = RB_ROOT_CACHED;
2666 	mutex_init(&svms->lock);
2667 	INIT_LIST_HEAD(&svms->list);
2668 	atomic_set(&svms->evicted_ranges, 0);
2669 	INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
2670 	INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
2671 	INIT_LIST_HEAD(&svms->deferred_range_list);
2672 	spin_lock_init(&svms->deferred_list_lock);
2673 
2674 	for (i = 0; i < p->n_pdds; i++)
2675 		if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev))
2676 			bitmap_set(svms->bitmap_supported, i, 1);
2677 
2678 	return 0;
2679 }
2680 
2681 /**
2682  * svm_range_is_valid - check if virtual address range is valid
2683  * @mm: current process mm_struct
2684  * @start: range start address, in pages
2685  * @size: range size, in pages
2686  *
2687  * A valid virtual address range is one entirely covered by one or more VMAs
2688  *
2689  * Context: Process context
2690  *
2691  * Return:
2692  *  true - valid svm range
2693  *  false - invalid svm range
2694  */
2695 static bool
2696 svm_range_is_valid(struct mm_struct *mm, uint64_t start, uint64_t size)
2697 {
2698 	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
2699 	struct vm_area_struct *vma;
2700 	unsigned long end;
2701 
2702 	start <<= PAGE_SHIFT;
2703 	end = start + (size << PAGE_SHIFT);
2704 
2705 	do {
2706 		vma = find_vma(mm, start);
2707 		if (!vma || start < vma->vm_start ||
2708 		    (vma->vm_flags & device_vma))
2709 			return false;
2710 		start = min(end, vma->vm_end);
2711 	} while (start < end);
2712 
2713 	return true;
2714 }
2715 
2716 /**
2717  * svm_range_best_prefetch_location - decide the best prefetch location
2718  * @prange: svm range structure
2719  *
2720  * For xnack off:
2721  * If the range maps to a single GPU, the best prefetch location is
2722  * prefetch_loc, which can be CPU or GPU.
2723  *
2724  * If the range is ACCESS or ACCESS_IN_PLACE by multiple GPUs, the best
2725  * prefetch location is the prefetch_loc GPU only if the GPUs are in the same
2726  * XGMI hive; otherwise it is always CPU, because a GPU cannot coherently map
2727  * another GPU's VRAM even over a large-BAR PCIe connection.
2728  *
2729  * For xnack on:
2730  * If the range is not ACCESS_IN_PLACE by multiple GPUs, the best prefetch
2731  * location is prefetch_loc; access from other GPUs faults and triggers migration.
2732  *
2733  * If the range is ACCESS_IN_PLACE by multiple GPUs, the best prefetch location
2734  * is the prefetch_loc GPU only if the GPUs are in the same XGMI hive; otherwise
2735  * it is always CPU.
2736  *
2737  * Context: Process context
2738  *
2739  * Return:
2740  * 0 for CPU, or a GPU id
2741  */
2742 static uint32_t
2743 svm_range_best_prefetch_location(struct svm_range *prange)
2744 {
2745 	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
2746 	uint32_t best_loc = prange->prefetch_loc;
2747 	struct kfd_process_device *pdd;
2748 	struct amdgpu_device *bo_adev;
2749 	struct amdgpu_device *adev;
2750 	struct kfd_process *p;
2751 	uint32_t gpuidx;
2752 
2753 	p = container_of(prange->svms, struct kfd_process, svms);
2754 
2755 	if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
2756 		goto out;
2757 
2758 	bo_adev = svm_range_get_adev_by_id(prange, best_loc);
2759 	if (!bo_adev) {
2760 		WARN_ONCE(1, "failed to get device by id 0x%x\n", best_loc);
2761 		best_loc = 0;
2762 		goto out;
2763 	}
2764 
2765 	if (p->xnack_enabled)
2766 		bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
2767 	else
2768 		bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
2769 			  MAX_GPU_INSTANCE);
2770 
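	/* If any GPU that needs access to the range is not in the same XGMI
	 * hive as the prefetch target, fall back to CPU (best_loc 0).
	 */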
2771 	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
2772 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
2773 		if (!pdd) {
2774 			pr_debug("failed to get device by idx 0x%x\n", gpuidx);
2775 			continue;
2776 		}
2777 		adev = (struct amdgpu_device *)pdd->dev->kgd;
2778 
2779 		if (adev == bo_adev)
2780 			continue;
2781 
2782 		if (!amdgpu_xgmi_same_hive(adev, bo_adev)) {
2783 			best_loc = 0;
2784 			break;
2785 		}
2786 	}
2787 
2788 out:
2789 	pr_debug("xnack %d svms 0x%p [0x%lx 0x%lx] best loc 0x%x\n",
2790 		 p->xnack_enabled, &p->svms, prange->start, prange->last,
2791 		 best_loc);
2792 
2793 	return best_loc;
2794 }
2795 
2796 /* FIXME: This is a workaround for a page locking bug when some pages are
2797  * invalid during migration to VRAM
2798  */
2799 void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm,
2800 			void *owner)
2801 {
2802 	struct hmm_range *hmm_range;
2803 	int r;
2804 
2805 	if (prange->validated_once)
2806 		return;
2807 
2808 	r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
2809 				       prange->start << PAGE_SHIFT,
2810 				       prange->npages, &hmm_range,
2811 				       false, true, owner);
2812 	if (!r) {
2813 		amdgpu_hmm_range_get_pages_done(hmm_range);
2814 		prange->validated_once = true;
2815 	}
2816 }
2817 
2818 /* svm_range_trigger_migration - start page migration if prefetch loc changed
2819  * @mm: current process mm_struct
2820  * @prange: svm range structure
2821  * @migrated: output, true if migration is triggered
2822  *
2823  * If the range prefetch_loc is a GPU and the actual loc is cpu (0), migrate
2824  * the range from ram to vram.
2825  * If the range prefetch_loc is cpu (0) and the actual loc is a GPU, migrate
2826  * the range from vram to ram.
2827  *
2828  * If GPU vm fault retry is not enabled, migration interacts with the MMU
2829  * notifier and the restore work:
2830  * 1. migrate_vma_setup invalidates pages, the MMU notifier callback
2831  *    svm_range_evict stops all queues and schedules the restore work
2832  * 2. svm_range_restore_work waits for the migration to finish via
2833  *    a. svm_range_validate_vram taking prange->migrate_mutex
2834  *    b. svm_range_validate_ram HMM get pages waiting for the CPU fault handler
2835  * 3. the restore work updates the GPU mappings and resumes all queues.
2836  *
2837  * Context: Process context
2838  *
2839  * Return:
2840  * 0 - OK, otherwise - error code of migration
2841  */
2842 static int
2843 svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
2844 			    bool *migrated)
2845 {
2846 	uint32_t best_loc;
2847 	int r = 0;
2848 
2849 	*migrated = false;
2850 	best_loc = svm_range_best_prefetch_location(prange);
2851 
2852 	if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
2853 	    best_loc == prange->actual_loc)
2854 		return 0;
2855 
2856 	if (!best_loc) {
2857 		r = svm_migrate_vram_to_ram(prange, mm);
2858 		*migrated = !r;
2859 		return r;
2860 	}
2861 
2862 	r = svm_migrate_to_vram(prange, best_loc, mm);
2863 	*migrated = !r;
2864 
2865 	return r;
2866 }
2867 
2868 int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
2869 {
2870 	/* Dereferencing fence->svm_bo is safe here because the fence hasn't
2871 	 * signaled yet and we're under the protection of the fence->lock.
2872 	 * After the fence is signaled in svm_range_bo_release, we cannot get
2873 	 * here any more.
2874 	 *
2875 	 * Reference is dropped in svm_range_evict_svm_bo_worker.
2876 	 */
2877 	if (svm_bo_ref_unless_zero(fence->svm_bo)) {
2878 		WRITE_ONCE(fence->svm_bo->evicting, 1);
2879 		schedule_work(&fence->svm_bo->eviction_work);
2880 	}
2881 
2882 	return 0;
2883 }
2884 
2885 static void svm_range_evict_svm_bo_worker(struct work_struct *work)
2886 {
2887 	struct svm_range_bo *svm_bo;
2888 	struct kfd_process *p;
2889 	struct mm_struct *mm;
2890 
2891 	svm_bo = container_of(work, struct svm_range_bo, eviction_work);
2892 
2893 	/* svm_range_bo_release destroys this worker thread. So during
2894 	 * the lifetime of this thread, kfd_process and mm will be valid.
2895 	 */
2896 	p = container_of(svm_bo->svms, struct kfd_process, svms);
2897 	mm = p->mm;
2898 	if (!mm)
2899 		return;
2900 
2901 	mmap_read_lock(mm);
2902 	spin_lock(&svm_bo->list_lock);
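	/* Migrate every range that still uses this svm_bo back to system
	 * memory. list_lock is dropped around the migration because
	 * svm_migrate_vram_to_ram can sleep.
	 */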
2903 	while (!list_empty(&svm_bo->range_list)) {
2904 		struct svm_range *prange =
2905 				list_first_entry(&svm_bo->range_list,
2906 						struct svm_range, svm_bo_list);
2907 		list_del_init(&prange->svm_bo_list);
2908 		spin_unlock(&svm_bo->list_lock);
2909 
2910 		pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
2911 			 prange->start, prange->last);
2912 
2913 		mutex_lock(&prange->migrate_mutex);
2914 		svm_migrate_vram_to_ram(prange, svm_bo->eviction_fence->mm);
2915 
2916 		mutex_lock(&prange->lock);
2917 		prange->svm_bo = NULL;
2918 		mutex_unlock(&prange->lock);
2919 
2920 		mutex_unlock(&prange->migrate_mutex);
2921 
2922 		spin_lock(&svm_bo->list_lock);
2923 	}
2924 	spin_unlock(&svm_bo->list_lock);
2925 	mmap_read_unlock(mm);
2926 
2927 	dma_fence_signal(&svm_bo->eviction_fence->base);
2928 	/* This is the last reference to svm_bo, after svm_range_vram_node_free
2929 	 * has been called in svm_migrate_vram_to_ram
2930 	 */
2931 	WARN_ONCE(kref_read(&svm_bo->kref) != 1, "This was not the last reference\n");
2932 	svm_range_bo_unref(svm_bo);
2933 }
2934 
2935 static int
2936 svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
2937 		   uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
2938 {
2939 	struct mm_struct *mm = current->mm;
2940 	struct list_head update_list;
2941 	struct list_head insert_list;
2942 	struct list_head remove_list;
2943 	struct svm_range_list *svms;
2944 	struct svm_range *prange;
2945 	struct svm_range *next;
2946 	int r = 0;
2947 
2948 	pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
2949 		 p->pasid, &p->svms, start, start + size - 1, size);
2950 
2951 	r = svm_range_check_attr(p, nattr, attrs);
2952 	if (r)
2953 		return r;
2954 
2955 	svms = &p->svms;
2956 
2957 	svm_range_list_lock_and_flush_work(svms, mm);
2958 
2959 	if (!svm_range_is_valid(mm, start, size)) {
2960 		pr_debug("invalid range\n");
2961 		r = -EFAULT;
2962 		mmap_write_unlock(mm);
2963 		goto out;
2964 	}
2965 
2966 	mutex_lock(&svms->lock);
2967 
2968 	/* Add new range and split existing ranges as needed */
2969 	r = svm_range_add(p, start, size, nattr, attrs, &update_list,
2970 			  &insert_list, &remove_list);
2971 	if (r) {
2972 		mutex_unlock(&svms->lock);
2973 		mmap_write_unlock(mm);
2974 		goto out;
2975 	}
2976 	/* Apply changes as a transaction */
2977 	list_for_each_entry_safe(prange, next, &insert_list, insert_list) {
2978 		svm_range_add_to_svms(prange);
2979 		svm_range_add_notifier_locked(mm, prange);
2980 	}
2981 	list_for_each_entry(prange, &update_list, update_list) {
2982 		svm_range_apply_attrs(p, prange, nattr, attrs);
2983 		/* TODO: unmap ranges from GPU that lost access */
2984 	}
2985 	list_for_each_entry_safe(prange, next, &remove_list,
2986 				remove_list) {
2987 		pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2988 			 prange->svms, prange, prange->start,
2989 			 prange->last);
2990 		svm_range_unlink(prange);
2991 		svm_range_remove_notifier(prange);
2992 		svm_range_free(prange);
2993 	}
2994 
2995 	mmap_write_downgrade(mm);
2996 	/* Trigger migrations and revalidate and map to GPUs as needed. If
2997 	 * this fails we may be left with partially completed actions. There
2998 	 * is no clean way of rolling back to the previous state in such a
2999 	 * case because the rollback wouldn't be guaranteed to work either.
3000 	 */
3001 	list_for_each_entry(prange, &update_list, update_list) {
3002 		bool migrated;
3003 
3004 		mutex_lock(&prange->migrate_mutex);
3005 
3006 		r = svm_range_trigger_migration(mm, prange, &migrated);
3007 		if (r)
3008 			goto out_unlock_range;
3009 
3010 		if (migrated && !p->xnack_enabled) {
3011 			pr_debug("restore_work will update mappings of GPUs\n");
3012 			mutex_unlock(&prange->migrate_mutex);
3013 			continue;
3014 		}
3015 
3016 		r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
3017 					       true, true);
3018 		if (r)
3019 			pr_debug("failed %d to map svm range\n", r);
3020 
3021 out_unlock_range:
3022 		mutex_unlock(&prange->migrate_mutex);
3023 		if (r)
3024 			break;
3025 	}
3026 
3027 	svm_range_debug_dump(svms);
3028 
3029 	mutex_unlock(&svms->lock);
3030 	mmap_read_unlock(mm);
3031 out:
3032 	pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
3033 		 &p->svms, start, start + size - 1, r);
3034 
3035 	return r;
3036 }
3037 
3038 static int
3039 svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size,
3040 		   uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
3041 {
3042 	DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
3043 	DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
3044 	bool get_preferred_loc = false;
3045 	bool get_prefetch_loc = false;
3046 	bool get_granularity = false;
3047 	bool get_accessible = false;
3048 	bool get_flags = false;
3049 	uint64_t last = start + size - 1UL;
3050 	struct mm_struct *mm = current->mm;
3051 	uint8_t granularity = 0xff;
3052 	struct interval_tree_node *node;
3053 	struct svm_range_list *svms;
3054 	struct svm_range *prange;
3055 	uint32_t prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3056 	uint32_t location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3057 	uint32_t flags_and = 0xffffffff;
3058 	uint32_t flags_or = 0;
3059 	int gpuidx;
3060 	uint32_t i;
3061 
3062 	pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
3063 		 start + size - 1, nattr);
3064 
3065 	/* Flush pending deferred work to avoid racing with deferred actions from
3066 	 * previous memory map changes (e.g. munmap). Concurrent memory map changes
3067 	 * can still race with get_attr because we don't hold the mmap lock. But that
3068 	 * would be a race condition in the application anyway, and undefined
3069 	 * behaviour is acceptable in that case.
3070 	 */
3071 	flush_work(&p->svms.deferred_list_work);
3072 
3073 	mmap_read_lock(mm);
3074 	if (!svm_range_is_valid(mm, start, size)) {
3075 		pr_debug("invalid range\n");
3076 		mmap_read_unlock(mm);
3077 		return -EINVAL;
3078 	}
3079 	mmap_read_unlock(mm);
3080 
3081 	for (i = 0; i < nattr; i++) {
3082 		switch (attrs[i].type) {
3083 		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3084 			get_preferred_loc = true;
3085 			break;
3086 		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3087 			get_prefetch_loc = true;
3088 			break;
3089 		case KFD_IOCTL_SVM_ATTR_ACCESS:
3090 			get_accessible = true;
3091 			break;
3092 		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3093 		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3094 			get_flags = true;
3095 			break;
3096 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3097 			get_granularity = true;
3098 			break;
3099 		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
3100 		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
3101 			fallthrough;
3102 		default:
3103 			pr_debug("get invalid attr type 0x%x\n", attrs[i].type);
3104 			return -EINVAL;
3105 		}
3106 	}
3107 
3108 	svms = &p->svms;
3109 
3110 	mutex_lock(&svms->lock);
3111 
3112 	node = interval_tree_iter_first(&svms->objects, start, last);
3113 	if (!node) {
3114 		pr_debug("range attrs not found return default values\n");
3115 		svm_range_set_default_attributes(&location, &prefetch_loc,
3116 						 &granularity, &flags_and);
3117 		flags_or = flags_and;
3118 		if (p->xnack_enabled)
3119 			bitmap_copy(bitmap_access, svms->bitmap_supported,
3120 				    MAX_GPU_INSTANCE);
3121 		else
3122 			bitmap_zero(bitmap_access, MAX_GPU_INSTANCE);
3123 		bitmap_zero(bitmap_aip, MAX_GPU_INSTANCE);
3124 		goto fill_values;
3125 	}
3126 	bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE);
3127 	bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE);
3128 
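	/* Combine attributes of all ranges overlapping [start, last]: locations
	 * collapse to UNDEFINED unless identical everywhere, accessibility
	 * bitmaps are intersected, flags are accumulated with AND and OR, and
	 * the smallest granularity wins.
	 */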
3129 	while (node) {
3130 		struct interval_tree_node *next;
3131 
3132 		prange = container_of(node, struct svm_range, it_node);
3133 		next = interval_tree_iter_next(node, start, last);
3134 
3135 		if (get_preferred_loc) {
3136 			if (prange->preferred_loc ==
3137 					KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3138 			    (location != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3139 			     location != prange->preferred_loc)) {
3140 				location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3141 				get_preferred_loc = false;
3142 			} else {
3143 				location = prange->preferred_loc;
3144 			}
3145 		}
3146 		if (get_prefetch_loc) {
3147 			if (prange->prefetch_loc ==
3148 					KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3149 			    (prefetch_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3150 			     prefetch_loc != prange->prefetch_loc)) {
3151 				prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3152 				get_prefetch_loc = false;
3153 			} else {
3154 				prefetch_loc = prange->prefetch_loc;
3155 			}
3156 		}
3157 		if (get_accessible) {
3158 			bitmap_and(bitmap_access, bitmap_access,
3159 				   prange->bitmap_access, MAX_GPU_INSTANCE);
3160 			bitmap_and(bitmap_aip, bitmap_aip,
3161 				   prange->bitmap_aip, MAX_GPU_INSTANCE);
3162 		}
3163 		if (get_flags) {
3164 			flags_and &= prange->flags;
3165 			flags_or |= prange->flags;
3166 		}
3167 
3168 		if (get_granularity && prange->granularity < granularity)
3169 			granularity = prange->granularity;
3170 
3171 		node = next;
3172 	}
3173 fill_values:
3174 	mutex_unlock(&svms->lock);
3175 
3176 	for (i = 0; i < nattr; i++) {
3177 		switch (attrs[i].type) {
3178 		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3179 			attrs[i].value = location;
3180 			break;
3181 		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3182 			attrs[i].value = prefetch_loc;
3183 			break;
3184 		case KFD_IOCTL_SVM_ATTR_ACCESS:
3185 			gpuidx = kfd_process_gpuidx_from_gpuid(p,
3186 							       attrs[i].value);
3187 			if (gpuidx < 0) {
3188 				pr_debug("invalid gpuid %x\n", attrs[i].value);
3189 				return -EINVAL;
3190 			}
3191 			if (test_bit(gpuidx, bitmap_access))
3192 				attrs[i].type = KFD_IOCTL_SVM_ATTR_ACCESS;
3193 			else if (test_bit(gpuidx, bitmap_aip))
3194 				attrs[i].type =
3195 					KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE;
3196 			else
3197 				attrs[i].type = KFD_IOCTL_SVM_ATTR_NO_ACCESS;
3198 			break;
3199 		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3200 			attrs[i].value = flags_and;
3201 			break;
3202 		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3203 			attrs[i].value = ~flags_or;
3204 			break;
3205 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3206 			attrs[i].value = (uint32_t)granularity;
3207 			break;
3208 		}
3209 	}
3210 
3211 	return 0;
3212 }
3213 
3214 int
3215 svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
3216 	  uint64_t size, uint32_t nattrs, struct kfd_ioctl_svm_attribute *attrs)
3217 {
3218 	int r;
3219 
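	/* The ioctl passes address and size in bytes; convert them to pages. */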
3220 	start >>= PAGE_SHIFT;
3221 	size >>= PAGE_SHIFT;
3222 
3223 	switch (op) {
3224 	case KFD_IOCTL_SVM_OP_SET_ATTR:
3225 		r = svm_range_set_attr(p, start, size, nattrs, attrs);
3226 		break;
3227 	case KFD_IOCTL_SVM_OP_GET_ATTR:
3228 		r = svm_range_get_attr(p, start, size, nattrs, attrs);
3229 		break;
3230 	default:
3231 		r = -EINVAL;
3232 		break;
3233 	}
3234 
3235 	return r;
3236 }
3237