1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 
29 #include <linux/dma-fence-array.h>
30 #include <linux/interval_tree_generic.h>
31 #include <linux/idr.h>
32 #include <linux/dma-buf.h>
33 
34 #include <drm/amdgpu_drm.h>
35 #include <drm/drm_drv.h>
36 #include "amdgpu.h"
37 #include "amdgpu_trace.h"
38 #include "amdgpu_amdkfd.h"
39 #include "amdgpu_gmc.h"
40 #include "amdgpu_xgmi.h"
41 #include "amdgpu_dma_buf.h"
42 #include "amdgpu_res_cursor.h"
43 #include "kfd_svm.h"
44 
45 /**
46  * DOC: GPUVM
47  *
48  * GPUVM is similar to the legacy gart on older asics, however
49  * rather than there being a single global gart table
50  * for the entire GPU, there are multiple VM page tables active
51  * at any given time.  The VM page tables can contain a mix of
52  * vram pages and system memory pages, and system memory pages
53  * can be mapped as snooped (cached system pages) or unsnooped
54  * (uncached system pages).
55  * Each VM has an ID associated with it and there is a page table
56  * associated with each VMID.  When executing a command buffer,
57  * the kernel tells the ring what VMID to use for that command
58  * buffer.  VMIDs are allocated dynamically as commands are submitted.
59  * The userspace drivers maintain their own address space and the kernel
60  * sets up their page tables accordingly when they submit their
61  * command buffers and a VMID is assigned.
62  * Cayman/Trinity support up to 8 active VMs at any given time;
63  * SI supports 16.
64  */
65 
66 #define START(node) ((node)->start)
67 #define LAST(node) ((node)->last)
68 
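/*
 * bo_va mappings are indexed in a red-black interval tree keyed by their
 * [start, last] GPU page numbers, so looking up the mappings that overlap an
 * address range does not require walking every mapping in the VM.
 */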
69 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
70 		     START, LAST, static, amdgpu_vm_it)
71 
72 #undef START
73 #undef LAST
74 
75 /**
76  * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
77  */
78 struct amdgpu_prt_cb {
79 
80 	/**
81 	 * @adev: amdgpu device
82 	 */
83 	struct amdgpu_device *adev;
84 
85 	/**
86 	 * @cb: callback
87 	 */
88 	struct dma_fence_cb cb;
89 };
90 
91 /**
92  * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
93  *
94  * @adev: amdgpu_device pointer
95  * @vm: amdgpu_vm pointer
96  * @pasid: the pasid the VM is using on this GPU
97  *
98  * Set the pasid this VM is using on this GPU, can also be used to remove the
99  * pasid by passing in zero.
100  *
101  */
102 int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
103 			u32 pasid)
104 {
105 	int r;
106 
107 	if (vm->pasid == pasid)
108 		return 0;
109 
110 	if (vm->pasid) {
111 		r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
112 		if (r < 0)
113 			return r;
114 
115 		vm->pasid = 0;
116 	}
117 
118 	if (pasid) {
119 		r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
120 					GFP_KERNEL));
121 		if (r < 0)
122 			return r;
123 
124 		vm->pasid = pasid;
125 	}
126 
127 
128 	return 0;
129 }
130 
131 /*
132  * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
133  * happens while holding this lock anywhere to prevent deadlocks when
134  * an MMU notifier runs in reclaim-FS context.
135  */
136 static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
137 {
138 	mutex_lock(&vm->eviction_lock);
139 	vm->saved_flags = memalloc_noreclaim_save();
140 }
141 
142 static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
143 {
144 	if (mutex_trylock(&vm->eviction_lock)) {
145 		vm->saved_flags = memalloc_noreclaim_save();
146 		return 1;
147 	}
148 	return 0;
149 }
150 
151 static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
152 {
153 	memalloc_noreclaim_restore(vm->saved_flags);
154 	mutex_unlock(&vm->eviction_lock);
155 }
156 
157 /**
158  * amdgpu_vm_level_shift - return the addr shift for each level
159  *
160  * @adev: amdgpu_device pointer
161  * @level: VMPT level
162  *
163  * Returns:
164  * The number of bits the pfn needs to be right shifted for a level.
165  */
166 static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
167 				      unsigned level)
168 {
169 	switch (level) {
170 	case AMDGPU_VM_PDB2:
171 	case AMDGPU_VM_PDB1:
172 	case AMDGPU_VM_PDB0:
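		/* Each PD level above PDB0 shifts by another 9 bits; PDB0
		 * itself shifts by block_size, the range covered by one PTB.
		 */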
173 		return 9 * (AMDGPU_VM_PDB0 - level) +
174 			adev->vm_manager.block_size;
175 	case AMDGPU_VM_PTB:
176 		return 0;
177 	default:
178 		return ~0;
179 	}
180 }
181 
182 /**
183  * amdgpu_vm_num_entries - return the number of entries in a PD/PT
184  *
185  * @adev: amdgpu_device pointer
186  * @level: VMPT level
187  *
188  * Returns:
189  * The number of entries in a page directory or page table.
190  */
191 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
192 				      unsigned level)
193 {
194 	unsigned shift = amdgpu_vm_level_shift(adev,
195 					       adev->vm_manager.root_level);
196 
197 	if (level == adev->vm_manager.root_level)
198 		/* For the root directory */
199 		return round_up(adev->vm_manager.max_pfn, 1ULL << shift)
200 			>> shift;
201 	else if (level != AMDGPU_VM_PTB)
202 		/* Everything in between */
203 		return 512;
204 	else
205 		/* For the page tables on the leaves */
206 		return AMDGPU_VM_PTE_COUNT(adev);
207 }
208 
209 /**
210  * amdgpu_vm_num_ats_entries - return the number of ATS entries in the root PD
211  *
212  * @adev: amdgpu_device pointer
213  *
214  * Returns:
215  * The number of entries in the root page directory which need the ATS setting.
216  */
217 static unsigned amdgpu_vm_num_ats_entries(struct amdgpu_device *adev)
218 {
219 	unsigned shift;
220 
221 	shift = amdgpu_vm_level_shift(adev, adev->vm_manager.root_level);
222 	return AMDGPU_GMC_HOLE_START >> (shift + AMDGPU_GPU_PAGE_SHIFT);
223 }
224 
225 /**
226  * amdgpu_vm_entries_mask - the mask to get the entry number of a PD/PT
227  *
228  * @adev: amdgpu_device pointer
229  * @level: VMPT level
230  *
231  * Returns:
232  * The mask to extract the entry number of a PD/PT from an address.
233  */
234 static uint32_t amdgpu_vm_entries_mask(struct amdgpu_device *adev,
235 				       unsigned int level)
236 {
237 	if (level <= adev->vm_manager.root_level)
238 		return 0xffffffff;
239 	else if (level != AMDGPU_VM_PTB)
240 		return 0x1ff;
241 	else
242 		return AMDGPU_VM_PTE_COUNT(adev) - 1;
243 }
244 
245 /**
246  * amdgpu_vm_bo_size - returns the size of the BOs in bytes
247  *
248  * @adev: amdgpu_device pointer
249  * @level: VMPT level
250  *
251  * Returns:
252  * The size of the BO for a page directory or page table in bytes.
253  */
254 static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
255 {
256 	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
257 }
258 
259 /**
260  * amdgpu_vm_bo_evicted - vm_bo is evicted
261  *
262  * @vm_bo: vm_bo which is evicted
263  *
264  * State for PDs/PTs and per VM BOs which are not at the location they should
265  * be.
266  */
267 static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
268 {
269 	struct amdgpu_vm *vm = vm_bo->vm;
270 	struct amdgpu_bo *bo = vm_bo->bo;
271 
272 	vm_bo->moved = true;
273 	if (bo->tbo.type == ttm_bo_type_kernel)
274 		list_move(&vm_bo->vm_status, &vm->evicted);
275 	else
276 		list_move_tail(&vm_bo->vm_status, &vm->evicted);
277 }
278 /**
279  * amdgpu_vm_bo_moved - vm_bo is moved
280  *
281  * @vm_bo: vm_bo which is moved
282  *
283  * State for per VM BOs which are moved, but that change is not yet reflected
284  * in the page tables.
285  */
286 static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
287 {
288 	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
289 }
290 
291 /**
292  * amdgpu_vm_bo_idle - vm_bo is idle
293  *
294  * @vm_bo: vm_bo which is now idle
295  *
296  * State for PDs/PTs and per VM BOs which have gone through the state machine
297  * and are now idle.
298  */
299 static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
300 {
301 	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
302 	vm_bo->moved = false;
303 }
304 
305 /**
306  * amdgpu_vm_bo_invalidated - vm_bo is invalidated
307  *
308  * @vm_bo: vm_bo which is now invalidated
309  *
310  * State for normal BOs which are invalidated and that change is not yet reflected
311  * in the PTs.
312  */
313 static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
314 {
315 	spin_lock(&vm_bo->vm->invalidated_lock);
316 	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
317 	spin_unlock(&vm_bo->vm->invalidated_lock);
318 }
319 
320 /**
321  * amdgpu_vm_bo_relocated - vm_bo is relocated
322  *
323  * @vm_bo: vm_bo which is relocated
324  *
325  * State for PDs/PTs which need to update their parent PD.
326  * For the root PD, just move to idle state.
327  */
328 static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
329 {
330 	if (vm_bo->bo->parent)
331 		list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
332 	else
333 		amdgpu_vm_bo_idle(vm_bo);
334 }
335 
336 /**
337  * amdgpu_vm_bo_done - vm_bo is done
338  *
339  * @vm_bo: vm_bo which is now done
340  *
341  * State for normal BOs which are invalidated and that change has been updated
342  * in the PTs.
343  */
344 static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
345 {
346 	spin_lock(&vm_bo->vm->invalidated_lock);
347 	list_move(&vm_bo->vm_status, &vm_bo->vm->done);
348 	spin_unlock(&vm_bo->vm->invalidated_lock);
349 }
350 
351 /**
352  * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
353  *
354  * @base: base structure for tracking BO usage in a VM
355  * @vm: vm to which bo is to be added
356  * @bo: amdgpu buffer object
357  *
358  * Initialize a bo_va_base structure and add it to the appropriate lists
359  *
360  */
361 static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
362 				   struct amdgpu_vm *vm,
363 				   struct amdgpu_bo *bo)
364 {
365 	base->vm = vm;
366 	base->bo = bo;
367 	base->next = NULL;
368 	INIT_LIST_HEAD(&base->vm_status);
369 
370 	if (!bo)
371 		return;
372 	base->next = bo->vm_bo;
373 	bo->vm_bo = base;
374 
375 	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
376 		return;
377 
378 	vm->bulk_moveable = false;
379 	if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
380 		amdgpu_vm_bo_relocated(base);
381 	else
382 		amdgpu_vm_bo_idle(base);
383 
384 	if (bo->preferred_domains &
385 	    amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
386 		return;
387 
388 	/*
389 	 * we checked all the prerequisites, but it looks like this per vm bo
390 	 * is currently evicted. add the bo to the evicted list to make sure it
391 	 * is validated on next vm use to avoid fault.
392 	 */
393 	amdgpu_vm_bo_evicted(base);
394 }
395 
396 /**
397  * amdgpu_vm_pt_parent - get the parent page directory
398  *
399  * @pt: child page table
400  *
401  * Helper to get the parent entry for the child page table. NULL if we are at
402  * the root page directory.
403  */
404 static struct amdgpu_vm_bo_base *amdgpu_vm_pt_parent(struct amdgpu_vm_bo_base *pt)
405 {
406 	struct amdgpu_bo *parent = pt->bo->parent;
407 
408 	if (!parent)
409 		return NULL;
410 
411 	return parent->vm_bo;
412 }
413 
414 /*
415  * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
416  */
417 struct amdgpu_vm_pt_cursor {
418 	uint64_t pfn;
419 	struct amdgpu_vm_bo_base *parent;
420 	struct amdgpu_vm_bo_base *entry;
421 	unsigned level;
422 };
423 
424 /**
425  * amdgpu_vm_pt_start - start PD/PT walk
426  *
427  * @adev: amdgpu_device pointer
428  * @vm: amdgpu_vm structure
429  * @start: start address of the walk
430  * @cursor: state to initialize
431  *
432  * Initialize a amdgpu_vm_pt_cursor to start a walk.
433  */
434 static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
435 			       struct amdgpu_vm *vm, uint64_t start,
436 			       struct amdgpu_vm_pt_cursor *cursor)
437 {
438 	cursor->pfn = start;
439 	cursor->parent = NULL;
440 	cursor->entry = &vm->root;
441 	cursor->level = adev->vm_manager.root_level;
442 }
443 
444 /**
445  * amdgpu_vm_pt_descendant - go to child node
446  *
447  * @adev: amdgpu_device pointer
448  * @cursor: current state
449  *
450  * Walk to the child node of the current node.
451  * Returns:
452  * True if the walk was possible, false otherwise.
453  */
454 static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
455 				    struct amdgpu_vm_pt_cursor *cursor)
456 {
457 	unsigned mask, shift, idx;
458 
459 	if ((cursor->level == AMDGPU_VM_PTB) || !cursor->entry ||
460 	    !cursor->entry->bo)
461 		return false;
462 
463 	mask = amdgpu_vm_entries_mask(adev, cursor->level);
464 	shift = amdgpu_vm_level_shift(adev, cursor->level);
465 
466 	++cursor->level;
467 	idx = (cursor->pfn >> shift) & mask;
468 	cursor->parent = cursor->entry;
469 	cursor->entry = &to_amdgpu_bo_vm(cursor->entry->bo)->entries[idx];
470 	return true;
471 }
472 
473 /**
474  * amdgpu_vm_pt_sibling - go to sibling node
475  *
476  * @adev: amdgpu_device pointer
477  * @cursor: current state
478  *
479  * Walk to the sibling node of the current node.
480  * Returns:
481  * True if the walk was possible, false otherwise.
482  */
483 static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
484 				 struct amdgpu_vm_pt_cursor *cursor)
485 {
486 	unsigned shift, num_entries;
487 
488 	/* Root doesn't have a sibling */
489 	if (!cursor->parent)
490 		return false;
491 
492 	/* Go to our parents and see if we got a sibling */
493 	shift = amdgpu_vm_level_shift(adev, cursor->level - 1);
494 	num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1);
495 
496 	if (cursor->entry == &to_amdgpu_bo_vm(cursor->parent->bo)->entries[num_entries - 1])
497 		return false;
498 
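	/* A PD's children live in one contiguous entries[] array, so the
	 * sibling is simply the next array element; also advance the pfn to
	 * the start of the address range that sibling covers.
	 */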
499 	cursor->pfn += 1ULL << shift;
500 	cursor->pfn &= ~((1ULL << shift) - 1);
501 	++cursor->entry;
502 	return true;
503 }
504 
505 /**
506  * amdgpu_vm_pt_ancestor - go to parent node
507  *
508  * @cursor: current state
509  *
510  * Walk to the parent node of the current node.
511  * Returns:
512  * True if the walk was possible, false otherwise.
513  */
514 static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
515 {
516 	if (!cursor->parent)
517 		return false;
518 
519 	--cursor->level;
520 	cursor->entry = cursor->parent;
521 	cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
522 	return true;
523 }
524 
525 /**
526  * amdgpu_vm_pt_next - get next PD/PT in hierarchy
527  *
528  * @adev: amdgpu_device pointer
529  * @cursor: current state
530  *
531  * Walk the PD/PT tree to the next node.
532  */
533 static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
534 			      struct amdgpu_vm_pt_cursor *cursor)
535 {
536 	/* First try a newborn child */
537 	if (amdgpu_vm_pt_descendant(adev, cursor))
538 		return;
539 
540 	/* If that didn't work, try to find a sibling */
541 	while (!amdgpu_vm_pt_sibling(adev, cursor)) {
542 		/* No sibling, go to our parents and grandparents */
543 		if (!amdgpu_vm_pt_ancestor(cursor)) {
544 			cursor->pfn = ~0ll;
545 			return;
546 		}
547 	}
548 }
549 
550 /**
551  * amdgpu_vm_pt_first_dfs - start a depth-first search
552  *
553  * @adev: amdgpu_device structure
554  * @vm: amdgpu_vm structure
555  * @start: optional cursor to start with
556  * @cursor: state to initialize
557  *
558  * Starts a depth-first traversal of the PD/PT tree.
559  */
560 static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
561 				   struct amdgpu_vm *vm,
562 				   struct amdgpu_vm_pt_cursor *start,
563 				   struct amdgpu_vm_pt_cursor *cursor)
564 {
565 	if (start)
566 		*cursor = *start;
567 	else
568 		amdgpu_vm_pt_start(adev, vm, 0, cursor);
569 	while (amdgpu_vm_pt_descendant(adev, cursor));
570 }
571 
572 /**
573  * amdgpu_vm_pt_continue_dfs - check if the depth-first search should continue
574  *
575  * @start: starting point for the search
576  * @entry: current entry
577  *
578  * Returns:
579  * True when the search should continue, false otherwise.
580  */
581 static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
582 				      struct amdgpu_vm_bo_base *entry)
583 {
584 	return entry && (!start || entry != start->entry);
585 }
586 
587 /**
588  * amdgpu_vm_pt_next_dfs - get the next node for a depth-first search
589  *
590  * @adev: amdgpu_device structure
591  * @cursor: current state
592  *
593  * Move the cursor to the next node in a depth-first search.
594  */
595 static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
596 				  struct amdgpu_vm_pt_cursor *cursor)
597 {
598 	if (!cursor->entry)
599 		return;
600 
601 	if (!cursor->parent)
602 		cursor->entry = NULL;
603 	else if (amdgpu_vm_pt_sibling(adev, cursor))
604 		while (amdgpu_vm_pt_descendant(adev, cursor));
605 	else
606 		amdgpu_vm_pt_ancestor(cursor);
607 }
608 
609 /*
610  * for_each_amdgpu_vm_pt_dfs_safe - safe depth-first search of all PDs/PTs
611  */
612 #define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)		\
613 	for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)),		\
614 	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
615 	     amdgpu_vm_pt_continue_dfs((start), (entry));			\
616 	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))
617 
618 /**
619  * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
620  *
621  * @vm: vm providing the BOs
622  * @validated: head of validation list
623  * @entry: entry to add
624  *
625  * Add the page directory to the list of BOs to
626  * validate for command submission.
627  */
628 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
629 			 struct list_head *validated,
630 			 struct amdgpu_bo_list_entry *entry)
631 {
632 	entry->priority = 0;
633 	entry->tv.bo = &vm->root.bo->tbo;
634 	/* Two for VM updates, one for TTM and one for the CS job */
635 	entry->tv.num_shared = 4;
636 	entry->user_pages = NULL;
637 	list_add(&entry->tv.head, validated);
638 }
639 
640 /**
641  * amdgpu_vm_del_from_lru_notify - update bulk_moveable flag
642  *
643  * @bo: BO which was removed from the LRU
644  *
645  * Make sure the bulk_moveable flag is updated when a BO is removed from the
646  * LRU.
647  */
648 void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
649 {
650 	struct amdgpu_bo *abo;
651 	struct amdgpu_vm_bo_base *bo_base;
652 
653 	if (!amdgpu_bo_is_amdgpu_bo(bo))
654 		return;
655 
656 	if (bo->pin_count)
657 		return;
658 
659 	abo = ttm_to_amdgpu_bo(bo);
660 	if (!abo->parent)
661 		return;
662 	for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) {
663 		struct amdgpu_vm *vm = bo_base->vm;
664 
665 		if (abo->tbo.base.resv == vm->root.bo->tbo.base.resv)
666 			vm->bulk_moveable = false;
667 	}
668 
669 }
670 /**
671  * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
672  *
673  * @adev: amdgpu device pointer
674  * @vm: vm providing the BOs
675  *
676  * Move all BOs to the end of LRU and remember their positions to put them
677  * together.
678  */
679 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
680 				struct amdgpu_vm *vm)
681 {
682 	struct amdgpu_vm_bo_base *bo_base;
683 
684 	if (vm->bulk_moveable) {
685 		spin_lock(&adev->mman.bdev.lru_lock);
686 		ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
687 		spin_unlock(&adev->mman.bdev.lru_lock);
688 		return;
689 	}
690 
691 	memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
692 
693 	spin_lock(&adev->mman.bdev.lru_lock);
694 	list_for_each_entry(bo_base, &vm->idle, vm_status) {
695 		struct amdgpu_bo *bo = bo_base->bo;
696 		struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo);
697 
698 		if (!bo->parent)
699 			continue;
700 
701 		ttm_bo_move_to_lru_tail(&bo->tbo, bo->tbo.resource,
702 					&vm->lru_bulk_move);
703 		if (shadow)
704 			ttm_bo_move_to_lru_tail(&shadow->tbo,
705 						shadow->tbo.resource,
706 						&vm->lru_bulk_move);
707 	}
708 	spin_unlock(&adev->mman.bdev.lru_lock);
709 
710 	vm->bulk_moveable = true;
711 }
712 
713 /**
714  * amdgpu_vm_validate_pt_bos - validate the page table BOs
715  *
716  * @adev: amdgpu device pointer
717  * @vm: vm providing the BOs
718  * @validate: callback to do the validation
719  * @param: parameter for the validation callback
720  *
721  * Validate the page table BOs on command submission if necessary.
722  *
723  * Returns:
724  * Validation result.
725  */
726 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
727 			      int (*validate)(void *p, struct amdgpu_bo *bo),
728 			      void *param)
729 {
730 	struct amdgpu_vm_bo_base *bo_base, *tmp;
731 	int r;
732 
733 	vm->bulk_moveable &= list_empty(&vm->evicted);
734 
735 	list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
736 		struct amdgpu_bo *bo = bo_base->bo;
737 		struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo);
738 
739 		r = validate(param, bo);
740 		if (r)
741 			return r;
742 		if (shadow) {
743 			r = validate(param, shadow);
744 			if (r)
745 				return r;
746 		}
747 
748 		if (bo->tbo.type != ttm_bo_type_kernel) {
749 			amdgpu_vm_bo_moved(bo_base);
750 		} else {
751 			vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
752 			amdgpu_vm_bo_relocated(bo_base);
753 		}
754 	}
755 
756 	amdgpu_vm_eviction_lock(vm);
757 	vm->evicting = false;
758 	amdgpu_vm_eviction_unlock(vm);
759 
760 	return 0;
761 }
762 
763 /**
764  * amdgpu_vm_ready - check VM is ready for updates
765  *
766  * @vm: VM to check
767  *
768  * Check if all VM PDs/PTs are ready for updates
769  *
770  * Returns:
771  * True if VM is not evicting.
772  */
773 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
774 {
775 	bool ret;
776 
777 	amdgpu_vm_eviction_lock(vm);
778 	ret = !vm->evicting;
779 	amdgpu_vm_eviction_unlock(vm);
780 
781 	return ret && list_empty(&vm->evicted);
782 }
783 
784 /**
785  * amdgpu_vm_clear_bo - initially clear the PDs/PTs
786  *
787  * @adev: amdgpu_device pointer
788  * @vm: VM to clear BO from
789  * @vmbo: BO to clear
790  * @immediate: use an immediate update
791  *
792  * Root PD needs to be reserved when calling this.
793  *
794  * Returns:
795  * 0 on success, errno otherwise.
796  */
797 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
798 			      struct amdgpu_vm *vm,
799 			      struct amdgpu_bo_vm *vmbo,
800 			      bool immediate)
801 {
802 	struct ttm_operation_ctx ctx = { true, false };
803 	unsigned level = adev->vm_manager.root_level;
804 	struct amdgpu_vm_update_params params;
805 	struct amdgpu_bo *ancestor = &vmbo->bo;
806 	struct amdgpu_bo *bo = &vmbo->bo;
807 	unsigned entries, ats_entries;
808 	uint64_t addr;
809 	int r;
810 
811 	/* Figure out our place in the hierarchy */
812 	if (ancestor->parent) {
813 		++level;
814 		while (ancestor->parent->parent) {
815 			++level;
816 			ancestor = ancestor->parent;
817 		}
818 	}
819 
820 	entries = amdgpu_bo_size(bo) / 8;
821 	if (!vm->pte_support_ats) {
822 		ats_entries = 0;
823 
824 	} else if (!bo->parent) {
825 		ats_entries = amdgpu_vm_num_ats_entries(adev);
826 		ats_entries = min(ats_entries, entries);
827 		entries -= ats_entries;
828 
829 	} else {
830 		struct amdgpu_vm_bo_base *pt;
831 
832 		pt = ancestor->vm_bo;
833 		ats_entries = amdgpu_vm_num_ats_entries(adev);
834 		if ((pt - to_amdgpu_bo_vm(vm->root.bo)->entries) >= ats_entries) {
835 			ats_entries = 0;
836 		} else {
837 			ats_entries = entries;
838 			entries = 0;
839 		}
840 	}
841 
842 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
843 	if (r)
844 		return r;
845 
846 	if (vmbo->shadow) {
847 		struct amdgpu_bo *shadow = vmbo->shadow;
848 
849 		r = ttm_bo_validate(&shadow->tbo, &shadow->placement, &ctx);
850 		if (r)
851 			return r;
852 	}
853 
854 	r = vm->update_funcs->map_table(vmbo);
855 	if (r)
856 		return r;
857 
858 	memset(&params, 0, sizeof(params));
859 	params.adev = adev;
860 	params.vm = vm;
861 	params.immediate = immediate;
862 
863 	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
864 	if (r)
865 		return r;
866 
867 	addr = 0;
868 	if (ats_entries) {
869 		uint64_t value = 0, flags;
870 
871 		flags = AMDGPU_PTE_DEFAULT_ATC;
872 		if (level != AMDGPU_VM_PTB) {
873 			/* Handle leaf PDEs as PTEs */
874 			flags |= AMDGPU_PDE_PTE;
875 			amdgpu_gmc_get_vm_pde(adev, level, &value, &flags);
876 		}
877 
878 		r = vm->update_funcs->update(&params, vmbo, addr, 0, ats_entries,
879 					     value, flags);
880 		if (r)
881 			return r;
882 
883 		addr += ats_entries * 8;
884 	}
885 
886 	if (entries) {
887 		uint64_t value = 0, flags = 0;
888 
889 		if (adev->asic_type >= CHIP_VEGA10) {
890 			if (level != AMDGPU_VM_PTB) {
891 				/* Handle leaf PDEs as PTEs */
892 				flags |= AMDGPU_PDE_PTE;
893 				amdgpu_gmc_get_vm_pde(adev, level,
894 						      &value, &flags);
895 			} else {
896 				/* Workaround for fault priority problem on GMC9 */
897 				flags = AMDGPU_PTE_EXECUTABLE;
898 			}
899 		}
900 
901 		r = vm->update_funcs->update(&params, vmbo, addr, 0, entries,
902 					     value, flags);
903 		if (r)
904 			return r;
905 	}
906 
907 	return vm->update_funcs->commit(&params, NULL);
908 }
909 
910 /**
911  * amdgpu_vm_pt_create - create bo for PD/PT
912  *
913  * @adev: amdgpu_device pointer
914  * @vm: requesting vm
915  * @level: the page table level
916  * @immediate: use an immediate update
917  * @vmbo: pointer to the buffer object pointer
918  */
919 static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
920 			       struct amdgpu_vm *vm,
921 			       int level, bool immediate,
922 			       struct amdgpu_bo_vm **vmbo)
923 {
924 	struct amdgpu_bo_param bp;
925 	struct amdgpu_bo *bo;
926 	struct dma_resv *resv;
927 	unsigned int num_entries;
928 	int r;
929 
930 	memset(&bp, 0, sizeof(bp));
931 
932 	bp.size = amdgpu_vm_bo_size(adev, level);
933 	bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
934 	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
935 	bp.domain = amdgpu_bo_get_preferred_domain(adev, bp.domain);
936 	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
937 		AMDGPU_GEM_CREATE_CPU_GTT_USWC;
938 
939 	if (level < AMDGPU_VM_PTB)
940 		num_entries = amdgpu_vm_num_entries(adev, level);
941 	else
942 		num_entries = 0;
943 
944 	bp.bo_ptr_size = struct_size((*vmbo), entries, num_entries);
945 
946 	if (vm->use_cpu_for_update)
947 		bp.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
948 
949 	bp.type = ttm_bo_type_kernel;
950 	bp.no_wait_gpu = immediate;
951 	if (vm->root.bo)
952 		bp.resv = vm->root.bo->tbo.base.resv;
953 
954 	r = amdgpu_bo_create_vm(adev, &bp, vmbo);
955 	if (r)
956 		return r;
957 
958 	bo = &(*vmbo)->bo;
959 	if (vm->is_compute_context || (adev->flags & AMD_IS_APU)) {
960 		(*vmbo)->shadow = NULL;
961 		return 0;
962 	}
963 
964 	if (!bp.resv)
965 		WARN_ON(dma_resv_lock(bo->tbo.base.resv,
966 				      NULL));
967 	resv = bp.resv;
968 	memset(&bp, 0, sizeof(bp));
969 	bp.size = amdgpu_vm_bo_size(adev, level);
970 	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
971 	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
972 	bp.type = ttm_bo_type_kernel;
973 	bp.resv = bo->tbo.base.resv;
974 	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
975 
976 	r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow);
977 
978 	if (!resv)
979 		dma_resv_unlock(bo->tbo.base.resv);
980 
981 	if (r) {
982 		amdgpu_bo_unref(&bo);
983 		return r;
984 	}
985 
986 	amdgpu_bo_add_to_shadow_list(*vmbo);
987 
988 	return 0;
989 }
990 
991 /**
992  * amdgpu_vm_alloc_pts - Allocate a specific page table
993  *
994  * @adev: amdgpu_device pointer
995  * @vm: VM to allocate page tables for
996  * @cursor: Which page table to allocate
997  * @immediate: use an immediate update
998  *
999  * Make sure a specific page table or directory is allocated.
1000  *
1001  * Returns:
1002  * 0 if the page table was allocated or was already present, negative errno
1003  * if an error occurred.
1004  */
1005 static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
1006 			       struct amdgpu_vm *vm,
1007 			       struct amdgpu_vm_pt_cursor *cursor,
1008 			       bool immediate)
1009 {
1010 	struct amdgpu_vm_bo_base *entry = cursor->entry;
1011 	struct amdgpu_bo *pt_bo;
1012 	struct amdgpu_bo_vm *pt;
1013 	int r;
1014 
1015 	if (entry->bo)
1016 		return 0;
1017 
1018 	r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt);
1019 	if (r)
1020 		return r;
1021 
1022 	/* Keep a reference to the root directory to avoid
1023 	 * freeing them up in the wrong order.
1024 	 */
1025 	pt_bo = &pt->bo;
1026 	pt_bo->parent = amdgpu_bo_ref(cursor->parent->bo);
1027 	amdgpu_vm_bo_base_init(entry, vm, pt_bo);
1028 	r = amdgpu_vm_clear_bo(adev, vm, pt, immediate);
1029 	if (r)
1030 		goto error_free_pt;
1031 
1032 	return 0;
1033 
1034 error_free_pt:
1035 	amdgpu_bo_unref(&pt->shadow);
1036 	amdgpu_bo_unref(&pt_bo);
1037 	return r;
1038 }
1039 
1040 /**
1041  * amdgpu_vm_free_table - free one PD/PT
1042  *
1043  * @entry: PDE to free
1044  */
1045 static void amdgpu_vm_free_table(struct amdgpu_vm_bo_base *entry)
1046 {
1047 	struct amdgpu_bo *shadow;
1048 
1049 	if (!entry->bo)
1050 		return;
1051 	shadow = amdgpu_bo_shadowed(entry->bo);
1052 	entry->bo->vm_bo = NULL;
1053 	list_del(&entry->vm_status);
1054 	amdgpu_bo_unref(&shadow);
1055 	amdgpu_bo_unref(&entry->bo);
1056 }
1057 
1058 /**
1059  * amdgpu_vm_free_pts - free PD/PT levels
1060  *
1061  * @adev: amdgpu device structure
1062  * @vm: amdgpu vm structure
1063  * @start: optional cursor where to start freeing PDs/PTs
1064  *
1065  * Free the page directory or page table level and all sub levels.
1066  */
1067 static void amdgpu_vm_free_pts(struct amdgpu_device *adev,
1068 			       struct amdgpu_vm *vm,
1069 			       struct amdgpu_vm_pt_cursor *start)
1070 {
1071 	struct amdgpu_vm_pt_cursor cursor;
1072 	struct amdgpu_vm_bo_base *entry;
1073 
1074 	vm->bulk_moveable = false;
1075 
1076 	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
1077 		amdgpu_vm_free_table(entry);
1078 
1079 	if (start)
1080 		amdgpu_vm_free_table(start->entry);
1081 }
1082 
1083 /**
1084  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
1085  *
1086  * @adev: amdgpu_device pointer
1087  */
1088 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
1089 {
1090 	const struct amdgpu_ip_block *ip_block;
1091 	bool has_compute_vm_bug;
1092 	struct amdgpu_ring *ring;
1093 	int i;
1094 
1095 	has_compute_vm_bug = false;
1096 
1097 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
1098 	if (ip_block) {
1099 		/* Compute has a VM bug for GFX version < 7.
1100 		   Compute has a VM bug for GFX 8 MEC firmware version < 673.*/
1101 		if (ip_block->version->major <= 7)
1102 			has_compute_vm_bug = true;
1103 		else if (ip_block->version->major == 8)
1104 			if (adev->gfx.mec_fw_version < 673)
1105 				has_compute_vm_bug = true;
1106 	}
1107 
1108 	for (i = 0; i < adev->num_rings; i++) {
1109 		ring = adev->rings[i];
1110 		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
1111 			/* only compute rings */
1112 			ring->has_compute_vm_bug = has_compute_vm_bug;
1113 		else
1114 			ring->has_compute_vm_bug = false;
1115 	}
1116 }
1117 
1118 /**
1119  * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
1120  *
1121  * @ring: ring on which the job will be submitted
1122  * @job: job to submit
1123  *
1124  * Returns:
1125  * True if sync is needed.
1126  */
1127 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
1128 				  struct amdgpu_job *job)
1129 {
1130 	struct amdgpu_device *adev = ring->adev;
1131 	unsigned vmhub = ring->funcs->vmhub;
1132 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
1133 	struct amdgpu_vmid *id;
1134 	bool gds_switch_needed;
1135 	bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
1136 
1137 	if (job->vmid == 0)
1138 		return false;
1139 	id = &id_mgr->ids[job->vmid];
1140 	gds_switch_needed = ring->funcs->emit_gds_switch && (
1141 		id->gds_base != job->gds_base ||
1142 		id->gds_size != job->gds_size ||
1143 		id->gws_base != job->gws_base ||
1144 		id->gws_size != job->gws_size ||
1145 		id->oa_base != job->oa_base ||
1146 		id->oa_size != job->oa_size);
1147 
1148 	if (amdgpu_vmid_had_gpu_reset(adev, id))
1149 		return true;
1150 
1151 	return vm_flush_needed || gds_switch_needed;
1152 }
1153 
1154 /**
1155  * amdgpu_vm_flush - hardware flush the vm
1156  *
1157  * @ring: ring to use for flush
1158  * @job:  related job
1159  * @need_pipe_sync: is pipe sync needed
1160  *
1161  * Emit a VM flush when it is necessary.
1162  *
1163  * Returns:
1164  * 0 on success, errno otherwise.
1165  */
1166 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
1167 		    bool need_pipe_sync)
1168 {
1169 	struct amdgpu_device *adev = ring->adev;
1170 	unsigned vmhub = ring->funcs->vmhub;
1171 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
1172 	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
1173 	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
1174 		id->gds_base != job->gds_base ||
1175 		id->gds_size != job->gds_size ||
1176 		id->gws_base != job->gws_base ||
1177 		id->gws_size != job->gws_size ||
1178 		id->oa_base != job->oa_base ||
1179 		id->oa_size != job->oa_size);
1180 	bool vm_flush_needed = job->vm_needs_flush;
1181 	struct dma_fence *fence = NULL;
1182 	bool pasid_mapping_needed = false;
1183 	unsigned patch_offset = 0;
1184 	bool update_spm_vmid_needed = (job->vm && (job->vm->reserved_vmid[vmhub] != NULL));
1185 	int r;
1186 
1187 	if (update_spm_vmid_needed && adev->gfx.rlc.funcs->update_spm_vmid)
1188 		adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);
1189 
1190 	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
1191 		gds_switch_needed = true;
1192 		vm_flush_needed = true;
1193 		pasid_mapping_needed = true;
1194 	}
1195 
1196 	mutex_lock(&id_mgr->lock);
1197 	if (id->pasid != job->pasid || !id->pasid_mapping ||
1198 	    !dma_fence_is_signaled(id->pasid_mapping))
1199 		pasid_mapping_needed = true;
1200 	mutex_unlock(&id_mgr->lock);
1201 
1202 	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
1203 	vm_flush_needed &= !!ring->funcs->emit_vm_flush  &&
1204 			job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
1205 	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
1206 		ring->funcs->emit_wreg;
1207 
1208 	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
1209 		return 0;
1210 
1211 	if (ring->funcs->init_cond_exec)
1212 		patch_offset = amdgpu_ring_init_cond_exec(ring);
1213 
1214 	if (need_pipe_sync)
1215 		amdgpu_ring_emit_pipeline_sync(ring);
1216 
1217 	if (vm_flush_needed) {
1218 		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
1219 		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
1220 	}
1221 
1222 	if (pasid_mapping_needed)
1223 		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
1224 
1225 	if (vm_flush_needed || pasid_mapping_needed) {
1226 		r = amdgpu_fence_emit(ring, &fence, NULL, 0);
1227 		if (r)
1228 			return r;
1229 	}
1230 
1231 	if (vm_flush_needed) {
1232 		mutex_lock(&id_mgr->lock);
1233 		dma_fence_put(id->last_flush);
1234 		id->last_flush = dma_fence_get(fence);
1235 		id->current_gpu_reset_count =
1236 			atomic_read(&adev->gpu_reset_counter);
1237 		mutex_unlock(&id_mgr->lock);
1238 	}
1239 
1240 	if (pasid_mapping_needed) {
1241 		mutex_lock(&id_mgr->lock);
1242 		id->pasid = job->pasid;
1243 		dma_fence_put(id->pasid_mapping);
1244 		id->pasid_mapping = dma_fence_get(fence);
1245 		mutex_unlock(&id_mgr->lock);
1246 	}
1247 	dma_fence_put(fence);
1248 
1249 	if (ring->funcs->emit_gds_switch && gds_switch_needed) {
1250 		id->gds_base = job->gds_base;
1251 		id->gds_size = job->gds_size;
1252 		id->gws_base = job->gws_base;
1253 		id->gws_size = job->gws_size;
1254 		id->oa_base = job->oa_base;
1255 		id->oa_size = job->oa_size;
1256 		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
1257 					    job->gds_size, job->gws_base,
1258 					    job->gws_size, job->oa_base,
1259 					    job->oa_size);
1260 	}
1261 
1262 	if (ring->funcs->patch_cond_exec)
1263 		amdgpu_ring_patch_cond_exec(ring, patch_offset);
1264 
1265 	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
1266 	if (ring->funcs->emit_switch_buffer) {
1267 		amdgpu_ring_emit_switch_buffer(ring);
1268 		amdgpu_ring_emit_switch_buffer(ring);
1269 	}
1270 	return 0;
1271 }
1272 
1273 /**
1274  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
1275  *
1276  * @vm: requested vm
1277  * @bo: requested buffer object
1278  *
1279  * Find @bo inside the requested vm.
1280  * Search inside the @bos vm list for the requested vm
1281  * Returns the found bo_va or NULL if none is found
1282  *
1283  * Object has to be reserved!
1284  *
1285  * Returns:
1286  * Found bo_va or NULL.
1287  */
1288 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
1289 				       struct amdgpu_bo *bo)
1290 {
1291 	struct amdgpu_vm_bo_base *base;
1292 
1293 	for (base = bo->vm_bo; base; base = base->next) {
1294 		if (base->vm != vm)
1295 			continue;
1296 
1297 		return container_of(base, struct amdgpu_bo_va, base);
1298 	}
1299 	return NULL;
1300 }
1301 
1302 /**
1303  * amdgpu_vm_map_gart - Resolve gart mapping of addr
1304  *
1305  * @pages_addr: optional DMA address to use for lookup
1306  * @addr: the unmapped addr
1307  *
1308  * Look up the physical address of the page that the pte resolves
1309  * to.
1310  *
1311  * Returns:
1312  * The pointer for the page table entry.
1313  */
1314 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
1315 {
1316 	uint64_t result;
1317 
1318 	/* page table offset */
1319 	result = pages_addr[addr >> PAGE_SHIFT];
1320 
1321 	/* in case cpu page size != gpu page size*/
1322 	result |= addr & (~PAGE_MASK);
1323 
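	/* PTE addresses must be GPU page (4KB) aligned, strip the sub-page bits */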
1324 	result &= 0xFFFFFFFFFFFFF000ULL;
1325 
1326 	return result;
1327 }
1328 
1329 /**
1330  * amdgpu_vm_update_pde - update a single level in the hierarchy
1331  *
1332  * @params: parameters for the update
1333  * @vm: requested vm
1334  * @entry: entry to update
1335  *
1336  * Makes sure the requested entry in parent is up to date.
1337  */
1338 static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
1339 				struct amdgpu_vm *vm,
1340 				struct amdgpu_vm_bo_base *entry)
1341 {
1342 	struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry);
1343 	struct amdgpu_bo *bo = parent->bo, *pbo;
1344 	uint64_t pde, pt, flags;
1345 	unsigned level;
1346 
1347 	for (level = 0, pbo = bo->parent; pbo; ++level)
1348 		pbo = pbo->parent;
1349 
1350 	level += params->adev->vm_manager.root_level;
1351 	amdgpu_gmc_get_pde_for_bo(entry->bo, level, &pt, &flags);
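	/* Each PDE is 8 bytes, so the byte offset of this entry inside the
	 * parent directory is simply its index in entries[] times 8.
	 */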
1352 	pde = (entry - to_amdgpu_bo_vm(parent->bo)->entries) * 8;
1353 	return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt,
1354 					1, 0, flags);
1355 }
1356 
1357 /**
1358  * amdgpu_vm_invalidate_pds - mark all PDs as invalid
1359  *
1360  * @adev: amdgpu_device pointer
1361  * @vm: related vm
1362  *
1363  * Mark all PD level as invalid after an error.
1364  */
1365 static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
1366 				     struct amdgpu_vm *vm)
1367 {
1368 	struct amdgpu_vm_pt_cursor cursor;
1369 	struct amdgpu_vm_bo_base *entry;
1370 
1371 	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
1372 		if (entry->bo && !entry->moved)
1373 			amdgpu_vm_bo_relocated(entry);
1374 }
1375 
1376 /**
1377  * amdgpu_vm_update_pdes - make sure that all directories are valid
1378  *
1379  * @adev: amdgpu_device pointer
1380  * @vm: requested vm
1381  * @immediate: submit immediately to the paging queue
1382  *
1383  * Makes sure all directories are up to date.
1384  *
1385  * Returns:
1386  * 0 for success, error for failure.
1387  */
1388 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
1389 			  struct amdgpu_vm *vm, bool immediate)
1390 {
1391 	struct amdgpu_vm_update_params params;
1392 	int r;
1393 
1394 	if (list_empty(&vm->relocated))
1395 		return 0;
1396 
1397 	memset(&params, 0, sizeof(params));
1398 	params.adev = adev;
1399 	params.vm = vm;
1400 	params.immediate = immediate;
1401 
1402 	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
1403 	if (r)
1404 		return r;
1405 
1406 	while (!list_empty(&vm->relocated)) {
1407 		struct amdgpu_vm_bo_base *entry;
1408 
1409 		entry = list_first_entry(&vm->relocated,
1410 					 struct amdgpu_vm_bo_base,
1411 					 vm_status);
1412 		amdgpu_vm_bo_idle(entry);
1413 
1414 		r = amdgpu_vm_update_pde(&params, vm, entry);
1415 		if (r)
1416 			goto error;
1417 	}
1418 
1419 	r = vm->update_funcs->commit(&params, &vm->last_update);
1420 	if (r)
1421 		goto error;
1422 	return 0;
1423 
1424 error:
1425 	amdgpu_vm_invalidate_pds(adev, vm);
1426 	return r;
1427 }
1428 
1429 /*
1430  * amdgpu_vm_update_flags - figure out flags for PTE updates
1431  *
1432  * Make sure to set the right flags for the PTEs at the desired level.
1433  */
1434 static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params,
1435 				   struct amdgpu_bo_vm *pt, unsigned int level,
1436 				   uint64_t pe, uint64_t addr,
1437 				   unsigned int count, uint32_t incr,
1438 				   uint64_t flags)
1439 
1440 {
1441 	if (level != AMDGPU_VM_PTB) {
1442 		flags |= AMDGPU_PDE_PTE;
1443 		amdgpu_gmc_get_vm_pde(params->adev, level, &addr, &flags);
1444 
1445 	} else if (params->adev->asic_type >= CHIP_VEGA10 &&
1446 		   !(flags & AMDGPU_PTE_VALID) &&
1447 		   !(flags & AMDGPU_PTE_PRT)) {
1448 
1449 		/* Workaround for fault priority problem on GMC9 */
1450 		flags |= AMDGPU_PTE_EXECUTABLE;
1451 	}
1452 
1453 	params->vm->update_funcs->update(params, pt, pe, addr, count, incr,
1454 					 flags);
1455 }
1456 
1457 /**
1458  * amdgpu_vm_fragment - get fragment for PTEs
1459  *
1460  * @params: see amdgpu_vm_update_params definition
1461  * @start: first PTE to handle
1462  * @end: last PTE to handle
1463  * @flags: hw mapping flags
1464  * @frag: resulting fragment size
1465  * @frag_end: end of this fragment
1466  *
1467  * Returns the first possible fragment for the start and end address.
1468  */
1469 static void amdgpu_vm_fragment(struct amdgpu_vm_update_params *params,
1470 			       uint64_t start, uint64_t end, uint64_t flags,
1471 			       unsigned int *frag, uint64_t *frag_end)
1472 {
1473 	/**
1474 	 * The MC L1 TLB supports variable sized pages, based on a fragment
1475 	 * field in the PTE. When this field is set to a non-zero value, page
1476 	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
1477 	 * flags are considered valid for all PTEs within the fragment range
1478 	 * and corresponding mappings are assumed to be physically contiguous.
1479 	 *
1480 	 * The L1 TLB can store a single PTE for the whole fragment,
1481 	 * significantly increasing the space available for translation
1482 	 * caching. This leads to large improvements in throughput when the
1483 	 * TLB is under pressure.
1484 	 *
1485 	 * The L2 TLB distributes small and large fragments into two
1486 	 * asymmetric partitions. The large fragment cache is significantly
1487 	 * larger. Thus, we try to use large fragments wherever possible.
1488 	 * Userspace can support this by aligning virtual base address and
1489 	 * allocation size to the fragment size.
1490 	 *
1491 	 * Starting with Vega10 the fragment size only controls the L1. The L2
1492 	 * is now directly fed with small/huge/giant pages from the walker.
1493 	 */
1494 	unsigned max_frag;
1495 
1496 	if (params->adev->asic_type < CHIP_VEGA10)
1497 		max_frag = params->adev->vm_manager.fragment_size;
1498 	else
1499 		max_frag = 31;
1500 
1501 	/* system pages are not necessarily contiguous */
1502 	if (params->pages_addr) {
1503 		*frag = 0;
1504 		*frag_end = end;
1505 		return;
1506 	}
1507 
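	/* The fragment is limited by both the alignment of the start address
	 * and the size of the remaining range, e.g. start = 0x200, end = 0x400
	 * gives ffs(start) - 1 = fls64(end - start) - 1 = 9, a 2MB fragment
	 * that the L1 TLB can cache as a single translation.
	 */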
1508 	/* This intentionally wraps around if no bit is set */
1509 	*frag = min((unsigned)ffs(start) - 1, (unsigned)fls64(end - start) - 1);
1510 	if (*frag >= max_frag) {
1511 		*frag = max_frag;
1512 		*frag_end = end & ~((1ULL << max_frag) - 1);
1513 	} else {
1514 		*frag_end = start + (1 << *frag);
1515 	}
1516 }
1517 
1518 /**
1519  * amdgpu_vm_update_ptes - make sure that page tables are valid
1520  *
1521  * @params: see amdgpu_vm_update_params definition
1522  * @start: start of GPU address range
1523  * @end: end of GPU address range
1524  * @dst: destination address to map to, the next dst inside the function
1525  * @flags: mapping flags
1526  *
1527  * Update the page tables in the range @start - @end.
1528  *
1529  * Returns:
1530  * 0 for success, -EINVAL for failure.
1531  */
1532 static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
1533 				 uint64_t start, uint64_t end,
1534 				 uint64_t dst, uint64_t flags)
1535 {
1536 	struct amdgpu_device *adev = params->adev;
1537 	struct amdgpu_vm_pt_cursor cursor;
1538 	uint64_t frag_start = start, frag_end;
1539 	unsigned int frag;
1540 	int r;
1541 
1542 	/* figure out the initial fragment */
1543 	amdgpu_vm_fragment(params, frag_start, end, flags, &frag, &frag_end);
1544 
1545 	/* walk over the address space and update the PTs */
1546 	amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
1547 	while (cursor.pfn < end) {
1548 		unsigned shift, parent_shift, mask;
1549 		uint64_t incr, entry_end, pe_start;
1550 		struct amdgpu_bo *pt;
1551 
1552 		if (!params->unlocked) {
1553 			/* make sure that the page tables covering the
1554 			 * address range are actually allocated
1555 			 */
1556 			r = amdgpu_vm_alloc_pts(params->adev, params->vm,
1557 						&cursor, params->immediate);
1558 			if (r)
1559 				return r;
1560 		}
1561 
1562 		shift = amdgpu_vm_level_shift(adev, cursor.level);
1563 		parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1);
1564 		if (params->unlocked) {
1565 			/* Unlocked updates are only allowed on the leaves */
1566 			if (amdgpu_vm_pt_descendant(adev, &cursor))
1567 				continue;
1568 		} else if (adev->asic_type < CHIP_VEGA10 &&
1569 			   (flags & AMDGPU_PTE_VALID)) {
1570 			/* No huge page support before GMC v9 */
1571 			if (cursor.level != AMDGPU_VM_PTB) {
1572 				if (!amdgpu_vm_pt_descendant(adev, &cursor))
1573 					return -ENOENT;
1574 				continue;
1575 			}
1576 		} else if (frag < shift) {
1577 			/* We can't use this level when the fragment size is
1578 			 * smaller than the address shift. Go to the next
1579 			 * child entry and try again.
1580 			 */
1581 			if (amdgpu_vm_pt_descendant(adev, &cursor))
1582 				continue;
1583 		} else if (frag >= parent_shift) {
1584 			/* If the fragment size is even larger than the parent
1585 			 * shift we should go up one level and check it again.
1586 			 */
1587 			if (!amdgpu_vm_pt_ancestor(&cursor))
1588 				return -EINVAL;
1589 			continue;
1590 		}
1591 
1592 		pt = cursor.entry->bo;
1593 		if (!pt) {
1594 			/* We need all PDs and PTs for mapping something, */
1595 			if (flags & AMDGPU_PTE_VALID)
1596 				return -ENOENT;
1597 
1598 			/* but unmapping something can happen at a higher
1599 			 * level.
1600 			 */
1601 			if (!amdgpu_vm_pt_ancestor(&cursor))
1602 				return -EINVAL;
1603 
1604 			pt = cursor.entry->bo;
1605 			shift = parent_shift;
1606 			frag_end = max(frag_end, ALIGN(frag_start + 1,
1607 				   1ULL << shift));
1608 		}
1609 
1610 		/* Looks good so far, calculate parameters for the update */
1611 		incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
1612 		mask = amdgpu_vm_entries_mask(adev, cursor.level);
1613 		pe_start = ((cursor.pfn >> shift) & mask) * 8;
1614 		entry_end = ((uint64_t)mask + 1) << shift;
1615 		entry_end += cursor.pfn & ~(entry_end - 1);
1616 		entry_end = min(entry_end, end);
1617 
1618 		do {
1619 			struct amdgpu_vm *vm = params->vm;
1620 			uint64_t upd_end = min(entry_end, frag_end);
1621 			unsigned nptes = (upd_end - frag_start) >> shift;
1622 			uint64_t upd_flags = flags | AMDGPU_PTE_FRAG(frag);
1623 
1624 			/* This can happen when we set higher level PDs to
1625 			 * silent to stop fault floods.
1626 			 */
1627 			nptes = max(nptes, 1u);
1628 
1629 			trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
1630 						    nptes, dst, incr, upd_flags,
1631 						    vm->task_info.pid,
1632 						    vm->immediate.fence_context);
1633 			amdgpu_vm_update_flags(params, to_amdgpu_bo_vm(pt),
1634 					       cursor.level, pe_start, dst,
1635 					       nptes, incr, upd_flags);
1636 
1637 			pe_start += nptes * 8;
1638 			dst += nptes * incr;
1639 
1640 			frag_start = upd_end;
1641 			if (frag_start >= frag_end) {
1642 				/* figure out the next fragment */
1643 				amdgpu_vm_fragment(params, frag_start, end,
1644 						   flags, &frag, &frag_end);
1645 				if (frag < shift)
1646 					break;
1647 			}
1648 		} while (frag_start < entry_end);
1649 
1650 		if (amdgpu_vm_pt_descendant(adev, &cursor)) {
1651 			/* Free all child entries.
1652 			 * Update the tables with the flags and addresses and free up subsequent
1653 			 * tables in the case of huge pages or freed up areas.
1654 			 * This is the maximum you can free, because all other page tables are not
1655 			 * completely covered by the range and so potentially still in use.
1656 			 */
1657 			while (cursor.pfn < frag_start) {
1658 				/* Make sure previous mapping is freed */
1659 				if (cursor.entry->bo) {
1660 					params->table_freed = true;
1661 					amdgpu_vm_free_pts(adev, params->vm, &cursor);
1662 				}
1663 				amdgpu_vm_pt_next(adev, &cursor);
1664 			}
1665 
1666 		} else if (frag >= shift) {
1667 			/* or just move on to the next on the same level. */
1668 			amdgpu_vm_pt_next(adev, &cursor);
1669 		}
1670 	}
1671 
1672 	return 0;
1673 }
1674 
1675 /**
1676  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1677  *
1678  * @adev: amdgpu_device pointer of the VM
1679  * @bo_adev: amdgpu_device pointer of the mapped BO
1680  * @vm: requested vm
1681  * @immediate: immediate submission in a page fault
1682  * @unlocked: unlocked invalidation during MM callback
1683  * @resv: fences we need to sync to
1684  * @start: start of mapped range
1685  * @last: last mapped entry
1686  * @flags: flags for the entries
1687  * @offset: offset into nodes and pages_addr
1688  * @res: ttm_resource to map
1689  * @pages_addr: DMA addresses to use for mapping
1690  * @fence: optional resulting fence
1691  * @table_freed: return true if page table is freed
1692  *
1693  * Fill in the page table entries between @start and @last.
1694  *
1695  * Returns:
1696  * 0 for success, -EINVAL for failure.
1697  */
1698 int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1699 				struct amdgpu_device *bo_adev,
1700 				struct amdgpu_vm *vm, bool immediate,
1701 				bool unlocked, struct dma_resv *resv,
1702 				uint64_t start, uint64_t last,
1703 				uint64_t flags, uint64_t offset,
1704 				struct ttm_resource *res,
1705 				dma_addr_t *pages_addr,
1706 				struct dma_fence **fence,
1707 				bool *table_freed)
1708 {
1709 	struct amdgpu_vm_update_params params;
1710 	struct amdgpu_res_cursor cursor;
1711 	enum amdgpu_sync_mode sync_mode;
1712 	int r, idx;
1713 
1714 	if (!drm_dev_enter(&adev->ddev, &idx))
1715 		return -ENODEV;
1716 
1717 	memset(&params, 0, sizeof(params));
1718 	params.adev = adev;
1719 	params.vm = vm;
1720 	params.immediate = immediate;
1721 	params.pages_addr = pages_addr;
1722 	params.unlocked = unlocked;
1723 
1724 	/* Implicitly sync to command submissions in the same VM before
1725 	 * unmapping. Sync to moving fences before mapping.
1726 	 */
1727 	if (!(flags & AMDGPU_PTE_VALID))
1728 		sync_mode = AMDGPU_SYNC_EQ_OWNER;
1729 	else
1730 		sync_mode = AMDGPU_SYNC_EXPLICIT;
1731 
1732 	amdgpu_vm_eviction_lock(vm);
1733 	if (vm->evicting) {
1734 		r = -EBUSY;
1735 		goto error_unlock;
1736 	}
1737 
1738 	if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
1739 		struct dma_fence *tmp = dma_fence_get_stub();
1740 
1741 		amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
1742 		swap(vm->last_unlocked, tmp);
1743 		dma_fence_put(tmp);
1744 	}
1745 
1746 	r = vm->update_funcs->prepare(&params, resv, sync_mode);
1747 	if (r)
1748 		goto error_unlock;
1749 
1750 	amdgpu_res_first(pages_addr ? NULL : res, offset,
1751 			 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
1752 	while (cursor.remaining) {
1753 		uint64_t tmp, num_entries, addr;
1754 
1755 		num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
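		/* For system memory, scan the DMA address array to see how
		 * many CPU pages are physically contiguous; a contiguous run
		 * can be written as a single linear PTE range instead of
		 * doing a per-page address lookup in the update backend.
		 */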
1756 		if (pages_addr) {
1757 			bool contiguous = true;
1758 
1759 			if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
1760 				uint64_t pfn = cursor.start >> PAGE_SHIFT;
1761 				uint64_t count;
1762 
1763 				contiguous = pages_addr[pfn + 1] ==
1764 					pages_addr[pfn] + PAGE_SIZE;
1765 
1766 				tmp = num_entries /
1767 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1768 				for (count = 2; count < tmp; ++count) {
1769 					uint64_t idx = pfn + count;
1770 
1771 					if (contiguous != (pages_addr[idx] ==
1772 					    pages_addr[idx - 1] + PAGE_SIZE))
1773 						break;
1774 				}
1775 				num_entries = count *
1776 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1777 			}
1778 
1779 			if (!contiguous) {
1780 				addr = cursor.start;
1781 				params.pages_addr = pages_addr;
1782 			} else {
1783 				addr = pages_addr[cursor.start >> PAGE_SHIFT];
1784 				params.pages_addr = NULL;
1785 			}
1786 
1787 		} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
1788 			addr = bo_adev->vm_manager.vram_base_offset +
1789 				cursor.start;
1790 		} else {
1791 			addr = 0;
1792 		}
1793 
1794 		tmp = start + num_entries;
1795 		r = amdgpu_vm_update_ptes(&params, start, tmp, addr, flags);
1796 		if (r)
1797 			goto error_unlock;
1798 
1799 		amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
1800 		start = tmp;
1801 	}
1802 
1803 	r = vm->update_funcs->commit(&params, fence);
1804 
1805 	if (table_freed)
1806 		*table_freed = *table_freed || params.table_freed;
1807 
1808 error_unlock:
1809 	amdgpu_vm_eviction_unlock(vm);
1810 	drm_dev_exit(idx);
1811 	return r;
1812 }
1813 
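/**
 * amdgpu_vm_get_memory - accumulate the memory usage of all BOs in a VM
 *
 * @vm: the VM to query
 * @vram_mem: accumulated VRAM usage, in bytes
 * @gtt_mem: accumulated GTT usage, in bytes
 * @cpu_mem: accumulated system memory usage, in bytes
 *
 * Walk the idle, evicted, relocated, moved, invalidated and done lists and
 * add up the memory consumed by every BO bound to this VM.
 */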
1814 void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
1815 				uint64_t *gtt_mem, uint64_t *cpu_mem)
1816 {
1817 	struct amdgpu_bo_va *bo_va, *tmp;
1818 
1819 	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
1820 		if (!bo_va->base.bo)
1821 			continue;
1822 		amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
1823 				gtt_mem, cpu_mem);
1824 	}
1825 	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
1826 		if (!bo_va->base.bo)
1827 			continue;
1828 		amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
1829 				gtt_mem, cpu_mem);
1830 	}
1831 	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
1832 		if (!bo_va->base.bo)
1833 			continue;
1834 		amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
1835 				gtt_mem, cpu_mem);
1836 	}
1837 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
1838 		if (!bo_va->base.bo)
1839 			continue;
1840 		amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
1841 				gtt_mem, cpu_mem);
1842 	}
1843 	spin_lock(&vm->invalidated_lock);
1844 	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
1845 		if (!bo_va->base.bo)
1846 			continue;
1847 		amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
1848 				gtt_mem, cpu_mem);
1849 	}
1850 	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
1851 		if (!bo_va->base.bo)
1852 			continue;
1853 		amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
1854 				gtt_mem, cpu_mem);
1855 	}
1856 	spin_unlock(&vm->invalidated_lock);
1857 }

1858 /**
1859  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1860  *
1861  * @adev: amdgpu_device pointer
1862  * @bo_va: requested BO and VM object
1863  * @clear: if true clear the entries
1864  * @table_freed: return true if page table is freed
1865  *
1866  * Fill in the page table entries for @bo_va.
1867  *
1868  * Returns:
1869  * 0 for success, negative error code for failure.
1870  */
1871 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
1872 			bool clear, bool *table_freed)
1873 {
1874 	struct amdgpu_bo *bo = bo_va->base.bo;
1875 	struct amdgpu_vm *vm = bo_va->base.vm;
1876 	struct amdgpu_bo_va_mapping *mapping;
1877 	dma_addr_t *pages_addr = NULL;
1878 	struct ttm_resource *mem;
1879 	struct dma_fence **last_update;
1880 	struct dma_resv *resv;
1881 	uint64_t flags;
1882 	struct amdgpu_device *bo_adev = adev;
1883 	int r;
1884 
1885 	if (clear || !bo) {
1886 		mem = NULL;
1887 		resv = vm->root.bo->tbo.base.resv;
1888 	} else {
1889 		struct drm_gem_object *obj = &bo->tbo.base;
1890 
1891 		resv = bo->tbo.base.resv;
1892 		if (obj->import_attach && bo_va->is_xgmi) {
1893 			struct dma_buf *dma_buf = obj->import_attach->dmabuf;
1894 			struct drm_gem_object *gobj = dma_buf->priv;
1895 			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
1896 
1897 			if (abo->tbo.resource->mem_type == TTM_PL_VRAM)
1898 				bo = gem_to_amdgpu_bo(gobj);
1899 		}
1900 		mem = bo->tbo.resource;
1901 		if (mem->mem_type == TTM_PL_TT ||
1902 		    mem->mem_type == AMDGPU_PL_PREEMPT)
1903 			pages_addr = bo->tbo.ttm->dma_address;
1904 	}
1905 
1906 	if (bo) {
1907 		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1908 
1909 		if (amdgpu_bo_encrypted(bo))
1910 			flags |= AMDGPU_PTE_TMZ;
1911 
1912 		bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1913 	} else {
1914 		flags = 0x0;
1915 	}
1916 
1917 	if (clear || (bo && bo->tbo.base.resv ==
1918 		      vm->root.bo->tbo.base.resv))
1919 		last_update = &vm->last_update;
1920 	else
1921 		last_update = &bo_va->last_pt_update;
1922 
1923 	if (!clear && bo_va->base.moved) {
1924 		bo_va->base.moved = false;
1925 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1926 
1927 	} else if (bo_va->cleared != clear) {
1928 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1929 	}
1930 
1931 	list_for_each_entry(mapping, &bo_va->invalids, list) {
1932 		uint64_t update_flags = flags;
1933 
1934 		/* Normally bo_va->flags only contains the READABLE and WRITEABLE
1935 		 * bits here, but we filter the flags just in case.
1936 		 */
1937 		if (!(mapping->flags & AMDGPU_PTE_READABLE))
1938 			update_flags &= ~AMDGPU_PTE_READABLE;
1939 		if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1940 			update_flags &= ~AMDGPU_PTE_WRITEABLE;
1941 
1942 		/* Apply ASIC specific mapping flags */
1943 		amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags);
1944 
1945 		trace_amdgpu_vm_bo_update(mapping);
1946 
1947 		r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false,
1948 						resv, mapping->start,
1949 						mapping->last, update_flags,
1950 						mapping->offset, mem,
1951 						pages_addr, last_update, table_freed);
1952 		if (r)
1953 			return r;
1954 	}
1955 
1956 	/* If the BO is not in its preferred location add it back to
1957 	 * the evicted list so that it gets validated again on the
1958 	 * next command submission.
1959 	 */
1960 	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
1961 		uint32_t mem_type = bo->tbo.resource->mem_type;
1962 
1963 		if (!(bo->preferred_domains &
1964 		      amdgpu_mem_type_to_domain(mem_type)))
1965 			amdgpu_vm_bo_evicted(&bo_va->base);
1966 		else
1967 			amdgpu_vm_bo_idle(&bo_va->base);
1968 	} else {
1969 		amdgpu_vm_bo_done(&bo_va->base);
1970 	}
1971 
1972 	list_splice_init(&bo_va->invalids, &bo_va->valids);
1973 	bo_va->cleared = clear;
1974 
1975 	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1976 		list_for_each_entry(mapping, &bo_va->valids, list)
1977 			trace_amdgpu_vm_bo_mapping(mapping);
1978 	}
1979 
1980 	return 0;
1981 }
1982 
1983 /**
1984  * amdgpu_vm_update_prt_state - update the global PRT state
1985  *
1986  * @adev: amdgpu_device pointer
1987  */
1988 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1989 {
1990 	unsigned long flags;
1991 	bool enable;
1992 
1993 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1994 	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1995 	adev->gmc.gmc_funcs->set_prt(adev, enable);
1996 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1997 }
1998 
1999 /**
2000  * amdgpu_vm_prt_get - add a PRT user
2001  *
2002  * @adev: amdgpu_device pointer
2003  */
2004 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
2005 {
2006 	if (!adev->gmc.gmc_funcs->set_prt)
2007 		return;
2008 
2009 	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
2010 		amdgpu_vm_update_prt_state(adev);
2011 }
2012 
2013 /**
2014  * amdgpu_vm_prt_put - drop a PRT user
2015  *
2016  * @adev: amdgpu_device pointer
2017  */
2018 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
2019 {
2020 	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
2021 		amdgpu_vm_update_prt_state(adev);
2022 }
2023 
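/* Note on the PRT reference counting below: support for partially resident
 * textures is a single global switch in the GMC, so it is enabled when the
 * first user appears and disabled again when the last one goes away, e.g.:
 *
 *	amdgpu_vm_prt_get(adev);	- num_prt_users 0 -> 1, PRT enabled
 *	amdgpu_vm_prt_get(adev);	- 1 -> 2, no hardware change
 *	amdgpu_vm_prt_put(adev);	- 2 -> 1, no hardware change
 *	amdgpu_vm_prt_put(adev);	- 1 -> 0, PRT disabled
 */
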
2024 /**
2025  * amdgpu_vm_prt_cb - callback for updating the PRT status
2026  *
2027  * @fence: fence for the callback
2028  * @_cb: the callback structure
2029  */
2030 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
2031 {
2032 	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
2033 
2034 	amdgpu_vm_prt_put(cb->adev);
2035 	kfree(cb);
2036 }
2037 
2038 /**
2039  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
2040  *
2041  * @adev: amdgpu_device pointer
2042  * @fence: fence for the callback
2043  */
2044 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
2045 				 struct dma_fence *fence)
2046 {
2047 	struct amdgpu_prt_cb *cb;
2048 
2049 	if (!adev->gmc.gmc_funcs->set_prt)
2050 		return;
2051 
2052 	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
2053 	if (!cb) {
2054 		/* Last resort when we are OOM */
2055 		if (fence)
2056 			dma_fence_wait(fence, false);
2057 
2058 		amdgpu_vm_prt_put(adev);
2059 	} else {
2060 		cb->adev = adev;
2061 		if (!fence || dma_fence_add_callback(fence, &cb->cb,
2062 						     amdgpu_vm_prt_cb))
2063 			amdgpu_vm_prt_cb(fence, &cb->cb);
2064 	}
2065 }
2066 
2067 /**
2068  * amdgpu_vm_free_mapping - free a mapping
2069  *
2070  * @adev: amdgpu_device pointer
2071  * @vm: requested vm
2072  * @mapping: mapping to be freed
2073  * @fence: fence of the unmap operation
2074  *
2075  * Free a mapping and make sure we decrease the PRT usage count if applicable.
2076  */
2077 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
2078 				   struct amdgpu_vm *vm,
2079 				   struct amdgpu_bo_va_mapping *mapping,
2080 				   struct dma_fence *fence)
2081 {
2082 	if (mapping->flags & AMDGPU_PTE_PRT)
2083 		amdgpu_vm_add_prt_cb(adev, fence);
2084 	kfree(mapping);
2085 }
2086 
2087 /**
2088  * amdgpu_vm_prt_fini - finish all prt mappings
2089  *
2090  * @adev: amdgpu_device pointer
2091  * @vm: requested vm
2092  *
2093  * Register a cleanup callback to disable PRT support after VM dies.
2094  */
2095 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2096 {
2097 	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
2098 	struct dma_fence *excl, **shared;
2099 	unsigned i, shared_count;
2100 	int r;
2101 
2102 	r = dma_resv_get_fences(resv, &excl, &shared_count, &shared);
2103 	if (r) {
2104 		/* Not enough memory to grab the fence list, as last resort
2105 		 * block for all the fences to complete.
2106 		 */
2107 		dma_resv_wait_timeout(resv, true, false,
2108 						    MAX_SCHEDULE_TIMEOUT);
2109 		return;
2110 	}
2111 
2112 	/* Add a callback for each fence in the reservation object */
2113 	amdgpu_vm_prt_get(adev);
2114 	amdgpu_vm_add_prt_cb(adev, excl);
2115 
2116 	for (i = 0; i < shared_count; ++i) {
2117 		amdgpu_vm_prt_get(adev);
2118 		amdgpu_vm_add_prt_cb(adev, shared[i]);
2119 	}
2120 
2121 	kfree(shared);
2122 }
2123 
2124 /**
2125  * amdgpu_vm_clear_freed - clear freed BOs in the PT
2126  *
2127  * @adev: amdgpu_device pointer
2128  * @vm: requested vm
2129  * @fence: optional resulting fence (unchanged if no work needed to be done
2130  * or if an error occurred)
2131  *
2132  * Make sure all freed BOs are cleared in the PT.
2133  * PTs have to be reserved and mutex must be locked!
2134  *
2135  * Returns:
2136  * 0 for success.
2137  *
2138  */
2139 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
2140 			  struct amdgpu_vm *vm,
2141 			  struct dma_fence **fence)
2142 {
2143 	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
2144 	struct amdgpu_bo_va_mapping *mapping;
2145 	uint64_t init_pte_value = 0;
2146 	struct dma_fence *f = NULL;
2147 	int r;
2148 
2149 	while (!list_empty(&vm->freed)) {
2150 		mapping = list_first_entry(&vm->freed,
2151 			struct amdgpu_bo_va_mapping, list);
2152 		list_del(&mapping->list);
2153 
2154 		if (vm->pte_support_ats &&
2155 		    mapping->start < AMDGPU_GMC_HOLE_START)
2156 			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
2157 
2158 		r = amdgpu_vm_bo_update_mapping(adev, adev, vm, false, false,
2159 						resv, mapping->start,
2160 						mapping->last, init_pte_value,
2161 						0, NULL, NULL, &f, NULL);
2162 		amdgpu_vm_free_mapping(adev, vm, mapping, f);
2163 		if (r) {
2164 			dma_fence_put(f);
2165 			return r;
2166 		}
2167 	}
2168 
2169 	if (fence && f) {
2170 		dma_fence_put(*fence);
2171 		*fence = f;
2172 	} else {
2173 		dma_fence_put(f);
2174 	}
2175 
2176 	return 0;
2177 
2178 }
2179 
2180 /**
2181  * amdgpu_vm_handle_moved - handle moved BOs in the PT
2182  *
2183  * @adev: amdgpu_device pointer
2184  * @vm: requested vm
2185  *
2186  * Make sure all BOs which are moved are updated in the PTs.
2187  *
2188  * Returns:
2189  * 0 for success.
2190  *
2191  * PTs have to be reserved!
2192  */
2193 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
2194 			   struct amdgpu_vm *vm)
2195 {
2196 	struct amdgpu_bo_va *bo_va, *tmp;
2197 	struct dma_resv *resv;
2198 	bool clear;
2199 	int r;
2200 
2201 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
2202 		/* Per VM BOs never need to be cleared in the page tables */
2203 		r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
2204 		if (r)
2205 			return r;
2206 	}
2207 
2208 	spin_lock(&vm->invalidated_lock);
2209 	while (!list_empty(&vm->invalidated)) {
2210 		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
2211 					 base.vm_status);
2212 		resv = bo_va->base.bo->tbo.base.resv;
2213 		spin_unlock(&vm->invalidated_lock);
2214 
2215 		/* Try to reserve the BO to avoid clearing its ptes */
2216 		if (!amdgpu_vm_debug && dma_resv_trylock(resv))
2217 			clear = false;
2218 		/* Somebody else is using the BO right now */
2219 		else
2220 			clear = true;
2221 
2222 		r = amdgpu_vm_bo_update(adev, bo_va, clear, NULL);
2223 		if (r)
2224 			return r;
2225 
2226 		if (!clear)
2227 			dma_resv_unlock(resv);
2228 		spin_lock(&vm->invalidated_lock);
2229 	}
2230 	spin_unlock(&vm->invalidated_lock);
2231 
2232 	return 0;
2233 }
2234 
2235 /**
2236  * amdgpu_vm_bo_add - add a bo to a specific vm
2237  *
2238  * @adev: amdgpu_device pointer
2239  * @vm: requested vm
2240  * @bo: amdgpu buffer object
2241  *
2242  * Add @bo into the requested vm and to the list of bos
2243  * associated with the vm.
2244  *
2245  * Returns:
2246  * Newly added bo_va or NULL for failure
2247  *
2248  * Object has to be reserved!
2249  */
2250 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
2251 				      struct amdgpu_vm *vm,
2252 				      struct amdgpu_bo *bo)
2253 {
2254 	struct amdgpu_bo_va *bo_va;
2255 
2256 	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
2257 	if (bo_va == NULL) {
2258 		return NULL;
2259 	}
2260 	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
2261 
2262 	bo_va->ref_count = 1;
2263 	INIT_LIST_HEAD(&bo_va->valids);
2264 	INIT_LIST_HEAD(&bo_va->invalids);
2265 
2266 	if (!bo)
2267 		return bo_va;
2268 
2269 	if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
2270 		bo_va->is_xgmi = true;
2271 		/* Power up XGMI if it can be potentially used */
2272 		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
2273 	}
2274 
2275 	return bo_va;
2276 }
2277 
2278 
2279 /**
2280  * amdgpu_vm_bo_insert_map - insert a new mapping
2281  *
2282  * @adev: amdgpu_device pointer
2283  * @bo_va: bo_va to store the address
2284  * @mapping: the mapping to insert
2285  *
2286  * Insert a new mapping into all structures.
2287  */
2288 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
2289 				    struct amdgpu_bo_va *bo_va,
2290 				    struct amdgpu_bo_va_mapping *mapping)
2291 {
2292 	struct amdgpu_vm *vm = bo_va->base.vm;
2293 	struct amdgpu_bo *bo = bo_va->base.bo;
2294 
2295 	mapping->bo_va = bo_va;
2296 	list_add(&mapping->list, &bo_va->invalids);
2297 	amdgpu_vm_it_insert(mapping, &vm->va);
2298 
2299 	if (mapping->flags & AMDGPU_PTE_PRT)
2300 		amdgpu_vm_prt_get(adev);
2301 
2302 	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
2303 	    !bo_va->base.moved) {
2304 		list_move(&bo_va->base.vm_status, &vm->moved);
2305 	}
2306 	trace_amdgpu_vm_bo_map(bo_va, mapping);
2307 }
2308 
2309 /**
2310  * amdgpu_vm_bo_map - map bo inside a vm
2311  *
2312  * @adev: amdgpu_device pointer
2313  * @bo_va: bo_va to store the address
2314  * @saddr: where to map the BO
2315  * @offset: requested offset in the BO
2316  * @size: BO size in bytes
2317  * @flags: attributes of pages (read/write/valid/etc.)
2318  *
2319  * Add a mapping of the BO at the specified addr into the VM.
2320  *
2321  * Returns:
2322  * 0 for success, error for failure.
2323  *
2324  * Object has to be reserved and unreserved outside!
2325  */
2326 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2327 		     struct amdgpu_bo_va *bo_va,
2328 		     uint64_t saddr, uint64_t offset,
2329 		     uint64_t size, uint64_t flags)
2330 {
2331 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2332 	struct amdgpu_bo *bo = bo_va->base.bo;
2333 	struct amdgpu_vm *vm = bo_va->base.vm;
2334 	uint64_t eaddr;
2335 
2336 	/* validate the parameters */
2337 	if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
2338 		return -EINVAL;
2339 	if (saddr + size <= saddr || offset + size <= offset)
2340 		return -EINVAL;
2341 
2342 	/* make sure object fit at this offset */
2343 	eaddr = saddr + size - 1;
2344 	if ((bo && offset + size > amdgpu_bo_size(bo)) ||
2345 	    (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
2346 		return -EINVAL;
2347 
2348 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2349 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2350 
2351 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2352 	if (tmp) {
2353 		/* bo and tmp overlap, invalid addr */
2354 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
2355 			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
2356 			tmp->start, tmp->last + 1);
2357 		return -EINVAL;
2358 	}
2359 
2360 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2361 	if (!mapping)
2362 		return -ENOMEM;
2363 
2364 	mapping->start = saddr;
2365 	mapping->last = eaddr;
2366 	mapping->offset = offset;
2367 	mapping->flags = flags;
2368 
2369 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2370 
2371 	return 0;
2372 }
2373 
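/* Minimal usage sketch (illustrative only, va_addr is a hypothetical, page
 * aligned GPU VA chosen by the caller): map the first 2 MiB of a reserved BO
 * read/write.  saddr, offset and size must all be page aligned, otherwise
 * -EINVAL is returned:
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, va_addr, 0, 2ULL << 20,
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 */
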
2374 /**
2375  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
2376  *
2377  * @adev: amdgpu_device pointer
2378  * @bo_va: bo_va to store the address
2379  * @saddr: where to map the BO
2380  * @offset: requested offset in the BO
2381  * @size: BO size in bytes
2382  * @flags: attributes of pages (read/write/valid/etc.)
2383  *
2384  * Add a mapping of the BO at the specified addr into the VM. Replace existing
2385  * mappings as we do so.
2386  *
2387  * Returns:
2388  * 0 for success, error for failure.
2389  *
2390  * Object has to be reserved and unreserved outside!
2391  */
2392 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
2393 			     struct amdgpu_bo_va *bo_va,
2394 			     uint64_t saddr, uint64_t offset,
2395 			     uint64_t size, uint64_t flags)
2396 {
2397 	struct amdgpu_bo_va_mapping *mapping;
2398 	struct amdgpu_bo *bo = bo_va->base.bo;
2399 	uint64_t eaddr;
2400 	int r;
2401 
2402 	/* validate the parameters */
2403 	if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
2404 		return -EINVAL;
2405 	if (saddr + size <= saddr || offset + size <= offset)
2406 		return -EINVAL;
2407 
2408 	/* make sure object fit at this offset */
2409 	eaddr = saddr + size - 1;
2410 	if ((bo && offset + size > amdgpu_bo_size(bo)) ||
2411 	    (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
2412 		return -EINVAL;
2413 
2414 	/* Allocate all the needed memory */
2415 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2416 	if (!mapping)
2417 		return -ENOMEM;
2418 
2419 	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
2420 	if (r) {
2421 		kfree(mapping);
2422 		return r;
2423 	}
2424 
2425 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2426 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2427 
2428 	mapping->start = saddr;
2429 	mapping->last = eaddr;
2430 	mapping->offset = offset;
2431 	mapping->flags = flags;
2432 
2433 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2434 
2435 	return 0;
2436 }
2437 
2438 /**
2439  * amdgpu_vm_bo_unmap - remove bo mapping from vm
2440  *
2441  * @adev: amdgpu_device pointer
2442  * @bo_va: bo_va to remove the address from
2443  * @saddr: where the BO is mapped
2444  *
2445  * Remove a mapping of the BO at the specified addr from the VM.
2446  *
2447  * Returns:
2448  * 0 for success, error for failure.
2449  *
2450  * Object has to be reserved and unreserved outside!
2451  */
2452 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2453 		       struct amdgpu_bo_va *bo_va,
2454 		       uint64_t saddr)
2455 {
2456 	struct amdgpu_bo_va_mapping *mapping;
2457 	struct amdgpu_vm *vm = bo_va->base.vm;
2458 	bool valid = true;
2459 
2460 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2461 
2462 	list_for_each_entry(mapping, &bo_va->valids, list) {
2463 		if (mapping->start == saddr)
2464 			break;
2465 	}
2466 
2467 	if (&mapping->list == &bo_va->valids) {
2468 		valid = false;
2469 
2470 		list_for_each_entry(mapping, &bo_va->invalids, list) {
2471 			if (mapping->start == saddr)
2472 				break;
2473 		}
2474 
2475 		if (&mapping->list == &bo_va->invalids)
2476 			return -ENOENT;
2477 	}
2478 
2479 	list_del(&mapping->list);
2480 	amdgpu_vm_it_remove(mapping, &vm->va);
2481 	mapping->bo_va = NULL;
2482 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2483 
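	/* Mappings still on the valids list are queued on vm->freed so their
	 * PTEs get cleared by the next amdgpu_vm_clear_freed(); mappings on
	 * the invalids list are released directly.
	 */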
2484 	if (valid)
2485 		list_add(&mapping->list, &vm->freed);
2486 	else
2487 		amdgpu_vm_free_mapping(adev, vm, mapping,
2488 				       bo_va->last_pt_update);
2489 
2490 	return 0;
2491 }
2492 
2493 /**
2494  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2495  *
2496  * @adev: amdgpu_device pointer
2497  * @vm: VM structure to use
2498  * @saddr: start of the range
2499  * @size: size of the range
2500  *
2501  * Remove all mappings in a range, split them as appropriate.
2502  *
2503  * Returns:
2504  * 0 for success, error for failure.
2505  */
2506 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2507 				struct amdgpu_vm *vm,
2508 				uint64_t saddr, uint64_t size)
2509 {
2510 	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2511 	LIST_HEAD(removed);
2512 	uint64_t eaddr;
2513 
2514 	eaddr = saddr + size - 1;
2515 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2516 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2517 
2518 	/* Allocate all the needed memory */
2519 	before = kzalloc(sizeof(*before), GFP_KERNEL);
2520 	if (!before)
2521 		return -ENOMEM;
2522 	INIT_LIST_HEAD(&before->list);
2523 
2524 	after = kzalloc(sizeof(*after), GFP_KERNEL);
2525 	if (!after) {
2526 		kfree(before);
2527 		return -ENOMEM;
2528 	}
2529 	INIT_LIST_HEAD(&after->list);
2530 
2531 	/* Now gather all removed mappings */
2532 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2533 	while (tmp) {
2534 		/* Remember mapping split at the start */
2535 		if (tmp->start < saddr) {
2536 			before->start = tmp->start;
2537 			before->last = saddr - 1;
2538 			before->offset = tmp->offset;
2539 			before->flags = tmp->flags;
2540 			before->bo_va = tmp->bo_va;
2541 			list_add(&before->list, &tmp->bo_va->invalids);
2542 		}
2543 
2544 		/* Remember mapping split at the end */
2545 		if (tmp->last > eaddr) {
2546 			after->start = eaddr + 1;
2547 			after->last = tmp->last;
2548 			after->offset = tmp->offset;
2549 			after->offset += (after->start - tmp->start) << PAGE_SHIFT;
2550 			after->flags = tmp->flags;
2551 			after->bo_va = tmp->bo_va;
2552 			list_add(&after->list, &tmp->bo_va->invalids);
2553 		}
2554 
2555 		list_del(&tmp->list);
2556 		list_add(&tmp->list, &removed);
2557 
2558 		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2559 	}
2560 
2561 	/* And free them up */
2562 	list_for_each_entry_safe(tmp, next, &removed, list) {
2563 		amdgpu_vm_it_remove(tmp, &vm->va);
2564 		list_del(&tmp->list);
2565 
2566 		if (tmp->start < saddr)
2567 		    tmp->start = saddr;
2568 		if (tmp->last > eaddr)
2569 		    tmp->last = eaddr;
2570 
2571 		tmp->bo_va = NULL;
2572 		list_add(&tmp->list, &vm->freed);
2573 		trace_amdgpu_vm_bo_unmap(NULL, tmp);
2574 	}
2575 
2576 	/* Insert partial mapping before the range */
2577 	if (!list_empty(&before->list)) {
2578 		struct amdgpu_bo *bo = before->bo_va->base.bo;
2579 
2580 		amdgpu_vm_it_insert(before, &vm->va);
2581 		if (before->flags & AMDGPU_PTE_PRT)
2582 			amdgpu_vm_prt_get(adev);
2583 
2584 		if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
2585 		    !before->bo_va->base.moved)
2586 			amdgpu_vm_bo_moved(&before->bo_va->base);
2587 	} else {
2588 		kfree(before);
2589 	}
2590 
2591 	/* Insert partial mapping after the range */
2592 	if (!list_empty(&after->list)) {
2593 		struct amdgpu_bo *bo = after->bo_va->base.bo;
2594 
2595 		amdgpu_vm_it_insert(after, &vm->va);
2596 		if (after->flags & AMDGPU_PTE_PRT)
2597 			amdgpu_vm_prt_get(adev);
2598 
2599 		if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
2600 		    !after->bo_va->base.moved)
2601 			amdgpu_vm_bo_moved(&after->bo_va->base);
2602 	} else {
2603 		kfree(after);
2604 	}
2605 
2606 	return 0;
2607 }
2608 
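/* Illustration of the split performed above (all units are GPU pages):
 *
 *	old mapping:    [tmp->start ..................... tmp->last]
 *	cleared range:             [saddr ......... eaddr]
 *	result:         [ before  ]                        [ after  ]
 *
 * "before" keeps tmp's original offset while "after" gets the offset advanced
 * by (after->start - tmp->start) pages; the overlapping middle part is moved
 * to vm->freed so its PTEs are cleared by the next amdgpu_vm_clear_freed().
 */
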
2609 /**
2610  * amdgpu_vm_bo_lookup_mapping - find mapping by address
2611  *
2612  * @vm: the requested VM
2613  * @addr: the address
2614  *
2615  * Find a mapping by its address.
2616  *
2617  * Returns:
2618  * The amdgpu_bo_va_mapping matching for addr or NULL
2619  *
2620  */
2621 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2622 							 uint64_t addr)
2623 {
2624 	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2625 }
2626 
2627 /**
2628  * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2629  *
2630  * @vm: the requested vm
2631  * @ticket: CS ticket
2632  *
2633  * Trace all mappings of BOs reserved during a command submission.
2634  */
2635 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2636 {
2637 	struct amdgpu_bo_va_mapping *mapping;
2638 
2639 	if (!trace_amdgpu_vm_bo_cs_enabled())
2640 		return;
2641 
2642 	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2643 	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2644 		if (mapping->bo_va && mapping->bo_va->base.bo) {
2645 			struct amdgpu_bo *bo;
2646 
2647 			bo = mapping->bo_va->base.bo;
2648 			if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
2649 			    ticket)
2650 				continue;
2651 		}
2652 
2653 		trace_amdgpu_vm_bo_cs(mapping);
2654 	}
2655 }
2656 
2657 /**
2658  * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2659  *
2660  * @adev: amdgpu_device pointer
2661  * @bo_va: requested bo_va
2662  *
2663  * Remove @bo_va->bo from the requested vm.
2664  *
2665  * Object has to be reserved!
2666  */
2667 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2668 		      struct amdgpu_bo_va *bo_va)
2669 {
2670 	struct amdgpu_bo_va_mapping *mapping, *next;
2671 	struct amdgpu_bo *bo = bo_va->base.bo;
2672 	struct amdgpu_vm *vm = bo_va->base.vm;
2673 	struct amdgpu_vm_bo_base **base;
2674 
2675 	if (bo) {
2676 		if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
2677 			vm->bulk_moveable = false;
2678 
2679 		for (base = &bo_va->base.bo->vm_bo; *base;
2680 		     base = &(*base)->next) {
2681 			if (*base != &bo_va->base)
2682 				continue;
2683 
2684 			*base = bo_va->base.next;
2685 			break;
2686 		}
2687 	}
2688 
2689 	spin_lock(&vm->invalidated_lock);
2690 	list_del(&bo_va->base.vm_status);
2691 	spin_unlock(&vm->invalidated_lock);
2692 
2693 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2694 		list_del(&mapping->list);
2695 		amdgpu_vm_it_remove(mapping, &vm->va);
2696 		mapping->bo_va = NULL;
2697 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2698 		list_add(&mapping->list, &vm->freed);
2699 	}
2700 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2701 		list_del(&mapping->list);
2702 		amdgpu_vm_it_remove(mapping, &vm->va);
2703 		amdgpu_vm_free_mapping(adev, vm, mapping,
2704 				       bo_va->last_pt_update);
2705 	}
2706 
2707 	dma_fence_put(bo_va->last_pt_update);
2708 
2709 	if (bo && bo_va->is_xgmi)
2710 		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
2711 
2712 	kfree(bo_va);
2713 }
2714 
2715 /**
2716  * amdgpu_vm_evictable - check if we can evict a VM
2717  *
2718  * @bo: A page table of the VM.
2719  *
2720  * Check if it is possible to evict a VM.
2721  */
2722 bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
2723 {
2724 	struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
2725 
2726 	/* Page tables of a destroyed VM can go away immediately */
2727 	if (!bo_base || !bo_base->vm)
2728 		return true;
2729 
2730 	/* Don't evict VM page tables while they are busy */
2731 	if (!dma_resv_test_signaled(bo->tbo.base.resv, true))
2732 		return false;
2733 
2734 	/* Try to block ongoing updates */
2735 	if (!amdgpu_vm_eviction_trylock(bo_base->vm))
2736 		return false;
2737 
2738 	/* Don't evict VM page tables while they are updated */
2739 	if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
2740 		amdgpu_vm_eviction_unlock(bo_base->vm);
2741 		return false;
2742 	}
2743 
2744 	bo_base->vm->evicting = true;
2745 	amdgpu_vm_eviction_unlock(bo_base->vm);
2746 	return true;
2747 }
2748 
2749 /**
2750  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2751  *
2752  * @adev: amdgpu_device pointer
2753  * @bo: amdgpu buffer object
2754  * @evicted: is the BO evicted
2755  *
2756  * Mark @bo as invalid.
2757  */
2758 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2759 			     struct amdgpu_bo *bo, bool evicted)
2760 {
2761 	struct amdgpu_vm_bo_base *bo_base;
2762 
2763 	/* shadow bo doesn't have bo base, its validation needs its parent */
2764 	if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo))
2765 		bo = bo->parent;
2766 
2767 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2768 		struct amdgpu_vm *vm = bo_base->vm;
2769 
2770 		if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
2771 			amdgpu_vm_bo_evicted(bo_base);
2772 			continue;
2773 		}
2774 
2775 		if (bo_base->moved)
2776 			continue;
2777 		bo_base->moved = true;
2778 
2779 		if (bo->tbo.type == ttm_bo_type_kernel)
2780 			amdgpu_vm_bo_relocated(bo_base);
2781 		else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
2782 			amdgpu_vm_bo_moved(bo_base);
2783 		else
2784 			amdgpu_vm_bo_invalidated(bo_base);
2785 	}
2786 }
2787 
2788 /**
2789  * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2790  *
2791  * @vm_size: VM size
2792  *
2793  * Returns:
2794  * VM page table size as a power of two
2795  */
2796 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2797 {
2798 	/* Total bits covered by PD + PTs */
2799 	unsigned bits = ilog2(vm_size) + 18;
2800 
2801 	/* Make sure the PD is 4K in size up to 8GB address space.
2802 	   Above that split equally between PD and PTs */
2803 	if (vm_size <= 8)
2804 		return (bits - 9);
2805 	else
2806 		return ((bits + 3) / 2);
2807 }
2808 
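/* Worked example (hypothetical sizes): an 8 GB VM covers ilog2(8) + 18 = 21
 * bits of page address, so the block size is 21 - 9 = 12 bits and the PD
 * stays at 9 bits (4K).  A 256 GB VM covers 26 bits and gets a block size of
 * (26 + 3) / 2 = 14 bits, leaving 12 bits for the PD.
 */
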
2809 /**
2810  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2811  *
2812  * @adev: amdgpu_device pointer
2813  * @min_vm_size: the minimum vm size in GB if it's set auto
2814  * @fragment_size_default: Default PTE fragment size
2815  * @max_level: max VMPT level
2816  * @max_bits: max address space size in bits
2817  *
2818  */
2819 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2820 			   uint32_t fragment_size_default, unsigned max_level,
2821 			   unsigned max_bits)
2822 {
2823 	unsigned int max_size = 1 << (max_bits - 30);
2824 	unsigned int vm_size;
2825 	uint64_t tmp;
2826 
2827 	/* adjust vm size first */
2828 	if (amdgpu_vm_size != -1) {
2829 		vm_size = amdgpu_vm_size;
2830 		if (vm_size > max_size) {
2831 			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2832 				 amdgpu_vm_size, max_size);
2833 			vm_size = max_size;
2834 		}
2835 	} else {
2836 		struct sysinfo si;
2837 		unsigned int phys_ram_gb;
2838 
2839 		/* Optimal VM size depends on the amount of physical
2840 		 * RAM available. Underlying requirements and
2841 		 * assumptions:
2842 		 *
2843 		 *  - Need to map system memory and VRAM from all GPUs
2844 		 *     - VRAM from other GPUs not known here
2845 		 *     - Assume VRAM <= system memory
2846 		 *  - On GFX8 and older, VM space can be segmented for
2847 		 *    different MTYPEs
2848 		 *  - Need to allow room for fragmentation, guard pages etc.
2849 		 *
2850 		 * This adds up to a rough guess of system memory x3.
2851 		 * Round up to power of two to maximize the available
2852 		 * VM size with the given page table size.
2853 		 */
2854 		si_meminfo(&si);
2855 		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2856 			       (1 << 30) - 1) >> 30;
2857 		vm_size = roundup_pow_of_two(
2858 			min(max(phys_ram_gb * 3, min_vm_size), max_size));
2859 	}
2860 
2861 	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2862 
2863 	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2864 	if (amdgpu_vm_block_size != -1)
2865 		tmp >>= amdgpu_vm_block_size - 9;
2866 	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2867 	adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
2868 	switch (adev->vm_manager.num_level) {
2869 	case 3:
2870 		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2871 		break;
2872 	case 2:
2873 		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2874 		break;
2875 	case 1:
2876 		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2877 		break;
2878 	default:
2879 		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2880 	}
2881 	/* block size depends on vm size and hw setup */
2882 	if (amdgpu_vm_block_size != -1)
2883 		adev->vm_manager.block_size =
2884 			min((unsigned)amdgpu_vm_block_size, max_bits
2885 			    - AMDGPU_GPU_PAGE_SHIFT
2886 			    - 9 * adev->vm_manager.num_level);
2887 	else if (adev->vm_manager.num_level > 1)
2888 		adev->vm_manager.block_size = 9;
2889 	else
2890 		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2891 
2892 	if (amdgpu_vm_fragment_size == -1)
2893 		adev->vm_manager.fragment_size = fragment_size_default;
2894 	else
2895 		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2896 
2897 	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2898 		 vm_size, adev->vm_manager.num_level + 1,
2899 		 adev->vm_manager.block_size,
2900 		 adev->vm_manager.fragment_size);
2901 }
2902 
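/* Worked example (assumed numbers, not taken from a specific ASIC): with
 * 16 GB of system RAM, amdgpu_vm_size left at -1 and a min_vm_size no larger
 * than 48 GB, the heuristic above picks vm_size = roundup_pow_of_two(16 * 3)
 * = 64 GB, so max_pfn = 64 << 18 = 2^24 pages.  fls64(2^24) - 1 = 24 bits of
 * page index give DIV_ROUND_UP(24, 9) - 1 = 2 levels (if max_level allows),
 * i.e. a page table rooted at PDB1 with the default 9-bit block size.
 */
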
2903 /**
2904  * amdgpu_vm_wait_idle - wait for the VM to become idle
2905  *
2906  * @vm: VM object to wait for
2907  * @timeout: timeout to wait for VM to become idle
2908  */
2909 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2910 {
2911 	timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, true,
2912 					true, timeout);
2913 	if (timeout <= 0)
2914 		return timeout;
2915 
2916 	return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
2917 }
2918 
2919 /**
2920  * amdgpu_vm_init - initialize a vm instance
2921  *
2922  * @adev: amdgpu_device pointer
2923  * @vm: requested vm
2924  *
2925  * Init @vm fields.
2926  *
2927  * Returns:
2928  * 0 for success, error for failure.
2929  */
2930 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2931 {
2932 	struct amdgpu_bo *root_bo;
2933 	struct amdgpu_bo_vm *root;
2934 	int r, i;
2935 
2936 	vm->va = RB_ROOT_CACHED;
2937 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2938 		vm->reserved_vmid[i] = NULL;
2939 	INIT_LIST_HEAD(&vm->evicted);
2940 	INIT_LIST_HEAD(&vm->relocated);
2941 	INIT_LIST_HEAD(&vm->moved);
2942 	INIT_LIST_HEAD(&vm->idle);
2943 	INIT_LIST_HEAD(&vm->invalidated);
2944 	spin_lock_init(&vm->invalidated_lock);
2945 	INIT_LIST_HEAD(&vm->freed);
2946 	INIT_LIST_HEAD(&vm->done);
2947 
2948 	/* create scheduler entities for page table updates */
2949 	r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
2950 				  adev->vm_manager.vm_pte_scheds,
2951 				  adev->vm_manager.vm_pte_num_scheds, NULL);
2952 	if (r)
2953 		return r;
2954 
2955 	r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
2956 				  adev->vm_manager.vm_pte_scheds,
2957 				  adev->vm_manager.vm_pte_num_scheds, NULL);
2958 	if (r)
2959 		goto error_free_immediate;
2960 
2961 	vm->pte_support_ats = false;
2962 	vm->is_compute_context = false;
2963 
2964 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2965 				    AMDGPU_VM_USE_CPU_FOR_GFX);
2966 
2967 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2968 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2969 	WARN_ONCE((vm->use_cpu_for_update &&
2970 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2971 		  "CPU update of VM recommended only for large BAR system\n");
2972 
2973 	if (vm->use_cpu_for_update)
2974 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2975 	else
2976 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2977 	vm->last_update = NULL;
2978 	vm->last_unlocked = dma_fence_get_stub();
2979 
2980 	mutex_init(&vm->eviction_lock);
2981 	vm->evicting = false;
2982 
2983 	r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
2984 				false, &root);
2985 	if (r)
2986 		goto error_free_delayed;
2987 	root_bo = &root->bo;
2988 	r = amdgpu_bo_reserve(root_bo, true);
2989 	if (r)
2990 		goto error_free_root;
2991 
2992 	r = dma_resv_reserve_shared(root_bo->tbo.base.resv, 1);
2993 	if (r)
2994 		goto error_unreserve;
2995 
2996 	amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
2997 
2998 	r = amdgpu_vm_clear_bo(adev, vm, root, false);
2999 	if (r)
3000 		goto error_unreserve;
3001 
3002 	amdgpu_bo_unreserve(vm->root.bo);
3003 
3004 	INIT_KFIFO(vm->faults);
3005 
3006 	return 0;
3007 
3008 error_unreserve:
3009 	amdgpu_bo_unreserve(vm->root.bo);
3010 
3011 error_free_root:
3012 	amdgpu_bo_unref(&root->shadow);
3013 	amdgpu_bo_unref(&root_bo);
3014 	vm->root.bo = NULL;
3015 
3016 error_free_delayed:
3017 	dma_fence_put(vm->last_unlocked);
3018 	drm_sched_entity_destroy(&vm->delayed);
3019 
3020 error_free_immediate:
3021 	drm_sched_entity_destroy(&vm->immediate);
3022 
3023 	return r;
3024 }
3025 
3026 /**
3027  * amdgpu_vm_check_clean_reserved - check if a VM is clean
3028  *
3029  * @adev: amdgpu_device pointer
3030  * @vm: the VM to check
3031  *
3032  * Check all entries of the root PD. If any subsequent PDs are allocated,
3033  * page tables are being created and filled in, so the VM is not
3034  * a clean VM.
3035  *
3036  * Returns:
3037  *	0 if this VM is clean
3038  */
3039 static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
3040 					  struct amdgpu_vm *vm)
3041 {
3042 	enum amdgpu_vm_level root = adev->vm_manager.root_level;
3043 	unsigned int entries = amdgpu_vm_num_entries(adev, root);
3044 	unsigned int i = 0;
3045 
3046 	for (i = 0; i < entries; i++) {
3047 		if (to_amdgpu_bo_vm(vm->root.bo)->entries[i].bo)
3048 			return -EINVAL;
3049 	}
3050 
3051 	return 0;
3052 }
3053 
3054 /**
3055  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
3056  *
3057  * @adev: amdgpu_device pointer
3058  * @vm: requested vm
3059  *
3060  * This only works on GFX VMs that don't have any BOs added and no
3061  * page tables allocated yet.
3062  *
3063  * Changes the following VM parameters:
3064  * - use_cpu_for_update
3065  * - pte_supports_ats
3066  *
3067  * Reinitializes the page directory to reflect the changed ATS
3068  * setting.
3069  *
3070  * Returns:
3071  * 0 for success, -errno for errors.
3072  */
3073 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
3074 {
3075 	bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
3076 	int r;
3077 
3078 	r = amdgpu_bo_reserve(vm->root.bo, true);
3079 	if (r)
3080 		return r;
3081 
3082 	/* Sanity checks */
3083 	r = amdgpu_vm_check_clean_reserved(adev, vm);
3084 	if (r)
3085 		goto unreserve_bo;
3086 
3087 	/* Check if PD needs to be reinitialized and do it before
3088 	 * changing any other state, in case it fails.
3089 	 */
3090 	if (pte_support_ats != vm->pte_support_ats) {
3091 		vm->pte_support_ats = pte_support_ats;
3092 		r = amdgpu_vm_clear_bo(adev, vm,
3093 				       to_amdgpu_bo_vm(vm->root.bo),
3094 				       false);
3095 		if (r)
3096 			goto unreserve_bo;
3097 	}
3098 
3099 	/* Update VM state */
3100 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
3101 				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
3102 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
3103 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
3104 	WARN_ONCE((vm->use_cpu_for_update &&
3105 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
3106 		  "CPU update of VM recommended only for large BAR system\n");
3107 
3108 	if (vm->use_cpu_for_update) {
3109 		/* Sync with last SDMA update/clear before switching to CPU */
3110 		r = amdgpu_bo_sync_wait(vm->root.bo,
3111 					AMDGPU_FENCE_OWNER_UNDEFINED, true);
3112 		if (r)
3113 			goto unreserve_bo;
3114 
3115 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
3116 	} else {
3117 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
3118 	}
3119 	dma_fence_put(vm->last_update);
3120 	vm->last_update = NULL;
3121 	vm->is_compute_context = true;
3122 
3123 	/* Free the shadow bo for compute VM */
3124 	amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);
3125 
3126 	goto unreserve_bo;
3127 
3128 unreserve_bo:
3129 	amdgpu_bo_unreserve(vm->root.bo);
3130 	return r;
3131 }
3132 
3133 /**
3134  * amdgpu_vm_release_compute - release a compute vm
3135  * @adev: amdgpu_device pointer
3136  * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
3137  *
3138  * This is the counterpart of amdgpu_vm_make_compute. It decouples the compute
3139  * pasid from the vm. Compute should stop using the vm after this call.
3140  */
3141 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
3142 {
3143 	amdgpu_vm_set_pasid(adev, vm, 0);
3144 	vm->is_compute_context = false;
3145 }
3146 
3147 /**
3148  * amdgpu_vm_fini - tear down a vm instance
3149  *
3150  * @adev: amdgpu_device pointer
3151  * @vm: requested vm
3152  *
3153  * Tear down @vm.
3154  * Unbind the VM and remove all bos from the vm bo list
3155  */
3156 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
3157 {
3158 	struct amdgpu_bo_va_mapping *mapping, *tmp;
3159 	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
3160 	struct amdgpu_bo *root;
3161 	int i;
3162 
3163 	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
3164 
3165 	root = amdgpu_bo_ref(vm->root.bo);
3166 	amdgpu_bo_reserve(root, true);
3167 	amdgpu_vm_set_pasid(adev, vm, 0);
3168 	dma_fence_wait(vm->last_unlocked, false);
3169 	dma_fence_put(vm->last_unlocked);
3170 
3171 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
3172 		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
3173 			amdgpu_vm_prt_fini(adev, vm);
3174 			prt_fini_needed = false;
3175 		}
3176 
3177 		list_del(&mapping->list);
3178 		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
3179 	}
3180 
3181 	amdgpu_vm_free_pts(adev, vm, NULL);
3182 	amdgpu_bo_unreserve(root);
3183 	amdgpu_bo_unref(&root);
3184 	WARN_ON(vm->root.bo);
3185 
3186 	drm_sched_entity_destroy(&vm->immediate);
3187 	drm_sched_entity_destroy(&vm->delayed);
3188 
3189 	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
3190 		dev_err(adev->dev, "still active bo inside vm\n");
3191 	}
3192 	rbtree_postorder_for_each_entry_safe(mapping, tmp,
3193 					     &vm->va.rb_root, rb) {
3194 		/* Don't remove the mapping here, we don't want to trigger a
3195 		 * rebalance and the tree is about to be destroyed anyway.
3196 		 */
3197 		list_del(&mapping->list);
3198 		kfree(mapping);
3199 	}
3200 
3201 	dma_fence_put(vm->last_update);
3202 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
3203 		amdgpu_vmid_free_reserved(adev, vm, i);
3204 }
3205 
3206 /**
3207  * amdgpu_vm_manager_init - init the VM manager
3208  *
3209  * @adev: amdgpu_device pointer
3210  *
3211  * Initialize the VM manager structures
3212  */
3213 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
3214 {
3215 	unsigned i;
3216 
3217 	/* Concurrent flushes are only possible starting with Vega10 and
3218 	 * are broken on Navi10 and Navi14.
3219 	 */
3220 	adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
3221 					      adev->asic_type == CHIP_NAVI10 ||
3222 					      adev->asic_type == CHIP_NAVI14);
3223 	amdgpu_vmid_mgr_init(adev);
3224 
3225 	adev->vm_manager.fence_context =
3226 		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3227 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
3228 		adev->vm_manager.seqno[i] = 0;
3229 
3230 	spin_lock_init(&adev->vm_manager.prt_lock);
3231 	atomic_set(&adev->vm_manager.num_prt_users, 0);
3232 
3233 	/* If not overridden by the user, compute VM tables are by default
3234 	 * updated by the CPU only on large BAR systems.
3235 	 */
3236 #ifdef CONFIG_X86_64
3237 	if (amdgpu_vm_update_mode == -1) {
3238 		/* For asic with VF MMIO access protection
3239 		 * avoid using CPU for VM table updates
3240 		 */
3241 		if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
3242 		    !amdgpu_sriov_vf_mmio_access_protection(adev))
3243 			adev->vm_manager.vm_update_mode =
3244 				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
3245 		else
3246 			adev->vm_manager.vm_update_mode = 0;
3247 	} else
3248 		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
3249 #else
3250 	adev->vm_manager.vm_update_mode = 0;
3251 #endif
3252 
3253 	xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
3254 }
3255 
3256 /**
3257  * amdgpu_vm_manager_fini - cleanup VM manager
3258  *
3259  * @adev: amdgpu_device pointer
3260  *
3261  * Cleanup the VM manager and free resources.
3262  */
3263 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
3264 {
3265 	WARN_ON(!xa_empty(&adev->vm_manager.pasids));
3266 	xa_destroy(&adev->vm_manager.pasids);
3267 
3268 	amdgpu_vmid_mgr_fini(adev);
3269 }
3270 
3271 /**
3272  * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
3273  *
3274  * @dev: drm device pointer
3275  * @data: drm_amdgpu_vm
3276  * @filp: drm file pointer
3277  *
3278  * Returns:
3279  * 0 for success, -errno for errors.
3280  */
3281 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
3282 {
3283 	union drm_amdgpu_vm *args = data;
3284 	struct amdgpu_device *adev = drm_to_adev(dev);
3285 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
3286 	long timeout = msecs_to_jiffies(2000);
3287 	int r;
3288 
3289 	/* No valid flags defined yet */
3290 	if (args->in.flags)
3291 		return -EINVAL;
3292 
3293 	switch (args->in.op) {
3294 	case AMDGPU_VM_OP_RESERVE_VMID:
3295 		/* We only need to reserve the vmid from the gfxhub */
3296 		r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm,
3297 					       AMDGPU_GFXHUB_0);
3298 		if (r)
3299 			return r;
3300 		break;
3301 	case AMDGPU_VM_OP_UNRESERVE_VMID:
3302 		if (amdgpu_sriov_runtime(adev))
3303 			timeout = 8 * timeout;
3304 
3305 		/* Wait for the vm to become idle to make sure the vmid set
3306 		 * in SPM_VMID is not referenced anymore.
3307 		 */
3308 		r = amdgpu_bo_reserve(fpriv->vm.root.bo, true);
3309 		if (r)
3310 			return r;
3311 
3312 		r = amdgpu_vm_wait_idle(&fpriv->vm, timeout);
3313 		if (r < 0)
3314 			return r;
3315 
3316 		amdgpu_bo_unreserve(fpriv->vm.root.bo);
3317 		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
3318 		break;
3319 	default:
3320 		return -EINVAL;
3321 	}
3322 
3323 	return 0;
3324 }
3325 
3326 /**
3327  * amdgpu_vm_get_task_info - Extracts task info for a PASID.
3328  *
3329  * @adev: amdgpu device pointer
3330  * @pasid: PASID identifier for VM
3331  * @task_info: task_info to fill.
3332  */
3333 void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
3334 			 struct amdgpu_task_info *task_info)
3335 {
3336 	struct amdgpu_vm *vm;
3337 	unsigned long flags;
3338 
3339 	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
3340 
3341 	vm = xa_load(&adev->vm_manager.pasids, pasid);
3342 	if (vm)
3343 		*task_info = vm->task_info;
3344 
3345 	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
3346 }
3347 
3348 /**
3349  * amdgpu_vm_set_task_info - Sets VMs task info.
3350  *
3351  * @vm: vm for which to set the info
3352  */
3353 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
3354 {
3355 	if (vm->task_info.pid)
3356 		return;
3357 
3358 	vm->task_info.pid = current->pid;
3359 	get_task_comm(vm->task_info.task_name, current);
3360 
3361 	if (current->group_leader->mm != current->mm)
3362 		return;
3363 
3364 	vm->task_info.tgid = current->group_leader->pid;
3365 	get_task_comm(vm->task_info.process_name, current->group_leader);
3366 }
3367 
3368 /**
3369  * amdgpu_vm_handle_fault - graceful handling of VM faults.
3370  * @adev: amdgpu device pointer
3371  * @pasid: PASID of the VM
3372  * @addr: Address of the fault
3373  * @write_fault: true if write fault, false if read fault
3374  *
3375  * Try to gracefully handle a VM fault. Return true if the fault was handled and
3376  * shouldn't be reported any more.
3377  */
3378 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
3379 			    uint64_t addr, bool write_fault)
3380 {
3381 	bool is_compute_context = false;
3382 	struct amdgpu_bo *root;
3383 	unsigned long irqflags;
3384 	uint64_t value, flags;
3385 	struct amdgpu_vm *vm;
3386 	int r;
3387 
3388 	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
3389 	vm = xa_load(&adev->vm_manager.pasids, pasid);
3390 	if (vm) {
3391 		root = amdgpu_bo_ref(vm->root.bo);
3392 		is_compute_context = vm->is_compute_context;
3393 	} else {
3394 		root = NULL;
3395 	}
3396 	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
3397 
3398 	if (!root)
3399 		return false;
3400 
3401 	addr /= AMDGPU_GPU_PAGE_SIZE;
3402 
3403 	if (is_compute_context &&
3404 	    !svm_range_restore_pages(adev, pasid, addr, write_fault)) {
3405 		amdgpu_bo_unref(&root);
3406 		return true;
3407 	}
3408 
3409 	r = amdgpu_bo_reserve(root, true);
3410 	if (r)
3411 		goto error_unref;
3412 
3413 	/* Double check that the VM still exists */
3414 	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
3415 	vm = xa_load(&adev->vm_manager.pasids, pasid);
3416 	if (vm && vm->root.bo != root)
3417 		vm = NULL;
3418 	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
3419 	if (!vm)
3420 		goto error_unlock;
3421 
3422 	flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
3423 		AMDGPU_PTE_SYSTEM;
3424 
3425 	if (is_compute_context) {
3426 		/* Intentionally setting invalid PTE flag
3427 		 * combination to force a no-retry-fault
3428 		 */
3429 		flags = AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE |
3430 			AMDGPU_PTE_TF;
3431 		value = 0;
3432 	} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
3433 		/* Redirect the access to the dummy page */
3434 		value = adev->dummy_page_addr;
3435 		flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
3436 			AMDGPU_PTE_WRITEABLE;
3437 
3438 	} else {
3439 		/* Let the hw retry silently on the PTE */
3440 		value = 0;
3441 	}
3442 
3443 	r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
3444 	if (r) {
3445 		pr_debug("failed %d to reserve fence slot\n", r);
3446 		goto error_unlock;
3447 	}
3448 
3449 	r = amdgpu_vm_bo_update_mapping(adev, adev, vm, true, false, NULL, addr,
3450 					addr, flags, value, NULL, NULL, NULL,
3451 					NULL);
3452 	if (r)
3453 		goto error_unlock;
3454 
3455 	r = amdgpu_vm_update_pdes(adev, vm, true);
3456 
3457 error_unlock:
3458 	amdgpu_bo_unreserve(root);
3459 	if (r < 0)
3460 		DRM_ERROR("Can't handle page fault (%d)\n", r);
3461 
3462 error_unref:
3463 	amdgpu_bo_unref(&root);
3464 
3465 	return false;
3466 }
3467 
3468 #if defined(CONFIG_DEBUG_FS)
3469 /**
3470  * amdgpu_debugfs_vm_bo_info  - print BO info for the VM
3471  *
3472  * @vm: Requested VM for printing BO info
3473  * @m: debugfs file
3474  *
3475  * Print BO information in debugfs file for the VM
3476  */
3477 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
3478 {
3479 	struct amdgpu_bo_va *bo_va, *tmp;
3480 	u64 total_idle = 0;
3481 	u64 total_evicted = 0;
3482 	u64 total_relocated = 0;
3483 	u64 total_moved = 0;
3484 	u64 total_invalidated = 0;
3485 	u64 total_done = 0;
3486 	unsigned int total_idle_objs = 0;
3487 	unsigned int total_evicted_objs = 0;
3488 	unsigned int total_relocated_objs = 0;
3489 	unsigned int total_moved_objs = 0;
3490 	unsigned int total_invalidated_objs = 0;
3491 	unsigned int total_done_objs = 0;
3492 	unsigned int id = 0;
3493 
3494 	seq_puts(m, "\tIdle BOs:\n");
3495 	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
3496 		if (!bo_va->base.bo)
3497 			continue;
3498 		total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3499 	}
3500 	total_idle_objs = id;
3501 	id = 0;
3502 
3503 	seq_puts(m, "\tEvicted BOs:\n");
3504 	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
3505 		if (!bo_va->base.bo)
3506 			continue;
3507 		total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3508 	}
3509 	total_evicted_objs = id;
3510 	id = 0;
3511 
3512 	seq_puts(m, "\tRelocated BOs:\n");
3513 	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
3514 		if (!bo_va->base.bo)
3515 			continue;
3516 		total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3517 	}
3518 	total_relocated_objs = id;
3519 	id = 0;
3520 
3521 	seq_puts(m, "\tMoved BOs:\n");
3522 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
3523 		if (!bo_va->base.bo)
3524 			continue;
3525 		total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3526 	}
3527 	total_moved_objs = id;
3528 	id = 0;
3529 
3530 	seq_puts(m, "\tInvalidated BOs:\n");
3531 	spin_lock(&vm->invalidated_lock);
3532 	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
3533 		if (!bo_va->base.bo)
3534 			continue;
3535 		total_invalidated += amdgpu_bo_print_info(id++,	bo_va->base.bo, m);
3536 	}
3537 	total_invalidated_objs = id;
3538 	id = 0;
3539 
3540 	seq_puts(m, "\tDone BOs:\n");
3541 	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
3542 		if (!bo_va->base.bo)
3543 			continue;
3544 		total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3545 	}
3546 	spin_unlock(&vm->invalidated_lock);
3547 	total_done_objs = id;
3548 
3549 	seq_printf(m, "\tTotal idle size:        %12lld\tobjs:\t%d\n", total_idle,
3550 		   total_idle_objs);
3551 	seq_printf(m, "\tTotal evicted size:     %12lld\tobjs:\t%d\n", total_evicted,
3552 		   total_evicted_objs);
3553 	seq_printf(m, "\tTotal relocated size:   %12lld\tobjs:\t%d\n", total_relocated,
3554 		   total_relocated_objs);
3555 	seq_printf(m, "\tTotal moved size:       %12lld\tobjs:\t%d\n", total_moved,
3556 		   total_moved_objs);
3557 	seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
3558 		   total_invalidated_objs);
3559 	seq_printf(m, "\tTotal done size:        %12lld\tobjs:\t%d\n", total_done,
3560 		   total_done_objs);
3561 }
3562 #endif
3563