1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3  * Copyright 2022 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <drm/drm_drv.h>
25 
26 #include "amdgpu.h"
27 #include "amdgpu_trace.h"
28 #include "amdgpu_vm.h"
29 
30 /*
31  * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt_dfs_safe
32  */
33 struct amdgpu_vm_pt_cursor {
34 	uint64_t pfn;
35 	struct amdgpu_vm_bo_base *parent;
36 	struct amdgpu_vm_bo_base *entry;
37 	unsigned int level;
38 };
39 
40 /**
41  * amdgpu_vm_pt_level_shift - return the addr shift for each level
42  *
43  * @adev: amdgpu_device pointer
44  * @level: VMPT level
45  *
46  * Returns:
47  * The number of bits the pfn needs to be right shifted for a level.
48  */
49 static unsigned int amdgpu_vm_pt_level_shift(struct amdgpu_device *adev,
50 					     unsigned int level)
51 {
52 	switch (level) {
53 	case AMDGPU_VM_PDB2:
54 	case AMDGPU_VM_PDB1:
55 	case AMDGPU_VM_PDB0:
56 		return 9 * (AMDGPU_VM_PDB0 - level) +
57 			adev->vm_manager.block_size;
58 	case AMDGPU_VM_PTB:
59 		return 0;
60 	default:
61 		return ~0;
62 	}
63 }
64 
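/*
 * Worked example (a sketch, assuming the common 9-bit block_size and the
 * usual PDB2 < PDB1 < PDB0 < PTB level ordering): the shifts come out as
 * PDB2 -> 27, PDB1 -> 18, PDB0 -> 9 and PTB -> 0, i.e. each directory
 * level covers 512 times the address range of the level below it.
 */
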
65 /**
66  * amdgpu_vm_pt_num_entries - return the number of entries in a PD/PT
67  *
68  * @adev: amdgpu_device pointer
69  * @level: VMPT level
70  *
71  * Returns:
72  * The number of entries in a page directory or page table.
73  */
74 static unsigned int amdgpu_vm_pt_num_entries(struct amdgpu_device *adev,
75 					     unsigned int level)
76 {
77 	unsigned int shift;
78 
79 	shift = amdgpu_vm_pt_level_shift(adev, adev->vm_manager.root_level);
80 	if (level == adev->vm_manager.root_level)
81 		/* For the root directory */
82 		return round_up(adev->vm_manager.max_pfn, 1ULL << shift)
83 			>> shift;
84 	else if (level != AMDGPU_VM_PTB)
85 		/* Everything in between */
86 		return 512;
87 
88 	/* For the page tables on the leaves */
89 	return AMDGPU_VM_PTE_COUNT(adev);
90 }
91 
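/*
 * Worked example (a sketch, assuming a 48-bit address space, root_level ==
 * AMDGPU_VM_PDB2 and a 9-bit block_size): max_pfn == 2^36 pages and the
 * root shift is 27, so the root PD holds round_up(2^36, 2^27) >> 27 == 512
 * entries; intermediate PDs always hold 512 entries, and the leaf PTBs hold
 * AMDGPU_VM_PTE_COUNT() == 1 << block_size == 512 entries.
 */
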
92 /**
93  * amdgpu_vm_pt_num_ats_entries - return the number of ATS entries in the root PD
94  *
95  * @adev: amdgpu_device pointer
96  *
97  * Returns:
98  * The number of entries in the root page directory which need the ATS setting.
99  */
100 static unsigned int amdgpu_vm_pt_num_ats_entries(struct amdgpu_device *adev)
101 {
102 	unsigned int shift;
103 
104 	shift = amdgpu_vm_pt_level_shift(adev, adev->vm_manager.root_level);
105 	return AMDGPU_GMC_HOLE_START >> (shift + AMDGPU_GPU_PAGE_SHIFT);
106 }
107 
108 /**
109  * amdgpu_vm_pt_entries_mask - the mask to get the entry number of a PD/PT
110  *
111  * @adev: amdgpu_device pointer
112  * @level: VMPT level
113  *
114  * Returns:
115  * The mask to extract the entry number of a PD/PT from an address.
116  */
117 static uint32_t amdgpu_vm_pt_entries_mask(struct amdgpu_device *adev,
118 					  unsigned int level)
119 {
120 	if (level <= adev->vm_manager.root_level)
121 		return 0xffffffff;
122 	else if (level != AMDGPU_VM_PTB)
123 		return 0x1ff;
124 	else
125 		return AMDGPU_VM_PTE_COUNT(adev) - 1;
126 }
127 
128 /**
129  * amdgpu_vm_pt_size - returns the size of the page table in bytes
130  *
131  * @adev: amdgpu_device pointer
132  * @level: VMPT level
133  *
134  * Returns:
135  * The size of the BO for a page directory or page table in bytes.
136  */
137 static unsigned int amdgpu_vm_pt_size(struct amdgpu_device *adev,
138 				      unsigned int level)
139 {
140 	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_pt_num_entries(adev, level) * 8);
141 }
142 
143 /**
144  * amdgpu_vm_pt_parent - get the parent page directory
145  *
146  * @pt: child page table
147  *
148  * Helper to get the parent entry for the child page table. NULL if we are at
149  * the root page directory.
150  */
151 static struct amdgpu_vm_bo_base *
152 amdgpu_vm_pt_parent(struct amdgpu_vm_bo_base *pt)
153 {
154 	struct amdgpu_bo *parent = pt->bo->parent;
155 
156 	if (!parent)
157 		return NULL;
158 
159 	return parent->vm_bo;
160 }
161 
162 /**
163  * amdgpu_vm_pt_start - start PD/PT walk
164  *
165  * @adev: amdgpu_device pointer
166  * @vm: amdgpu_vm structure
167  * @start: start address of the walk
168  * @cursor: state to initialize
169  *
170  * Initialize an amdgpu_vm_pt_cursor to start a walk.
171  */
172 static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
173 			       struct amdgpu_vm *vm, uint64_t start,
174 			       struct amdgpu_vm_pt_cursor *cursor)
175 {
176 	cursor->pfn = start;
177 	cursor->parent = NULL;
178 	cursor->entry = &vm->root;
179 	cursor->level = adev->vm_manager.root_level;
180 }
181 
182 /**
183  * amdgpu_vm_pt_descendant - go to child node
184  *
185  * @adev: amdgpu_device pointer
186  * @cursor: current state
187  *
188  * Walk to the child node of the current node.
189  * Returns:
190  * True if the walk was possible, false otherwise.
191  */
192 static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
193 				    struct amdgpu_vm_pt_cursor *cursor)
194 {
195 	unsigned int mask, shift, idx;
196 
197 	if ((cursor->level == AMDGPU_VM_PTB) || !cursor->entry ||
198 	    !cursor->entry->bo)
199 		return false;
200 
201 	mask = amdgpu_vm_pt_entries_mask(adev, cursor->level);
202 	shift = amdgpu_vm_pt_level_shift(adev, cursor->level);
203 
204 	++cursor->level;
205 	idx = (cursor->pfn >> shift) & mask;
206 	cursor->parent = cursor->entry;
207 	cursor->entry = &to_amdgpu_bo_vm(cursor->entry->bo)->entries[idx];
208 	return true;
209 }
210 
211 /**
212  * amdgpu_vm_pt_sibling - go to sibling node
213  *
214  * @adev: amdgpu_device pointer
215  * @cursor: current state
216  *
217  * Walk to the sibling node of the current node.
218  * Returns:
219  * True if the walk was possible, false otherwise.
220  */
221 static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
222 				 struct amdgpu_vm_pt_cursor *cursor)
223 {
224 
225 	unsigned int shift, num_entries;
226 	struct amdgpu_bo_vm *parent;
227 
228 	/* Root doesn't have a sibling */
229 	if (!cursor->parent)
230 		return false;
231 
232 	/* Go to our parent and see if we have a sibling */
233 	shift = amdgpu_vm_pt_level_shift(adev, cursor->level - 1);
234 	num_entries = amdgpu_vm_pt_num_entries(adev, cursor->level - 1);
235 	parent = to_amdgpu_bo_vm(cursor->parent->bo);
236 
237 	if (cursor->entry == &parent->entries[num_entries - 1])
238 		return false;
239 
240 	cursor->pfn += 1ULL << shift;
241 	cursor->pfn &= ~((1ULL << shift) - 1);
242 	++cursor->entry;
243 	return true;
244 }
245 
246 /**
247  * amdgpu_vm_pt_ancestor - go to parent node
248  *
249  * @cursor: current state
250  *
251  * Walk to the parent node of the current node.
252  * Returns:
253  * True if the walk was possible, false otherwise.
254  */
255 static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
256 {
257 	if (!cursor->parent)
258 		return false;
259 
260 	--cursor->level;
261 	cursor->entry = cursor->parent;
262 	cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
263 	return true;
264 }
265 
266 /**
267  * amdgpu_vm_pt_next - get next PD/PT in hierarchy
268  *
269  * @adev: amdgpu_device pointer
270  * @cursor: current state
271  *
272  * Walk the PD/PT tree to the next node.
273  */
274 static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
275 			      struct amdgpu_vm_pt_cursor *cursor)
276 {
277 	/* First try a newborn child */
278 	if (amdgpu_vm_pt_descendant(adev, cursor))
279 		return;
280 
281 	/* If that didn't work, try to find a sibling */
282 	while (!amdgpu_vm_pt_sibling(adev, cursor)) {
283 		/* No sibling, go to our parents and grandparents */
284 		if (!amdgpu_vm_pt_ancestor(cursor)) {
285 			cursor->pfn = ~0ll;
286 			return;
287 		}
288 	}
289 }
290 
291 /**
292  * amdgpu_vm_pt_first_dfs - start a depth-first search
293  *
294  * @adev: amdgpu_device structure
295  * @vm: amdgpu_vm structure
296  * @start: optional cursor to start with
297  * @cursor: state to initialize
298  *
299  * Starts a depth-first traversal of the PD/PT tree.
300  */
301 static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
302 				   struct amdgpu_vm *vm,
303 				   struct amdgpu_vm_pt_cursor *start,
304 				   struct amdgpu_vm_pt_cursor *cursor)
305 {
306 	if (start)
307 		*cursor = *start;
308 	else
309 		amdgpu_vm_pt_start(adev, vm, 0, cursor);
310 
311 	while (amdgpu_vm_pt_descendant(adev, cursor))
312 		;
313 }
314 
315 /**
316  * amdgpu_vm_pt_continue_dfs - check if the depth-first search should continue
317  *
318  * @start: starting point for the search
319  * @entry: current entry
320  *
321  * Returns:
322  * True when the search should continue, false otherwise.
323  */
324 static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
325 				      struct amdgpu_vm_bo_base *entry)
326 {
327 	return entry && (!start || entry != start->entry);
328 }
329 
330 /**
331  * amdgpu_vm_pt_next_dfs - get the next node for a depth-first search
332  *
333  * @adev: amdgpu_device structure
334  * @cursor: current state
335  *
336  * Move the cursor to the next node in a depth-first search.
337  */
338 static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
339 				  struct amdgpu_vm_pt_cursor *cursor)
340 {
341 	if (!cursor->entry)
342 		return;
343 
344 	if (!cursor->parent)
345 		cursor->entry = NULL;
346 	else if (amdgpu_vm_pt_sibling(adev, cursor))
347 		while (amdgpu_vm_pt_descendant(adev, cursor))
348 			;
349 	else
350 		amdgpu_vm_pt_ancestor(cursor);
351 }
352 
353 /*
354  * for_each_amdgpu_vm_pt_dfs_safe - safe depth-first search of all PDs/PTs
355  */
356 #define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)		\
357 	for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)),		\
358 	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
359 	     amdgpu_vm_pt_continue_dfs((start), (entry));			\
360 	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))
361 
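/*
 * Example usage (a sketch mirroring amdgpu_vm_pt_free_dfs() below): the
 * traversal is bottom-up, so children are always visited before their
 * parents and the current entry may safely be freed while iterating:
 *
 *	struct amdgpu_vm_pt_cursor cursor;
 *	struct amdgpu_vm_bo_base *entry;
 *
 *	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
 *		amdgpu_vm_pt_free(entry);
 */
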
362 /**
363  * amdgpu_vm_pt_clear - initially clear the PDs/PTs
364  *
365  * @adev: amdgpu_device pointer
366  * @vm: VM to clear BO from
367  * @vmbo: BO to clear
368  * @immediate: use an immediate update
369  *
370  * Root PD needs to be reserved when calling this.
371  *
372  * Returns:
373  * 0 on success, errno otherwise.
374  */
375 int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
376 		       struct amdgpu_bo_vm *vmbo, bool immediate)
377 {
378 	unsigned int level = adev->vm_manager.root_level;
379 	struct ttm_operation_ctx ctx = { true, false };
380 	struct amdgpu_vm_update_params params;
381 	struct amdgpu_bo *ancestor = &vmbo->bo;
382 	unsigned int entries, ats_entries;
383 	struct amdgpu_bo *bo = &vmbo->bo;
384 	uint64_t addr;
385 	int r, idx;
386 
387 	/* Figure out our place in the hierarchy */
388 	if (ancestor->parent) {
389 		++level;
390 		while (ancestor->parent->parent) {
391 			++level;
392 			ancestor = ancestor->parent;
393 		}
394 	}
395 
396 	entries = amdgpu_bo_size(bo) / 8;
397 	if (!vm->pte_support_ats) {
398 		ats_entries = 0;
399 
400 	} else if (!bo->parent) {
401 		ats_entries = amdgpu_vm_pt_num_ats_entries(adev);
402 		ats_entries = min(ats_entries, entries);
403 		entries -= ats_entries;
404 
405 	} else {
406 		struct amdgpu_vm_bo_base *pt;
407 
408 		pt = ancestor->vm_bo;
409 		ats_entries = amdgpu_vm_pt_num_ats_entries(adev);
410 		if ((pt - to_amdgpu_bo_vm(vm->root.bo)->entries) >=
411 		    ats_entries) {
412 			ats_entries = 0;
413 		} else {
414 			ats_entries = entries;
415 			entries = 0;
416 		}
417 	}
418 
419 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
420 	if (r)
421 		return r;
422 
423 	if (vmbo->shadow) {
424 		struct amdgpu_bo *shadow = vmbo->shadow;
425 
426 		r = ttm_bo_validate(&shadow->tbo, &shadow->placement, &ctx);
427 		if (r)
428 			return r;
429 	}
430 
431 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
432 		return -ENODEV;
433 
434 	r = vm->update_funcs->map_table(vmbo);
435 	if (r)
436 		goto exit;
437 
438 	memset(&params, 0, sizeof(params));
439 	params.adev = adev;
440 	params.vm = vm;
441 	params.immediate = immediate;
442 
443 	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
444 	if (r)
445 		goto exit;
446 
447 	addr = 0;
448 	if (ats_entries) {
449 		uint64_t value = 0, flags;
450 
451 		flags = AMDGPU_PTE_DEFAULT_ATC;
452 		if (level != AMDGPU_VM_PTB) {
453 			/* Handle leaf PDEs as PTEs */
454 			flags |= AMDGPU_PDE_PTE;
455 			amdgpu_gmc_get_vm_pde(adev, level, &value, &flags);
456 		}
457 
458 		r = vm->update_funcs->update(&params, vmbo, addr, 0,
459 					     ats_entries, value, flags);
460 		if (r)
461 			goto exit;
462 
463 		addr += ats_entries * 8;
464 	}
465 
466 	if (entries) {
467 		uint64_t value = 0, flags = 0;
468 
469 		if (adev->asic_type >= CHIP_VEGA10) {
470 			if (level != AMDGPU_VM_PTB) {
471 				/* Handle leaf PDEs as PTEs */
472 				flags |= AMDGPU_PDE_PTE;
473 				amdgpu_gmc_get_vm_pde(adev, level,
474 						      &value, &flags);
475 			} else {
476 				/* Workaround for fault priority problem on GMC9 */
477 				flags = AMDGPU_PTE_EXECUTABLE;
478 			}
479 		}
480 
481 		r = vm->update_funcs->update(&params, vmbo, addr, 0, entries,
482 					     value, flags);
483 		if (r)
484 			goto exit;
485 	}
486 
487 	r = vm->update_funcs->commit(&params, NULL);
488 exit:
489 	drm_dev_exit(idx);
490 	return r;
491 }
492 
493 /**
494  * amdgpu_vm_pt_create - create bo for PD/PT
495  *
496  * @adev: amdgpu_device pointer
497  * @vm: requesting vm
498  * @level: the page table level
499  * @immediate: use an immediate update
500  * @vmbo: pointer to the buffer object pointer
501  * @xcp_id: GPU partition id
502  */
503 int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
504 			int level, bool immediate, struct amdgpu_bo_vm **vmbo,
505 			int32_t xcp_id)
506 {
507 	struct amdgpu_bo_param bp;
508 	struct amdgpu_bo *bo;
509 	struct dma_resv *resv;
510 	unsigned int num_entries;
511 	int r;
512 
513 	memset(&bp, 0, sizeof(bp));
514 
515 	bp.size = amdgpu_vm_pt_size(adev, level);
516 	bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
517 
518 	if (!adev->gmc.is_app_apu)
519 		bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
520 	else
521 		bp.domain = AMDGPU_GEM_DOMAIN_GTT;
522 
523 	bp.domain = amdgpu_bo_get_preferred_domain(adev, bp.domain);
524 	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
525 		AMDGPU_GEM_CREATE_CPU_GTT_USWC;
526 
527 	if (level < AMDGPU_VM_PTB)
528 		num_entries = amdgpu_vm_pt_num_entries(adev, level);
529 	else
530 		num_entries = 0;
531 
532 	bp.bo_ptr_size = struct_size((*vmbo), entries, num_entries);
533 
534 	if (vm->use_cpu_for_update)
535 		bp.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
536 
537 	bp.type = ttm_bo_type_kernel;
538 	bp.no_wait_gpu = immediate;
539 	bp.xcp_id_plus1 = xcp_id + 1;
540 
541 	if (vm->root.bo)
542 		bp.resv = vm->root.bo->tbo.base.resv;
543 
544 	r = amdgpu_bo_create_vm(adev, &bp, vmbo);
545 	if (r)
546 		return r;
547 
548 	bo = &(*vmbo)->bo;
549 	if (vm->is_compute_context || (adev->flags & AMD_IS_APU)) {
550 		(*vmbo)->shadow = NULL;
551 		return 0;
552 	}
553 
554 	if (!bp.resv)
555 		WARN_ON(dma_resv_lock(bo->tbo.base.resv,
556 				      NULL));
557 	resv = bp.resv;
558 	memset(&bp, 0, sizeof(bp));
559 	bp.size = amdgpu_vm_pt_size(adev, level);
560 	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
561 	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
562 	bp.type = ttm_bo_type_kernel;
563 	bp.resv = bo->tbo.base.resv;
564 	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
565 	bp.xcp_id_plus1 = xcp_id + 1;
566 
567 	r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow);
568 
569 	if (!resv)
570 		dma_resv_unlock(bo->tbo.base.resv);
571 
572 	if (r) {
573 		amdgpu_bo_unref(&bo);
574 		return r;
575 	}
576 
577 	amdgpu_bo_add_to_shadow_list(*vmbo);
578 
579 	return 0;
580 }
581 
582 /**
583  * amdgpu_vm_pt_alloc - Allocate a specific page table
584  *
585  * @adev: amdgpu_device pointer
586  * @vm: VM to allocate page tables for
587  * @cursor: Which page table to allocate
588  * @immediate: use an immediate update
589  *
590  * Make sure a specific page table or directory is allocated.
591  *
592  * Returns:
593  * 0 if the page table was allocated or was already present, negative errno
594  * if an error occurred.
595  */
596 static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
597 			      struct amdgpu_vm *vm,
598 			      struct amdgpu_vm_pt_cursor *cursor,
599 			      bool immediate)
600 {
601 	struct amdgpu_vm_bo_base *entry = cursor->entry;
602 	struct amdgpu_bo *pt_bo;
603 	struct amdgpu_bo_vm *pt;
604 	int r;
605 
606 	if (entry->bo)
607 		return 0;
608 
609 	amdgpu_vm_eviction_unlock(vm);
610 	r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt,
611 				vm->root.bo->xcp_id);
612 	amdgpu_vm_eviction_lock(vm);
613 	if (r)
614 		return r;
615 
616 	/* Keep a reference to the root directory to avoid
617 	 * freeing them up in the wrong order.
618 	 */
619 	pt_bo = &pt->bo;
620 	pt_bo->parent = amdgpu_bo_ref(cursor->parent->bo);
621 	amdgpu_vm_bo_base_init(entry, vm, pt_bo);
622 	r = amdgpu_vm_pt_clear(adev, vm, pt, immediate);
623 	if (r)
624 		goto error_free_pt;
625 
626 	return 0;
627 
628 error_free_pt:
629 	amdgpu_bo_unref(&pt->shadow);
630 	amdgpu_bo_unref(&pt_bo);
631 	return r;
632 }
633 
634 /**
635  * amdgpu_vm_pt_free - free one PD/PT
636  *
637  * @entry: PDE to free
638  */
639 static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
640 {
641 	struct amdgpu_bo *shadow;
642 
643 	if (!entry->bo)
644 		return;
645 
646 	entry->bo->vm_bo = NULL;
647 	shadow = amdgpu_bo_shadowed(entry->bo);
648 	if (shadow) {
649 		ttm_bo_set_bulk_move(&shadow->tbo, NULL);
650 		amdgpu_bo_unref(&shadow);
651 	}
652 	ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
653 
654 	spin_lock(&entry->vm->status_lock);
655 	list_del(&entry->vm_status);
656 	spin_unlock(&entry->vm->status_lock);
657 	amdgpu_bo_unref(&entry->bo);
658 }
659 
660 void amdgpu_vm_pt_free_work(struct work_struct *work)
661 {
662 	struct amdgpu_vm_bo_base *entry, *next;
663 	struct amdgpu_vm *vm;
664 	LIST_HEAD(pt_freed);
665 
666 	vm = container_of(work, struct amdgpu_vm, pt_free_work);
667 
668 	spin_lock(&vm->status_lock);
669 	list_splice_init(&vm->pt_freed, &pt_freed);
670 	spin_unlock(&vm->status_lock);
671 
672 	/* flush_work in amdgpu_vm_fini ensures vm->root.bo is valid. */
673 	amdgpu_bo_reserve(vm->root.bo, true);
674 
675 	list_for_each_entry_safe(entry, next, &pt_freed, vm_status)
676 		amdgpu_vm_pt_free(entry);
677 
678 	amdgpu_bo_unreserve(vm->root.bo);
679 }
680 
681 /**
682  * amdgpu_vm_pt_free_dfs - free PD/PT levels
683  *
684  * @adev: amdgpu device structure
685  * @vm: amdgpu vm structure
686  * @start: optional cursor where to start freeing PDs/PTs
687  * @unlocked: vm resv unlock status
688  *
689  * Free the page directory or page table level and all sub levels.
690  */
691 static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
692 				  struct amdgpu_vm *vm,
693 				  struct amdgpu_vm_pt_cursor *start,
694 				  bool unlocked)
695 {
696 	struct amdgpu_vm_pt_cursor cursor;
697 	struct amdgpu_vm_bo_base *entry;
698 
699 	if (unlocked) {
700 		spin_lock(&vm->status_lock);
701 		for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
702 			list_move(&entry->vm_status, &vm->pt_freed);
703 
704 		if (start)
705 			list_move(&start->entry->vm_status, &vm->pt_freed);
706 		spin_unlock(&vm->status_lock);
707 		schedule_work(&vm->pt_free_work);
708 		return;
709 	}
710 
711 	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
712 		amdgpu_vm_pt_free(entry);
713 
714 	if (start)
715 		amdgpu_vm_pt_free(start->entry);
716 }
717 
718 /**
719  * amdgpu_vm_pt_free_root - free root PD
720  * @adev: amdgpu device structure
721  * @vm: amdgpu vm structure
722  *
723  * Free the root page directory and everything below it.
724  */
725 void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm)
726 {
727 	amdgpu_vm_pt_free_dfs(adev, vm, NULL, false);
728 }
729 
730 /**
731  * amdgpu_vm_pt_is_root_clean - check if a root PD is clean
732  *
733  * @adev: amdgpu_device pointer
734  * @vm: the VM to check
735  *
736  * Check all entries of the root PD. If any subordinate PDs are allocated,
737  * page tables have been created and filled, so this is not a clean
738  * VM.
739  *
740  * Returns:
741  *	True if this VM is clean, false otherwise.
742  */
743 bool amdgpu_vm_pt_is_root_clean(struct amdgpu_device *adev,
744 				struct amdgpu_vm *vm)
745 {
746 	enum amdgpu_vm_level root = adev->vm_manager.root_level;
747 	unsigned int entries = amdgpu_vm_pt_num_entries(adev, root);
748 	unsigned int i = 0;
749 
750 	for (i = 0; i < entries; i++) {
751 		if (to_amdgpu_bo_vm(vm->root.bo)->entries[i].bo)
752 			return false;
753 	}
754 	return true;
755 }
756 
757 /**
758  * amdgpu_vm_pde_update - update a single level in the hierarchy
759  *
760  * @params: parameters for the update
761  * @entry: entry to update
762  *
763  * Makes sure the requested entry in parent is up to date.
764  */
765 int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
766 			 struct amdgpu_vm_bo_base *entry)
767 {
768 	struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry);
769 	struct amdgpu_bo *bo, *pbo;
770 	struct amdgpu_vm *vm = params->vm;
771 	uint64_t pde, pt, flags;
772 	unsigned int level;
773 
774 	if (WARN_ON(!parent))
775 		return -EINVAL;
776 
777 	bo = parent->bo;
778 	for (level = 0, pbo = bo->parent; pbo; ++level)
779 		pbo = pbo->parent;
780 
781 	level += params->adev->vm_manager.root_level;
782 	amdgpu_gmc_get_pde_for_bo(entry->bo, level, &pt, &flags);
783 	pde = (entry - to_amdgpu_bo_vm(parent->bo)->entries) * 8;
784 	return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt,
785 					1, 0, flags);
786 }
787 
788 /**
789  * amdgpu_vm_pte_update_noretry_flags - Update PTE no-retry flags
790  *
791  * @adev: amdgpu_device pointer
792  * @flags: pointer to PTE flags
793  *
794  * Update PTE no-retry flags when TF is enabled.
795  */
796 static void amdgpu_vm_pte_update_noretry_flags(struct amdgpu_device *adev,
797 						uint64_t *flags)
798 {
799 	/*
800 	 * Update no-retry flags with the corresponding TF
801 	 * no-retry combination.
802 	 */
803 	if ((*flags & AMDGPU_VM_NORETRY_FLAGS) == AMDGPU_VM_NORETRY_FLAGS) {
804 		*flags &= ~AMDGPU_VM_NORETRY_FLAGS;
805 		*flags |= adev->gmc.noretry_flags;
806 	}
807 }
808 
809 /*
810  * amdgpu_vm_pte_update_flags - figure out flags for PTE updates
811  *
812  * Make sure to set the right flags for the PTEs at the desired level.
813  */
814 static void amdgpu_vm_pte_update_flags(struct amdgpu_vm_update_params *params,
815 				       struct amdgpu_bo_vm *pt,
816 				       unsigned int level,
817 				       uint64_t pe, uint64_t addr,
818 				       unsigned int count, uint32_t incr,
819 				       uint64_t flags)
820 {
821 	struct amdgpu_device *adev = params->adev;
822 
823 	if (level != AMDGPU_VM_PTB) {
824 		flags |= AMDGPU_PDE_PTE;
825 		amdgpu_gmc_get_vm_pde(adev, level, &addr, &flags);
826 
827 	} else if (adev->asic_type >= CHIP_VEGA10 &&
828 		   !(flags & AMDGPU_PTE_VALID) &&
829 		   !(flags & AMDGPU_PTE_PRT)) {
830 
831 		/* Workaround for fault priority problem on GMC9 */
832 		flags |= AMDGPU_PTE_EXECUTABLE;
833 	}
834 
835 	/*
836 	 * Update no-retry flags to use the no-retry flag combination
837 	 * with TF enabled. The AMDGPU_VM_NORETRY_FLAGS flag combination
838 	 * does not work when TF is enabled. So, replace them with
839 	 * AMDGPU_VM_NORETRY_FLAGS_TF flag combination which works for
840 	 * all cases.
841 	 */
842 	if (level == AMDGPU_VM_PTB)
843 		amdgpu_vm_pte_update_noretry_flags(adev, &flags);
844 
845 	/* APUs mapping system memory may need different MTYPEs on different
846 	 * NUMA nodes. Only do this for contiguous ranges that can be assumed
847 	 * to be on the same NUMA node.
848 	 */
849 	if ((flags & AMDGPU_PTE_SYSTEM) && (adev->flags & AMD_IS_APU) &&
850 	    adev->gmc.gmc_funcs->override_vm_pte_flags &&
851 	    num_possible_nodes() > 1) {
852 		if (!params->pages_addr)
853 			amdgpu_gmc_override_vm_pte_flags(adev, params->vm,
854 							 addr, &flags);
855 		else
856 			dev_dbg(adev->dev,
857 				"override_vm_pte_flags skipped: non-contiguous\n");
858 	}
859 
860 	params->vm->update_funcs->update(params, pt, pe, addr, count, incr,
861 					 flags);
862 }
863 
864 /**
865  * amdgpu_vm_pte_fragment - get fragment for PTEs
866  *
867  * @params: see amdgpu_vm_update_params definition
868  * @start: first PTE to handle
869  * @end: last PTE to handle
870  * @flags: hw mapping flags
871  * @frag: resulting fragment size
872  * @frag_end: end of this fragment
873  *
874  * Returns the first possible fragment for the start and end address.
875  */
876 static void amdgpu_vm_pte_fragment(struct amdgpu_vm_update_params *params,
877 				   uint64_t start, uint64_t end, uint64_t flags,
878 				   unsigned int *frag, uint64_t *frag_end)
879 {
880 	/**
881 	 * The MC L1 TLB supports variable sized pages, based on a fragment
882 	 * field in the PTE. When this field is set to a non-zero value, page
883 	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
884 	 * flags are considered valid for all PTEs within the fragment range
885 	 * and corresponding mappings are assumed to be physically contiguous.
886 	 *
887 	 * The L1 TLB can store a single PTE for the whole fragment,
888 	 * significantly increasing the space available for translation
889 	 * caching. This leads to large improvements in throughput when the
890 	 * TLB is under pressure.
891 	 *
892 	 * The L2 TLB distributes small and large fragments into two
893 	 * asymmetric partitions. The large fragment cache is significantly
894 	 * larger. Thus, we try to use large fragments wherever possible.
895 	 * Userspace can support this by aligning virtual base address and
896 	 * allocation size to the fragment size.
897 	 *
898 	 * Starting with Vega10 the fragment size only controls the L1. The L2
899  * is now directly fed with small/huge/giant pages from the walker.
900 	 */
901 	unsigned int max_frag;
902 
903 	if (params->adev->asic_type < CHIP_VEGA10)
904 		max_frag = params->adev->vm_manager.fragment_size;
905 	else
906 		max_frag = 31;
907 
908 	/* system pages are not physically contiguous */
909 	if (params->pages_addr) {
910 		*frag = 0;
911 		*frag_end = end;
912 		return;
913 	}
914 
915 	/* This intentionally wraps around if no bit is set */
916 	*frag = min_t(unsigned int, ffs(start) - 1, fls64(end - start) - 1);
917 	if (*frag >= max_frag) {
918 		*frag = max_frag;
919 		*frag_end = end & ~((1ULL << max_frag) - 1);
920 	} else {
921 		*frag_end = start + (1 << *frag);
922 	}
923 }
924 
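/*
 * Worked example (a sketch, values are GPU page numbers): for start = 0x400
 * and end = 0x1000 with no pages_addr array, ffs(0x400) - 1 == 10 and
 * fls64(0xc00) - 1 == 11, so *frag = 10.  With max_frag = 31 (Vega10 and
 * later) this is below the limit, so *frag_end = 0x400 + (1 << 10) = 0x800,
 * i.e. pages [0x400, 0x800) can be mapped as one 4 MiB fragment
 * (granularity 1 << (12 + 10)).
 */
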
925 /**
926  * amdgpu_vm_ptes_update - make sure that page tables are valid
927  *
928  * @params: see amdgpu_vm_update_params definition
929  * @start: start of GPU address range
930  * @end: end of GPU address range
931  * @dst: destination address to map to, the next dst inside the function
932  * @flags: mapping flags
933  *
934  * Update the page tables in the range @start - @end.
935  *
936  * Returns:
937  * 0 for success, -EINVAL for failure.
938  */
939 int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
940 			  uint64_t start, uint64_t end,
941 			  uint64_t dst, uint64_t flags)
942 {
943 	struct amdgpu_device *adev = params->adev;
944 	struct amdgpu_vm_pt_cursor cursor;
945 	uint64_t frag_start = start, frag_end;
946 	unsigned int frag;
947 	int r;
948 
949 	/* figure out the initial fragment */
950 	amdgpu_vm_pte_fragment(params, frag_start, end, flags, &frag,
951 			       &frag_end);
952 
953 	/* walk over the address space and update the PTs */
954 	amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
955 	while (cursor.pfn < end) {
956 		unsigned int shift, parent_shift, mask;
957 		uint64_t incr, entry_end, pe_start;
958 		struct amdgpu_bo *pt;
959 
960 		if (!params->unlocked) {
961 			/* make sure that the page tables covering the
962 			 * address range are actually allocated
963 			 */
964 			r = amdgpu_vm_pt_alloc(params->adev, params->vm,
965 					       &cursor, params->immediate);
966 			if (r)
967 				return r;
968 		}
969 
970 		shift = amdgpu_vm_pt_level_shift(adev, cursor.level);
971 		parent_shift = amdgpu_vm_pt_level_shift(adev, cursor.level - 1);
972 		if (params->unlocked) {
973 			/* Unlocked updates are only allowed on the leaves */
974 			if (amdgpu_vm_pt_descendant(adev, &cursor))
975 				continue;
976 		} else if (adev->asic_type < CHIP_VEGA10 &&
977 			   (flags & AMDGPU_PTE_VALID)) {
978 			/* No huge page support before GMC v9 */
979 			if (cursor.level != AMDGPU_VM_PTB) {
980 				if (!amdgpu_vm_pt_descendant(adev, &cursor))
981 					return -ENOENT;
982 				continue;
983 			}
984 		} else if (frag < shift) {
985 			/* We can't use this level when the fragment size is
986 			 * smaller than the address shift. Go to the next
987 			 * child entry and try again.
988 			 */
989 			if (amdgpu_vm_pt_descendant(adev, &cursor))
990 				continue;
991 		} else if (frag >= parent_shift) {
992 			/* If the fragment size is even larger than the parent
993 			 * shift we should go up one level and check it again.
994 			 */
995 			if (!amdgpu_vm_pt_ancestor(&cursor))
996 				return -EINVAL;
997 			continue;
998 		}
999 
1000 		pt = cursor.entry->bo;
1001 		if (!pt) {
1002 			/* We need all PDs and PTs for mapping something, */
1003 			if (flags & AMDGPU_PTE_VALID)
1004 				return -ENOENT;
1005 
1006 			/* but unmapping something can happen at a higher
1007 			 * level.
1008 			 */
1009 			if (!amdgpu_vm_pt_ancestor(&cursor))
1010 				return -EINVAL;
1011 
1012 			pt = cursor.entry->bo;
1013 			shift = parent_shift;
1014 			frag_end = max(frag_end, ALIGN(frag_start + 1,
1015 				   1ULL << shift));
1016 		}
1017 
1018 		/* Looks good so far, calculate parameters for the update */
1019 		incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
1020 		mask = amdgpu_vm_pt_entries_mask(adev, cursor.level);
1021 		pe_start = ((cursor.pfn >> shift) & mask) * 8;
1022 		entry_end = ((uint64_t)mask + 1) << shift;
1023 		entry_end += cursor.pfn & ~(entry_end - 1);
1024 		entry_end = min(entry_end, end);
1025 
1026 		do {
1027 			struct amdgpu_vm *vm = params->vm;
1028 			uint64_t upd_end = min(entry_end, frag_end);
1029 			unsigned int nptes = (upd_end - frag_start) >> shift;
1030 			uint64_t upd_flags = flags | AMDGPU_PTE_FRAG(frag);
1031 
1032 			/* This can happen when we set higher level PDs to
1033 			 * silent to stop fault floods.
1034 			 */
1035 			nptes = max(nptes, 1u);
1036 
1037 			trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
1038 						    min(nptes, 32u), dst, incr,
1039 						    upd_flags,
1040 						    vm->task_info.tgid,
1041 						    vm->immediate.fence_context);
1042 			amdgpu_vm_pte_update_flags(params, to_amdgpu_bo_vm(pt),
1043 						   cursor.level, pe_start, dst,
1044 						   nptes, incr, upd_flags);
1045 
1046 			pe_start += nptes * 8;
1047 			dst += nptes * incr;
1048 
1049 			frag_start = upd_end;
1050 			if (frag_start >= frag_end) {
1051 				/* figure out the next fragment */
1052 				amdgpu_vm_pte_fragment(params, frag_start, end,
1053 						       flags, &frag, &frag_end);
1054 				if (frag < shift)
1055 					break;
1056 			}
1057 		} while (frag_start < entry_end);
1058 
1059 		if (amdgpu_vm_pt_descendant(adev, &cursor)) {
1060 			/* Free all child entries.
1061 			 * Update the tables with the flags and addresses and free up subsequent
1062 			 * tables in the case of huge pages or freed up areas.
1063 			 * This is the maximum you can free, because all other page tables are not
1064 			 * completely covered by the range and so potentially still in use.
1065 			 */
1066 			while (cursor.pfn < frag_start) {
1067 				/* Make sure previous mapping is freed */
1068 				if (cursor.entry->bo) {
1069 					params->table_freed = true;
1070 					amdgpu_vm_pt_free_dfs(adev, params->vm,
1071 							      &cursor,
1072 							      params->unlocked);
1073 				}
1074 				amdgpu_vm_pt_next(adev, &cursor);
1075 			}
1076 
1077 		} else if (frag >= shift) {
1078 			/* or just move on to the next on the same level. */
1079 			amdgpu_vm_pt_next(adev, &cursor);
1080 		}
1081 	}
1082 
1083 	return 0;
1084 }
1085 
1086 /**
1087  * amdgpu_vm_pt_map_tables - make the page table BOs CPU accessible
1088  * @adev: amdgpu device structure
1089  * @vm: amdgpu vm structure
1090  *
1091  * Make the root page directory and everything below it CPU accessible.
1092  */
1093 int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1094 {
1095 	struct amdgpu_vm_pt_cursor cursor;
1096 	struct amdgpu_vm_bo_base *entry;
1097 
1098 	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) {
1099 
1100 		struct amdgpu_bo_vm *bo;
1101 		int r;
1102 
1103 		if (entry->bo) {
1104 			bo = to_amdgpu_bo_vm(entry->bo);
1105 			r = vm->update_funcs->map_table(bo);
1106 			if (r)
1107 				return r;
1108 		}
1109 	}
1110 
1111 	return 0;
1112 }
1113