// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_vm.h"

/*
 * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
 */
struct amdgpu_vm_pt_cursor {
	uint64_t pfn;
	struct amdgpu_vm_bo_base *parent;
	struct amdgpu_vm_bo_base *entry;
	unsigned int level;
};

/**
 * amdgpu_vm_pt_level_shift - return the addr shift for each level
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of bits the pfn needs to be right shifted for a level.
 */
static unsigned int amdgpu_vm_pt_level_shift(struct amdgpu_device *adev,
					     unsigned int level)
{
	switch (level) {
	case AMDGPU_VM_PDB2:
	case AMDGPU_VM_PDB1:
	case AMDGPU_VM_PDB0:
		return 9 * (AMDGPU_VM_PDB0 - level) +
			adev->vm_manager.block_size;
	case AMDGPU_VM_PTB:
		return 0;
	default:
		return ~0;
	}
}
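
/*
 * Worked example (not part of the original source): assuming the default
 * 9-bit block size, amdgpu_vm_pt_level_shift() yields 27 for PDB2, 18 for
 * PDB1, 9 for PDB0 and 0 for PTB, i.e. each directory level selects the
 * next 9 bits of the pfn.
 */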

/**
 * amdgpu_vm_pt_num_entries - return the number of entries in a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of entries in a page directory or page table.
 */
static unsigned int amdgpu_vm_pt_num_entries(struct amdgpu_device *adev,
					     unsigned int level)
{
	unsigned int shift;

	shift = amdgpu_vm_pt_level_shift(adev, adev->vm_manager.root_level);
	if (level == adev->vm_manager.root_level)
		/* For the root directory */
		return round_up(adev->vm_manager.max_pfn, 1ULL << shift)
			>> shift;
	else if (level != AMDGPU_VM_PTB)
		/* Everything in between */
		return 512;

	/* For the page tables on the leaves */
	return AMDGPU_VM_PTE_COUNT(adev);
}
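
/*
 * For reference (assuming the default configuration): interior directories
 * always have 512 entries, and AMDGPU_VM_PTE_COUNT() expands to
 * 1 << block_size, so a leaf page table also holds 512 PTEs with the default
 * 9-bit block size. Only the root directory is sized from max_pfn.
 */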

/**
 * amdgpu_vm_pt_entries_mask - the mask to get the entry number of a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The mask to extract the entry number of a PD/PT from an address.
 */
static uint32_t amdgpu_vm_pt_entries_mask(struct amdgpu_device *adev,
					  unsigned int level)
{
	if (level <= adev->vm_manager.root_level)
		return 0xffffffff;
	else if (level != AMDGPU_VM_PTB)
		return 0x1ff;
	else
		return AMDGPU_VM_PTE_COUNT(adev) - 1;
}
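
/*
 * Note (for illustration only): the entry index is computed as
 * (pfn >> amdgpu_vm_pt_level_shift(adev, level)) & mask, see
 * amdgpu_vm_pt_descendant(). With a 9-bit block size the mask is 0x1ff for
 * every level below the root.
 */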

/**
 * amdgpu_vm_pt_size - returns the size of the page table in bytes
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The size of the BO for a page directory or page table in bytes.
 */
static unsigned int amdgpu_vm_pt_size(struct amdgpu_device *adev,
				      unsigned int level)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_pt_num_entries(adev, level) * 8);
}
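
/*
 * Example (not from the original source): each entry is an 8 byte PDE/PTE,
 * so a 512-entry directory or page table needs 512 * 8 = 4096 bytes, i.e.
 * exactly one GPU page with the usual 4 KiB page size.
 */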

/**
 * amdgpu_vm_pt_parent - get the parent page directory
 *
 * @pt: child page table
 *
 * Helper to get the parent entry for the child page table. NULL if we are at
 * the root page directory.
 */
static struct amdgpu_vm_bo_base *
amdgpu_vm_pt_parent(struct amdgpu_vm_bo_base *pt)
{
	struct amdgpu_bo *parent = pt->bo->parent;

	if (!parent)
		return NULL;

	return parent->vm_bo;
}

/**
 * amdgpu_vm_pt_start - start PD/PT walk
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm structure
 * @start: start address of the walk
 * @cursor: state to initialize
 *
 * Initialize an amdgpu_vm_pt_cursor to start a walk.
 */
static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm, uint64_t start,
			       struct amdgpu_vm_pt_cursor *cursor)
{
	cursor->pfn = start;
	cursor->parent = NULL;
	cursor->entry = &vm->root;
	cursor->level = adev->vm_manager.root_level;
}

/**
 * amdgpu_vm_pt_descendant - go to child node
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk to the child node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
				    struct amdgpu_vm_pt_cursor *cursor)
{
	unsigned int mask, shift, idx;

	if ((cursor->level == AMDGPU_VM_PTB) || !cursor->entry ||
	    !cursor->entry->bo)
		return false;

	mask = amdgpu_vm_pt_entries_mask(adev, cursor->level);
	shift = amdgpu_vm_pt_level_shift(adev, cursor->level);

	++cursor->level;
	idx = (cursor->pfn >> shift) & mask;
	cursor->parent = cursor->entry;
	cursor->entry = &to_amdgpu_bo_vm(cursor->entry->bo)->entries[idx];
	return true;
}

/**
 * amdgpu_vm_pt_sibling - go to sibling node
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk to the sibling node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
				 struct amdgpu_vm_pt_cursor *cursor)
{
	unsigned int shift, num_entries;
	struct amdgpu_bo_vm *parent;

	/* Root doesn't have a sibling */
	if (!cursor->parent)
		return false;

	/* Go to our parent and see if we have a sibling */
	shift = amdgpu_vm_pt_level_shift(adev, cursor->level - 1);
	num_entries = amdgpu_vm_pt_num_entries(adev, cursor->level - 1);
	parent = to_amdgpu_bo_vm(cursor->parent->bo);

	if (cursor->entry == &parent->entries[num_entries - 1])
		return false;

	cursor->pfn += 1ULL << shift;
	cursor->pfn &= ~((1ULL << shift) - 1);
	++cursor->entry;
	return true;
}

/**
 * amdgpu_vm_pt_ancestor - go to parent node
 *
 * @cursor: current state
 *
 * Walk to the parent node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
{
	if (!cursor->parent)
		return false;

	--cursor->level;
	cursor->entry = cursor->parent;
	cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
	return true;
}

/**
 * amdgpu_vm_pt_next - get next PD/PT in hierarchy
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk the PD/PT tree to the next node.
 */
static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
			      struct amdgpu_vm_pt_cursor *cursor)
{
	/* First try a newborn child */
	if (amdgpu_vm_pt_descendant(adev, cursor))
		return;

	/* If that didn't work, try to find a sibling */
	while (!amdgpu_vm_pt_sibling(adev, cursor)) {
		/* No sibling, go to our parents and grandparents */
		if (!amdgpu_vm_pt_ancestor(cursor)) {
			cursor->pfn = ~0ll;
			return;
		}
	}
}
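
/*
 * Summary (not part of the original source): amdgpu_vm_pt_next() performs a
 * pre-order step: descend into the child covering the current pfn if
 * possible, otherwise move to the next sibling, walking up through the
 * ancestors until a sibling is found. Setting pfn to ~0 marks the end of the
 * address space walk.
 */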

/**
 * amdgpu_vm_pt_first_dfs - start a depth-first search
 *
 * @adev: amdgpu_device structure
 * @vm: amdgpu_vm structure
 * @start: optional cursor to start with
 * @cursor: state to initialize
 *
 * Starts a depth-first traversal of the PD/PT tree.
 */
static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm,
				   struct amdgpu_vm_pt_cursor *start,
				   struct amdgpu_vm_pt_cursor *cursor)
{
	if (start)
		*cursor = *start;
	else
		amdgpu_vm_pt_start(adev, vm, 0, cursor);

	while (amdgpu_vm_pt_descendant(adev, cursor))
		;
}

/**
 * amdgpu_vm_pt_continue_dfs - check if the depth-first search should continue
 *
 * @start: starting point for the search
 * @entry: current entry
 *
 * Returns:
 * True when the search should continue, false otherwise.
 */
static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
				      struct amdgpu_vm_bo_base *entry)
{
	return entry && (!start || entry != start->entry);
}

/**
 * amdgpu_vm_pt_next_dfs - get the next node for a depth-first search
 *
 * @adev: amdgpu_device structure
 * @cursor: current state
 *
 * Move the cursor to the next node in a depth-first search.
 */
static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
				  struct amdgpu_vm_pt_cursor *cursor)
{
	if (!cursor->entry)
		return;

	if (!cursor->parent)
		cursor->entry = NULL;
	else if (amdgpu_vm_pt_sibling(adev, cursor))
		while (amdgpu_vm_pt_descendant(adev, cursor))
			;
	else
		amdgpu_vm_pt_ancestor(cursor);
}

/*
 * for_each_amdgpu_vm_pt_dfs_safe - safe depth-first search of all PDs/PTs
 */
#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)		\
	for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)),		\
	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
	     amdgpu_vm_pt_continue_dfs((start), (entry));			\
	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))
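
/*
 * Usage sketch (for illustration only, do_something() is a placeholder; see
 * e.g. amdgpu_vm_pt_free_root() below for a real caller):
 *
 *	struct amdgpu_vm_pt_cursor cursor;
 *	struct amdgpu_vm_bo_base *entry;
 *
 *	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
 *		if (entry)
 *			do_something(entry);
 *
 * The iteration is "safe" in the sense that the current entry may be freed,
 * because the cursor already points at the next node when the body runs.
 */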

/**
 * amdgpu_vm_pt_clear - initially clear the PDs/PTs
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to clear BO from
 * @vmbo: BO to clear
 * @immediate: use an immediate update
 *
 * Root PD needs to be reserved when calling this.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		       struct amdgpu_bo_vm *vmbo, bool immediate)
{
	unsigned int level = adev->vm_manager.root_level;
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_vm_update_params params;
	struct amdgpu_bo *ancestor = &vmbo->bo;
	unsigned int entries;
	struct amdgpu_bo *bo = &vmbo->bo;
	uint64_t addr;
	int r, idx;

	/* Figure out our place in the hierarchy */
	if (ancestor->parent) {
		++level;
		while (ancestor->parent->parent) {
			++level;
			ancestor = ancestor->parent;
		}
	}

	entries = amdgpu_bo_size(bo) / 8;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r)
		return r;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return -ENODEV;

	r = vm->update_funcs->map_table(vmbo);
	if (r)
		goto exit;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.immediate = immediate;

	r = vm->update_funcs->prepare(&params, NULL);
	if (r)
		goto exit;

	addr = 0;

	uint64_t value = 0, flags = 0;
	if (adev->asic_type >= CHIP_VEGA10) {
		if (level != AMDGPU_VM_PTB) {
			/* Handle leaf PDEs as PTEs */
			flags |= AMDGPU_PDE_PTE_FLAG(adev);
			amdgpu_gmc_get_vm_pde(adev, level,
					      &value, &flags);
		} else {
			/* Workaround for fault priority problem on GMC9 */
			flags = AMDGPU_PTE_EXECUTABLE;
		}
	}

	r = vm->update_funcs->update(&params, vmbo, addr, 0, entries,
				     value, flags);
	if (r)
		goto exit;

	r = vm->update_funcs->commit(&params, NULL);
exit:
	drm_dev_exit(idx);
	return r;
}

/**
 * amdgpu_vm_pt_create - create bo for PD/PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requesting vm
 * @level: the page table level
 * @immediate: use an immediate update
 * @vmbo: pointer to the buffer object pointer
 * @xcp_id: GPU partition id
 */
int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			int level, bool immediate, struct amdgpu_bo_vm **vmbo,
			int32_t xcp_id)
{
	struct amdgpu_bo_param bp;
	unsigned int num_entries;

	memset(&bp, 0, sizeof(bp));

	bp.size = amdgpu_vm_pt_size(adev, level);
	bp.byte_align = AMDGPU_GPU_PAGE_SIZE;

	if (!adev->gmc.is_app_apu)
		bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	else
		bp.domain = AMDGPU_GEM_DOMAIN_GTT;

	bp.domain = amdgpu_bo_get_preferred_domain(adev, bp.domain);
	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
		   AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	if (level < AMDGPU_VM_PTB)
		num_entries = amdgpu_vm_pt_num_entries(adev, level);
	else
		num_entries = 0;

	bp.bo_ptr_size = struct_size((*vmbo), entries, num_entries);

	if (vm->use_cpu_for_update)
		bp.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	bp.type = ttm_bo_type_kernel;
	bp.no_wait_gpu = immediate;
	bp.xcp_id_plus1 = xcp_id + 1;

	if (vm->root.bo)
		bp.resv = vm->root.bo->tbo.base.resv;

	return amdgpu_bo_create_vm(adev, &bp, vmbo);
}
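
/*
 * Design note (not part of the original source): only page directories get
 * the trailing entries[] array in struct amdgpu_bo_vm, sized here through
 * struct_size(); leaf page tables (AMDGPU_VM_PTB) have no children, so
 * num_entries is 0 and no array is allocated for them.
 */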

/**
 * amdgpu_vm_pt_alloc - Allocate a specific page table
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to allocate page tables for
 * @cursor: Which page table to allocate
 * @immediate: use an immediate update
 *
 * Make sure a specific page table or directory is allocated.
 *
 * Returns:
 * 0 on success (whether the page table was newly allocated or already
 * present), negative errno if an error occurred.
 */
static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
			      struct amdgpu_vm *vm,
			      struct amdgpu_vm_pt_cursor *cursor,
			      bool immediate)
{
	struct amdgpu_vm_bo_base *entry = cursor->entry;
	struct amdgpu_bo *pt_bo;
	struct amdgpu_bo_vm *pt;
	int r;

	if (entry->bo)
		return 0;

	amdgpu_vm_eviction_unlock(vm);
	r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt,
				vm->root.bo->xcp_id);
	amdgpu_vm_eviction_lock(vm);
	if (r)
		return r;

	/* Keep a reference to the parent directory to avoid
	 * freeing the page tables in the wrong order.
	 */
	pt_bo = &pt->bo;
	pt_bo->parent = amdgpu_bo_ref(cursor->parent->bo);
	amdgpu_vm_bo_base_init(entry, vm, pt_bo);
	r = amdgpu_vm_pt_clear(adev, vm, pt, immediate);
	if (r)
		goto error_free_pt;

	return 0;

error_free_pt:
	amdgpu_bo_unref(&pt_bo);
	return r;
}

/**
 * amdgpu_vm_pt_free - free one PD/PT
 *
 * @entry: PDE to free
 */
static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
{
	if (!entry->bo)
		return;

	entry->bo->vm_bo = NULL;
	ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);

	spin_lock(&entry->vm->status_lock);
	list_del(&entry->vm_status);
	spin_unlock(&entry->vm->status_lock);
	amdgpu_bo_unref(&entry->bo);
}

/**
 * amdgpu_vm_pt_free_list - free PD/PT levels
 *
 * @adev: amdgpu device structure
 * @params: see amdgpu_vm_update_params definition
 *
 * Free the page directory objects saved in the flush list
 */
void amdgpu_vm_pt_free_list(struct amdgpu_device *adev,
			    struct amdgpu_vm_update_params *params)
{
	struct amdgpu_vm_bo_base *entry, *next;
	bool unlocked = params->unlocked;

	if (list_empty(&params->tlb_flush_waitlist))
		return;

	/*
	 * An unlocked unmap only clears page table leaves; warn if we are
	 * asked to actually free page table entries here.
	 */
	WARN_ON(unlocked);

	list_for_each_entry_safe(entry, next, &params->tlb_flush_waitlist, vm_status)
		amdgpu_vm_pt_free(entry);
}

/**
 * amdgpu_vm_pt_add_list - add PD/PT level to the flush list
 *
 * @params: parameters for the update
 * @cursor: first PT entry to start the DFS from, must not be NULL
 *
 * This list will be freed after TLB flush.
 */
static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params,
				  struct amdgpu_vm_pt_cursor *cursor)
{
	struct amdgpu_vm_pt_cursor seek;
	struct amdgpu_vm_bo_base *entry;

	spin_lock(&params->vm->status_lock);
	for_each_amdgpu_vm_pt_dfs_safe(params->adev, params->vm, cursor, seek, entry) {
		if (entry && entry->bo)
			list_move(&entry->vm_status, &params->tlb_flush_waitlist);
	}

	/* finally add the start node itself */
	list_move(&cursor->entry->vm_status, &params->tlb_flush_waitlist);
	spin_unlock(&params->vm->status_lock);
}

/**
 * amdgpu_vm_pt_free_root - free root PD
 * @adev: amdgpu device structure
 * @vm: amdgpu vm structure
 *
 * Free the root page directory and everything below it.
 */
void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_vm_pt_cursor cursor;
	struct amdgpu_vm_bo_base *entry;

	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) {
		if (entry)
			amdgpu_vm_pt_free(entry);
	}
}

/**
 * amdgpu_vm_pde_update - update a single level in the hierarchy
 *
 * @params: parameters for the update
 * @entry: entry to update
 *
 * Makes sure the requested entry in parent is up to date.
 */
int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
			 struct amdgpu_vm_bo_base *entry)
{
	struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry);
	struct amdgpu_bo *bo, *pbo;
	struct amdgpu_vm *vm = params->vm;
	uint64_t pde, pt, flags;
	unsigned int level;

	if (WARN_ON(!parent))
		return -EINVAL;

	bo = parent->bo;
	for (level = 0, pbo = bo->parent; pbo; ++level)
		pbo = pbo->parent;

	level += params->adev->vm_manager.root_level;
	amdgpu_gmc_get_pde_for_bo(entry->bo, level, &pt, &flags);
	pde = (entry - to_amdgpu_bo_vm(parent->bo)->entries) * 8;
	return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt,
					1, 0, flags);
}
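
/*
 * Note (not part of the original source): 'pde' above is the byte offset of
 * the entry inside the parent directory BO. Each PDE is 8 bytes, so an entry
 * at index N lives at byte offset N * 8, which is what the pointer
 * subtraction against the entries[] array computes.
 */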

/**
 * amdgpu_vm_pte_update_noretry_flags - Update PTE no-retry flags
 *
 * @adev: amdgpu_device pointer
 * @flags: pointer to PTE flags
 *
 * Update PTE no-retry flags when TF is enabled.
 */
static void amdgpu_vm_pte_update_noretry_flags(struct amdgpu_device *adev,
					       uint64_t *flags)
{
	/*
	 * Update no-retry flags with the corresponding TF
	 * no-retry combination.
	 */
	if ((*flags & AMDGPU_VM_NORETRY_FLAGS) == AMDGPU_VM_NORETRY_FLAGS) {
		*flags &= ~AMDGPU_VM_NORETRY_FLAGS;
		*flags |= adev->gmc.noretry_flags;
	}
}

/*
 * amdgpu_vm_pte_update_flags - figure out flags for PTE updates
 *
 * Make sure to set the right flags for the PTEs at the desired level.
 */
static void amdgpu_vm_pte_update_flags(struct amdgpu_vm_update_params *params,
				       struct amdgpu_bo_vm *pt,
				       unsigned int level,
				       uint64_t pe, uint64_t addr,
				       unsigned int count, uint32_t incr,
				       uint64_t flags)
{
	struct amdgpu_device *adev = params->adev;

	if (level != AMDGPU_VM_PTB) {
		flags |= AMDGPU_PDE_PTE_FLAG(params->adev);
		amdgpu_gmc_get_vm_pde(adev, level, &addr, &flags);

	} else if (adev->asic_type >= CHIP_VEGA10 &&
		   !(flags & AMDGPU_PTE_VALID) &&
		   !(flags & AMDGPU_PTE_PRT_FLAG(params->adev))) {

		/* Workaround for fault priority problem on GMC9 */
		flags |= AMDGPU_PTE_EXECUTABLE;
	}

	/*
	 * Update no-retry flags to use the no-retry flag combination
	 * with TF enabled. The AMDGPU_VM_NORETRY_FLAGS flag combination
	 * does not work when TF is enabled. So, replace them with
	 * AMDGPU_VM_NORETRY_FLAGS_TF flag combination which works for
	 * all cases.
	 */
	if (level == AMDGPU_VM_PTB)
		amdgpu_vm_pte_update_noretry_flags(adev, &flags);

	/* APUs mapping system memory may need different MTYPEs on different
	 * NUMA nodes. Only do this for contiguous ranges that can be assumed
	 * to be on the same NUMA node.
	 */
	if ((flags & AMDGPU_PTE_SYSTEM) && (adev->flags & AMD_IS_APU) &&
	    adev->gmc.gmc_funcs->override_vm_pte_flags &&
	    num_possible_nodes() > 1 && !params->pages_addr && params->allow_override)
		amdgpu_gmc_override_vm_pte_flags(adev, params->vm, addr, &flags);

	params->vm->update_funcs->update(params, pt, pe, addr, count, incr,
					 flags);
}

/**
 * amdgpu_vm_pte_fragment - get fragment for PTEs
 *
 * @params: see amdgpu_vm_update_params definition
 * @start: first PTE to handle
 * @end: last PTE to handle
 * @flags: hw mapping flags
 * @frag: resulting fragment size
 * @frag_end: end of this fragment
 *
 * Returns the first possible fragment for the start and end address.
 */
static void amdgpu_vm_pte_fragment(struct amdgpu_vm_update_params *params,
				   uint64_t start, uint64_t end, uint64_t flags,
				   unsigned int *frag, uint64_t *frag_end)
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 *
	 * Starting with Vega10 the fragment size only controls the L1. The L2
	 * is now directly fed with small/huge/giant pages from the walker.
	 */
	unsigned int max_frag;

	if (params->adev->asic_type < CHIP_VEGA10)
		max_frag = params->adev->vm_manager.fragment_size;
	else
		max_frag = 31;

	/* system pages are not necessarily contiguous */
	if (params->pages_addr) {
		*frag = 0;
		*frag_end = end;
		return;
	}

	/* This intentionally wraps around if no bit is set */
	*frag = min_t(unsigned int, ffs(start) - 1, fls64(end - start) - 1);
	if (*frag >= max_frag) {
		*frag = max_frag;
		*frag_end = end & ~((1ULL << max_frag) - 1);
	} else {
		*frag_end = start + (1 << *frag);
	}
}
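
/*
 * Worked example (for illustration only): mapping 512 contiguous pages
 * (2 MiB) whose start pfn is a non-zero multiple of 512 gives
 * ffs(start) - 1 >= 9 and fls64(end - start) - 1 = 9, so frag = 9 and the
 * PTEs advertise a 4 KiB << 9 = 2 MiB fragment to the L1 TLB, with
 * frag_end = start + 512 = end covering the whole range at once.
 */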

/**
 * amdgpu_vm_ptes_update - make sure that page tables are valid
 *
 * @params: see amdgpu_vm_update_params definition
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to, the next dst inside the function
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
 */
int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
			  uint64_t start, uint64_t end,
			  uint64_t dst, uint64_t flags)
{
	struct amdgpu_device *adev = params->adev;
	struct amdgpu_vm_pt_cursor cursor;
	uint64_t frag_start = start, frag_end;
	unsigned int frag;
	int r;

	/* figure out the initial fragment */
	amdgpu_vm_pte_fragment(params, frag_start, end, flags, &frag,
			       &frag_end);

	/* walk over the address space and update the PTs */
	amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
	while (cursor.pfn < end) {
		unsigned int shift, parent_shift, mask;
		uint64_t incr, entry_end, pe_start;
		struct amdgpu_bo *pt;

		if (!params->unlocked) {
			/* make sure that the page tables covering the
			 * address range are actually allocated
			 */
			r = amdgpu_vm_pt_alloc(params->adev, params->vm,
					       &cursor, params->immediate);
			if (r)
				return r;
		}

		shift = amdgpu_vm_pt_level_shift(adev, cursor.level);
		parent_shift = amdgpu_vm_pt_level_shift(adev, cursor.level - 1);
		if (params->unlocked) {
			/* Unlocked updates are only allowed on the leaves */
			if (amdgpu_vm_pt_descendant(adev, &cursor))
				continue;
		} else if (adev->asic_type < CHIP_VEGA10 &&
			   (flags & AMDGPU_PTE_VALID)) {
			/* No huge page support before GMC v9 */
			if (cursor.level != AMDGPU_VM_PTB) {
				if (!amdgpu_vm_pt_descendant(adev, &cursor))
					return -ENOENT;
				continue;
			}
		} else if (frag < shift) {
			/* We can't use this level when the fragment size is
			 * smaller than the address shift. Go to the next
			 * child entry and try again.
			 */
			if (amdgpu_vm_pt_descendant(adev, &cursor))
				continue;
		} else if (frag >= parent_shift) {
			/* If the fragment size is even larger than the parent
			 * shift we should go up one level and check it again.
			 */
			if (!amdgpu_vm_pt_ancestor(&cursor))
				return -EINVAL;
			continue;
		}

		pt = cursor.entry->bo;
		if (!pt) {
			/* We need all PDs and PTs for mapping something, */
			if (flags & AMDGPU_PTE_VALID)
				return -ENOENT;

			/* but unmapping something can happen at a higher
			 * level.
			 */
			if (!amdgpu_vm_pt_ancestor(&cursor))
				return -EINVAL;

			pt = cursor.entry->bo;
			shift = parent_shift;
			frag_end = max(frag_end, ALIGN(frag_start + 1,
				       1ULL << shift));
		}

		/* Looks good so far, calculate parameters for the update */
		incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
		mask = amdgpu_vm_pt_entries_mask(adev, cursor.level);
		pe_start = ((cursor.pfn >> shift) & mask) * 8;

		if (cursor.level < AMDGPU_VM_PTB && params->unlocked)
			/*
			 * An unlocked unmap from the MMU notifier can hit a
			 * huge page whose leaf is a PDE; only clear that one
			 * entry and then search again for the next PDE or
			 * PTE leaf.
			 */
			entry_end = 1ULL << shift;
		else
			entry_end = ((uint64_t)mask + 1) << shift;
		entry_end += cursor.pfn & ~(entry_end - 1);
		entry_end = min(entry_end, end);

		do {
			struct amdgpu_vm *vm = params->vm;
			uint64_t upd_end = min(entry_end, frag_end);
			unsigned int nptes = (upd_end - frag_start) >> shift;
			uint64_t upd_flags = flags | AMDGPU_PTE_FRAG(frag);

			/* This can happen when we set higher level PDs to
			 * silent to stop fault floods.
			 */
			nptes = max(nptes, 1u);

			trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
						    min(nptes, 32u), dst, incr,
						    upd_flags,
						    vm->task_info ? vm->task_info->tgid : 0,
						    vm->immediate.fence_context);
			amdgpu_vm_pte_update_flags(params, to_amdgpu_bo_vm(pt),
						   cursor.level, pe_start, dst,
						   nptes, incr, upd_flags);

			pe_start += nptes * 8;
			dst += nptes * incr;

			frag_start = upd_end;
			if (frag_start >= frag_end) {
				/* figure out the next fragment */
				amdgpu_vm_pte_fragment(params, frag_start, end,
						       flags, &frag, &frag_end);
				if (frag < shift)
					break;
			}
		} while (frag_start < entry_end);

		if (amdgpu_vm_pt_descendant(adev, &cursor)) {
			/* Free all child entries.
			 * Update the tables with the flags and addresses and free up subsequent
			 * tables in the case of huge pages or freed up areas.
			 * This is the maximum you can free, because all other page tables are not
			 * completely covered by the range and so potentially still in use.
			 */
			while (cursor.pfn < frag_start) {
				/* Make sure previous mapping is freed */
				if (cursor.entry->bo) {
					params->needs_flush = true;
					amdgpu_vm_pt_add_list(params, &cursor);
				}
				amdgpu_vm_pt_next(adev, &cursor);
			}

		} else if (frag >= shift) {
			/* or just move on to the next on the same level. */
			amdgpu_vm_pt_next(adev, &cursor);
		}
	}

	return 0;
}

/**
 * amdgpu_vm_pt_map_tables - make the BOs of the root PD and below CPU accessible
 * @adev: amdgpu device structure
 * @vm: amdgpu vm structure
 *
 * Make the root page directory and everything below it CPU accessible.
 */
int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_vm_pt_cursor cursor;
	struct amdgpu_vm_bo_base *entry;

	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) {
		struct amdgpu_bo_vm *bo;
		int r;

		if (entry->bo) {
			bo = to_amdgpu_bo_vm(entry->bo);
			r = vm->update_funcs->map_table(bo);
			if (r)
				return r;
		}
	}

	return 0;
}