/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/io-64-nonatomic-lo-hi.h>

#include "amdgpu.h"
#include "amdgpu_gmc.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"

#include <drm/drm_drv.h>

/**
 * amdgpu_gmc_pdb0_alloc - allocate vram for pdb0
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate video memory for pdb0 and map it for CPU access
 * Returns 0 for success, error for failure.
 */
int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev)
{
	int r;
	struct amdgpu_bo_param bp;
	u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
	uint32_t pde0_page_shift = adev->gmc.vmid0_page_table_block_size + 21;
	uint32_t npdes = (vram_size + (1ULL << pde0_page_shift) - 1) >> pde0_page_shift;
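	/*
	 * Each PDE0 entry covers (2^vmid0_page_table_block_size) * 2MB of
	 * hive VRAM, so npdes entries are needed to map all of it; one
	 * extra entry is reserved for the PDE pointing to the GART PTB.
	 * Every entry is 8 bytes, hence the (npdes + 1) * 8 size below.
	 */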

	memset(&bp, 0, sizeof(bp));
	bp.size = PAGE_ALIGN((npdes + 1) * 8);
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_create(adev, &bp, &adev->gmc.pdb0_bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(adev->gmc.pdb0_bo, false);
	if (unlikely(r != 0))
		goto bo_reserve_failure;

	r = amdgpu_bo_pin(adev->gmc.pdb0_bo, AMDGPU_GEM_DOMAIN_VRAM);
	if (r)
		goto bo_pin_failure;
	r = amdgpu_bo_kmap(adev->gmc.pdb0_bo, &adev->gmc.ptr_pdb0);
	if (r)
		goto bo_kmap_failure;

	amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
	return 0;

bo_kmap_failure:
	amdgpu_bo_unpin(adev->gmc.pdb0_bo);
bo_pin_failure:
	amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
bo_reserve_failure:
	amdgpu_bo_unref(&adev->gmc.pdb0_bo);
	return r;
}

/**
 * amdgpu_gmc_get_pde_for_bo - get the PDE for a BO
 *
 * @bo: the BO to get the PDE for
 * @level: the level in the PD hierarchy
 * @addr: resulting addr
 * @flags: resulting flags
 *
 * Get the address and flags to be used for a PDE (Page Directory Entry).
 */
void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
			       uint64_t *addr, uint64_t *flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	switch (bo->tbo.resource->mem_type) {
	case TTM_PL_TT:
		*addr = bo->tbo.ttm->dma_address[0];
		break;
	case TTM_PL_VRAM:
		*addr = amdgpu_bo_gpu_offset(bo);
		break;
	default:
		*addr = 0;
		break;
	}
	*flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, bo->tbo.resource);
	amdgpu_gmc_get_vm_pde(adev, level, addr, flags);
}

/*
 * amdgpu_gmc_pd_addr - return the address of the root directory
 */
uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint64_t pd_addr;

	/* TODO: move that into ASIC specific code */
	if (adev->asic_type >= CHIP_VEGA10) {
		uint64_t flags = AMDGPU_PTE_VALID;

		amdgpu_gmc_get_pde_for_bo(bo, -1, &pd_addr, &flags);
		pd_addr |= flags;
	} else {
		pd_addr = amdgpu_bo_gpu_offset(bo);
	}
	return pd_addr;
}

/**
 * amdgpu_gmc_set_pte_pde - update the page tables using CPU
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using CPU.
 */
int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
				uint32_t gpu_page_idx, uint64_t addr,
				uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;
	int idx;

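	/*
	 * drm_dev_enter() fails once the device has been unplugged, in
	 * which case the write is simply skipped.
	 */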
	if (!drm_dev_enter(&adev->ddev, &idx))
		return 0;

	/*
	 * The following is for PTE only. GART does not have PDEs.
	 */
	value = addr & 0x0000FFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	drm_dev_exit(idx);

	return 0;
}

/**
 * amdgpu_gmc_agp_addr - return the address in the AGP address space
 *
 * @bo: TTM BO which needs the address, must be in GTT domain
 *
 * Tries to figure out how to access the BO through the AGP aperture. Returns
 * AMDGPU_BO_INVALID_OFFSET if that is not possible.
 */
uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);

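	/*
	 * Only single-page BOs that are not CPU cached and whose DMA
	 * address fits inside the AGP aperture can be accessed this way.
	 */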
	if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached)
		return AMDGPU_BO_INVALID_OFFSET;

	if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
		return AMDGPU_BO_INVALID_OFFSET;

	return adev->gmc.agp_start + bo->ttm->dma_address[0];
}

/**
 * amdgpu_gmc_vram_location - try to find VRAM location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at base address provided
 * as parameter.
 */
void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
			      u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;

	if (mc->xgmi.num_physical_nodes == 0) {
		mc->fb_start = mc->vram_start;
		mc->fb_end = mc->vram_end;
	}
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_gmc_sysvm_location - place vram and gart in sysvm aperture
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function is only used when GART is used for FB translation. In such
 * a case, we use the sysvm aperture (vmid0 page tables) for both vram
 * and gart (aka system memory) access.
 *
 * GPUVM (and our organization of vmid0 page tables) requires the sysvm
 * aperture to be placed at a location aligned to 8 times the native
 * page size. For example, if vm_context0_cntl.page_table_block_size
 * is 12, then the native page size is 8G (2M * 2^12), and sysvm should
 * start at a 64G aligned address. For simplicity, we just put sysvm at
 * address 0. So vram starts at address 0 and gart is right after vram.
 */
void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
{
	u64 hive_vram_start = 0;
	u64 hive_vram_end = mc->xgmi.node_segment_size * mc->xgmi.num_physical_nodes - 1;
	mc->vram_start = mc->xgmi.node_segment_size * mc->xgmi.physical_node_id;
	mc->vram_end = mc->vram_start + mc->xgmi.node_segment_size - 1;
	mc->gart_start = hive_vram_end + 1;
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	mc->fb_start = hive_vram_start;
	mc->fb_end = hive_vram_end;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
	dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
			mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}

/**
 * amdgpu_gmc_gart_location - try to find GART location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GART before or after VRAM.
 * If GART size is bigger than the space left then we adjust the GART size.
 * Thus this function will never fail.
 */
void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
{
	const uint64_t four_gb = 0x100000000ULL;
	u64 size_af, size_bf;
	/* To avoid the hole, limit the max mc address to AMDGPU_GMC_HOLE_START */
	u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1);

	/* VCE doesn't like it when BOs cross a 4GB segment, so align
	 * the GART base on a 4GB boundary as well.
	 */
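	/* Space available below the FB region and above it, respectively */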
	size_bf = mc->fb_start;
	size_af = max_mc_address + 1 - ALIGN(mc->fb_end + 1, four_gb);

	if (mc->gart_size > max(size_bf, size_af)) {
		dev_warn(adev->dev, "limiting GART\n");
		mc->gart_size = max(size_bf, size_af);
	}

	if ((size_bf >= mc->gart_size && size_bf < size_af) ||
	    (size_af < mc->gart_size))
		mc->gart_start = 0;
	else
		mc->gart_start = max_mc_address - mc->gart_size + 1;

	mc->gart_start &= ~(four_gb - 1);
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
			mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}

/**
 * amdgpu_gmc_agp_location - try to find AGP location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to find a place for the AGP BAR in the MC address
 * space.
 *
 * AGP BAR will be assigned the largest available hole in the address space.
 * Should be called after VRAM and GART locations are setup.
 */
void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
{
	const uint64_t sixteen_gb = 1ULL << 34;
	const uint64_t sixteen_gb_mask = ~(sixteen_gb - 1);
	u64 size_af, size_bf;

	if (amdgpu_sriov_vf(adev)) {
		mc->agp_start = 0xffffffffffff;
		mc->agp_end = 0x0;
		mc->agp_size = 0;

		return;
	}

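	/*
	 * Measure the 16GB aligned holes before and after the FB range,
	 * taking the GART placement into account, and use the larger one
	 * for the AGP aperture.
	 */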
	if (mc->fb_start > mc->gart_start) {
		size_bf = (mc->fb_start & sixteen_gb_mask) -
			ALIGN(mc->gart_end + 1, sixteen_gb);
		size_af = mc->mc_mask + 1 - ALIGN(mc->fb_end + 1, sixteen_gb);
	} else {
		size_bf = mc->fb_start & sixteen_gb_mask;
		size_af = (mc->gart_start & sixteen_gb_mask) -
			ALIGN(mc->fb_end + 1, sixteen_gb);
	}

	if (size_bf > size_af) {
		mc->agp_start = (mc->fb_start - size_bf) & sixteen_gb_mask;
		mc->agp_size = size_bf;
	} else {
		mc->agp_start = ALIGN(mc->fb_end + 1, sixteen_gb);
		mc->agp_size = size_af;
	}

	mc->agp_end = mc->agp_start + mc->agp_size - 1;
	dev_info(adev->dev, "AGP: %lluM 0x%016llX - 0x%016llX\n",
			mc->agp_size >> 20, mc->agp_start, mc->agp_end);
}

/**
 * amdgpu_gmc_fault_key - get hash key from vm fault address and pasid
 *
 * @addr: 48 bit physical address, page aligned (36 significant bits)
 * @pasid: 16 bit process address space identifier
 */
static inline uint64_t amdgpu_gmc_fault_key(uint64_t addr, uint16_t pasid)
{
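	/*
	 * addr is page aligned, so its low 12 bits are zero; shifting it
	 * left by 4 leaves the low 16 bits free for the pasid.
	 */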
	return addr << 4 | pasid;
}

/**
 * amdgpu_gmc_filter_faults - filter VM faults
 *
 * @adev: amdgpu device structure
 * @addr: address of the VM fault
 * @pasid: PASID of the process causing the fault
 * @timestamp: timestamp of the fault
 *
 * Returns:
 * True if the fault was filtered and should not be processed further.
 * False if the fault is a new one and needs to be handled.
 */
bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
			      uint16_t pasid, uint64_t timestamp)
{
	struct amdgpu_gmc *gmc = &adev->gmc;
	uint64_t stamp, key = amdgpu_gmc_fault_key(addr, pasid);
	struct amdgpu_gmc_fault *fault;
	uint32_t hash;

	/* If we don't have space left in the ring buffer return immediately */
	stamp = max(timestamp, AMDGPU_GMC_FAULT_TIMEOUT + 1) -
		AMDGPU_GMC_FAULT_TIMEOUT;
	if (gmc->fault_ring[gmc->last_fault].timestamp >= stamp)
		return true;

	/* Try to find the fault in the hash */
	hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
	fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
	while (fault->timestamp >= stamp) {
		uint64_t tmp;

		if (atomic64_read(&fault->key) == key)
			return true;

		tmp = fault->timestamp;
		fault = &gmc->fault_ring[fault->next];

		/* Check if the entry was reused */
		if (fault->timestamp >= tmp)
			break;
	}

	/* Add the fault to the ring */
	fault = &gmc->fault_ring[gmc->last_fault];
	atomic64_set(&fault->key, key);
	fault->timestamp = timestamp;

	/* And update the hash */
	fault->next = gmc->fault_hash[hash].idx;
	gmc->fault_hash[hash].idx = gmc->last_fault++;
	return false;
}

/**
 * amdgpu_gmc_filter_faults_remove - remove address from VM faults filter
 *
 * @adev: amdgpu device structure
 * @addr: address of the VM fault
 * @pasid: PASID of the process causing the fault
 *
 * Remove the address from the fault filter, so that future VM faults on this
 * address are passed to the retry fault handler for recovery.
 */
void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
				     uint16_t pasid)
{
	struct amdgpu_gmc *gmc = &adev->gmc;
	uint64_t key = amdgpu_gmc_fault_key(addr, pasid);
	struct amdgpu_gmc_fault *fault;
	uint32_t hash;
	uint64_t tmp;

	hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
	fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
	do {
		if (atomic64_cmpxchg(&fault->key, key, 0) == key)
			break;

		tmp = fault->timestamp;
		fault = &gmc->fault_ring[fault->next];
	} while (fault->timestamp < tmp);
}

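/**
 * amdgpu_gmc_ras_late_init - perform late RAS init for GMC related blocks
 *
 * @adev: amdgpu device structure
 *
 * Call the ras_late_init callbacks of the UMC, MMHUB, XGMI, HDP and MCA
 * blocks, if they are provided. Returns 0 on success or the first error.
 */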
int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->umc.ras_funcs &&
	    adev->umc.ras_funcs->ras_late_init) {
		r = adev->umc.ras_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	if (adev->mmhub.ras_funcs &&
	    adev->mmhub.ras_funcs->ras_late_init) {
		r = adev->mmhub.ras_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	if (!adev->gmc.xgmi.connected_to_cpu)
		adev->gmc.xgmi.ras_funcs = &xgmi_ras_funcs;

	if (adev->gmc.xgmi.ras_funcs &&
	    adev->gmc.xgmi.ras_funcs->ras_late_init) {
		r = adev->gmc.xgmi.ras_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	if (adev->hdp.ras_funcs &&
	    adev->hdp.ras_funcs->ras_late_init) {
		r = adev->hdp.ras_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	if (adev->mca.mp0.ras_funcs &&
	    adev->mca.mp0.ras_funcs->ras_late_init) {
		r = adev->mca.mp0.ras_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	if (adev->mca.mp1.ras_funcs &&
	    adev->mca.mp1.ras_funcs->ras_late_init) {
		r = adev->mca.mp1.ras_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	if (adev->mca.mpio.ras_funcs &&
	    adev->mca.mpio.ras_funcs->ras_late_init) {
		r = adev->mca.mpio.ras_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	return 0;
}

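/**
 * amdgpu_gmc_ras_fini - tear down RAS state for GMC related blocks
 *
 * @adev: amdgpu device structure
 *
 * Call the ras_fini callbacks of the UMC, MMHUB, XGMI and HDP blocks,
 * if they are provided.
 */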
void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
{
	if (adev->umc.ras_funcs &&
	    adev->umc.ras_funcs->ras_fini)
		adev->umc.ras_funcs->ras_fini(adev);

	if (adev->mmhub.ras_funcs &&
	    adev->mmhub.ras_funcs->ras_fini)
		adev->mmhub.ras_funcs->ras_fini(adev);

	if (adev->gmc.xgmi.ras_funcs &&
	    adev->gmc.xgmi.ras_funcs->ras_fini)
		adev->gmc.xgmi.ras_funcs->ras_fini(adev);

	if (adev->hdp.ras_funcs &&
	    adev->hdp.ras_funcs->ras_fini)
		adev->hdp.ras_funcs->ras_fini(adev);
}

	/*
	 * The latest engine allocation on gfx9/10 is:
	 * Engine 2, 3: firmware
	 * Engine 0, 1, 4~16: amdgpu ring,
	 *                    subject to change when ring number changes
	 * Engine 17: Gart flushes
	 */
#define GFXHUB_FREE_VM_INV_ENGS_BITMAP		0x1FFF3
#define MMHUB_FREE_VM_INV_ENGS_BITMAP		0x1FFF3

int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
		{GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
		GFXHUB_FREE_VM_INV_ENGS_BITMAP};
	unsigned i;
	unsigned vmhub, inv_eng;

	for (i = 0; i < adev->num_rings; ++i) {
		ring = adev->rings[i];
		vmhub = ring->funcs->vmhub;

		if (ring == &adev->mes.ring)
			continue;

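		/* Pick the lowest free invalidation engine from this hub's bitmap */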
		inv_eng = ffs(vm_inv_engs[vmhub]);
		if (!inv_eng) {
			dev_err(adev->dev, "no VM inv eng for ring %s\n",
				ring->name);
			return -EINVAL;
		}

		ring->vm_inv_eng = inv_eng - 1;
		vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);

		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
	}

	return 0;
}

/**
 * amdgpu_gmc_tmz_set -- check and set if a device supports TMZ
 * @adev: amdgpu_device pointer
 *
 * Check and set if the device @adev supports Trusted Memory
 * Zones (TMZ).
 */
void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		if (amdgpu_tmz == 0) {
			adev->gmc.tmz_enabled = false;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature disabled (cmd line)\n");
		} else {
			adev->gmc.tmz_enabled = true;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature enabled\n");
		}
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_VANGOGH:
	case CHIP_YELLOW_CARP:
		/* Don't enable it by default yet. */
		if (amdgpu_tmz < 1) {
			adev->gmc.tmz_enabled = false;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature disabled as experimental (default)\n");
		} else {
			adev->gmc.tmz_enabled = true;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature enabled as experimental (cmd line)\n");
		}
		break;
	default:
		adev->gmc.tmz_enabled = false;
		dev_info(adev->dev,
			 "Trusted Memory Zone (TMZ) feature not supported\n");
		break;
	}
}

/**
 * amdgpu_gmc_noretry_set -- set per asic noretry defaults
 * @adev: amdgpu_device pointer
 *
 * Set a per asic default for the no-retry parameter.
 */
void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
{
	struct amdgpu_gmc *gmc = &adev->gmc;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
	case CHIP_ALDEBARAN:
		/*
		 * noretry = 0 will cause kfd page fault tests to fail
		 * for some ASICs, so set the default to 1 for these ASICs.
		 */
		if (amdgpu_noretry == -1)
			gmc->noretry = 1;
		else
			gmc->noretry = amdgpu_noretry;
		break;
	case CHIP_RAVEN:
	default:
		/* Raven currently has issues with noretry;
		 * regardless of what we decide for other
		 * asics, we should leave Raven with
		 * noretry = 0 until we root cause the
		 * issues.
		 *
		 * default this to 0 for now, but we may want
		 * to change this in the future for certain
		 * GPUs as it can increase performance in
		 * certain cases.
		 */
		if (amdgpu_noretry == -1)
			gmc->noretry = 0;
		else
			gmc->noretry = amdgpu_noretry;
		break;
	}
}

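/**
 * amdgpu_gmc_set_vm_fault_masks - enable or disable VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @hub_type: which VM hub to program (GFXHUB or MMHUB)
 * @enable: enable or disable the fault bits
 *
 * Walk the 16 VM contexts of the selected hub and set or clear the
 * VM fault bits in each context's CNTL register.
 */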
void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
				   bool enable)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, i;

	hub = &adev->vmhub[hub_type];
	for (i = 0; i < 16; i++) {
		reg = hub->vm_context0_cntl + hub->ctx_distance * i;

		tmp = (hub_type == AMDGPU_GFXHUB_0) ?
			RREG32_SOC15_IP(GC, reg) :
			RREG32_SOC15_IP(MMHUB, reg);

		if (enable)
			tmp |= hub->vm_cntx_cntl_vm_fault;
		else
			tmp &= ~hub->vm_cntx_cntl_vm_fault;

		(hub_type == AMDGPU_GFXHUB_0) ?
			WREG32_SOC15_IP(GC, reg, tmp) :
			WREG32_SOC15_IP(MMHUB, reg, tmp);
	}
}

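/**
 * amdgpu_gmc_get_vbios_allocations - record memory stolen by the vBIOS
 *
 * @adev: amdgpu_device pointer
 *
 * Determine how much VRAM the vBIOS/pre-OS console has used and record it
 * as stolen VGA and extended memory sizes so the driver does not reuse it.
 */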
void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
{
	unsigned size;

	/*
	 * TODO:
	 * Currently there is a bug where some memory client outside
	 * of the driver writes to the first 8M of VRAM on S3 resume.
	 * This overrides GART, which by default gets placed in the first 8M,
	 * and causes VM_FAULTS once GTT is accessed.
	 * Keep the stolen memory reservation while this is not solved.
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		adev->mman.keep_stolen_vga_memory = true;
		break;
	default:
		adev->mman.keep_stolen_vga_memory = false;
		break;
	}

	if (amdgpu_sriov_vf(adev) ||
	    !amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE)) {
		size = 0;
	} else {
		size = amdgpu_gmc_get_vbios_fb_size(adev);

		if (adev->mman.keep_stolen_vga_memory)
			size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION);
	}

	/* set to 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		size = 0;

	if (size > AMDGPU_VBIOS_VGA_ALLOCATION) {
		adev->mman.stolen_vga_size = AMDGPU_VBIOS_VGA_ALLOCATION;
		adev->mman.stolen_extended_size = size - adev->mman.stolen_vga_size;
	} else {
		adev->mman.stolen_vga_size = size;
		adev->mman.stolen_extended_size = 0;
	}
}

/**
 * amdgpu_gmc_init_pdb0 - initialize PDB0
 *
 * @adev: amdgpu_device pointer
 *
 * This function is only used when the GART page table is used
 * for FB address translation. In such a case, we construct
 * a 2-level system VM page table: PDB0->PTB, to cover both
 * VRAM of the hive and system memory.
 *
 * PDB0 is static, initialized once on driver initialization.
 * The first n entries of PDB0 are used as PTEs by setting the
 * P bit to 1, pointing to VRAM. The n+1'th entry points
 * to a big PTB covering system memory.
 */
void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
{
	int i;
	uint64_t flags = adev->gart.gart_pte_flags; //TODO it is UC. explore NC/RW?
	/* Each PDE0 (used as PTE) covers (2^vmid0_page_table_block_size)*2M */
	u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
	u64 pde0_page_size = (1ULL<<adev->gmc.vmid0_page_table_block_size)<<21;
	u64 vram_addr = adev->vm_manager.vram_base_offset -
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	u64 vram_end = vram_addr + vram_size;
	u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo);

	flags |= AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
	flags |= AMDGPU_PTE_WRITEABLE;
	flags |= AMDGPU_PTE_SNOOPED;
	flags |= AMDGPU_PTE_FRAG((adev->gmc.vmid0_page_table_block_size + 9*1));
	flags |= AMDGPU_PDE_PTE;

	/* The first n PDE0 entries are used as PTEs,
	 * pointing to vram
	 */
	for (i = 0; vram_addr < vram_end; i++, vram_addr += pde0_page_size)
		amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, vram_addr, flags);

	/* The n+1'th PDE0 entry points to a huge
	 * PTB with more than 512 entries, each
	 * pointing to a 4K system page
	 */
	flags = AMDGPU_PTE_VALID;
	flags |= AMDGPU_PDE_BFS(0) | AMDGPU_PTE_SNOOPED;
	/* Requires gart_ptb_gpu_pa to be 4K aligned */
	amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags);
}

/**
 * amdgpu_gmc_vram_mc2pa - calculate vram buffer's physical address from MC
 * address
 *
 * @adev: amdgpu_device pointer
 * @mc_addr: MC address of buffer
 */
uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr)
{
	return mc_addr - adev->gmc.vram_start + adev->vm_manager.vram_base_offset;
}

/**
 * amdgpu_gmc_vram_pa - calculate vram buffer object's physical address from
 * GPU's view
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 */
uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
{
	return amdgpu_gmc_vram_mc2pa(adev, amdgpu_bo_gpu_offset(bo));
}

/**
 * amdgpu_gmc_vram_cpu_pa - calculate vram buffer object's physical address
 * from CPU's view
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 */
uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
{
	return amdgpu_bo_gpu_offset(bo) - adev->gmc.vram_start + adev->gmc.aper_base;
}

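/**
 * amdgpu_gmc_get_reserved_allocation - record ASIC specific reserved VRAM
 *
 * @adev: amdgpu_device pointer
 *
 * Record the VRAM region, if any, that the driver must not use on this ASIC.
 */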
void amdgpu_gmc_get_reserved_allocation(struct amdgpu_device *adev)
{
	/* Some ASICs need to reserve a region of video memory to avoid access
	 * from the driver */
	adev->mman.stolen_reserved_offset = 0;
	adev->mman.stolen_reserved_size = 0;

	switch (adev->asic_type) {
	case CHIP_YELLOW_CARP:
		if (amdgpu_discovery == 0) {
			adev->mman.stolen_reserved_offset = 0x1ffb0000;
			adev->mman.stolen_reserved_size = 64 * PAGE_SIZE;
		}
		break;
	default:
		break;
	}
}