1 /*
2  * Copyright 2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/firmware.h>
25 
26 #include "amdgpu.h"
27 #include "amdgpu_discovery.h"
28 #include "soc15_hw_ip.h"
29 #include "discovery.h"
30 #include "amdgpu_ras.h"
31 
32 #include "soc15.h"
33 #include "gfx_v9_0.h"
34 #include "gfx_v9_4_3.h"
35 #include "gmc_v9_0.h"
36 #include "df_v1_7.h"
37 #include "df_v3_6.h"
38 #include "df_v4_3.h"
39 #include "df_v4_6_2.h"
40 #include "df_v4_15.h"
41 #include "nbio_v6_1.h"
42 #include "nbio_v7_0.h"
43 #include "nbio_v7_4.h"
44 #include "nbio_v7_9.h"
45 #include "nbio_v7_11.h"
46 #include "hdp_v4_0.h"
47 #include "vega10_ih.h"
48 #include "vega20_ih.h"
49 #include "sdma_v4_0.h"
50 #include "sdma_v4_4_2.h"
51 #include "uvd_v7_0.h"
52 #include "vce_v4_0.h"
53 #include "vcn_v1_0.h"
54 #include "vcn_v2_5.h"
55 #include "jpeg_v2_5.h"
56 #include "smuio_v9_0.h"
57 #include "gmc_v10_0.h"
58 #include "gmc_v11_0.h"
59 #include "gmc_v12_0.h"
60 #include "gfxhub_v2_0.h"
61 #include "mmhub_v2_0.h"
62 #include "nbio_v2_3.h"
63 #include "nbio_v4_3.h"
64 #include "nbio_v7_2.h"
65 #include "nbio_v7_7.h"
66 #include "nbif_v6_3_1.h"
67 #include "hdp_v5_0.h"
68 #include "hdp_v5_2.h"
69 #include "hdp_v6_0.h"
70 #include "hdp_v7_0.h"
71 #include "nv.h"
72 #include "soc21.h"
73 #include "soc24.h"
74 #include "navi10_ih.h"
75 #include "ih_v6_0.h"
76 #include "ih_v6_1.h"
77 #include "ih_v7_0.h"
78 #include "gfx_v10_0.h"
79 #include "gfx_v11_0.h"
80 #include "gfx_v12_0.h"
81 #include "sdma_v5_0.h"
82 #include "sdma_v5_2.h"
83 #include "sdma_v6_0.h"
84 #include "sdma_v7_0.h"
85 #include "lsdma_v6_0.h"
86 #include "lsdma_v7_0.h"
87 #include "vcn_v2_0.h"
88 #include "jpeg_v2_0.h"
89 #include "vcn_v3_0.h"
90 #include "jpeg_v3_0.h"
91 #include "vcn_v4_0.h"
92 #include "jpeg_v4_0.h"
93 #include "vcn_v4_0_3.h"
94 #include "jpeg_v4_0_3.h"
95 #include "vcn_v4_0_5.h"
96 #include "jpeg_v4_0_5.h"
97 #include "amdgpu_vkms.h"
98 #include "mes_v11_0.h"
99 #include "mes_v12_0.h"
100 #include "smuio_v11_0.h"
101 #include "smuio_v11_0_6.h"
102 #include "smuio_v13_0.h"
103 #include "smuio_v13_0_3.h"
104 #include "smuio_v13_0_6.h"
105 #include "smuio_v14_0_2.h"
106 #include "vcn_v5_0_0.h"
107 #include "jpeg_v5_0_0.h"
108 
109 #include "amdgpu_vpe.h"
110 #if defined(CONFIG_DRM_AMD_ISP)
111 #include "amdgpu_isp.h"
112 #endif
113 
114 MODULE_FIRMWARE("amdgpu/ip_discovery.bin");
115 MODULE_FIRMWARE("amdgpu/vega10_ip_discovery.bin");
116 MODULE_FIRMWARE("amdgpu/vega12_ip_discovery.bin");
117 MODULE_FIRMWARE("amdgpu/vega20_ip_discovery.bin");
118 MODULE_FIRMWARE("amdgpu/raven_ip_discovery.bin");
119 MODULE_FIRMWARE("amdgpu/raven2_ip_discovery.bin");
120 MODULE_FIRMWARE("amdgpu/picasso_ip_discovery.bin");
121 MODULE_FIRMWARE("amdgpu/arcturus_ip_discovery.bin");
122 MODULE_FIRMWARE("amdgpu/aldebaran_ip_discovery.bin");
123 
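/* Register offsets (dword-indexed) used below: mmRCC_CONFIG_MEMSIZE reports the
 * VRAM size in MB, mmMP0_SMN_C2PMSG_33 is polled for IFWI init completion, and
 * mmIP_DISCOVERY_VERSION is read in the amdgpu_discovery_init() error path.
 */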
124 #define mmIP_DISCOVERY_VERSION  0x16A00
125 #define mmRCC_CONFIG_MEMSIZE	0xde3
126 #define mmMP0_SMN_C2PMSG_33	0x16061
127 #define mmMM_INDEX		0x0
128 #define mmMM_INDEX_HI		0x6
129 #define mmMM_DATA		0x1
130 
131 static const char *hw_id_names[HW_ID_MAX] = {
132 	[MP1_HWID]		= "MP1",
133 	[MP2_HWID]		= "MP2",
134 	[THM_HWID]		= "THM",
135 	[SMUIO_HWID]		= "SMUIO",
136 	[FUSE_HWID]		= "FUSE",
137 	[CLKA_HWID]		= "CLKA",
138 	[PWR_HWID]		= "PWR",
139 	[GC_HWID]		= "GC",
140 	[UVD_HWID]		= "UVD",
141 	[AUDIO_AZ_HWID]		= "AUDIO_AZ",
142 	[ACP_HWID]		= "ACP",
143 	[DCI_HWID]		= "DCI",
144 	[DMU_HWID]		= "DMU",
145 	[DCO_HWID]		= "DCO",
146 	[DIO_HWID]		= "DIO",
147 	[XDMA_HWID]		= "XDMA",
148 	[DCEAZ_HWID]		= "DCEAZ",
149 	[DAZ_HWID]		= "DAZ",
150 	[SDPMUX_HWID]		= "SDPMUX",
151 	[NTB_HWID]		= "NTB",
152 	[IOHC_HWID]		= "IOHC",
153 	[L2IMU_HWID]		= "L2IMU",
154 	[VCE_HWID]		= "VCE",
155 	[MMHUB_HWID]		= "MMHUB",
156 	[ATHUB_HWID]		= "ATHUB",
157 	[DBGU_NBIO_HWID]	= "DBGU_NBIO",
158 	[DFX_HWID]		= "DFX",
159 	[DBGU0_HWID]		= "DBGU0",
160 	[DBGU1_HWID]		= "DBGU1",
161 	[OSSSYS_HWID]		= "OSSSYS",
162 	[HDP_HWID]		= "HDP",
163 	[SDMA0_HWID]		= "SDMA0",
164 	[SDMA1_HWID]		= "SDMA1",
165 	[SDMA2_HWID]		= "SDMA2",
166 	[SDMA3_HWID]		= "SDMA3",
167 	[LSDMA_HWID]		= "LSDMA",
168 	[ISP_HWID]		= "ISP",
169 	[DBGU_IO_HWID]		= "DBGU_IO",
170 	[DF_HWID]		= "DF",
171 	[CLKB_HWID]		= "CLKB",
172 	[FCH_HWID]		= "FCH",
173 	[DFX_DAP_HWID]		= "DFX_DAP",
174 	[L1IMU_PCIE_HWID]	= "L1IMU_PCIE",
175 	[L1IMU_NBIF_HWID]	= "L1IMU_NBIF",
176 	[L1IMU_IOAGR_HWID]	= "L1IMU_IOAGR",
177 	[L1IMU3_HWID]		= "L1IMU3",
178 	[L1IMU4_HWID]		= "L1IMU4",
179 	[L1IMU5_HWID]		= "L1IMU5",
180 	[L1IMU6_HWID]		= "L1IMU6",
181 	[L1IMU7_HWID]		= "L1IMU7",
182 	[L1IMU8_HWID]		= "L1IMU8",
183 	[L1IMU9_HWID]		= "L1IMU9",
184 	[L1IMU10_HWID]		= "L1IMU10",
185 	[L1IMU11_HWID]		= "L1IMU11",
186 	[L1IMU12_HWID]		= "L1IMU12",
187 	[L1IMU13_HWID]		= "L1IMU13",
188 	[L1IMU14_HWID]		= "L1IMU14",
189 	[L1IMU15_HWID]		= "L1IMU15",
190 	[WAFLC_HWID]		= "WAFLC",
191 	[FCH_USB_PD_HWID]	= "FCH_USB_PD",
192 	[PCIE_HWID]		= "PCIE",
193 	[PCS_HWID]		= "PCS",
194 	[DDCL_HWID]		= "DDCL",
195 	[SST_HWID]		= "SST",
196 	[IOAGR_HWID]		= "IOAGR",
197 	[NBIF_HWID]		= "NBIF",
198 	[IOAPIC_HWID]		= "IOAPIC",
199 	[SYSTEMHUB_HWID]	= "SYSTEMHUB",
200 	[NTBCCP_HWID]		= "NTBCCP",
201 	[UMC_HWID]		= "UMC",
202 	[SATA_HWID]		= "SATA",
203 	[USB_HWID]		= "USB",
204 	[CCXSEC_HWID]		= "CCXSEC",
205 	[XGMI_HWID]		= "XGMI",
206 	[XGBE_HWID]		= "XGBE",
207 	[MP0_HWID]		= "MP0",
208 	[VPE_HWID]		= "VPE",
209 };
210 
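/* Map the driver's per-IP indices (HWIP) to the HW IDs used in the discovery table. */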
211 static int hw_id_map[MAX_HWIP] = {
212 	[GC_HWIP]	= GC_HWID,
213 	[HDP_HWIP]	= HDP_HWID,
214 	[SDMA0_HWIP]	= SDMA0_HWID,
215 	[SDMA1_HWIP]	= SDMA1_HWID,
216 	[SDMA2_HWIP]    = SDMA2_HWID,
217 	[SDMA3_HWIP]    = SDMA3_HWID,
218 	[LSDMA_HWIP]    = LSDMA_HWID,
219 	[MMHUB_HWIP]	= MMHUB_HWID,
220 	[ATHUB_HWIP]	= ATHUB_HWID,
221 	[NBIO_HWIP]	= NBIF_HWID,
222 	[MP0_HWIP]	= MP0_HWID,
223 	[MP1_HWIP]	= MP1_HWID,
224 	[UVD_HWIP]	= UVD_HWID,
225 	[VCE_HWIP]	= VCE_HWID,
226 	[DF_HWIP]	= DF_HWID,
227 	[DCE_HWIP]	= DMU_HWID,
228 	[OSSSYS_HWIP]	= OSSSYS_HWID,
229 	[SMUIO_HWIP]	= SMUIO_HWID,
230 	[PWR_HWIP]	= PWR_HWID,
231 	[NBIF_HWIP]	= NBIF_HWID,
232 	[THM_HWIP]	= THM_HWID,
233 	[CLK_HWIP]	= CLKA_HWID,
234 	[UMC_HWIP]	= UMC_HWID,
235 	[XGMI_HWIP]	= XGMI_HWID,
236 	[DCI_HWIP]	= DCI_HWID,
237 	[PCIE_HWIP]	= PCIE_HWID,
238 	[VPE_HWIP]	= VPE_HWID,
239 	[ISP_HWIP]	= ISP_HWID,
240 };
241 
242 static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
243 {
244 	u64 tmr_offset, tmr_size, pos;
245 	void *discv_regn;
246 	int ret;
247 
248 	ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
249 	if (ret)
250 		return ret;
251 
252 	pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
253 
254 	/* This region is read-only and reserved from system use */
255 	discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
256 	if (discv_regn) {
257 		memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
258 		memunmap(discv_regn);
259 		return 0;
260 	}
261 
262 	return -ENOENT;
263 }
264 
265 #define IP_DISCOVERY_V2		2
266 #define IP_DISCOVERY_V4		4
267 
268 static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
269 						 uint8_t *binary)
270 {
271 	uint64_t vram_size;
272 	u32 msg;
273 	int i, ret = 0;
274 
275 	if (!amdgpu_sriov_vf(adev)) {
276 		/* It can take up to two seconds for IFWI init to complete on some dGPUs,
277 		 * but generally it should be in the 60-100ms range.  Normally this starts
278 		 * as soon as the device gets power so by the time the OS loads this has long
279 		 * completed.  However, when a card is hotplugged via e.g., USB4, we need to
280 		 * wait for this to complete.  Once the C2PMSG is updated, we can
281 		 * continue.
282 		 */
283 
284 		for (i = 0; i < 2000; i++) {
285 			msg = RREG32(mmMP0_SMN_C2PMSG_33);
286 			if (msg & 0x80000000)
287 				break;
288 			msleep(1);
289 		}
290 	}
291 
292 	vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
293 
294 	if (vram_size) {
295 		uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
296 		amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
297 					  adev->mman.discovery_tmr_size, false);
298 	} else {
299 		ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
300 	}
301 
302 	return ret;
303 }
304 
305 static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev,
306 							uint8_t *binary,
307 							const char *fw_name)
308 {
309 	const struct firmware *fw;
310 	int r;
311 
312 	r = firmware_request_nowarn(&fw, fw_name, adev->dev);
313 	if (r) {
314 		if (amdgpu_discovery == 2)
315 			dev_err(adev->dev, "can't load firmware \"%s\"\n", fw_name);
316 		else
317 			drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fw_name);
318 		return r;
319 	}
320 
321 	memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
322 	release_firmware(fw);
323 
324 	return 0;
325 }
326 
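/* Discovery tables carry a simple additive checksum: the expected value is the
 * 16-bit sum of all table bytes, verified against the header fields below.
 */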
327 static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
328 {
329 	uint16_t checksum = 0;
330 	int i;
331 
332 	for (i = 0; i < size; i++)
333 		checksum += data[i];
334 
335 	return checksum;
336 }
337 
338 static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
339 						    uint16_t expected)
340 {
341 	return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
342 }
343 
344 static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
345 {
346 	struct binary_header *bhdr;
347 	bhdr = (struct binary_header *)binary;
348 
349 	return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
350 }
351 
352 static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
353 {
354 	/*
355 	 * So far, apply this quirk only on those Navy Flounder boards which
356 	 * have a bad VCN config in their harvest table.
357 	 */
358 	if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) &&
359 	    (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) {
360 		switch (adev->pdev->revision) {
361 		case 0xC1:
362 		case 0xC2:
363 		case 0xC3:
364 		case 0xC5:
365 		case 0xC7:
366 		case 0xCF:
367 		case 0xDF:
368 			adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
369 			adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
370 			break;
371 		default:
372 			break;
373 		}
374 	}
375 }
376 
377 static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
378 					   struct binary_header *bhdr)
379 {
380 	struct table_info *info;
381 	uint16_t checksum;
382 	uint16_t offset;
383 
384 	info = &bhdr->table_list[NPS_INFO];
385 	offset = le16_to_cpu(info->offset);
386 	checksum = le16_to_cpu(info->checksum);
387 
388 	struct nps_info_header *nhdr =
389 		(struct nps_info_header *)(adev->mman.discovery_bin + offset);
390 
391 	if (le32_to_cpu(nhdr->table_id) != NPS_INFO_TABLE_ID) {
392 		dev_dbg(adev->dev, "invalid ip discovery nps info table id\n");
393 		return -EINVAL;
394 	}
395 
396 	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
397 					      le32_to_cpu(nhdr->size_bytes),
398 					      checksum)) {
399 		dev_dbg(adev->dev, "invalid nps info data table checksum\n");
400 		return -EINVAL;
401 	}
402 
403 	return 0;
404 }
405 
406 static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev)
407 {
408 	if (amdgpu_discovery == 2)
409 		return "amdgpu/ip_discovery.bin";
410 
411 	switch (adev->asic_type) {
412 	case CHIP_VEGA10:
413 		return "amdgpu/vega10_ip_discovery.bin";
414 	case CHIP_VEGA12:
415 		return "amdgpu/vega12_ip_discovery.bin";
416 	case CHIP_RAVEN:
417 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
418 			return "amdgpu/raven2_ip_discovery.bin";
419 		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
420 			return "amdgpu/picasso_ip_discovery.bin";
421 		else
422 			return "amdgpu/raven_ip_discovery.bin";
423 	case CHIP_VEGA20:
424 		return "amdgpu/vega20_ip_discovery.bin";
425 	case CHIP_ARCTURUS:
426 		return "amdgpu/arcturus_ip_discovery.bin";
427 	case CHIP_ALDEBARAN:
428 		return "amdgpu/aldebaran_ip_discovery.bin";
429 	default:
430 		return NULL;
431 	}
432 }
433 
434 static int amdgpu_discovery_init(struct amdgpu_device *adev)
435 {
436 	struct table_info *info;
437 	struct binary_header *bhdr;
438 	const char *fw_name;
439 	uint16_t offset;
440 	uint16_t size;
441 	uint16_t checksum;
442 	int r;
443 
444 	adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
445 	adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
446 	if (!adev->mman.discovery_bin)
447 		return -ENOMEM;
448 
449 	/* Read from file if it is the preferred option */
450 	fw_name = amdgpu_discovery_get_fw_name(adev);
451 	if (fw_name != NULL) {
452 		drm_dbg(&adev->ddev, "use ip discovery information from file");
453 		r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin, fw_name);
454 		if (r)
455 			goto out;
456 	} else {
457 		drm_dbg(&adev->ddev, "use ip discovery information from memory");
458 		r = amdgpu_discovery_read_binary_from_mem(
459 			adev, adev->mman.discovery_bin);
460 		if (r)
461 			goto out;
462 	}
463 
464 	/* check the ip discovery binary signature */
465 	if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
466 		dev_err(adev->dev,
467 			"get invalid ip discovery binary signature\n");
468 		r = -EINVAL;
469 		goto out;
470 	}
471 
472 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
473 
474 	offset = offsetof(struct binary_header, binary_checksum) +
475 		sizeof(bhdr->binary_checksum);
476 	size = le16_to_cpu(bhdr->binary_size) - offset;
477 	checksum = le16_to_cpu(bhdr->binary_checksum);
478 
479 	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
480 					      size, checksum)) {
481 		dev_err(adev->dev, "invalid ip discovery binary checksum\n");
482 		r = -EINVAL;
483 		goto out;
484 	}
485 
486 	info = &bhdr->table_list[IP_DISCOVERY];
487 	offset = le16_to_cpu(info->offset);
488 	checksum = le16_to_cpu(info->checksum);
489 
490 	if (offset) {
491 		struct ip_discovery_header *ihdr =
492 			(struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
493 		if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
494 			dev_err(adev->dev, "invalid ip discovery data table signature\n");
495 			r = -EINVAL;
496 			goto out;
497 		}
498 
499 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
500 						      le16_to_cpu(ihdr->size), checksum)) {
501 			dev_err(adev->dev, "invalid ip discovery data table checksum\n");
502 			r = -EINVAL;
503 			goto out;
504 		}
505 	}
506 
507 	info = &bhdr->table_list[GC];
508 	offset = le16_to_cpu(info->offset);
509 	checksum = le16_to_cpu(info->checksum);
510 
511 	if (offset) {
512 		struct gpu_info_header *ghdr =
513 			(struct gpu_info_header *)(adev->mman.discovery_bin + offset);
514 
515 		if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
516 			dev_err(adev->dev, "invalid ip discovery gc table id\n");
517 			r = -EINVAL;
518 			goto out;
519 		}
520 
521 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
522 						      le32_to_cpu(ghdr->size), checksum)) {
523 			dev_err(adev->dev, "invalid gc data table checksum\n");
524 			r = -EINVAL;
525 			goto out;
526 		}
527 	}
528 
529 	info = &bhdr->table_list[HARVEST_INFO];
530 	offset = le16_to_cpu(info->offset);
531 	checksum = le16_to_cpu(info->checksum);
532 
533 	if (offset) {
534 		struct harvest_info_header *hhdr =
535 			(struct harvest_info_header *)(adev->mman.discovery_bin + offset);
536 
537 		if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
538 			dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
539 			r = -EINVAL;
540 			goto out;
541 		}
542 
543 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
544 						      sizeof(struct harvest_table), checksum)) {
545 			dev_err(adev->dev, "invalid harvest data table checksum\n");
546 			r = -EINVAL;
547 			goto out;
548 		}
549 	}
550 
551 	info = &bhdr->table_list[VCN_INFO];
552 	offset = le16_to_cpu(info->offset);
553 	checksum = le16_to_cpu(info->checksum);
554 
555 	if (offset) {
556 		struct vcn_info_header *vhdr =
557 			(struct vcn_info_header *)(adev->mman.discovery_bin + offset);
558 
559 		if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
560 			dev_err(adev->dev, "invalid ip discovery vcn table id\n");
561 			r = -EINVAL;
562 			goto out;
563 		}
564 
565 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
566 						      le32_to_cpu(vhdr->size_bytes), checksum)) {
567 			dev_err(adev->dev, "invalid vcn data table checksum\n");
568 			r = -EINVAL;
569 			goto out;
570 		}
571 	}
572 
573 	info = &bhdr->table_list[MALL_INFO];
574 	offset = le16_to_cpu(info->offset);
575 	checksum = le16_to_cpu(info->checksum);
576 
577 	if (0 && offset) {
578 		struct mall_info_header *mhdr =
579 			(struct mall_info_header *)(adev->mman.discovery_bin + offset);
580 
581 		if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
582 			dev_err(adev->dev, "invalid ip discovery mall table id\n");
583 			r = -EINVAL;
584 			goto out;
585 		}
586 
587 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
588 						      le32_to_cpu(mhdr->size_bytes), checksum)) {
589 			dev_err(adev->dev, "invalid mall data table checksum\n");
590 			r = -EINVAL;
591 			goto out;
592 		}
593 	}
594 
595 	return 0;
596 
597 out:
598 	kfree(adev->mman.discovery_bin);
599 	adev->mman.discovery_bin = NULL;
600 	if ((amdgpu_discovery != 2) &&
601 	    (RREG32(mmIP_DISCOVERY_VERSION) == 4))
602 		amdgpu_ras_query_boot_status(adev, 4);
603 	return r;
604 }
605 
606 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);
607 
608 void amdgpu_discovery_fini(struct amdgpu_device *adev)
609 {
610 	amdgpu_discovery_sysfs_fini(adev);
611 	kfree(adev->mman.discovery_bin);
612 	adev->mman.discovery_bin = NULL;
613 }
614 
615 static int amdgpu_discovery_validate_ip(const struct ip_v4 *ip)
616 {
617 	if (ip->instance_number >= HWIP_MAX_INSTANCE) {
618 		DRM_ERROR("Unexpected instance_number (%d) from ip discovery blob\n",
619 			  ip->instance_number);
620 		return -EINVAL;
621 	}
622 	if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
623 		DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
624 			  le16_to_cpu(ip->hw_id));
625 		return -EINVAL;
626 	}
627 
628 	return 0;
629 }
630 
631 static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
632 						uint32_t *vcn_harvest_count)
633 {
634 	struct binary_header *bhdr;
635 	struct ip_discovery_header *ihdr;
636 	struct die_header *dhdr;
637 	struct ip_v4 *ip;
638 	uint16_t die_offset, ip_offset, num_dies, num_ips;
639 	int i, j;
640 
641 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
642 	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
643 			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
644 	num_dies = le16_to_cpu(ihdr->num_dies);
645 
646 	/* scan harvest bit of all IP data structures */
647 	for (i = 0; i < num_dies; i++) {
648 		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
649 		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
650 		num_ips = le16_to_cpu(dhdr->num_ips);
651 		ip_offset = die_offset + sizeof(*dhdr);
652 
653 		for (j = 0; j < num_ips; j++) {
654 			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
655 
656 			if (amdgpu_discovery_validate_ip(ip))
657 				goto next_ip;
658 
659 			if (le16_to_cpu(ip->variant) == 1) {
660 				switch (le16_to_cpu(ip->hw_id)) {
661 				case VCN_HWID:
662 					(*vcn_harvest_count)++;
663 					if (ip->instance_number == 0) {
664 						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
665 						adev->vcn.inst_mask &=
666 							~AMDGPU_VCN_HARVEST_VCN0;
667 						adev->jpeg.inst_mask &=
668 							~AMDGPU_VCN_HARVEST_VCN0;
669 					} else {
670 						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
671 						adev->vcn.inst_mask &=
672 							~AMDGPU_VCN_HARVEST_VCN1;
673 						adev->jpeg.inst_mask &=
674 							~AMDGPU_VCN_HARVEST_VCN1;
675 					}
676 					break;
677 				case DMU_HWID:
678 					adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
679 					break;
680 				default:
681 					break;
682 				}
683 			}
684 next_ip:
685 			if (ihdr->base_addr_64_bit)
686 				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
687 			else
688 				ip_offset += struct_size(ip, base_address, ip->num_base_address);
689 		}
690 	}
691 }
692 
693 static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
694 						     uint32_t *vcn_harvest_count,
695 						     uint32_t *umc_harvest_count)
696 {
697 	struct binary_header *bhdr;
698 	struct harvest_table *harvest_info;
699 	u16 offset;
700 	int i;
701 	uint32_t umc_harvest_config = 0;
702 
703 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
704 	offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
705 
706 	if (!offset) {
707 		dev_err(adev->dev, "invalid harvest table offset\n");
708 		return;
709 	}
710 
711 	harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);
712 
713 	for (i = 0; i < 32; i++) {
714 		if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
715 			break;
716 
717 		switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
718 		case VCN_HWID:
719 			(*vcn_harvest_count)++;
720 			adev->vcn.harvest_config |=
721 				(1 << harvest_info->list[i].number_instance);
722 			adev->jpeg.harvest_config |=
723 				(1 << harvest_info->list[i].number_instance);
724 
725 			adev->vcn.inst_mask &=
726 				~(1U << harvest_info->list[i].number_instance);
727 			adev->jpeg.inst_mask &=
728 				~(1U << harvest_info->list[i].number_instance);
729 			break;
730 		case DMU_HWID:
731 			adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
732 			break;
733 		case UMC_HWID:
734 			umc_harvest_config |=
735 				1 << (le16_to_cpu(harvest_info->list[i].number_instance));
736 			(*umc_harvest_count)++;
737 			break;
738 		case GC_HWID:
739 			adev->gfx.xcc_mask &=
740 				~(1U << harvest_info->list[i].number_instance);
741 			break;
742 		case SDMA0_HWID:
743 			adev->sdma.sdma_mask &=
744 				~(1U << harvest_info->list[i].number_instance);
745 			break;
746 #if defined(CONFIG_DRM_AMD_ISP)
747 		case ISP_HWID:
748 			adev->isp.harvest_config |=
749 				(1U << harvest_info->list[i].number_instance);
750 			break;
751 #endif
752 		default:
753 			break;
754 		}
755 	}
756 
757 	adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
758 				~umc_harvest_config;
759 }
760 
761 /* ================================================== */
762 
763 struct ip_hw_instance {
764 	struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */
765 
766 	int hw_id;
767 	u8  num_instance;
768 	u8  major, minor, revision;
769 	u8  harvest;
770 
771 	int num_base_addresses;
772 	u32 base_addr[] __counted_by(num_base_addresses);
773 };
774 
775 struct ip_hw_id {
776 	struct kset hw_id_kset;  /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
777 	int hw_id;
778 };
779 
780 struct ip_die_entry {
781 	struct kset ip_kset;     /* ip_discovery/die/#die/, contains ip_hw_id  */
782 	u16 num_ips;
783 };
784 
785 /* -------------------------------------------------- */
786 
787 struct ip_hw_instance_attr {
788 	struct attribute attr;
789 	ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
790 };
791 
792 static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
793 {
794 	return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
795 }
796 
797 static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
798 {
799 	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
800 }
801 
802 static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
803 {
804 	return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
805 }
806 
807 static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
808 {
809 	return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
810 }
811 
812 static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
813 {
814 	return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
815 }
816 
817 static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
818 {
819 	return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
820 }
821 
822 static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
823 {
824 	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
825 }
826 
827 static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
828 {
829 	ssize_t res, at;
830 	int ii;
831 
832 	for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
833 		/* Here we satisfy the condition that at + size <= PAGE_SIZE.
834 		 */
835 		if (at + 12 > PAGE_SIZE)
836 			break;
837 		res = sysfs_emit_at(buf, at, "0x%08X\n",
838 				    ip_hw_instance->base_addr[ii]);
839 		if (res <= 0)
840 			break;
841 		at += res;
842 	}
843 
844 	return res < 0 ? res : at;
845 }
846 
847 static struct ip_hw_instance_attr ip_hw_attr[] = {
848 	__ATTR_RO(hw_id),
849 	__ATTR_RO(num_instance),
850 	__ATTR_RO(major),
851 	__ATTR_RO(minor),
852 	__ATTR_RO(revision),
853 	__ATTR_RO(harvest),
854 	__ATTR_RO(num_base_addresses),
855 	__ATTR_RO(base_addr),
856 };
857 
858 static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
859 ATTRIBUTE_GROUPS(ip_hw_instance);
860 
861 #define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
862 #define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)
863 
864 static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
865 					struct attribute *attr,
866 					char *buf)
867 {
868 	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
869 	struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);
870 
871 	if (!ip_hw_attr->show)
872 		return -EIO;
873 
874 	return ip_hw_attr->show(ip_hw_instance, buf);
875 }
876 
877 static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
878 	.show = ip_hw_instance_attr_show,
879 };
880 
881 static void ip_hw_instance_release(struct kobject *kobj)
882 {
883 	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
884 
885 	kfree(ip_hw_instance);
886 }
887 
888 static const struct kobj_type ip_hw_instance_ktype = {
889 	.release = ip_hw_instance_release,
890 	.sysfs_ops = &ip_hw_instance_sysfs_ops,
891 	.default_groups = ip_hw_instance_groups,
892 };
893 
894 /* -------------------------------------------------- */
895 
896 #define to_ip_hw_id(x)  container_of(to_kset(x), struct ip_hw_id, hw_id_kset)
897 
898 static void ip_hw_id_release(struct kobject *kobj)
899 {
900 	struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);
901 
902 	if (!list_empty(&ip_hw_id->hw_id_kset.list))
903 		DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
904 	kfree(ip_hw_id);
905 }
906 
907 static const struct kobj_type ip_hw_id_ktype = {
908 	.release = ip_hw_id_release,
909 	.sysfs_ops = &kobj_sysfs_ops,
910 };
911 
912 /* -------------------------------------------------- */
913 
914 static void die_kobj_release(struct kobject *kobj);
915 static void ip_disc_release(struct kobject *kobj);
916 
917 struct ip_die_entry_attribute {
918 	struct attribute attr;
919 	ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
920 };
921 
922 #define to_ip_die_entry_attr(x)  container_of(x, struct ip_die_entry_attribute, attr)
923 
924 static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
925 {
926 	return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
927 }
928 
929 /* If there are more ip_die_entry attrs, other than the number of IPs,
930  * we can make this into an array of attrs, and then initialize
931  * ip_die_entry_attrs in a loop.
932  */
933 static struct ip_die_entry_attribute num_ips_attr =
934 	__ATTR_RO(num_ips);
935 
936 static struct attribute *ip_die_entry_attrs[] = {
937 	&num_ips_attr.attr,
938 	NULL,
939 };
940 ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */
941 
942 #define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)
943 
944 static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
945 				      struct attribute *attr,
946 				      char *buf)
947 {
948 	struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
949 	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
950 
951 	if (!ip_die_entry_attr->show)
952 		return -EIO;
953 
954 	return ip_die_entry_attr->show(ip_die_entry, buf);
955 }
956 
957 static void ip_die_entry_release(struct kobject *kobj)
958 {
959 	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
960 
961 	if (!list_empty(&ip_die_entry->ip_kset.list))
962 		DRM_ERROR("ip_die_entry->ip_kset is not empty");
963 	kfree(ip_die_entry);
964 }
965 
966 static const struct sysfs_ops ip_die_entry_sysfs_ops = {
967 	.show = ip_die_entry_attr_show,
968 };
969 
970 static const struct kobj_type ip_die_entry_ktype = {
971 	.release = ip_die_entry_release,
972 	.sysfs_ops = &ip_die_entry_sysfs_ops,
973 	.default_groups = ip_die_entry_groups,
974 };
975 
976 static const struct kobj_type die_kobj_ktype = {
977 	.release = die_kobj_release,
978 	.sysfs_ops = &kobj_sysfs_ops,
979 };
980 
981 static const struct kobj_type ip_discovery_ktype = {
982 	.release = ip_disc_release,
983 	.sysfs_ops = &kobj_sysfs_ops,
984 };
985 
986 struct ip_discovery_top {
987 	struct kobject kobj;    /* ip_discovery/ */
988 	struct kset die_kset;   /* ip_discovery/die/, contains ip_die_entry */
989 	struct amdgpu_device *adev;
990 };
991 
992 static void die_kobj_release(struct kobject *kobj)
993 {
994 	struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
995 						       struct ip_discovery_top,
996 						       die_kset);
997 	if (!list_empty(&ip_top->die_kset.list))
998 		DRM_ERROR("ip_top->die_kset is not empty");
999 }
1000 
1001 static void ip_disc_release(struct kobject *kobj)
1002 {
1003 	struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
1004 						       kobj);
1005 	struct amdgpu_device *adev = ip_top->adev;
1006 
1007 	adev->ip_top = NULL;
1008 	kfree(ip_top);
1009 }
1010 
1011 static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
1012 						 uint16_t hw_id, uint8_t inst)
1013 {
1014 	uint8_t harvest = 0;
1015 
1016 	/* Until a uniform way is figured out, get the mask based on hwid */
1017 	switch (hw_id) {
1018 	case VCN_HWID:
1019 		harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
1020 		break;
1021 	case DMU_HWID:
1022 		if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
1023 			harvest = 0x1;
1024 		break;
1025 	case UMC_HWID:
1026 		/* TODO: UMC harvest needs additional parsing; ignore for now. */
1027 		break;
1028 	case GC_HWID:
1029 		harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
1030 		break;
1031 	case SDMA0_HWID:
1032 		harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
1033 		break;
1034 	default:
1035 		break;
1036 	}
1037 
1038 	return harvest;
1039 }
1040 
1041 static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
1042 				      struct ip_die_entry *ip_die_entry,
1043 				      const size_t _ip_offset, const int num_ips,
1044 				      bool reg_base_64)
1045 {
1046 	int ii, jj, kk, res;
1047 
1048 	DRM_DEBUG("num_ips:%d", num_ips);
1049 
1050 	/* Find all IPs of a given HW ID, and add their instance to
1051 	 * #die/#hw_id/#instance/<attributes>
1052 	 */
1053 	for (ii = 0; ii < HW_ID_MAX; ii++) {
1054 		struct ip_hw_id *ip_hw_id = NULL;
1055 		size_t ip_offset = _ip_offset;
1056 
1057 		for (jj = 0; jj < num_ips; jj++) {
1058 			struct ip_v4 *ip;
1059 			struct ip_hw_instance *ip_hw_instance;
1060 
1061 			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
1062 			if (amdgpu_discovery_validate_ip(ip) ||
1063 			    le16_to_cpu(ip->hw_id) != ii)
1064 				goto next_ip;
1065 
1066 			DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);
1067 
1068 			/* We have a hw_id match; register the hw
1069 			 * block if not yet registered.
1070 			 */
1071 			if (!ip_hw_id) {
1072 				ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
1073 				if (!ip_hw_id)
1074 					return -ENOMEM;
1075 				ip_hw_id->hw_id = ii;
1076 
1077 				kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
1078 				ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
1079 				ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
1080 				res = kset_register(&ip_hw_id->hw_id_kset);
1081 				if (res) {
1082 					DRM_ERROR("Couldn't register ip_hw_id kset");
1083 					kfree(ip_hw_id);
1084 					return res;
1085 				}
1086 				if (hw_id_names[ii]) {
1087 					res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
1088 								&ip_hw_id->hw_id_kset.kobj,
1089 								hw_id_names[ii]);
1090 					if (res) {
1091 						DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
1092 							  hw_id_names[ii],
1093 							  kobject_name(&ip_die_entry->ip_kset.kobj));
1094 					}
1095 				}
1096 			}
1097 
1098 			/* Now register its instance.
1099 			 */
1100 			ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
1101 							     base_addr,
1102 							     ip->num_base_address),
1103 						 GFP_KERNEL);
1104 			if (!ip_hw_instance) {
1105 				DRM_ERROR("no memory for ip_hw_instance");
1106 				return -ENOMEM;
1107 			}
1108 			ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
1109 			ip_hw_instance->num_instance = ip->instance_number;
1110 			ip_hw_instance->major = ip->major;
1111 			ip_hw_instance->minor = ip->minor;
1112 			ip_hw_instance->revision = ip->revision;
1113 			ip_hw_instance->harvest =
1114 				amdgpu_discovery_get_harvest_info(
1115 					adev, ip_hw_instance->hw_id,
1116 					ip_hw_instance->num_instance);
1117 			ip_hw_instance->num_base_addresses = ip->num_base_address;
1118 
1119 			for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
1120 				if (reg_base_64)
1121 					ip_hw_instance->base_addr[kk] =
1122 						lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
1123 				else
1124 					ip_hw_instance->base_addr[kk] = ip->base_address[kk];
1125 			}
1126 
1127 			kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
1128 			ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
1129 			res = kobject_add(&ip_hw_instance->kobj, NULL,
1130 					  "%d", ip_hw_instance->num_instance);
1131 next_ip:
1132 			if (reg_base_64)
1133 				ip_offset += struct_size(ip, base_address_64,
1134 							 ip->num_base_address);
1135 			else
1136 				ip_offset += struct_size(ip, base_address,
1137 							 ip->num_base_address);
1138 		}
1139 	}
1140 
1141 	return 0;
1142 }
1143 
1144 static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
1145 {
1146 	struct binary_header *bhdr;
1147 	struct ip_discovery_header *ihdr;
1148 	struct die_header *dhdr;
1149 	struct kset *die_kset = &adev->ip_top->die_kset;
1150 	u16 num_dies, die_offset, num_ips;
1151 	size_t ip_offset;
1152 	int ii, res;
1153 
1154 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1155 	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1156 					      le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1157 	num_dies = le16_to_cpu(ihdr->num_dies);
1158 
1159 	DRM_DEBUG("number of dies: %d\n", num_dies);
1160 
1161 	for (ii = 0; ii < num_dies; ii++) {
1162 		struct ip_die_entry *ip_die_entry;
1163 
1164 		die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
1165 		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1166 		num_ips = le16_to_cpu(dhdr->num_ips);
1167 		ip_offset = die_offset + sizeof(*dhdr);
1168 
1169 		/* Add the die to the kset.
1170 		 *
1171 		 * dhdr->die_id == ii, which was checked in
1172 		 * amdgpu_discovery_reg_base_init().
1173 		 */
1174 
1175 		ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
1176 		if (!ip_die_entry)
1177 			return -ENOMEM;
1178 
1179 		ip_die_entry->num_ips = num_ips;
1180 
1181 		kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
1182 		ip_die_entry->ip_kset.kobj.kset = die_kset;
1183 		ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
1184 		res = kset_register(&ip_die_entry->ip_kset);
1185 		if (res) {
1186 			DRM_ERROR("Couldn't register ip_die_entry kset");
1187 			kfree(ip_die_entry);
1188 			return res;
1189 		}
1190 
1191 		amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
1192 	}
1193 
1194 	return 0;
1195 }
1196 
1197 static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
1198 {
1199 	struct kset *die_kset;
1200 	int res, ii;
1201 
1202 	if (!adev->mman.discovery_bin)
1203 		return -EINVAL;
1204 
1205 	adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
1206 	if (!adev->ip_top)
1207 		return -ENOMEM;
1208 
1209 	adev->ip_top->adev = adev;
1210 
1211 	res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
1212 				   &adev->dev->kobj, "ip_discovery");
1213 	if (res) {
1214 		DRM_ERROR("Couldn't init and add ip_discovery/");
1215 		goto Err;
1216 	}
1217 
1218 	die_kset = &adev->ip_top->die_kset;
1219 	kobject_set_name(&die_kset->kobj, "%s", "die");
1220 	die_kset->kobj.parent = &adev->ip_top->kobj;
1221 	die_kset->kobj.ktype = &die_kobj_ktype;
1222 	res = kset_register(&adev->ip_top->die_kset);
1223 	if (res) {
1224 		DRM_ERROR("Couldn't register die_kset");
1225 		goto Err;
1226 	}
1227 
1228 	for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
1229 		ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
1230 	ip_hw_instance_attrs[ii] = NULL;
1231 
1232 	res = amdgpu_discovery_sysfs_recurse(adev);
1233 
1234 	return res;
1235 Err:
1236 	kobject_put(&adev->ip_top->kobj);
1237 	return res;
1238 }
1239 
1240 /* -------------------------------------------------- */
1241 
1242 #define list_to_kobj(el) container_of(el, struct kobject, entry)
1243 
1244 static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
1245 {
1246 	struct list_head *el, *tmp;
1247 	struct kset *hw_id_kset;
1248 
1249 	hw_id_kset = &ip_hw_id->hw_id_kset;
1250 	spin_lock(&hw_id_kset->list_lock);
1251 	list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
1252 		list_del_init(el);
1253 		spin_unlock(&hw_id_kset->list_lock);
1254 		/* kobject is embedded in ip_hw_instance */
1255 		kobject_put(list_to_kobj(el));
1256 		spin_lock(&hw_id_kset->list_lock);
1257 	}
1258 	spin_unlock(&hw_id_kset->list_lock);
1259 	kobject_put(&ip_hw_id->hw_id_kset.kobj);
1260 }
1261 
1262 static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
1263 {
1264 	struct list_head *el, *tmp;
1265 	struct kset *ip_kset;
1266 
1267 	ip_kset = &ip_die_entry->ip_kset;
1268 	spin_lock(&ip_kset->list_lock);
1269 	list_for_each_prev_safe(el, tmp, &ip_kset->list) {
1270 		list_del_init(el);
1271 		spin_unlock(&ip_kset->list_lock);
1272 		amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
1273 		spin_lock(&ip_kset->list_lock);
1274 	}
1275 	spin_unlock(&ip_kset->list_lock);
1276 	kobject_put(&ip_die_entry->ip_kset.kobj);
1277 }
1278 
1279 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
1280 {
1281 	struct list_head *el, *tmp;
1282 	struct kset *die_kset;
1283 
1284 	die_kset = &adev->ip_top->die_kset;
1285 	spin_lock(&die_kset->list_lock);
1286 	list_for_each_prev_safe(el, tmp, &die_kset->list) {
1287 		list_del_init(el);
1288 		spin_unlock(&die_kset->list_lock);
1289 		amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
1290 		spin_lock(&die_kset->list_lock);
1291 	}
1292 	spin_unlock(&die_kset->list_lock);
1293 	kobject_put(&adev->ip_top->die_kset.kobj);
1294 	kobject_put(&adev->ip_top->kobj);
1295 }
1296 
1297 /* ================================================== */
1298 
1299 static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
1300 {
1301 	uint8_t num_base_address, subrev, variant;
1302 	struct binary_header *bhdr;
1303 	struct ip_discovery_header *ihdr;
1304 	struct die_header *dhdr;
1305 	struct ip_v4 *ip;
1306 	uint16_t die_offset;
1307 	uint16_t ip_offset;
1308 	uint16_t num_dies;
1309 	uint16_t num_ips;
1310 	int hw_ip;
1311 	int i, j, k;
1312 	int r;
1313 
1314 	r = amdgpu_discovery_init(adev);
1315 	if (r)
1316 		return r;
1317 
1318 	adev->gfx.xcc_mask = 0;
1319 	adev->sdma.sdma_mask = 0;
1320 	adev->vcn.inst_mask = 0;
1321 	adev->jpeg.inst_mask = 0;
1322 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1323 	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1324 			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1325 	num_dies = le16_to_cpu(ihdr->num_dies);
1326 
1327 	DRM_DEBUG("number of dies: %d\n", num_dies);
1328 
1329 	for (i = 0; i < num_dies; i++) {
1330 		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
1331 		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1332 		num_ips = le16_to_cpu(dhdr->num_ips);
1333 		ip_offset = die_offset + sizeof(*dhdr);
1334 
1335 		if (le16_to_cpu(dhdr->die_id) != i) {
1336 			DRM_ERROR("invalid die id %d, expected %d\n",
1337 					le16_to_cpu(dhdr->die_id), i);
1338 			return -EINVAL;
1339 		}
1340 
1341 		DRM_DEBUG("number of hardware IPs on die%d: %d\n",
1342 				le16_to_cpu(dhdr->die_id), num_ips);
1343 
1344 		for (j = 0; j < num_ips; j++) {
1345 			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
1346 
1347 			if (amdgpu_discovery_validate_ip(ip))
1348 				goto next_ip;
1349 
1350 			num_base_address = ip->num_base_address;
1351 
1352 			DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
1353 				  hw_id_names[le16_to_cpu(ip->hw_id)],
1354 				  le16_to_cpu(ip->hw_id),
1355 				  ip->instance_number,
1356 				  ip->major, ip->minor,
1357 				  ip->revision);
1358 
1359 			if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
1360 				/* Bit [5:0]: original revision value
1361 				 * Bit [7:6]: en/decode capability:
1362 				 *     0b00 : VCN functions normally
1363 				 *     0b10 : encode is disabled
1364 				 *     0b01 : decode is disabled
1365 				 */
1366 				if (adev->vcn.num_vcn_inst <
1367 				    AMDGPU_MAX_VCN_INSTANCES) {
1368 					adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
1369 						ip->revision & 0xc0;
1370 					adev->vcn.num_vcn_inst++;
1371 					adev->vcn.inst_mask |=
1372 						(1U << ip->instance_number);
1373 					adev->jpeg.inst_mask |=
1374 						(1U << ip->instance_number);
1375 				} else {
1376 					dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
1377 						adev->vcn.num_vcn_inst + 1,
1378 						AMDGPU_MAX_VCN_INSTANCES);
1379 				}
1380 				ip->revision &= ~0xc0;
1381 			}
1382 			if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
1383 			    le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
1384 			    le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
1385 			    le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
1386 				if (adev->sdma.num_instances <
1387 				    AMDGPU_MAX_SDMA_INSTANCES) {
1388 					adev->sdma.num_instances++;
1389 					adev->sdma.sdma_mask |=
1390 						(1U << ip->instance_number);
1391 				} else {
1392 					dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
1393 						adev->sdma.num_instances + 1,
1394 						AMDGPU_MAX_SDMA_INSTANCES);
1395 				}
1396 			}
1397 
1398 			if (le16_to_cpu(ip->hw_id) == VPE_HWID) {
1399 				if (adev->vpe.num_instances < AMDGPU_MAX_VPE_INSTANCES)
1400 					adev->vpe.num_instances++;
1401 				else
1402 					dev_err(adev->dev, "Too many VPE instances: %d vs %d\n",
1403 						adev->vpe.num_instances + 1,
1404 						AMDGPU_MAX_VPE_INSTANCES);
1405 			}
1406 
1407 			if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
1408 				adev->gmc.num_umc++;
1409 				adev->umc.node_inst_num++;
1410 			}
1411 
1412 			if (le16_to_cpu(ip->hw_id) == GC_HWID)
1413 				adev->gfx.xcc_mask |=
1414 					(1U << ip->instance_number);
1415 
1416 			for (k = 0; k < num_base_address; k++) {
1417 				/*
1418 				 * convert the endianness of base addresses in place,
1419 				 * so that we don't need to convert them when accessing adev->reg_offset.
1420 				 */
1421 				if (ihdr->base_addr_64_bit)
1422 					/* Truncate the 64bit base address from ip discovery
1423 					 * and only store lower 32bit ip base in reg_offset[].
1424 					 * Bits above 32 follow an ASIC-specific format, thus just
1425 					 * discard them and handle it within specific ASIC.
1426 					 * This way reg_offset[] and related helpers can
1427 					 * stay unchanged.
1428 					 * The base address is in dwords, thus clear the
1429 					 * highest 2 bits to store.
1430 					 */
1431 					ip->base_address[k] =
1432 						lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
1433 				else
1434 					ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
1435 				DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
1436 			}
1437 
1438 			for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
1439 				if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
1440 				    hw_id_map[hw_ip] != 0) {
1441 					DRM_DEBUG("set register base offset for %s\n",
1442 							hw_id_names[le16_to_cpu(ip->hw_id)]);
1443 					adev->reg_offset[hw_ip][ip->instance_number] =
1444 						ip->base_address;
1445 					/* Instance support is somewhat inconsistent.
1446 					 * SDMA is a good example.  Sienna cichlid has 4 total
1447 					 * SDMA instances, each enumerated separately (HWIDs
1448 					 * 42, 43, 68, 69).  Arcturus has 8 total SDMA instances,
1449 					 * but they are enumerated as multiple instances of the
1450 					 * same HWIDs (4x HWID 42, 4x HWID 43).  UMC is another
1451 					 * example.  On most chips there are multiple instances
1452 					 * with the same HWID.
1453 					 */
1454 
1455 					if (ihdr->version < 3) {
1456 						subrev = 0;
1457 						variant = 0;
1458 					} else {
1459 						subrev = ip->sub_revision;
1460 						variant = ip->variant;
1461 					}
1462 
1463 					adev->ip_versions[hw_ip]
1464 							 [ip->instance_number] =
1465 						IP_VERSION_FULL(ip->major,
1466 								ip->minor,
1467 								ip->revision,
1468 								variant,
1469 								subrev);
1470 				}
1471 			}
1472 
1473 next_ip:
1474 			if (ihdr->base_addr_64_bit)
1475 				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
1476 			else
1477 				ip_offset += struct_size(ip, base_address, ip->num_base_address);
1478 		}
1479 	}
1480 
1481 	return 0;
1482 }
1483 
1484 static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
1485 {
1486 	int vcn_harvest_count = 0;
1487 	int umc_harvest_count = 0;
1488 
1489 	/*
1490 	 * Harvest table does not fit Navi1x and legacy GPUs,
1491 	 * so read harvest bit per IP data structure to set
1492 	 * harvest configuration.
1493 	 */
1494 	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&
1495 	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3) &&
1496 	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 4)) {
1497 		if ((adev->pdev->device == 0x731E &&
1498 			(adev->pdev->revision == 0xC6 ||
1499 			 adev->pdev->revision == 0xC7)) ||
1500 			(adev->pdev->device == 0x7340 &&
1501 			 adev->pdev->revision == 0xC9) ||
1502 			(adev->pdev->device == 0x7360 &&
1503 			 adev->pdev->revision == 0xC7))
1504 			amdgpu_discovery_read_harvest_bit_per_ip(adev,
1505 				&vcn_harvest_count);
1506 	} else {
1507 		amdgpu_discovery_read_from_harvest_table(adev,
1508 							 &vcn_harvest_count,
1509 							 &umc_harvest_count);
1510 	}
1511 
1512 	amdgpu_discovery_harvest_config_quirk(adev);
1513 
1514 	if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
1515 		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
1516 		adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
1517 	}
1518 
1519 	if (umc_harvest_count < adev->gmc.num_umc) {
1520 		adev->gmc.num_umc -= umc_harvest_count;
1521 	}
1522 }
1523 
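/* Per-ASIC layouts of the GC info table; header.version_major/minor selects
 * which variant is parsed in amdgpu_discovery_get_gfx_info() below.
 */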
1524 union gc_info {
1525 	struct gc_info_v1_0 v1;
1526 	struct gc_info_v1_1 v1_1;
1527 	struct gc_info_v1_2 v1_2;
1528 	struct gc_info_v1_3 v1_3;
1529 	struct gc_info_v2_0 v2;
1530 	struct gc_info_v2_1 v2_1;
1531 };
1532 
1533 static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
1534 {
1535 	struct binary_header *bhdr;
1536 	union gc_info *gc_info;
1537 	u16 offset;
1538 
1539 	if (!adev->mman.discovery_bin) {
1540 		DRM_ERROR("ip discovery uninitialized\n");
1541 		return -EINVAL;
1542 	}
1543 
1544 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1545 	offset = le16_to_cpu(bhdr->table_list[GC].offset);
1546 
1547 	if (!offset)
1548 		return 0;
1549 
1550 	gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);
1551 
1552 	switch (le16_to_cpu(gc_info->v1.header.version_major)) {
1553 	case 1:
1554 		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
1555 		adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
1556 						      le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
1557 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1558 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
1559 		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
1560 		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
1561 		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
1562 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
1563 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
1564 		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
1565 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
1566 		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
1567 		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
1568 		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
1569 		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
1570 			le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1571 		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
1572 		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) {
1573 			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
1574 			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
1575 			adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
1576 		}
1577 		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) {
1578 			adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
1579 			adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
1580 			adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
1581 			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
1582 			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
1583 			adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
1584 			adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
1585 			adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
1586 		}
1587 		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 3) {
1588 			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v1_3.gc_tcp_size_per_cu);
1589 			adev->gfx.config.gc_tcp_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcp_cache_line_size);
1590 			adev->gfx.config.gc_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_size_per_sqc);
1591 			adev->gfx.config.gc_instruction_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_line_size);
1592 			adev->gfx.config.gc_scalar_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_size_per_sqc);
1593 			adev->gfx.config.gc_scalar_data_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_line_size);
1594 			adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v1_3.gc_tcc_size);
1595 			adev->gfx.config.gc_tcc_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcc_cache_line_size);
1596 		}
1597 		break;
1598 	case 2:
1599 		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
1600 		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
1601 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1602 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
1603 		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
1604 		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
1605 		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
1606 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
1607 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
1608 		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
1609 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
1610 		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
1611 		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
1612 		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
1613 		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
1614 			le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1615 		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
1616 		if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) {
1617 			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
1618 			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
1619 			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
1620 			adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
1621 			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
1622 			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
1623 			adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
1624 		}
1625 		break;
1626 	default:
1627 		dev_err(adev->dev,
1628 			"Unhandled GC info table %d.%d\n",
1629 			le16_to_cpu(gc_info->v1.header.version_major),
1630 			le16_to_cpu(gc_info->v1.header.version_minor));
1631 		return -EINVAL;
1632 	}
1633 	return 0;
1634 }
1635 
1636 union mall_info {
1637 	struct mall_info_v1_0 v1;
1638 	struct mall_info_v2_0 v2;
1639 };
1640 
1641 static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
1642 {
1643 	struct binary_header *bhdr;
1644 	union mall_info *mall_info;
1645 	u32 u, mall_size_per_umc, m_s_present, half_use;
1646 	u64 mall_size;
1647 	u16 offset;
1648 
1649 	if (!adev->mman.discovery_bin) {
1650 		DRM_ERROR("ip discovery uninitialized\n");
1651 		return -EINVAL;
1652 	}
1653 
1654 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1655 	offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
1656 
1657 	if (!offset)
1658 		return 0;
1659 
1660 	mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);
1661 
1662 	switch (le16_to_cpu(mall_info->v1.header.version_major)) {
1663 	case 1:
1664 		mall_size = 0;
1665 		mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
1666 		m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
1667 		half_use = le32_to_cpu(mall_info->v1.m_half_use);
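		/* Accumulate the total MALL size across UMC instances:
		 * channels flagged in m_s_present contribute twice the
		 * per-UMC size, channels in m_half_use contribute half,
		 * and the rest contribute the full per-UMC size.
		 */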
1668 		for (u = 0; u < adev->gmc.num_umc; u++) {
1669 			if (m_s_present & (1 << u))
1670 				mall_size += mall_size_per_umc * 2;
1671 			else if (half_use & (1 << u))
1672 				mall_size += mall_size_per_umc / 2;
1673 			else
1674 				mall_size += mall_size_per_umc;
1675 		}
1676 		adev->gmc.mall_size = mall_size;
1677 		adev->gmc.m_half_use = half_use;
1678 		break;
1679 	case 2:
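		/* v2 reports the per-UMC MALL size directly, so the total is
		 * simply the per-UMC size times the number of UMC instances.
		 */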
1680 		mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
1681 		adev->gmc.mall_size = (uint64_t)mall_size_per_umc * adev->gmc.num_umc;
1682 		break;
1683 	default:
1684 		dev_err(adev->dev,
1685 			"Unhandled MALL info table %d.%d\n",
1686 			le16_to_cpu(mall_info->v1.header.version_major),
1687 			le16_to_cpu(mall_info->v1.header.version_minor));
1688 		return -EINVAL;
1689 	}
1690 	return 0;
1691 }
1692 
1693 union vcn_info {
1694 	struct vcn_info_v1_0 v1;
1695 };
1696 
1697 static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
1698 {
1699 	struct binary_header *bhdr;
1700 	union vcn_info *vcn_info;
1701 	u16 offset;
1702 	int v;
1703 
1704 	if (!adev->mman.discovery_bin) {
1705 		DRM_ERROR("ip discovery uninitialized\n");
1706 		return -EINVAL;
1707 	}
1708 
1709 	/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1710 	 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES
1711 	 * but that may change in the future with new GPUs so keep this
1712 	 * check for defensive purposes.
1713 	 */
1714 	if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
1715 		dev_err(adev->dev, "invalid vcn instances\n");
1716 		return -EINVAL;
1717 	}
1718 
1719 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1720 	offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
1721 
1722 	if (!offset)
1723 		return 0;
1724 
1725 	vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);
1726 
1727 	switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
1728 	case 1:
1729 		/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1730 		 * so this won't overflow.
1731 		 */
1732 		for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
1733 			adev->vcn.vcn_codec_disable_mask[v] =
1734 				le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
1735 		}
1736 		break;
1737 	default:
1738 		dev_err(adev->dev,
1739 			"Unhandled VCN info table %d.%d\n",
1740 			le16_to_cpu(vcn_info->v1.header.version_major),
1741 			le16_to_cpu(vcn_info->v1.header.version_minor));
1742 		return -EINVAL;
1743 	}
1744 	return 0;
1745 }
1746 
1747 union nps_info {
1748 	struct nps_info_v1_0 v1;
1749 };
1750 
1751 int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
1752 				  uint32_t *nps_type,
1753 				  struct amdgpu_gmc_memrange **ranges,
1754 				  int *range_cnt)
1755 {
1756 	struct amdgpu_gmc_memrange *mem_ranges;
1757 	struct binary_header *bhdr;
1758 	union nps_info *nps_info;
1759 	u16 offset;
1760 	int i;
1761 
1762 	if (!nps_type || !range_cnt || !ranges)
1763 		return -EINVAL;
1764 
1765 	if (!adev->mman.discovery_bin) {
1766 		dev_err(adev->dev,
1767 			"fetch mem range failed, ip discovery uninitialized\n");
1768 		return -EINVAL;
1769 	}
1770 
1771 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1772 	offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset);
1773 
1774 	if (!offset)
1775 		return -ENOENT;
1776 
1777 	/* If verification fails, return as if NPS table doesn't exist */
1778 	if (amdgpu_discovery_verify_npsinfo(adev, bhdr))
1779 		return -ENOENT;
1780 
1781 	nps_info = (union nps_info *)(adev->mman.discovery_bin + offset);
1782 
1783 	switch (le16_to_cpu(nps_info->v1.header.version_major)) {
1784 	case 1:
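		/* Copy one memory range per instance reported by the table;
		 * nid_mask defaults to -1 and flags to 0 for the caller to
		 * interpret.
		 */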
1785 		mem_ranges = kvcalloc(nps_info->v1.count,
1786 				      sizeof(*mem_ranges),
1787 				      GFP_KERNEL);
1788 		if (!mem_ranges)
1789 			return -ENOMEM;
1790 		*nps_type = nps_info->v1.nps_type;
1791 		*range_cnt = nps_info->v1.count;
1792 		for (i = 0; i < *range_cnt; i++) {
1793 			mem_ranges[i].base_address =
1794 				nps_info->v1.instance_info[i].base_address;
1795 			mem_ranges[i].limit_address =
1796 				nps_info->v1.instance_info[i].limit_address;
1797 			mem_ranges[i].nid_mask = -1;
1798 			mem_ranges[i].flags = 0;
1799 		}
1800 		*ranges = mem_ranges;
1801 		break;
1802 	default:
1803 		dev_err(adev->dev, "Unhandled NPS info table %d.%d\n",
1804 			le16_to_cpu(nps_info->v1.header.version_major),
1805 			le16_to_cpu(nps_info->v1.header.version_minor));
1806 		return -EINVAL;
1807 	}
1808 
1809 	return 0;
1810 }
1811 
1812 static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
1813 {
1814 	/* what IP to use for this? */
1815 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1816 	case IP_VERSION(9, 0, 1):
1817 	case IP_VERSION(9, 1, 0):
1818 	case IP_VERSION(9, 2, 1):
1819 	case IP_VERSION(9, 2, 2):
1820 	case IP_VERSION(9, 3, 0):
1821 	case IP_VERSION(9, 4, 0):
1822 	case IP_VERSION(9, 4, 1):
1823 	case IP_VERSION(9, 4, 2):
1824 	case IP_VERSION(9, 4, 3):
1825 	case IP_VERSION(9, 4, 4):
1826 		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
1827 		break;
1828 	case IP_VERSION(10, 1, 10):
1829 	case IP_VERSION(10, 1, 1):
1830 	case IP_VERSION(10, 1, 2):
1831 	case IP_VERSION(10, 1, 3):
1832 	case IP_VERSION(10, 1, 4):
1833 	case IP_VERSION(10, 3, 0):
1834 	case IP_VERSION(10, 3, 1):
1835 	case IP_VERSION(10, 3, 2):
1836 	case IP_VERSION(10, 3, 3):
1837 	case IP_VERSION(10, 3, 4):
1838 	case IP_VERSION(10, 3, 5):
1839 	case IP_VERSION(10, 3, 6):
1840 	case IP_VERSION(10, 3, 7):
1841 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
1842 		break;
1843 	case IP_VERSION(11, 0, 0):
1844 	case IP_VERSION(11, 0, 1):
1845 	case IP_VERSION(11, 0, 2):
1846 	case IP_VERSION(11, 0, 3):
1847 	case IP_VERSION(11, 0, 4):
1848 	case IP_VERSION(11, 5, 0):
1849 	case IP_VERSION(11, 5, 1):
1850 	case IP_VERSION(11, 5, 2):
1851 		amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
1852 		break;
1853 	case IP_VERSION(12, 0, 0):
1854 	case IP_VERSION(12, 0, 1):
1855 		amdgpu_device_ip_block_add(adev, &soc24_common_ip_block);
1856 		break;
1857 	default:
1858 		dev_err(adev->dev,
1859 			"Failed to add common ip block(GC_HWIP:0x%x)\n",
1860 			amdgpu_ip_version(adev, GC_HWIP, 0));
1861 		return -EINVAL;
1862 	}
1863 	return 0;
1864 }
1865 
1866 static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
1867 {
1868 	/* use GC or MMHUB IP version */
1869 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1870 	case IP_VERSION(9, 0, 1):
1871 	case IP_VERSION(9, 1, 0):
1872 	case IP_VERSION(9, 2, 1):
1873 	case IP_VERSION(9, 2, 2):
1874 	case IP_VERSION(9, 3, 0):
1875 	case IP_VERSION(9, 4, 0):
1876 	case IP_VERSION(9, 4, 1):
1877 	case IP_VERSION(9, 4, 2):
1878 	case IP_VERSION(9, 4, 3):
1879 	case IP_VERSION(9, 4, 4):
1880 		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
1881 		break;
1882 	case IP_VERSION(10, 1, 10):
1883 	case IP_VERSION(10, 1, 1):
1884 	case IP_VERSION(10, 1, 2):
1885 	case IP_VERSION(10, 1, 3):
1886 	case IP_VERSION(10, 1, 4):
1887 	case IP_VERSION(10, 3, 0):
1888 	case IP_VERSION(10, 3, 1):
1889 	case IP_VERSION(10, 3, 2):
1890 	case IP_VERSION(10, 3, 3):
1891 	case IP_VERSION(10, 3, 4):
1892 	case IP_VERSION(10, 3, 5):
1893 	case IP_VERSION(10, 3, 6):
1894 	case IP_VERSION(10, 3, 7):
1895 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
1896 		break;
1897 	case IP_VERSION(11, 0, 0):
1898 	case IP_VERSION(11, 0, 1):
1899 	case IP_VERSION(11, 0, 2):
1900 	case IP_VERSION(11, 0, 3):
1901 	case IP_VERSION(11, 0, 4):
1902 	case IP_VERSION(11, 5, 0):
1903 	case IP_VERSION(11, 5, 1):
1904 	case IP_VERSION(11, 5, 2):
1905 		amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
1906 		break;
1907 	case IP_VERSION(12, 0, 0):
1908 	case IP_VERSION(12, 0, 1):
1909 		amdgpu_device_ip_block_add(adev, &gmc_v12_0_ip_block);
1910 		break;
1911 	default:
1912 		dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
1913 			amdgpu_ip_version(adev, GC_HWIP, 0));
1914 		return -EINVAL;
1915 	}
1916 	return 0;
1917 }
1918 
1919 static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
1920 {
1921 	switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) {
1922 	case IP_VERSION(4, 0, 0):
1923 	case IP_VERSION(4, 0, 1):
1924 	case IP_VERSION(4, 1, 0):
1925 	case IP_VERSION(4, 1, 1):
1926 	case IP_VERSION(4, 3, 0):
1927 		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
1928 		break;
1929 	case IP_VERSION(4, 2, 0):
1930 	case IP_VERSION(4, 2, 1):
1931 	case IP_VERSION(4, 4, 0):
1932 	case IP_VERSION(4, 4, 2):
1933 	case IP_VERSION(4, 4, 5):
1934 		amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
1935 		break;
1936 	case IP_VERSION(5, 0, 0):
1937 	case IP_VERSION(5, 0, 1):
1938 	case IP_VERSION(5, 0, 2):
1939 	case IP_VERSION(5, 0, 3):
1940 	case IP_VERSION(5, 2, 0):
1941 	case IP_VERSION(5, 2, 1):
1942 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
1943 		break;
1944 	case IP_VERSION(6, 0, 0):
1945 	case IP_VERSION(6, 0, 1):
1946 	case IP_VERSION(6, 0, 2):
1947 		amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
1948 		break;
1949 	case IP_VERSION(6, 1, 0):
1950 		amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
1951 		break;
1952 	case IP_VERSION(7, 0, 0):
1953 		amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block);
1954 		break;
1955 	default:
1956 		dev_err(adev->dev,
1957 			"Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
1958 			amdgpu_ip_version(adev, OSSSYS_HWIP, 0));
1959 		return -EINVAL;
1960 	}
1961 	return 0;
1962 }
1963 
1964 static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
1965 {
1966 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
1967 	case IP_VERSION(9, 0, 0):
1968 		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
1969 		break;
1970 	case IP_VERSION(10, 0, 0):
1971 	case IP_VERSION(10, 0, 1):
1972 		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
1973 		break;
1974 	case IP_VERSION(11, 0, 0):
1975 	case IP_VERSION(11, 0, 2):
1976 	case IP_VERSION(11, 0, 4):
1977 	case IP_VERSION(11, 0, 5):
1978 	case IP_VERSION(11, 0, 9):
1979 	case IP_VERSION(11, 0, 7):
1980 	case IP_VERSION(11, 0, 11):
1981 	case IP_VERSION(11, 0, 12):
1982 	case IP_VERSION(11, 0, 13):
1983 	case IP_VERSION(11, 5, 0):
1984 		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
1985 		break;
1986 	case IP_VERSION(11, 0, 8):
1987 		amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
1988 		break;
1989 	case IP_VERSION(11, 0, 3):
1990 	case IP_VERSION(12, 0, 1):
1991 		amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
1992 		break;
1993 	case IP_VERSION(13, 0, 0):
1994 	case IP_VERSION(13, 0, 1):
1995 	case IP_VERSION(13, 0, 2):
1996 	case IP_VERSION(13, 0, 3):
1997 	case IP_VERSION(13, 0, 5):
1998 	case IP_VERSION(13, 0, 6):
1999 	case IP_VERSION(13, 0, 7):
2000 	case IP_VERSION(13, 0, 8):
2001 	case IP_VERSION(13, 0, 10):
2002 	case IP_VERSION(13, 0, 11):
2003 	case IP_VERSION(13, 0, 14):
2004 	case IP_VERSION(14, 0, 0):
2005 	case IP_VERSION(14, 0, 1):
2006 	case IP_VERSION(14, 0, 4):
2007 		amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
2008 		break;
2009 	case IP_VERSION(13, 0, 4):
2010 		amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
2011 		break;
2012 	case IP_VERSION(14, 0, 2):
2013 	case IP_VERSION(14, 0, 3):
2014 		amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block);
2015 		break;
2016 	default:
2017 		dev_err(adev->dev,
2018 			"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
2019 			amdgpu_ip_version(adev, MP0_HWIP, 0));
2020 		return -EINVAL;
2021 	}
2022 	return 0;
2023 }
2024 
2025 static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
2026 {
2027 	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
2028 	case IP_VERSION(9, 0, 0):
2029 	case IP_VERSION(10, 0, 0):
2030 	case IP_VERSION(10, 0, 1):
2031 	case IP_VERSION(11, 0, 2):
2032 		if (adev->asic_type == CHIP_ARCTURUS)
2033 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
2034 		else
2035 			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
2036 		break;
2037 	case IP_VERSION(11, 0, 0):
2038 	case IP_VERSION(11, 0, 5):
2039 	case IP_VERSION(11, 0, 9):
2040 	case IP_VERSION(11, 0, 7):
2041 	case IP_VERSION(11, 0, 8):
2042 	case IP_VERSION(11, 0, 11):
2043 	case IP_VERSION(11, 0, 12):
2044 	case IP_VERSION(11, 0, 13):
2045 	case IP_VERSION(11, 5, 0):
2046 		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
2047 		break;
2048 	case IP_VERSION(12, 0, 0):
2049 	case IP_VERSION(12, 0, 1):
2050 		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
2051 		break;
2052 	case IP_VERSION(13, 0, 0):
2053 	case IP_VERSION(13, 0, 1):
2054 	case IP_VERSION(13, 0, 2):
2055 	case IP_VERSION(13, 0, 3):
2056 	case IP_VERSION(13, 0, 4):
2057 	case IP_VERSION(13, 0, 5):
2058 	case IP_VERSION(13, 0, 6):
2059 	case IP_VERSION(13, 0, 7):
2060 	case IP_VERSION(13, 0, 8):
2061 	case IP_VERSION(13, 0, 10):
2062 	case IP_VERSION(13, 0, 11):
2063 	case IP_VERSION(13, 0, 14):
2064 		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
2065 		break;
2066 	case IP_VERSION(14, 0, 0):
2067 	case IP_VERSION(14, 0, 1):
2068 	case IP_VERSION(14, 0, 2):
2069 	case IP_VERSION(14, 0, 3):
2070 	case IP_VERSION(14, 0, 4):
2071 		amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
2072 		break;
2073 	default:
2074 		dev_err(adev->dev,
2075 			"Failed to add smu ip block(MP1_HWIP:0x%x)\n",
2076 			amdgpu_ip_version(adev, MP1_HWIP, 0));
2077 		return -EINVAL;
2078 	}
2079 	return 0;
2080 }
2081 
2082 #if defined(CONFIG_DRM_AMD_DC)
2083 static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
2084 {
2085 	amdgpu_device_set_sriov_virtual_display(adev);
2086 	amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2087 }
2088 #endif
2089 
2090 static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
2091 {
2092 	if (adev->enable_virtual_display) {
2093 		amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2094 		return 0;
2095 	}
2096 
2097 	if (!amdgpu_device_has_dc_support(adev))
2098 		return 0;
2099 
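	/* DCN-based parts report a DCE_HWIP version; Vega-class parts with
	 * DCE 12 report a DCI_HWIP version instead.
	 */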
2100 #if defined(CONFIG_DRM_AMD_DC)
2101 	if (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
2102 		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
2103 		case IP_VERSION(1, 0, 0):
2104 		case IP_VERSION(1, 0, 1):
2105 		case IP_VERSION(2, 0, 2):
2106 		case IP_VERSION(2, 0, 0):
2107 		case IP_VERSION(2, 0, 3):
2108 		case IP_VERSION(2, 1, 0):
2109 		case IP_VERSION(3, 0, 0):
2110 		case IP_VERSION(3, 0, 2):
2111 		case IP_VERSION(3, 0, 3):
2112 		case IP_VERSION(3, 0, 1):
2113 		case IP_VERSION(3, 1, 2):
2114 		case IP_VERSION(3, 1, 3):
2115 		case IP_VERSION(3, 1, 4):
2116 		case IP_VERSION(3, 1, 5):
2117 		case IP_VERSION(3, 1, 6):
2118 		case IP_VERSION(3, 2, 0):
2119 		case IP_VERSION(3, 2, 1):
2120 		case IP_VERSION(3, 5, 0):
2121 		case IP_VERSION(3, 5, 1):
2122 		case IP_VERSION(4, 1, 0):
2123 			/* TODO: Fix IP version. DC code expects version 4.0.1 */
2124 			if (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(4, 1, 0))
2125 				adev->ip_versions[DCE_HWIP][0] = IP_VERSION(4, 0, 1);
2126 
2127 			if (amdgpu_sriov_vf(adev))
2128 				amdgpu_discovery_set_sriov_display(adev);
2129 			else
2130 				amdgpu_device_ip_block_add(adev, &dm_ip_block);
2131 			break;
2132 		default:
2133 			dev_err(adev->dev,
2134 				"Failed to add dm ip block(DCE_HWIP:0x%x)\n",
2135 				amdgpu_ip_version(adev, DCE_HWIP, 0));
2136 			return -EINVAL;
2137 		}
2138 	} else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
2139 		switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
2140 		case IP_VERSION(12, 0, 0):
2141 		case IP_VERSION(12, 0, 1):
2142 		case IP_VERSION(12, 1, 0):
2143 			if (amdgpu_sriov_vf(adev))
2144 				amdgpu_discovery_set_sriov_display(adev);
2145 			else
2146 				amdgpu_device_ip_block_add(adev, &dm_ip_block);
2147 			break;
2148 		default:
2149 			dev_err(adev->dev,
2150 				"Failed to add dm ip block(DCI_HWIP:0x%x)\n",
2151 				amdgpu_ip_version(adev, DCI_HWIP, 0));
2152 			return -EINVAL;
2153 		}
2154 	}
2155 #endif
2156 	return 0;
2157 }
2158 
2159 static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
2160 {
2161 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2162 	case IP_VERSION(9, 0, 1):
2163 	case IP_VERSION(9, 1, 0):
2164 	case IP_VERSION(9, 2, 1):
2165 	case IP_VERSION(9, 2, 2):
2166 	case IP_VERSION(9, 3, 0):
2167 	case IP_VERSION(9, 4, 0):
2168 	case IP_VERSION(9, 4, 1):
2169 	case IP_VERSION(9, 4, 2):
2170 		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
2171 		break;
2172 	case IP_VERSION(9, 4, 3):
2173 	case IP_VERSION(9, 4, 4):
2174 		amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
2175 		break;
2176 	case IP_VERSION(10, 1, 10):
2177 	case IP_VERSION(10, 1, 2):
2178 	case IP_VERSION(10, 1, 1):
2179 	case IP_VERSION(10, 1, 3):
2180 	case IP_VERSION(10, 1, 4):
2181 	case IP_VERSION(10, 3, 0):
2182 	case IP_VERSION(10, 3, 2):
2183 	case IP_VERSION(10, 3, 1):
2184 	case IP_VERSION(10, 3, 4):
2185 	case IP_VERSION(10, 3, 5):
2186 	case IP_VERSION(10, 3, 6):
2187 	case IP_VERSION(10, 3, 3):
2188 	case IP_VERSION(10, 3, 7):
2189 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
2190 		break;
2191 	case IP_VERSION(11, 0, 0):
2192 	case IP_VERSION(11, 0, 1):
2193 	case IP_VERSION(11, 0, 2):
2194 	case IP_VERSION(11, 0, 3):
2195 	case IP_VERSION(11, 0, 4):
2196 	case IP_VERSION(11, 5, 0):
2197 	case IP_VERSION(11, 5, 1):
2198 	case IP_VERSION(11, 5, 2):
2199 		amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
2200 		break;
2201 	case IP_VERSION(12, 0, 0):
2202 	case IP_VERSION(12, 0, 1):
2203 		amdgpu_device_ip_block_add(adev, &gfx_v12_0_ip_block);
2204 		break;
2205 	default:
2206 		dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
2207 			amdgpu_ip_version(adev, GC_HWIP, 0));
2208 		return -EINVAL;
2209 	}
2210 	return 0;
2211 }
2212 
2213 static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
2214 {
2215 	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
2216 	case IP_VERSION(4, 0, 0):
2217 	case IP_VERSION(4, 0, 1):
2218 	case IP_VERSION(4, 1, 0):
2219 	case IP_VERSION(4, 1, 1):
2220 	case IP_VERSION(4, 1, 2):
2221 	case IP_VERSION(4, 2, 0):
2222 	case IP_VERSION(4, 2, 2):
2223 	case IP_VERSION(4, 4, 0):
2224 		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
2225 		break;
2226 	case IP_VERSION(4, 4, 2):
2227 	case IP_VERSION(4, 4, 5):
2228 		amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
2229 		break;
2230 	case IP_VERSION(5, 0, 0):
2231 	case IP_VERSION(5, 0, 1):
2232 	case IP_VERSION(5, 0, 2):
2233 	case IP_VERSION(5, 0, 5):
2234 		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
2235 		break;
2236 	case IP_VERSION(5, 2, 0):
2237 	case IP_VERSION(5, 2, 2):
2238 	case IP_VERSION(5, 2, 4):
2239 	case IP_VERSION(5, 2, 5):
2240 	case IP_VERSION(5, 2, 6):
2241 	case IP_VERSION(5, 2, 3):
2242 	case IP_VERSION(5, 2, 1):
2243 	case IP_VERSION(5, 2, 7):
2244 		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
2245 		break;
2246 	case IP_VERSION(6, 0, 0):
2247 	case IP_VERSION(6, 0, 1):
2248 	case IP_VERSION(6, 0, 2):
2249 	case IP_VERSION(6, 0, 3):
2250 	case IP_VERSION(6, 1, 0):
2251 	case IP_VERSION(6, 1, 1):
2252 	case IP_VERSION(6, 1, 2):
2253 		amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
2254 		break;
2255 	case IP_VERSION(7, 0, 0):
2256 	case IP_VERSION(7, 0, 1):
2257 		amdgpu_device_ip_block_add(adev, &sdma_v7_0_ip_block);
2258 		break;
2259 	default:
2260 		dev_err(adev->dev,
2261 			"Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
2262 			amdgpu_ip_version(adev, SDMA0_HWIP, 0));
2263 		return -EINVAL;
2264 	}
2265 	return 0;
2266 }
2267 
2268 static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
2269 {
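	/* ASICs that expose a VCE block use the legacy UVD/VCE engines;
	 * everything else uses VCN (plus JPEG where applicable).
	 */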
2270 	if (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
2271 		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
2272 		case IP_VERSION(7, 0, 0):
2273 		case IP_VERSION(7, 2, 0):
2274 			/* UVD is not supported on vega20 SR-IOV */
2275 			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2276 				amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
2277 			break;
2278 		default:
2279 			dev_err(adev->dev,
2280 				"Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
2281 				amdgpu_ip_version(adev, UVD_HWIP, 0));
2282 			return -EINVAL;
2283 		}
2284 		switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
2285 		case IP_VERSION(4, 0, 0):
2286 		case IP_VERSION(4, 1, 0):
2287 			/* VCE is not supported on vega20 SR-IOV */
2288 			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2289 				amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
2290 			break;
2291 		default:
2292 			dev_err(adev->dev,
2293 				"Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
2294 				amdgpu_ip_version(adev, VCE_HWIP, 0));
2295 			return -EINVAL;
2296 		}
2297 	} else {
2298 		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
2299 		case IP_VERSION(1, 0, 0):
2300 		case IP_VERSION(1, 0, 1):
2301 			amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
2302 			break;
2303 		case IP_VERSION(2, 0, 0):
2304 		case IP_VERSION(2, 0, 2):
2305 		case IP_VERSION(2, 2, 0):
2306 			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
2307 			if (!amdgpu_sriov_vf(adev))
2308 				amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
2309 			break;
2310 		case IP_VERSION(2, 0, 3):
2311 			break;
2312 		case IP_VERSION(2, 5, 0):
2313 			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
2314 			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
2315 			break;
2316 		case IP_VERSION(2, 6, 0):
2317 			amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
2318 			amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
2319 			break;
2320 		case IP_VERSION(3, 0, 0):
2321 		case IP_VERSION(3, 0, 16):
2322 		case IP_VERSION(3, 1, 1):
2323 		case IP_VERSION(3, 1, 2):
2324 		case IP_VERSION(3, 0, 2):
2325 			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2326 			if (!amdgpu_sriov_vf(adev))
2327 				amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
2328 			break;
2329 		case IP_VERSION(3, 0, 33):
2330 			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2331 			break;
2332 		case IP_VERSION(4, 0, 0):
2333 		case IP_VERSION(4, 0, 2):
2334 		case IP_VERSION(4, 0, 4):
2335 			amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
2336 			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
2337 			break;
2338 		case IP_VERSION(4, 0, 3):
2339 			amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
2340 			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
2341 			break;
2342 		case IP_VERSION(4, 0, 5):
2343 		case IP_VERSION(4, 0, 6):
2344 			amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block);
2345 			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block);
2346 			break;
2347 		case IP_VERSION(5, 0, 0):
2348 			amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
2349 			amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block);
2350 			break;
2351 		default:
2352 			dev_err(adev->dev,
2353 				"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
2354 				amdgpu_ip_version(adev, UVD_HWIP, 0));
2355 			return -EINVAL;
2356 		}
2357 	}
2358 	return 0;
2359 }
2360 
2361 static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
2362 {
2363 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2364 	case IP_VERSION(11, 0, 0):
2365 	case IP_VERSION(11, 0, 1):
2366 	case IP_VERSION(11, 0, 2):
2367 	case IP_VERSION(11, 0, 3):
2368 	case IP_VERSION(11, 0, 4):
2369 	case IP_VERSION(11, 5, 0):
2370 	case IP_VERSION(11, 5, 1):
2371 	case IP_VERSION(11, 5, 2):
2372 		amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
2373 		adev->enable_mes = true;
2374 		adev->enable_mes_kiq = true;
2375 		break;
2376 	case IP_VERSION(12, 0, 0):
2377 	case IP_VERSION(12, 0, 1):
2378 		amdgpu_device_ip_block_add(adev, &mes_v12_0_ip_block);
2379 		adev->enable_mes = true;
2380 		adev->enable_mes_kiq = true;
2381 		if (amdgpu_uni_mes)
2382 			adev->enable_uni_mes = true;
2383 		break;
2384 	default:
2385 		break;
2386 	}
2387 	return 0;
2388 }
2389 
2390 static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
2391 {
2392 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2393 	case IP_VERSION(9, 4, 3):
2394 	case IP_VERSION(9, 4, 4):
2395 		aqua_vanjaram_init_soc_config(adev);
2396 		break;
2397 	default:
2398 		break;
2399 	}
2400 }
2401 
2402 static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev)
2403 {
2404 	switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
2405 	case IP_VERSION(6, 1, 0):
2406 	case IP_VERSION(6, 1, 1):
2407 	case IP_VERSION(6, 1, 3):
2408 		amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block);
2409 		break;
2410 	default:
2411 		break;
2412 	}
2413 
2414 	return 0;
2415 }
2416 
2417 static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
2418 {
2419 	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
2420 	case IP_VERSION(4, 0, 5):
2421 	case IP_VERSION(4, 0, 6):
2422 		if (amdgpu_umsch_mm & 0x1) {
2423 			amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
2424 			adev->enable_umsch_mm = true;
2425 		}
2426 		break;
2427 	default:
2428 		break;
2429 	}
2430 
2431 	return 0;
2432 }
2433 
2434 static int amdgpu_discovery_set_isp_ip_blocks(struct amdgpu_device *adev)
2435 {
2436 #if defined(CONFIG_DRM_AMD_ISP)
2437 	switch (amdgpu_ip_version(adev, ISP_HWIP, 0)) {
2438 	case IP_VERSION(4, 1, 0):
2439 		amdgpu_device_ip_block_add(adev, &isp_v4_1_0_ip_block);
2440 		break;
2441 	case IP_VERSION(4, 1, 1):
2442 		amdgpu_device_ip_block_add(adev, &isp_v4_1_1_ip_block);
2443 		break;
2444 	default:
2445 		break;
2446 	}
2447 #endif
2448 
2449 	return 0;
2450 }
2451 
2452 int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
2453 {
2454 	int r;
2455 
2456 	switch (adev->asic_type) {
2457 	case CHIP_VEGA10:
2458 		/* This is not fatal.  We only need the discovery
2459 		 * binary for sysfs.  We don't need it for a
2460 		 * functional system.
2461 		 */
2462 		amdgpu_discovery_init(adev);
2463 		vega10_reg_base_init(adev);
2464 		adev->sdma.num_instances = 2;
2465 		adev->gmc.num_umc = 4;
2466 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2467 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2468 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
2469 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
2470 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
2471 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
2472 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2473 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
2474 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
2475 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2476 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2477 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2478 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
2479 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
2480 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2481 		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2482 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
2483 		break;
2484 	case CHIP_VEGA12:
2485 		/* This is not fatal.  We only need the discovery
2486 		 * binary for sysfs.  We don't need it for a
2487 		 * functional system.
2488 		 */
2489 		amdgpu_discovery_init(adev);
2490 		vega10_reg_base_init(adev);
2491 		adev->sdma.num_instances = 2;
2492 		adev->gmc.num_umc = 4;
2493 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2494 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2495 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
2496 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
2497 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
2498 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
2499 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
2500 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
2501 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
2502 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2503 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2504 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2505 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
2506 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
2507 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2508 		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2509 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
2510 		break;
2511 	case CHIP_RAVEN:
2512 		/* This is not fatal.  We only need the discovery
2513 		 * binary for sysfs.  We don't need it for a
2514 		 * functional system.
2515 		 */
2516 		amdgpu_discovery_init(adev);
2517 		vega10_reg_base_init(adev);
2518 		adev->sdma.num_instances = 1;
2519 		adev->vcn.num_vcn_inst = 1;
2520 		adev->gmc.num_umc = 2;
2521 		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
2522 			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2523 			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2524 			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
2525 			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
2526 			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
2527 			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
2528 			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
2529 			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
2530 			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
2531 			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
2532 			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
2533 			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
2534 			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
2535 			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
2536 			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
2537 		} else {
2538 			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2539 			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2540 			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
2541 			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
2542 			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
2543 			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2544 			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
2545 			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
2546 			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
2547 			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
2548 			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
2549 			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
2550 			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
2551 			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
2552 			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
2553 		}
2554 		break;
2555 	case CHIP_VEGA20:
2556 		/* This is not fatal.  We only need the discovery
2557 		 * binary for sysfs.  We don't need it for a
2558 		 * functional system.
2559 		 */
2560 		amdgpu_discovery_init(adev);
2561 		vega20_reg_base_init(adev);
2562 		adev->sdma.num_instances = 2;
2563 		adev->gmc.num_umc = 8;
2564 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2565 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2566 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
2567 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
2568 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
2569 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
2570 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
2571 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
2572 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
2573 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
2574 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2575 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
2576 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
2577 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
2578 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
2579 		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
2580 		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
2581 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
2582 		break;
2583 	case CHIP_ARCTURUS:
2584 		/* This is not fatal.  We only need the discovery
2585 		 * binary for sysfs.  We don't need it for a
2586 		 * functional system.
2587 		 */
2588 		amdgpu_discovery_init(adev);
2589 		arct_reg_base_init(adev);
2590 		adev->sdma.num_instances = 8;
2591 		adev->vcn.num_vcn_inst = 2;
2592 		adev->gmc.num_umc = 8;
2593 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2594 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2595 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
2596 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
2597 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
2598 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
2599 		adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
2600 		adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
2601 		adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
2602 		adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
2603 		adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
2604 		adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
2605 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
2606 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
2607 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
2608 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
2609 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2610 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
2611 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
2612 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
2613 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
2614 		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
2615 		break;
2616 	case CHIP_ALDEBARAN:
2617 		/* This is not fatal.  We only need the discovery
2618 		 * binary for sysfs.  We don't need it for a
2619 		 * functional system.
2620 		 */
2621 		amdgpu_discovery_init(adev);
2622 		aldebaran_reg_base_init(adev);
2623 		adev->sdma.num_instances = 5;
2624 		adev->vcn.num_vcn_inst = 2;
2625 		adev->gmc.num_umc = 4;
2626 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2627 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2628 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
2629 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
2630 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
2631 		adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
2632 		adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
2633 		adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
2634 		adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
2635 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
2636 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
2637 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
2638 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
2639 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
2640 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
2641 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
2642 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
2643 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
2644 		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
2645 		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
2646 		break;
2647 	default:
2648 		r = amdgpu_discovery_reg_base_init(adev);
2649 		if (r) {
2650 			drm_err(&adev->ddev, "discovery failed: %d\n", r);
2651 			return r;
2652 		}
2653 
2654 		amdgpu_discovery_harvest_ip(adev);
2655 		amdgpu_discovery_get_gfx_info(adev);
2656 		amdgpu_discovery_get_mall_info(adev);
2657 		amdgpu_discovery_get_vcn_info(adev);
2658 		break;
2659 	}
2660 
2661 	amdgpu_discovery_init_soc_config(adev);
2662 	amdgpu_discovery_sysfs_init(adev);
2663 
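	/* Map the GC IP version onto a chip family for the rest of the driver. */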
2664 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2665 	case IP_VERSION(9, 0, 1):
2666 	case IP_VERSION(9, 2, 1):
2667 	case IP_VERSION(9, 4, 0):
2668 	case IP_VERSION(9, 4, 1):
2669 	case IP_VERSION(9, 4, 2):
2670 	case IP_VERSION(9, 4, 3):
2671 	case IP_VERSION(9, 4, 4):
2672 		adev->family = AMDGPU_FAMILY_AI;
2673 		break;
2674 	case IP_VERSION(9, 1, 0):
2675 	case IP_VERSION(9, 2, 2):
2676 	case IP_VERSION(9, 3, 0):
2677 		adev->family = AMDGPU_FAMILY_RV;
2678 		break;
2679 	case IP_VERSION(10, 1, 10):
2680 	case IP_VERSION(10, 1, 1):
2681 	case IP_VERSION(10, 1, 2):
2682 	case IP_VERSION(10, 1, 3):
2683 	case IP_VERSION(10, 1, 4):
2684 	case IP_VERSION(10, 3, 0):
2685 	case IP_VERSION(10, 3, 2):
2686 	case IP_VERSION(10, 3, 4):
2687 	case IP_VERSION(10, 3, 5):
2688 		adev->family = AMDGPU_FAMILY_NV;
2689 		break;
2690 	case IP_VERSION(10, 3, 1):
2691 		adev->family = AMDGPU_FAMILY_VGH;
2692 		adev->apu_flags |= AMD_APU_IS_VANGOGH;
2693 		break;
2694 	case IP_VERSION(10, 3, 3):
2695 		adev->family = AMDGPU_FAMILY_YC;
2696 		break;
2697 	case IP_VERSION(10, 3, 6):
2698 		adev->family = AMDGPU_FAMILY_GC_10_3_6;
2699 		break;
2700 	case IP_VERSION(10, 3, 7):
2701 		adev->family = AMDGPU_FAMILY_GC_10_3_7;
2702 		break;
2703 	case IP_VERSION(11, 0, 0):
2704 	case IP_VERSION(11, 0, 2):
2705 	case IP_VERSION(11, 0, 3):
2706 		adev->family = AMDGPU_FAMILY_GC_11_0_0;
2707 		break;
2708 	case IP_VERSION(11, 0, 1):
2709 	case IP_VERSION(11, 0, 4):
2710 		adev->family = AMDGPU_FAMILY_GC_11_0_1;
2711 		break;
2712 	case IP_VERSION(11, 5, 0):
2713 	case IP_VERSION(11, 5, 1):
2714 	case IP_VERSION(11, 5, 2):
2715 		adev->family = AMDGPU_FAMILY_GC_11_5_0;
2716 		break;
2717 	case IP_VERSION(12, 0, 0):
2718 	case IP_VERSION(12, 0, 1):
2719 		adev->family = AMDGPU_FAMILY_GC_12_0_0;
2720 		break;
2721 	default:
2722 		return -EINVAL;
2723 	}
2724 
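	/* GC versions found only in APUs: flag the device as an APU. */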
2725 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2726 	case IP_VERSION(9, 1, 0):
2727 	case IP_VERSION(9, 2, 2):
2728 	case IP_VERSION(9, 3, 0):
2729 	case IP_VERSION(10, 1, 3):
2730 	case IP_VERSION(10, 1, 4):
2731 	case IP_VERSION(10, 3, 1):
2732 	case IP_VERSION(10, 3, 3):
2733 	case IP_VERSION(10, 3, 6):
2734 	case IP_VERSION(10, 3, 7):
2735 	case IP_VERSION(11, 0, 1):
2736 	case IP_VERSION(11, 0, 4):
2737 	case IP_VERSION(11, 5, 0):
2738 	case IP_VERSION(11, 5, 1):
2739 	case IP_VERSION(11, 5, 2):
2740 		adev->flags |= AMD_IS_APU;
2741 		break;
2742 	default:
2743 		break;
2744 	}
2745 
2746 	if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(4, 8, 0))
2747 		adev->gmc.xgmi.supported = true;
2748 
2749 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
2750 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
2751 		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 4, 0);
2752 
2753 	/* set NBIO version */
2754 	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
2755 	case IP_VERSION(6, 1, 0):
2756 	case IP_VERSION(6, 2, 0):
2757 		adev->nbio.funcs = &nbio_v6_1_funcs;
2758 		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
2759 		break;
2760 	case IP_VERSION(7, 0, 0):
2761 	case IP_VERSION(7, 0, 1):
2762 	case IP_VERSION(2, 5, 0):
2763 		adev->nbio.funcs = &nbio_v7_0_funcs;
2764 		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
2765 		break;
2766 	case IP_VERSION(7, 4, 0):
2767 	case IP_VERSION(7, 4, 1):
2768 	case IP_VERSION(7, 4, 4):
2769 		adev->nbio.funcs = &nbio_v7_4_funcs;
2770 		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
2771 		break;
2772 	case IP_VERSION(7, 9, 0):
2773 		adev->nbio.funcs = &nbio_v7_9_funcs;
2774 		adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
2775 		break;
2776 	case IP_VERSION(7, 11, 0):
2777 	case IP_VERSION(7, 11, 1):
2778 	case IP_VERSION(7, 11, 3):
2779 		adev->nbio.funcs = &nbio_v7_11_funcs;
2780 		adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
2781 		break;
2782 	case IP_VERSION(7, 2, 0):
2783 	case IP_VERSION(7, 2, 1):
2784 	case IP_VERSION(7, 3, 0):
2785 	case IP_VERSION(7, 5, 0):
2786 	case IP_VERSION(7, 5, 1):
2787 		adev->nbio.funcs = &nbio_v7_2_funcs;
2788 		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
2789 		break;
2790 	case IP_VERSION(2, 1, 1):
2791 	case IP_VERSION(2, 3, 0):
2792 	case IP_VERSION(2, 3, 1):
2793 	case IP_VERSION(2, 3, 2):
2794 	case IP_VERSION(3, 3, 0):
2795 	case IP_VERSION(3, 3, 1):
2796 	case IP_VERSION(3, 3, 2):
2797 	case IP_VERSION(3, 3, 3):
2798 		adev->nbio.funcs = &nbio_v2_3_funcs;
2799 		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
2800 		break;
2801 	case IP_VERSION(4, 3, 0):
2802 	case IP_VERSION(4, 3, 1):
2803 		if (amdgpu_sriov_vf(adev))
2804 			adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
2805 		else
2806 			adev->nbio.funcs = &nbio_v4_3_funcs;
2807 		adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
2808 		break;
2809 	case IP_VERSION(7, 7, 0):
2810 	case IP_VERSION(7, 7, 1):
2811 		adev->nbio.funcs = &nbio_v7_7_funcs;
2812 		adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
2813 		break;
2814 	case IP_VERSION(6, 3, 1):
2815 		adev->nbio.funcs = &nbif_v6_3_1_funcs;
2816 		adev->nbio.hdp_flush_reg = &nbif_v6_3_1_hdp_flush_reg;
2817 		break;
2818 	default:
2819 		break;
2820 	}
2821 
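	/* set HDP callbacks based on the HDP IP version */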
2822 	switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) {
2823 	case IP_VERSION(4, 0, 0):
2824 	case IP_VERSION(4, 0, 1):
2825 	case IP_VERSION(4, 1, 0):
2826 	case IP_VERSION(4, 1, 1):
2827 	case IP_VERSION(4, 1, 2):
2828 	case IP_VERSION(4, 2, 0):
2829 	case IP_VERSION(4, 2, 1):
2830 	case IP_VERSION(4, 4, 0):
2831 	case IP_VERSION(4, 4, 2):
2832 	case IP_VERSION(4, 4, 5):
2833 		adev->hdp.funcs = &hdp_v4_0_funcs;
2834 		break;
2835 	case IP_VERSION(5, 0, 0):
2836 	case IP_VERSION(5, 0, 1):
2837 	case IP_VERSION(5, 0, 2):
2838 	case IP_VERSION(5, 0, 3):
2839 	case IP_VERSION(5, 0, 4):
2840 	case IP_VERSION(5, 2, 0):
2841 		adev->hdp.funcs = &hdp_v5_0_funcs;
2842 		break;
2843 	case IP_VERSION(5, 2, 1):
2844 		adev->hdp.funcs = &hdp_v5_2_funcs;
2845 		break;
2846 	case IP_VERSION(6, 0, 0):
2847 	case IP_VERSION(6, 0, 1):
2848 	case IP_VERSION(6, 1, 0):
2849 		adev->hdp.funcs = &hdp_v6_0_funcs;
2850 		break;
2851 	case IP_VERSION(7, 0, 0):
2852 		adev->hdp.funcs = &hdp_v7_0_funcs;
2853 		break;
2854 	default:
2855 		break;
2856 	}
2857 
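	/* set DF (data fabric) callbacks */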
2858 	switch (amdgpu_ip_version(adev, DF_HWIP, 0)) {
2859 	case IP_VERSION(3, 6, 0):
2860 	case IP_VERSION(3, 6, 1):
2861 	case IP_VERSION(3, 6, 2):
2862 		adev->df.funcs = &df_v3_6_funcs;
2863 		break;
2864 	case IP_VERSION(2, 1, 0):
2865 	case IP_VERSION(2, 1, 1):
2866 	case IP_VERSION(2, 5, 0):
2867 	case IP_VERSION(3, 5, 1):
2868 	case IP_VERSION(3, 5, 2):
2869 		adev->df.funcs = &df_v1_7_funcs;
2870 		break;
2871 	case IP_VERSION(4, 3, 0):
2872 		adev->df.funcs = &df_v4_3_funcs;
2873 		break;
2874 	case IP_VERSION(4, 6, 2):
2875 		adev->df.funcs = &df_v4_6_2_funcs;
2876 		break;
2877 	case IP_VERSION(4, 15, 0):
2878 	case IP_VERSION(4, 15, 1):
2879 		adev->df.funcs = &df_v4_15_funcs;
2880 		break;
2881 	default:
2882 		break;
2883 	}
2884 
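	/* set SMUIO callbacks; the reported package type may also mark the part as an APU */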
2885 	switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) {
2886 	case IP_VERSION(9, 0, 0):
2887 	case IP_VERSION(9, 0, 1):
2888 	case IP_VERSION(10, 0, 0):
2889 	case IP_VERSION(10, 0, 1):
2890 	case IP_VERSION(10, 0, 2):
2891 		adev->smuio.funcs = &smuio_v9_0_funcs;
2892 		break;
2893 	case IP_VERSION(11, 0, 0):
2894 	case IP_VERSION(11, 0, 2):
2895 	case IP_VERSION(11, 0, 3):
2896 	case IP_VERSION(11, 0, 4):
2897 	case IP_VERSION(11, 0, 7):
2898 	case IP_VERSION(11, 0, 8):
2899 		adev->smuio.funcs = &smuio_v11_0_funcs;
2900 		break;
2901 	case IP_VERSION(11, 0, 6):
2902 	case IP_VERSION(11, 0, 10):
2903 	case IP_VERSION(11, 0, 11):
2904 	case IP_VERSION(11, 5, 0):
2905 	case IP_VERSION(13, 0, 1):
2906 	case IP_VERSION(13, 0, 9):
2907 	case IP_VERSION(13, 0, 10):
2908 		adev->smuio.funcs = &smuio_v11_0_6_funcs;
2909 		break;
2910 	case IP_VERSION(13, 0, 2):
2911 		adev->smuio.funcs = &smuio_v13_0_funcs;
2912 		break;
2913 	case IP_VERSION(13, 0, 3):
2914 		adev->smuio.funcs = &smuio_v13_0_3_funcs;
2915 		if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU) {
2916 			adev->flags |= AMD_IS_APU;
2917 		}
2918 		break;
2919 	case IP_VERSION(13, 0, 6):
2920 	case IP_VERSION(13, 0, 8):
2921 	case IP_VERSION(14, 0, 0):
2922 	case IP_VERSION(14, 0, 1):
2923 		adev->smuio.funcs = &smuio_v13_0_6_funcs;
2924 		break;
2925 	case IP_VERSION(14, 0, 2):
2926 		adev->smuio.funcs = &smuio_v14_0_2_funcs;
2927 		break;
2928 	default:
2929 		break;
2930 	}
2931 
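	/* set LSDMA callbacks */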
2932 	switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
2933 	case IP_VERSION(6, 0, 0):
2934 	case IP_VERSION(6, 0, 1):
2935 	case IP_VERSION(6, 0, 2):
2936 	case IP_VERSION(6, 0, 3):
2937 		adev->lsdma.funcs = &lsdma_v6_0_funcs;
2938 		break;
2939 	case IP_VERSION(7, 0, 0):
2940 	case IP_VERSION(7, 0, 1):
2941 		adev->lsdma.funcs = &lsdma_v7_0_funcs;
2942 		break;
2943 	default:
2944 		break;
2945 	}
2946 
2947 	r = amdgpu_discovery_set_common_ip_blocks(adev);
2948 	if (r)
2949 		return r;
2950 
2951 	r = amdgpu_discovery_set_gmc_ip_blocks(adev);
2952 	if (r)
2953 		return r;
2954 
2955 	/* For SR-IOV, PSP needs to be initialized before IH */
2956 	if (amdgpu_sriov_vf(adev)) {
2957 		r = amdgpu_discovery_set_psp_ip_blocks(adev);
2958 		if (r)
2959 			return r;
2960 		r = amdgpu_discovery_set_ih_ip_blocks(adev);
2961 		if (r)
2962 			return r;
2963 	} else {
2964 		r = amdgpu_discovery_set_ih_ip_blocks(adev);
2965 		if (r)
2966 			return r;
2967 
2968 		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2969 			r = amdgpu_discovery_set_psp_ip_blocks(adev);
2970 			if (r)
2971 				return r;
2972 		}
2973 	}
2974 
2975 	if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2976 		r = amdgpu_discovery_set_smu_ip_blocks(adev);
2977 		if (r)
2978 			return r;
2979 	}
2980 
2981 	r = amdgpu_discovery_set_display_ip_blocks(adev);
2982 	if (r)
2983 		return r;
2984 
2985 	r = amdgpu_discovery_set_gc_ip_blocks(adev);
2986 	if (r)
2987 		return r;
2988 
2989 	r = amdgpu_discovery_set_sdma_ip_blocks(adev);
2990 	if (r)
2991 		return r;
2992 
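	/* For direct (non-SR-IOV) or RLC-backdoor firmware loading, the SMU
	 * block is added here, after SDMA, instead of in the PSP-load path
	 * above.
	 */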
2993 	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
2994 	     !amdgpu_sriov_vf(adev)) ||
2995 	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
2996 		r = amdgpu_discovery_set_smu_ip_blocks(adev);
2997 		if (r)
2998 			return r;
2999 	}
3000 
3001 	r = amdgpu_discovery_set_mm_ip_blocks(adev);
3002 	if (r)
3003 		return r;
3004 
3005 	r = amdgpu_discovery_set_mes_ip_blocks(adev);
3006 	if (r)
3007 		return r;
3008 
3009 	r = amdgpu_discovery_set_vpe_ip_blocks(adev);
3010 	if (r)
3011 		return r;
3012 
3013 	r = amdgpu_discovery_set_umsch_mm_ip_blocks(adev);
3014 	if (r)
3015 		return r;
3016 
3017 	r = amdgpu_discovery_set_isp_ip_blocks(adev);
3018 	if (r)
3019 		return r;
3020 	return 0;
3021 }
3022 
3023