/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"
#include "atombios.h"
#include "soc15_hw_ip.h"

union firmware_info {
	struct atom_firmware_info_v3_1 v31;
	struct atom_firmware_info_v3_2 v32;
	struct atom_firmware_info_v3_3 v33;
	struct atom_firmware_info_v3_4 v34;
};

/*
 * Helper function to query firmware capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return the firmware_capability field of the firmwareinfo table on success,
 * or 0 if the table is not available.
 */
uint32_t amdgpu_atomfirmware_query_firmware_capability(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union firmware_info *firmware_info;
	u8 frev, crev;
	u32 fw_cap = 0;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
			firmwareinfo);

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
				index, &size, &frev, &crev, &data_offset)) {
		/* support firmware_info 3.1 + */
		if ((frev == 3 && crev >= 1) || (frev > 3)) {
			firmware_info = (union firmware_info *)
				(mode_info->atom_context->bios + data_offset);
			fw_cap = le32_to_cpu(firmware_info->v31.firmware_capability);
		}
	}

	return fw_cap;
}
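
/*
 * Usage sketch (illustrative, not part of this file): driver early-init
 * code typically caches the returned flags once, e.g.
 *
 *	adev->mode_info.firmware_flags =
 *		amdgpu_atomfirmware_query_firmware_capability(adev);
 *
 * so that the amdgpu_atomfirmware_*_supported() helpers below can test
 * individual ATOM_FIRMWARE_CAP_* bits without re-parsing the table.
 */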

/*
 * Helper function to query gpu virtualization capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if gpu virtualization is supported or false if not
 */
bool amdgpu_atomfirmware_gpu_virtualization_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION) ? true : false;
}

void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
{
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						firmwareinfo);
	uint16_t data_offset;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
					  NULL, NULL, &data_offset)) {
		struct atom_firmware_info_v3_1 *firmware_info =
			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
							   data_offset);

		adev->bios_scratch_reg_offset =
			le32_to_cpu(firmware_info->bios_scratch_reg_startaddr);
	}
}

static int amdgpu_atomfirmware_allocate_fb_v2_1(struct amdgpu_device *adev,
	struct vram_usagebyfirmware_v2_1 *fw_usage, int *usage_bytes)
{
	uint32_t start_addr, fw_size, drv_size;

	start_addr = le32_to_cpu(fw_usage->start_address_in_kb);
	fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);
	drv_size = le16_to_cpu(fw_usage->used_by_driver_in_kb);

	DRM_DEBUG("atom firmware v2_1 requested %08x %dkb fw %dkb drv\n",
			  start_addr,
			  fw_size,
			  drv_size);

	if ((start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
		(uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
		ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
		/* Firmware requests a VRAM reservation for SR-IOV */
		adev->mman.fw_vram_usage_start_offset = (start_addr &
			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
		adev->mman.fw_vram_usage_size = fw_size << 10;
		/* Use the default scratch size */
		*usage_bytes = 0;
	} else {
		*usage_bytes = drv_size << 10;
	}
	return 0;
}
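
/*
 * Worked example (hypothetical numbers, assuming the usual atomfirmware.h
 * encoding where the operation flags live in the top bits of the address):
 * start_address_in_kb = 0x40001000 carries the SR-IOV share-reservation
 * flag, and the remaining bits give the offset in KB, so the reservation
 * starts at 0x1000 KB << 10 = 4 MiB into VRAM. All addresses and sizes in
 * the table are in KB, hence the "<< 10" conversions to bytes above.
 */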

static int amdgpu_atomfirmware_allocate_fb_v2_2(struct amdgpu_device *adev,
		struct vram_usagebyfirmware_v2_2 *fw_usage, int *usage_bytes)
{
	uint32_t fw_start_addr, fw_size, drv_start_addr, drv_size;

	fw_start_addr = le32_to_cpu(fw_usage->fw_region_start_address_in_kb);
	fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);

	drv_start_addr = le32_to_cpu(fw_usage->driver_region0_start_address_in_kb);
	drv_size = le32_to_cpu(fw_usage->used_by_driver_region0_in_kb);

	DRM_DEBUG("atom requested fw start at %08x %dkb and drv start at %08x %dkb\n",
			  fw_start_addr,
			  fw_size,
			  drv_start_addr,
			  drv_size);

	if ((fw_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION << 30)) == 0) {
		/* Firmware requests a VRAM reservation for SR-IOV */
		adev->mman.fw_vram_usage_start_offset = (fw_start_addr &
			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
		adev->mman.fw_vram_usage_size = fw_size << 10;
	}

	if ((drv_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION << 30)) == 0) {
		/* Driver requests a VRAM reservation for SR-IOV */
		adev->mman.drv_vram_usage_start_offset = (drv_start_addr &
			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
		adev->mman.drv_vram_usage_size = drv_size << 10;
	}

	*usage_bytes = 0;
	return 0;
}
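
/*
 * Note (informal): unlike v2_1, the v2_2 table describes the firmware and
 * driver regions separately, and each region opts out of reservation by
 * setting the NEEDS_NO_RESERVATION flag bit; a cleared bit therefore means
 * "reserve this region".
 */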

int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						vram_usagebyfirmware);
	struct vram_usagebyfirmware_v2_1 *fw_usage_v2_1;
	struct vram_usagebyfirmware_v2_2 *fw_usage_v2_2;
	uint16_t data_offset;
	uint8_t frev, crev;
	int usage_bytes = 0;

	if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) {
		if (frev == 2 && crev == 1) {
			fw_usage_v2_1 =
				(struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
			amdgpu_atomfirmware_allocate_fb_v2_1(adev,
					fw_usage_v2_1,
					&usage_bytes);
		} else if (frev >= 2 && crev >= 2) {
			fw_usage_v2_2 =
				(struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset);
			amdgpu_atomfirmware_allocate_fb_v2_2(adev,
					fw_usage_v2_2,
					&usage_bytes);
		}
	}

	ctx->scratch_size_bytes = 0;
	if (usage_bytes == 0)
		usage_bytes = 20 * 1024;
	/* allocate some scratch memory */
	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
	if (!ctx->scratch)
		return -ENOMEM;
	ctx->scratch_size_bytes = usage_bytes;
	return 0;
}

union igp_info {
	struct atom_integrated_system_info_v1_11 v11;
	struct atom_integrated_system_info_v1_12 v12;
	struct atom_integrated_system_info_v2_1 v21;
};

union umc_info {
	struct atom_umc_info_v3_1 v31;
	struct atom_umc_info_v3_2 v32;
	struct atom_umc_info_v3_3 v33;
};

union vram_info {
	struct atom_vram_info_header_v2_3 v23;
	struct atom_vram_info_header_v2_4 v24;
	struct atom_vram_info_header_v2_5 v25;
	struct atom_vram_info_header_v2_6 v26;
	struct atom_vram_info_header_v3_0 v30;
};

union vram_module {
	struct atom_vram_module_v9 v9;
	struct atom_vram_module_v10 v10;
	struct atom_vram_module_v11 v11;
	struct atom_vram_module_v3_0 v30;
};

static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
					      int atom_mem_type)
{
	int vram_type;

	if (adev->flags & AMD_IS_APU) {
		switch (atom_mem_type) {
		case Ddr2MemType:
		case LpDdr2MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR2;
			break;
		case Ddr3MemType:
		case LpDdr3MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR3;
			break;
		case Ddr4MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR4;
			break;
		case LpDdr4MemType:
			vram_type = AMDGPU_VRAM_TYPE_LPDDR4;
			break;
		case Ddr5MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR5;
			break;
		case LpDdr5MemType:
			vram_type = AMDGPU_VRAM_TYPE_LPDDR5;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	} else {
		switch (atom_mem_type) {
		case ATOM_DGPU_VRAM_TYPE_GDDR5:
			vram_type = AMDGPU_VRAM_TYPE_GDDR5;
			break;
		case ATOM_DGPU_VRAM_TYPE_HBM2:
		case ATOM_DGPU_VRAM_TYPE_HBM2E:
			vram_type = AMDGPU_VRAM_TYPE_HBM;
			break;
		case ATOM_DGPU_VRAM_TYPE_GDDR6:
			vram_type = AMDGPU_VRAM_TYPE_GDDR6;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	}

	return vram_type;
}
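
/*
 * Example (informal): on an APU, Ddr4MemType and LpDdr4MemType map to
 * AMDGPU_VRAM_TYPE_DDR4 and AMDGPU_VRAM_TYPE_LPDDR4 respectively; on a
 * dGPU, both HBM2 and HBM2E report as AMDGPU_VRAM_TYPE_HBM, since the
 * driver does not distinguish the two generations here.
 */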


int
amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
				  int *vram_width, int *vram_type,
				  int *vram_vendor)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index, i = 0;
	u16 data_offset, size;
	union igp_info *igp_info;
	union vram_info *vram_info;
	union vram_module *vram_module;
	u8 frev, crev;
	u8 mem_type;
	u8 mem_vendor;
	u32 mem_channel_number;
	u32 mem_channel_width;
	u32 module_id;

	if (adev->flags & AMD_IS_APU)
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    integratedsysteminfo);
	else
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    vram_info);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
					  index, &size,
					  &frev, &crev, &data_offset)) {
		if (adev->flags & AMD_IS_APU) {
			igp_info = (union igp_info *)
				(mode_info->atom_context->bios + data_offset);
			switch (frev) {
			case 1:
				switch (crev) {
				case 11:
				case 12:
					mem_channel_number = igp_info->v11.umachannelnumber;
					if (!mem_channel_number)
						mem_channel_number = 1;
					/* channel width is 64 */
					if (vram_width)
						*vram_width = mem_channel_number * 64;
					mem_type = igp_info->v11.memorytype;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					break;
				default:
					return -EINVAL;
				}
				break;
			case 2:
				switch (crev) {
				case 1:
				case 2:
					mem_channel_number = igp_info->v21.umachannelnumber;
					if (!mem_channel_number)
						mem_channel_number = 1;
					/* channel width is 64 */
					if (vram_width)
						*vram_width = mem_channel_number * 64;
					mem_type = igp_info->v21.memorytype;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					break;
				default:
					return -EINVAL;
				}
				break;
			default:
				return -EINVAL;
			}
		} else {
			vram_info = (union vram_info *)
				(mode_info->atom_context->bios + data_offset);
			module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
			if (frev == 3) {
				switch (crev) {
				/* v30 */
				case 0:
					vram_module = (union vram_module *)vram_info->v30.vram_module;
					mem_vendor = (vram_module->v30.dram_vendor_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					mem_type = vram_info->v30.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_info->v30.channel_num;
					mem_channel_width = vram_info->v30.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * (1 << mem_channel_width);
					break;
				default:
					return -EINVAL;
				}
			} else if (frev == 2) {
				switch (crev) {
				/* v23 */
				case 3:
					if (module_id > vram_info->v23.vram_module_num)
						module_id = 0;
					vram_module = (union vram_module *)vram_info->v23.vram_module;
					while (i < module_id) {
						vram_module = (union vram_module *)
							((u8 *)vram_module + vram_module->v9.vram_module_size);
						i++;
					}
					mem_type = vram_module->v9.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_module->v9.channel_num;
					mem_channel_width = vram_module->v9.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * (1 << mem_channel_width);
					mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					break;
				/* v24 */
				case 4:
					if (module_id > vram_info->v24.vram_module_num)
						module_id = 0;
					vram_module = (union vram_module *)vram_info->v24.vram_module;
					while (i < module_id) {
						vram_module = (union vram_module *)
							((u8 *)vram_module + vram_module->v10.vram_module_size);
						i++;
					}
					mem_type = vram_module->v10.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_module->v10.channel_num;
					mem_channel_width = vram_module->v10.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * (1 << mem_channel_width);
					mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					break;
				/* v25 */
				case 5:
					if (module_id > vram_info->v25.vram_module_num)
						module_id = 0;
					vram_module = (union vram_module *)vram_info->v25.vram_module;
					while (i < module_id) {
						vram_module = (union vram_module *)
							((u8 *)vram_module + vram_module->v11.vram_module_size);
						i++;
					}
					mem_type = vram_module->v11.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_module->v11.channel_num;
					mem_channel_width = vram_module->v11.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * (1 << mem_channel_width);
					mem_vendor = (vram_module->v11.vender_rev_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					break;
				/* v26 */
				case 6:
					if (module_id > vram_info->v26.vram_module_num)
						module_id = 0;
					vram_module = (union vram_module *)vram_info->v26.vram_module;
					while (i < module_id) {
						vram_module = (union vram_module *)
							((u8 *)vram_module + vram_module->v9.vram_module_size);
						i++;
					}
					mem_type = vram_module->v9.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_module->v9.channel_num;
					mem_channel_width = vram_module->v9.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * (1 << mem_channel_width);
					mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					break;
				default:
					return -EINVAL;
				}
			} else {
				/* invalid frev */
				return -EINVAL;
			}
		}

	}

	return 0;
}
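
/*
 * Usage sketch (illustrative, not from this file): a GMC init path might
 * query everything in one call and tolerate missing fields:
 *
 *	int vram_width = 0, vram_type = 0, vram_vendor = 0;
 *
 *	if (!amdgpu_atomfirmware_get_vram_info(adev, &vram_width,
 *					       &vram_type, &vram_vendor))
 *		adev->gmc.vram_width = vram_width;
 *
 * Any of the out-pointers may be NULL when the caller does not need that
 * particular value.
 */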

/*
 * Return true if the vbios enables ecc by default and the umc info table
 * is available, or false if ecc is not enabled by default or the umc info
 * table is not available.
 */
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union umc_info *umc_info;
	u8 frev, crev;
	bool ecc_default_enabled = false;
	u8 umc_config;
	u32 umc_config1;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
			umc_info);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
				index, &size, &frev, &crev, &data_offset)) {
		if (frev == 3) {
			umc_info = (union umc_info *)
				(mode_info->atom_context->bios + data_offset);
			switch (crev) {
			case 1:
				umc_config = le32_to_cpu(umc_info->v31.umc_config);
				ecc_default_enabled =
					(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
				break;
			case 2:
				umc_config = le32_to_cpu(umc_info->v32.umc_config);
				ecc_default_enabled =
					(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
				break;
			case 3:
				umc_config = le32_to_cpu(umc_info->v33.umc_config);
				umc_config1 = le32_to_cpu(umc_info->v33.umc_config1);
				ecc_default_enabled =
					((umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ||
					 (umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE)) ? true : false;
				break;
			default:
				/* unsupported crev */
				return false;
			}
		}
	}

	return ecc_default_enabled;
}
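
/*
 * Usage sketch (illustrative, hypothetical caller): RAS setup code can
 * consult this helper to choose a default before applying any module
 * parameter override, e.g.
 *
 *	bool ecc_on = amdgpu_atomfirmware_mem_ecc_supported(adev);
 *
 * and only enable UMC RAS features when ecc_on is true.
 */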

/*
 * Helper function to query sram ecc capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if vbios supports sram ecc or false if not
 */
bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false;
}

/*
 * Helper function to query dynamic boot config capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if vbios supports dynamic boot config or false if not
 */
bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_DYNAMIC_BOOT_CFG_ENABLE) ? true : false;
}

/**
 * amdgpu_atomfirmware_ras_rom_addr -- Get the RAS EEPROM addr from VBIOS
 * @adev: amdgpu_device pointer
 * @i2c_address: pointer to u8; if not NULL, will contain
 *    the RAS EEPROM address if the function returns true
 *
 * Return true if VBIOS supports RAS EEPROM address reporting,
 * else return false. If true and @i2c_address is not NULL,
 * will contain the RAS ROM address.
 */
bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev,
				      u8 *i2c_address)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union firmware_info *firmware_info;
	u8 frev, crev;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
					  index, &size, &frev, &crev,
					  &data_offset)) {
		/* support firmware_info 3.4 + */
		if ((frev == 3 && crev >= 4) || (frev > 3)) {
			firmware_info = (union firmware_info *)
				(mode_info->atom_context->bios + data_offset);
			/* The ras_rom_i2c_slave_addr should ideally
			 * be a 19-bit EEPROM address, which would be
			 * used as is by the driver; see top of
			 * amdgpu_eeprom.c.
			 *
			 * When this is the case, 0 is of course a
			 * valid RAS EEPROM address, in which case,
			 * we'll drop the first "if (firm...)" and only
			 * leave the check for the pointer.
			 *
			 * The reason this works right now is because
			 * ras_rom_i2c_slave_addr contains the EEPROM
			 * device type qualifier 1010b in the top 4
			 * bits.
			 */
			if (firmware_info->v34.ras_rom_i2c_slave_addr) {
				if (i2c_address)
					*i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
				return true;
			}
		}
	}

	return false;
}
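
/*
 * Usage sketch (illustrative, hypothetical caller; "control" is not
 * defined in this file): RAS EEPROM setup can try the VBIOS-reported
 * address first and only fall back to a per-ASIC default when the table
 * has nothing:
 *
 *	u8 i2c_addr;
 *
 *	if (amdgpu_atomfirmware_ras_rom_addr(adev, &i2c_addr))
 *		control->i2c_address = i2c_addr;
 */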


union smu_info {
	struct atom_smu_info_v3_1 v31;
	struct atom_smu_info_v4_0 v40;
};

union gfx_info {
	struct atom_gfx_info_v2_2 v22;
	struct atom_gfx_info_v2_4 v24;
	struct atom_gfx_info_v2_7 v27;
	struct atom_gfx_info_v3_0 v30;
};

int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct amdgpu_pll *spll = &adev->clock.spll;
	struct amdgpu_pll *mpll = &adev->clock.mpll;
	uint8_t frev, crev;
	uint16_t data_offset;
	int ret = -EINVAL, index;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		union firmware_info *firmware_info =
			(union firmware_info *)(mode_info->atom_context->bios +
						data_offset);

		adev->clock.default_sclk =
			le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
		adev->clock.default_mclk =
			le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);

		adev->pm.current_sclk = adev->clock.default_sclk;
		adev->pm.current_mclk = adev->clock.default_mclk;

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smu_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		union smu_info *smu_info =
			(union smu_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* system clock */
		if (frev == 3)
			spll->reference_freq = le32_to_cpu(smu_info->v31.core_refclk_10khz);
		else if (frev == 4)
			spll->reference_freq = le32_to_cpu(smu_info->v40.core_refclk_10khz);

		spll->reference_div = 0;
		spll->min_post_div = 1;
		spll->max_post_div = 1;
		spll->min_ref_div = 2;
		spll->max_ref_div = 0xff;
		spll->min_feedback_div = 4;
		spll->max_feedback_div = 0xff;
		spll->best_vco = 0;

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    umc_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		union umc_info *umc_info =
			(union umc_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* memory clock */
		mpll->reference_freq = le32_to_cpu(umc_info->v31.mem_refclk_10khz);

		mpll->reference_div = 0;
		mpll->min_post_div = 1;
		mpll->max_post_div = 1;
		mpll->min_ref_div = 2;
		mpll->max_ref_div = 0xff;
		mpll->min_feedback_div = 4;
		mpll->max_feedback_div = 0xff;
		mpll->best_vco = 0;

		ret = 0;
	}

	/* if asic is Navi+, the rlc reference clock is used for system clock
	 * from vbios gfx_info table */
	if (adev->asic_type >= CHIP_NAVI10) {
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						   gfx_info);
		if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
			union gfx_info *gfx_info = (union gfx_info *)
				(mode_info->atom_context->bios + data_offset);
			if ((frev == 3) ||
			    (frev == 2 && crev == 6)) {
				spll->reference_freq = le32_to_cpu(gfx_info->v30.golden_tsc_count_lower_refclk);
				ret = 0;
			} else if ((frev == 2) &&
				   (crev >= 2) &&
				   (crev != 6)) {
				spll->reference_freq = le32_to_cpu(gfx_info->v22.rlc_gpu_timer_refclk);
				ret = 0;
			} else {
				BUG();
			}
		}
	}

	return ret;
}
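
/*
 * Usage sketch (illustrative): asic early-init code would typically call
 * this once and treat failure as fatal, e.g.
 *
 *	r = amdgpu_atomfirmware_get_clock_info(adev);
 *	if (r)
 *		return r;
 *
 * since without bootup clocks the rest of clock/power setup cannot
 * proceed.
 */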

int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	uint8_t frev, crev;
	uint16_t data_offset;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    gfx_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		union gfx_info *gfx_info = (union gfx_info *)
			(mode_info->atom_context->bios + data_offset);
		if (frev == 2) {
			switch (crev) {
			case 4:
				adev->gfx.config.max_shader_engines = gfx_info->v24.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v24.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v24.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v24.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v24.max_texture_channel_caches;
				adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
				adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
				adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
				adev->gfx.config.gs_prim_buffer_depth =
					le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth);
				adev->gfx.config.double_offchip_lds_buf =
					gfx_info->v24.gc_double_offchip_lds_buffer;
				adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v24.gc_wave_size);
				adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
				adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu;
				adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);
				return 0;
			case 7:
				adev->gfx.config.max_shader_engines = gfx_info->v27.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v27.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v27.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v27.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v27.max_texture_channel_caches;
				adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v27.gc_num_gprs);
				adev->gfx.config.max_gs_threads = gfx_info->v27.gc_num_max_gs_thds;
				adev->gfx.config.gs_vgt_table_depth = gfx_info->v27.gc_gs_table_depth;
				adev->gfx.config.gs_prim_buffer_depth = le16_to_cpu(gfx_info->v27.gc_gsprim_buff_depth);
				adev->gfx.config.double_offchip_lds_buf = gfx_info->v27.gc_double_offchip_lds_buffer;
				adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v27.gc_wave_size);
				adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v27.gc_max_waves_per_simd);
				adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v27.gc_max_scratch_slots_per_cu;
				adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v27.gc_lds_size);
				return 0;
			default:
				return -EINVAL;
			}
		} else if (frev == 3) {
			switch (crev) {
			case 0:
				adev->gfx.config.max_shader_engines = gfx_info->v30.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v30.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v30.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v30.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v30.max_texture_channel_caches;
				return 0;
			default:
				return -EINVAL;
			}
		} else {
			return -EINVAL;
		}

	}
	return -EINVAL;
}

/*
 * Helper function to query two stage mem training capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if two stage mem training is supported or false if not
 */
bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING) ? true : false;
}

int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	union firmware_info *firmware_info;
	int index;
	u16 data_offset, size;
	u8 frev, crev;
	int fw_reserved_fb_size;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
			firmwareinfo);

	if (!amdgpu_atom_parse_data_header(ctx, index, &size,
				&frev, &crev, &data_offset))
		/* fail to parse data_header */
		return 0;

	firmware_info = (union firmware_info *)(ctx->bios + data_offset);

	if (frev != 3)
		return -EINVAL;

	switch (crev) {
	case 4:
		fw_reserved_fb_size =
			(firmware_info->v34.fw_reserved_size_in_kb << 10);
		break;
	default:
		fw_reserved_fb_size = 0;
		break;
	}

	return fw_reserved_fb_size;
}
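
/*
 * Worked example (hypothetical value): fw_reserved_size_in_kb = 256 in a
 * v3.4 firmware_info table yields 256 << 10 = 262144 bytes, i.e. 256 KiB
 * of framebuffer reserved for firmware.
 */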

/*
 * Helper function to execute the asic_init table
 *
 * @adev: amdgpu_device pointer
 * @fb_reset: flag to indicate whether fb is reset or not
 *
 * Return 0 on success, otherwise an error code
 */
int amdgpu_atomfirmware_asic_init(struct amdgpu_device *adev, bool fb_reset)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct atom_context *ctx;
	uint8_t frev, crev;
	uint16_t data_offset;
	uint32_t bootup_sclk_in10khz, bootup_mclk_in10khz;
	struct asic_init_ps_allocation_v2_1 asic_init_ps_v2_1;
	int index;

	if (!mode_info)
		return -EINVAL;

	ctx = mode_info->atom_context;
	if (!ctx)
		return -EINVAL;

	/* query bootup sclk/mclk from firmware_info table */
	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(ctx, index, NULL,
				&frev, &crev, &data_offset)) {
		union firmware_info *firmware_info =
			(union firmware_info *)(ctx->bios +
						data_offset);

		bootup_sclk_in10khz =
			le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
		bootup_mclk_in10khz =
			le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);
	} else {
		return -EINVAL;
	}

	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    asic_init);
	if (amdgpu_atom_parse_cmd_header(mode_info->atom_context, index, &frev, &crev)) {
		if (frev == 2 && crev >= 1) {
			memset(&asic_init_ps_v2_1, 0, sizeof(asic_init_ps_v2_1));
			asic_init_ps_v2_1.param.engineparam.sclkfreqin10khz = bootup_sclk_in10khz;
			asic_init_ps_v2_1.param.memparam.mclkfreqin10khz = bootup_mclk_in10khz;
			asic_init_ps_v2_1.param.engineparam.engineflag = b3NORMAL_ENGINE_INIT;
			if (!fb_reset)
				asic_init_ps_v2_1.param.memparam.memflag = b3DRAM_SELF_REFRESH_EXIT;
			else
				asic_init_ps_v2_1.param.memparam.memflag = 0;
		} else {
			return -EINVAL;
		}
	} else {
		return -EINVAL;
	}

	return amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, (uint32_t *)&asic_init_ps_v2_1);
}
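
/*
 * Usage sketch (illustrative): device init or resume paths re-post the
 * GPU with something like
 *
 *	if (amdgpu_atomfirmware_asic_init(adev, true))
 *		dev_err(adev->dev, "asic_init failed\n");
 *
 * where fb_reset=true tells the table that framebuffer contents were
 * lost, so DRAM does not need the self-refresh exit sequence.
 */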