/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "ppsmc.h"
#include "iceland_smumgr.h"
#include "smu_ucode_xfer_vi.h"
#include "amdgpu_ucode.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#define ICELAND_SMC_SIZE 0x20000

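/*
 * Point the SMC indirect index register at a dword-aligned SRAM address
 * and disable auto-increment, so that the next SMC_IND_DATA_0 access
 * hits exactly that word.  Callers must hold adev->smc_idx_lock.
 */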
static int iceland_set_smc_sram_address(struct amdgpu_device *adev,
					uint32_t smc_address, uint32_t limit)
{
	uint32_t val;

	if (smc_address & 3)
		return -EINVAL;

	if ((smc_address + 3) > limit)
		return -EINVAL;

	WREG32(mmSMC_IND_INDEX_0, smc_address);

	val = RREG32(mmSMC_IND_ACCESS_CNTL);
	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
	WREG32(mmSMC_IND_ACCESS_CNTL, val);

	return 0;
}

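/*
 * Copy a byte stream into SMC SRAM, most significant byte first, one
 * dword at a time.  Any trailing bytes that do not fill a whole dword
 * are merged into the existing SRAM contents with a read-modify-write.
 */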
static int iceland_copy_bytes_to_smc(struct amdgpu_device *adev,
				     uint32_t smc_start_address,
				     const uint8_t *src,
				     uint32_t byte_count, uint32_t limit)
{
	uint32_t addr;
	uint32_t data, orig_data;
	int result = 0;
	uint32_t extra_shift;
	unsigned long flags;

	if (smc_start_address & 3)
		return -EINVAL;

	if ((smc_start_address + byte_count) > limit)
		return -EINVAL;

	addr = smc_start_address;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	while (byte_count >= 4) {
		/* Bytes are written into the SMC address space with the MSB first */
		data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];

		result = iceland_set_smc_sram_address(adev, addr, limit);
		if (result)
			goto out;

		WREG32(mmSMC_IND_DATA_0, data);

		src += 4;
		byte_count -= 4;
		addr += 4;
	}

	if (0 != byte_count) {
		/* Now write the odd bytes left; do a read-modify-write cycle */
		data = 0;

		result = iceland_set_smc_sram_address(adev, addr, limit);
		if (result)
			goto out;

		orig_data = RREG32(mmSMC_IND_DATA_0);
		extra_shift = 8 * (4 - byte_count);

		while (byte_count > 0) {
			data = (data << 8) + *src++;
			byte_count--;
		}

		data <<= extra_shift;
		data |= (orig_data & ~((~0UL) << extra_shift));

		result = iceland_set_smc_sram_address(adev, addr, limit);
		if (result)
			goto out;

		WREG32(mmSMC_IND_DATA_0, data);
	}

out:
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return result;
}

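/* Release the SMC from reset so it starts executing its firmware. */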
void iceland_start_smc(struct amdgpu_device *adev)
{
	uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);

	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
}

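/* Put the SMC back into reset. */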
void iceland_reset_smc(struct amdgpu_device *adev)
{
	uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);

	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
}

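/*
 * Plant a branch instruction at SMC address 0 so that the SMC jumps
 * into the freshly loaded firmware when it is taken out of reset.
 */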
static int iceland_program_jump_on_start(struct amdgpu_device *adev)
{
	static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};

	iceland_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);

	return 0;
}

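/* Gate the SMC clock. */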
void iceland_stop_smc_clock(struct amdgpu_device *adev)
{
	uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);

	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
}

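/* Ungate the SMC clock. */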
void iceland_start_smc_clock(struct amdgpu_device *adev)
{
	uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);

	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
}

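/*
 * The SMC firmware is considered running once its clock is ungated and
 * its program counter has advanced beyond 0x20100.
 */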
static bool iceland_is_smc_ram_running(struct amdgpu_device *adev)
{
	uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);

	val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);

	return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
}

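/*
 * Poll SMC_RESP_0 until the SMC acknowledges the last message, or give
 * up after adev->usec_timeout microseconds.
 */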
static int wait_smu_response(struct amdgpu_device *adev)
{
	int i;
	uint32_t val;

	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32(mmSMC_RESP_0);
		if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout)
		return -EINVAL;

	return 0;
}

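/*
 * Post a message to the SMC mailbox and wait for it to be processed.
 * Requires the SMC firmware to be up and running.
 */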
static int iceland_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
{
	if (!iceland_is_smc_ram_running(adev))
		return -EINVAL;

	if (wait_smu_response(adev)) {
		DRM_ERROR("Failed to send previous message\n");
		return -EINVAL;
	}

	WREG32(mmSMC_MESSAGE_0, msg);

	if (wait_smu_response(adev)) {
		DRM_ERROR("Failed to send message\n");
		return -EINVAL;
	}

	return 0;
}

static int iceland_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
						   PPSMC_Msg msg)
{
	if (!iceland_is_smc_ram_running(adev))
		return -EINVAL;

	if (wait_smu_response(adev)) {
		DRM_ERROR("Failed to send previous message\n");
		return -EINVAL;
	}

	WREG32(mmSMC_MESSAGE_0, msg);

	return 0;
}

static int iceland_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
						  PPSMC_Msg msg,
						  uint32_t parameter)
{
	WREG32(mmSMC_MSG_ARG_0, parameter);

	return iceland_send_msg_to_smc(adev, msg);
}

static int iceland_send_msg_to_smc_with_parameter_without_waiting(
					struct amdgpu_device *adev,
					PPSMC_Msg msg, uint32_t parameter)
{
	WREG32(mmSMC_MSG_ARG_0, parameter);

	return iceland_send_msg_to_smc_without_waiting(adev, msg);
}

#if 0 /* not used yet */
static int iceland_wait_for_smc_inactive(struct amdgpu_device *adev)
{
	int i;
	uint32_t val;

	if (!iceland_is_smc_ram_running(adev))
		return -EINVAL;

	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
		if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout)
		return -EINVAL;

	return 0;
}
#endif

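/*
 * Upload the SMC microcode from adev->pm.fw into SMC SRAM.  The SMC is
 * held in reset with its clock gated while the image is streamed in
 * through the auto-incrementing indirect data register.
 */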
static int iceland_smu_upload_firmware_image(struct amdgpu_device *adev)
{
	const struct smc_firmware_header_v1_0 *hdr;
	uint32_t ucode_size;
	uint32_t ucode_start_address;
	const uint8_t *src;
	uint32_t val;
	uint32_t byte_count;
	uint32_t data;
	unsigned long flags;
	int i;

	if (!adev->pm.fw)
		return -EINVAL;

	hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
	amdgpu_ucode_print_smc_hdr(&hdr->header);

	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
	ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
	src = (const uint8_t *)
		(adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	if (ucode_size & 3) {
		DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
		return -EINVAL;
	}

	if (ucode_size > ICELAND_SMC_SIZE) {
		DRM_ERROR("SMC ucode is larger than the SMC RAM area\n");
		return -EINVAL;
	}

	/* wait for RCU_UC_EVENTS.boot_seq_done to clear */
	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32_SMC(ixRCU_UC_EVENTS);
		if (REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done) == 0)
			break;
		udelay(1);
	}
	val = RREG32_SMC(ixSMC_SYSCON_MISC_CNTL);
	WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, val | 1);

	iceland_stop_smc_clock(adev);
	iceland_reset_smc(adev);

	/* write the ucode through the auto-incrementing indirect data register */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_0, ucode_start_address);

	val = RREG32(mmSMC_IND_ACCESS_CNTL);
	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
	WREG32(mmSMC_IND_ACCESS_CNTL, val);

	byte_count = ucode_size;
	while (byte_count >= 4) {
		data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
		WREG32(mmSMC_IND_DATA_0, data);
		src += 4;
		byte_count -= 4;
	}
	val = RREG32(mmSMC_IND_ACCESS_CNTL);
	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
	WREG32(mmSMC_IND_ACCESS_CNTL, val);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return 0;
}

#if 0 /* not used yet */
static int iceland_read_smc_sram_dword(struct amdgpu_device *adev,
				       uint32_t smc_address,
				       uint32_t *value,
				       uint32_t limit)
{
	int result;
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	result = iceland_set_smc_sram_address(adev, smc_address, limit);
	if (result == 0)
		*value = RREG32(mmSMC_IND_DATA_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return result;
}

static int iceland_write_smc_sram_dword(struct amdgpu_device *adev,
					uint32_t smc_address,
					uint32_t value,
					uint32_t limit)
{
	int result;
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	result = iceland_set_smc_sram_address(adev, smc_address, limit);
	if (result == 0)
		WREG32(mmSMC_IND_DATA_0, value);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return result;
}

static int iceland_smu_stop_smc(struct amdgpu_device *adev)
{
	iceland_reset_smc(adev);
	iceland_stop_smc_clock(adev);

	return 0;
}
#endif

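/*
 * Start the SMC: plant the boot jump, ungate the clock and release
 * reset, then wait for the firmware to report that its interrupts are
 * enabled.
 */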
static int iceland_smu_start_smc(struct amdgpu_device *adev)
{
	int i;
	uint32_t val;

	iceland_program_jump_on_start(adev);
	iceland_start_smc_clock(adev);
	iceland_start_smc(adev);

	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32_SMC(ixFIRMWARE_FLAGS);
		if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED) == 1)
			break;
		udelay(1);
	}
	return 0;
}

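/* Map an SMU ucode id onto the driver's AMDGPU_UCODE_ID enumeration. */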
static enum AMDGPU_UCODE_ID iceland_convert_fw_type(uint32_t fw_type)
{
	switch (fw_type) {
		case UCODE_ID_SDMA0:
			return AMDGPU_UCODE_ID_SDMA0;
		case UCODE_ID_SDMA1:
			return AMDGPU_UCODE_ID_SDMA1;
		case UCODE_ID_CP_CE:
			return AMDGPU_UCODE_ID_CP_CE;
		case UCODE_ID_CP_PFP:
			return AMDGPU_UCODE_ID_CP_PFP;
		case UCODE_ID_CP_ME:
			return AMDGPU_UCODE_ID_CP_ME;
		case UCODE_ID_CP_MEC:
		case UCODE_ID_CP_MEC_JT1:
			return AMDGPU_UCODE_ID_CP_MEC1;
		case UCODE_ID_CP_MEC_JT2:
			return AMDGPU_UCODE_ID_CP_MEC2;
		case UCODE_ID_RLC_G:
			return AMDGPU_UCODE_ID_RLC_G;
		default:
			DRM_ERROR("ucode type is out of range!\n");
			return AMDGPU_UCODE_ID_MAXIMUM;
	}
}

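/*
 * Return the SMU load-status mask bits that correspond to a given
 * AMDGPU_UCODE_ID.  MEC1 needs both the MEC and the JT1 bits.
 */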
static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type)
{
	switch (fw_type) {
		case AMDGPU_UCODE_ID_SDMA0:
			return UCODE_ID_SDMA0_MASK;
		case AMDGPU_UCODE_ID_SDMA1:
			return UCODE_ID_SDMA1_MASK;
		case AMDGPU_UCODE_ID_CP_CE:
			return UCODE_ID_CP_CE_MASK;
		case AMDGPU_UCODE_ID_CP_PFP:
			return UCODE_ID_CP_PFP_MASK;
		case AMDGPU_UCODE_ID_CP_ME:
			return UCODE_ID_CP_ME_MASK;
		case AMDGPU_UCODE_ID_CP_MEC1:
			return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
		case AMDGPU_UCODE_ID_CP_MEC2:
			return UCODE_ID_CP_MEC_MASK;
		case AMDGPU_UCODE_ID_RLC_G:
			return UCODE_ID_RLC_G_MASK;
		default:
			DRM_ERROR("ucode type is out of range!\n");
			return 0;
	}
}

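/*
 * Fill one SMU_Entry of the firmware TOC with the GPU address and size
 * of the corresponding ucode image in the driver's firmware BO.
 */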
static int iceland_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
						      uint32_t fw_type,
						      struct SMU_Entry *entry)
{
	enum AMDGPU_UCODE_ID id = iceland_convert_fw_type(fw_type);
	struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
	const struct gfx_firmware_header_v1_0 *header = NULL;
	uint64_t gpu_addr;
	uint32_t data_size;

	if (ucode->fw == NULL)
		return -EINVAL;

	gpu_addr = ucode->mc_addr;
	header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
	data_size = le32_to_cpu(header->header.ucode_size_bytes);

	entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
	entry->id = (uint16_t)fw_type;
	entry->image_addr_high = upper_32_bits(gpu_addr);
	entry->image_addr_low = lower_32_bits(gpu_addr);
	entry->meta_data_addr_high = 0;
	entry->meta_data_addr_low = 0;
	entry->data_size_byte = data_size;
	entry->num_register_entries = 0;
	entry->flags = 0;

	return 0;
}

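/*
 * Build the firmware TOC in the driver-owned header buffer, hand its
 * GPU address to the SMC, and ask the SMC to load the listed ucodes.
 */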
static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
{
	struct iceland_smu_private_data *private = (struct iceland_smu_private_data *)adev->smu.priv;
	struct SMU_DRAMData_TOC *toc;
	uint32_t fw_to_load;

	toc = (struct SMU_DRAMData_TOC *)private->header;
	toc->num_entries = 0;
	toc->structure_version = 1;

	if (!adev->firmware.smu_load)
		return 0;

	if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
			&toc->entry[toc->num_entries++])) {
		DRM_ERROR("Failed to get firmware entry for RLC\n");
		return -EINVAL;
	}

	if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
			&toc->entry[toc->num_entries++])) {
		DRM_ERROR("Failed to get firmware entry for CE\n");
		return -EINVAL;
	}

	if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
			&toc->entry[toc->num_entries++])) {
		DRM_ERROR("Failed to get firmware entry for PFP\n");
		return -EINVAL;
	}

	if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
			&toc->entry[toc->num_entries++])) {
		DRM_ERROR("Failed to get firmware entry for ME\n");
		return -EINVAL;
	}

	if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
			&toc->entry[toc->num_entries++])) {
		DRM_ERROR("Failed to get firmware entry for MEC\n");
		return -EINVAL;
	}

	if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
			&toc->entry[toc->num_entries++])) {
		DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
		return -EINVAL;
	}

	if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
			&toc->entry[toc->num_entries++])) {
		DRM_ERROR("Failed to get firmware entry for SDMA0\n");
		return -EINVAL;
	}

	if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
			&toc->entry[toc->num_entries++])) {
		DRM_ERROR("Failed to get firmware entry for SDMA1\n");
		return -EINVAL;
	}

	iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
	iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);

	fw_to_load = UCODE_ID_RLC_G_MASK |
			UCODE_ID_SDMA0_MASK |
			UCODE_ID_SDMA1_MASK |
			UCODE_ID_CP_CE_MASK |
			UCODE_ID_CP_ME_MASK |
			UCODE_ID_CP_PFP_MASK |
			UCODE_ID_CP_MEC_MASK |
			UCODE_ID_CP_MEC_JT1_MASK;

	if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
		DRM_ERROR("Failed to request SMU to load ucode\n");
		return -EINVAL;
	}

	return 0;
}

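/*
 * Wait for the SMC to flag, in SOFT_REGISTERS_TABLE_27, that the
 * firmware of the given type has been loaded.
 */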
static int iceland_smu_check_fw_load_finish(struct amdgpu_device *adev,
					    uint32_t fw_type)
{
	uint32_t fw_mask = iceland_smu_get_mask_for_fw_type(fw_type);
	int i;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_27) & fw_mask))
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout) {
		DRM_ERROR("timed out waiting for firmware to load\n");
		return -EINVAL;
	}

	return 0;
}

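/*
 * Bring up the SMC: upload its firmware, start it running, then
 * request that it load the remaining ucodes.
 */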
int iceland_smu_start(struct amdgpu_device *adev)
{
	int result;

	result = iceland_smu_upload_firmware_image(adev);
	if (result)
		return result;

	result = iceland_smu_start_smc(adev);
	if (result)
		return result;

	return iceland_smu_request_load_fw(adev);
}

static const struct amdgpu_smumgr_funcs iceland_smumgr_funcs = {
	.check_fw_load_finish = iceland_smu_check_fw_load_finish,
	.request_smu_load_fw = NULL,
	.request_smu_specific_fw = NULL,
};

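/*
 * Allocate the SMU private data and the TOC buffer, pin the TOC in
 * VRAM and record its GPU and CPU addresses for later firmware loads.
 */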
int iceland_smu_init(struct amdgpu_device *adev)
{
	struct iceland_smu_private_data *private;
	uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
	struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
	uint64_t mc_addr;
	void *toc_buf_ptr;
	int ret;

	private = kzalloc(sizeof(struct iceland_smu_private_data), GFP_KERNEL);
	if (NULL == private)
		return -ENOMEM;

	/* allocate firmware buffers */
	if (adev->firmware.smu_load)
		amdgpu_ucode_init_bo(adev);

	adev->smu.priv = private;
	adev->smu.fw_flags = 0;

	/* Allocate FW image data structure and header buffer */
	ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
			       true, AMDGPU_GEM_DOMAIN_VRAM,
			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			       NULL, NULL, toc_buf);
	if (ret) {
		DRM_ERROR("Failed to allocate memory for TOC buffer\n");
		return -ENOMEM;
	}

	/* Retrieve GPU address for header buffer and internal buffer */
	ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
	if (ret) {
		amdgpu_bo_unref(&adev->smu.toc_buf);
		DRM_ERROR("Failed to reserve the TOC buffer\n");
		return -EINVAL;
	}

	ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
	if (ret) {
		amdgpu_bo_unreserve(adev->smu.toc_buf);
		amdgpu_bo_unref(&adev->smu.toc_buf);
		DRM_ERROR("Failed to pin the TOC buffer\n");
		return -EINVAL;
	}

	ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
	if (ret) {
		amdgpu_bo_unreserve(adev->smu.toc_buf);
		amdgpu_bo_unref(&adev->smu.toc_buf);
		DRM_ERROR("Failed to map the TOC buffer\n");
		return -EINVAL;
	}

	amdgpu_bo_unreserve(adev->smu.toc_buf);
	private->header_addr_low = lower_32_bits(mc_addr);
	private->header_addr_high = upper_32_bits(mc_addr);
	private->header = toc_buf_ptr;

	adev->smu.smumgr_funcs = &iceland_smumgr_funcs;

	return 0;
}

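/* Tear down the SMU: release the TOC buffer, private data and firmware BO. */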
int iceland_smu_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_unref(&adev->smu.toc_buf);
	kfree(adev->smu.priv);
	adev->smu.priv = NULL;
	if (adev->firmware.fw_buf)
		amdgpu_ucode_fini_bo(adev);

	return 0;
}