1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/firmware.h>
24 #include <linux/slab.h>
25 #include <linux/module.h>
26 #include "drmP.h"
27 #include "amdgpu.h"
28 #include "amdgpu_atombios.h"
29 #include "amdgpu_ih.h"
30 #include "amdgpu_uvd.h"
31 #include "amdgpu_vce.h"
32 #include "amdgpu_ucode.h"
33 #include "atom.h"
34 
35 #include "gmc/gmc_8_1_d.h"
36 #include "gmc/gmc_8_1_sh_mask.h"
37 
38 #include "oss/oss_3_0_d.h"
39 #include "oss/oss_3_0_sh_mask.h"
40 
41 #include "bif/bif_5_0_d.h"
42 #include "bif/bif_5_0_sh_mask.h"
43 
44 #include "gca/gfx_8_0_d.h"
45 #include "gca/gfx_8_0_sh_mask.h"
46 
47 #include "smu/smu_7_1_1_d.h"
48 #include "smu/smu_7_1_1_sh_mask.h"
49 
50 #include "uvd/uvd_5_0_d.h"
51 #include "uvd/uvd_5_0_sh_mask.h"
52 
53 #include "vce/vce_3_0_d.h"
54 #include "vce/vce_3_0_sh_mask.h"
55 
56 #include "dce/dce_10_0_d.h"
57 #include "dce/dce_10_0_sh_mask.h"
58 
59 #include "vid.h"
60 #include "vi.h"
61 #include "vi_dpm.h"
62 #include "gmc_v8_0.h"
63 #include "gmc_v7_0.h"
64 #include "gfx_v8_0.h"
65 #include "sdma_v2_4.h"
66 #include "sdma_v3_0.h"
67 #include "dce_v10_0.h"
68 #include "dce_v11_0.h"
69 #include "iceland_ih.h"
70 #include "tonga_ih.h"
71 #include "cz_ih.h"
72 #include "uvd_v5_0.h"
73 #include "uvd_v6_0.h"
74 #include "vce_v3_0.h"
75 
76 /*
77  * Indirect registers accessor
78  */
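/*
 * Each accessor below writes the target offset into the block's INDEX
 * register and then moves the value through the matching DATA register,
 * holding the corresponding index spinlock so the two-step sequence is
 * not interleaved by another CPU.
 */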
79 static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
80 {
81 	unsigned long flags;
82 	u32 r;
83 
84 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
85 	WREG32(mmPCIE_INDEX, reg);
86 	(void)RREG32(mmPCIE_INDEX);
87 	r = RREG32(mmPCIE_DATA);
88 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
89 	return r;
90 }
91 
92 static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
93 {
94 	unsigned long flags;
95 
96 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
97 	WREG32(mmPCIE_INDEX, reg);
98 	(void)RREG32(mmPCIE_INDEX);
99 	WREG32(mmPCIE_DATA, v);
100 	(void)RREG32(mmPCIE_DATA);
101 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
102 }
103 
104 static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
105 {
106 	unsigned long flags;
107 	u32 r;
108 
109 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
110 	WREG32(mmSMC_IND_INDEX_0, (reg));
111 	r = RREG32(mmSMC_IND_DATA_0);
112 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
113 	return r;
114 }
115 
116 static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
117 {
118 	unsigned long flags;
119 
120 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
121 	WREG32(mmSMC_IND_INDEX_0, (reg));
122 	WREG32(mmSMC_IND_DATA_0, (v));
123 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
124 }
125 
126 /* smu_8_0_d.h */
127 #define mmMP0PUB_IND_INDEX                                                      0x180
128 #define mmMP0PUB_IND_DATA                                                       0x181
129 
130 static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
131 {
132 	unsigned long flags;
133 	u32 r;
134 
135 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
136 	WREG32(mmMP0PUB_IND_INDEX, (reg));
137 	r = RREG32(mmMP0PUB_IND_DATA);
138 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
139 	return r;
140 }
141 
142 static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
143 {
144 	unsigned long flags;
145 
146 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
147 	WREG32(mmMP0PUB_IND_INDEX, (reg));
148 	WREG32(mmMP0PUB_IND_DATA, (v));
149 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
150 }
151 
152 static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
153 {
154 	unsigned long flags;
155 	u32 r;
156 
157 	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
158 	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
159 	r = RREG32(mmUVD_CTX_DATA);
160 	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
161 	return r;
162 }
163 
164 static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
165 {
166 	unsigned long flags;
167 
168 	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
169 	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
170 	WREG32(mmUVD_CTX_DATA, (v));
171 	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
172 }
173 
174 static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
175 {
176 	unsigned long flags;
177 	u32 r;
178 
179 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
180 	WREG32(mmDIDT_IND_INDEX, (reg));
181 	r = RREG32(mmDIDT_IND_DATA);
182 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
183 	return r;
184 }
185 
186 static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
187 {
188 	unsigned long flags;
189 
190 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
191 	WREG32(mmDIDT_IND_INDEX, (reg));
192 	WREG32(mmDIDT_IND_DATA, (v));
193 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
194 }
195 
196 static const u32 tonga_mgcg_cgcg_init[] =
197 {
198 	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
199 	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
200 	mmPCIE_DATA, 0x000f0000, 0x00000000,
201 	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
202 	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
203 	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
204 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
205 };
206 
207 static const u32 fiji_mgcg_cgcg_init[] =
208 {
209 	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
210 	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
211 	mmPCIE_DATA, 0x000f0000, 0x00000000,
212 	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
213 	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
214 	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
215 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
216 };
217 
218 static const u32 iceland_mgcg_cgcg_init[] =
219 {
220 	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
221 	mmPCIE_DATA, 0x000f0000, 0x00000000,
222 	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
223 	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
224 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
225 };
226 
227 static const u32 cz_mgcg_cgcg_init[] =
228 {
229 	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
230 	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
231 	mmPCIE_DATA, 0x000f0000, 0x00000000,
232 	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
233 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
234 };
235 
236 static const u32 stoney_mgcg_cgcg_init[] =
237 {
238 	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
239 	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
240 	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
241 };
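/*
 * The *_mgcg_cgcg_init tables above are golden register settings, consumed
 * three entries at a time (register offset, mask, value) by
 * amdgpu_program_register_sequence() from vi_init_golden_registers() below.
 */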
242 
243 static void vi_init_golden_registers(struct amdgpu_device *adev)
244 {
245 	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
246 	mutex_lock(&adev->grbm_idx_mutex);
247 
248 	switch (adev->asic_type) {
249 	case CHIP_TOPAZ:
250 		amdgpu_program_register_sequence(adev,
251 						 iceland_mgcg_cgcg_init,
252 						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
253 		break;
254 	case CHIP_FIJI:
255 		amdgpu_program_register_sequence(adev,
256 						 fiji_mgcg_cgcg_init,
257 						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
258 		break;
259 	case CHIP_TONGA:
260 		amdgpu_program_register_sequence(adev,
261 						 tonga_mgcg_cgcg_init,
262 						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
263 		break;
264 	case CHIP_CARRIZO:
265 		amdgpu_program_register_sequence(adev,
266 						 cz_mgcg_cgcg_init,
267 						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
268 		break;
269 	case CHIP_STONEY:
270 		amdgpu_program_register_sequence(adev,
271 						 stoney_mgcg_cgcg_init,
272 						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
273 		break;
274 	default:
275 		break;
276 	}
277 	mutex_unlock(&adev->grbm_idx_mutex);
278 }
279 
280 /**
281  * vi_get_xclk - get the xclk
282  *
283  * @adev: amdgpu_device pointer
284  *
285  * Returns the reference clock used by the gfx engine
286  * (VI).
287  */
288 static u32 vi_get_xclk(struct amdgpu_device *adev)
289 {
290 	u32 reference_clock = adev->clock.spll.reference_freq;
291 	u32 tmp;
292 
293 	if (adev->flags & AMD_IS_APU)
294 		return reference_clock;
295 
296 	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
297 	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
298 		return 1000;
299 
300 	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
301 	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
302 		return reference_clock / 4;
303 
304 	return reference_clock;
305 }
306 
307 /**
308  * vi_srbm_select - select specific register instances
309  *
310  * @adev: amdgpu_device pointer
311  * @me: selected ME (micro engine)
312  * @pipe: pipe
313  * @queue: queue
314  * @vmid: VMID
315  *
316  * Switches the currently active registers instances.  Some
317  * registers are instanced per VMID, others are instanced per
318  * me/pipe/queue combination.
319  */
320 void vi_srbm_select(struct amdgpu_device *adev,
321 		     u32 me, u32 pipe, u32 queue, u32 vmid)
322 {
323 	u32 srbm_gfx_cntl = 0;
324 	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
325 	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
326 	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
327 	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
328 	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
329 }
330 
331 static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
332 {
333 	/* todo */
334 }
335 
336 static bool vi_read_disabled_bios(struct amdgpu_device *adev)
337 {
338 	u32 bus_cntl;
339 	u32 d1vga_control = 0;
340 	u32 d2vga_control = 0;
341 	u32 vga_render_control = 0;
342 	u32 rom_cntl;
343 	bool r;
344 
345 	bus_cntl = RREG32(mmBUS_CNTL);
346 	if (adev->mode_info.num_crtc) {
347 		d1vga_control = RREG32(mmD1VGA_CONTROL);
348 		d2vga_control = RREG32(mmD2VGA_CONTROL);
349 		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
350 	}
351 	rom_cntl = RREG32_SMC(ixROM_CNTL);
352 
353 	/* enable the rom */
354 	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
355 	if (adev->mode_info.num_crtc) {
356 		/* Disable VGA mode */
357 		WREG32(mmD1VGA_CONTROL,
358 		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
359 					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
360 		WREG32(mmD2VGA_CONTROL,
361 		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
362 					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
363 		WREG32(mmVGA_RENDER_CONTROL,
364 		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
365 	}
366 	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);
367 
368 	r = amdgpu_read_bios(adev);
369 
370 	/* restore regs */
371 	WREG32(mmBUS_CNTL, bus_cntl);
372 	if (adev->mode_info.num_crtc) {
373 		WREG32(mmD1VGA_CONTROL, d1vga_control);
374 		WREG32(mmD2VGA_CONTROL, d2vga_control);
375 		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
376 	}
377 	WREG32_SMC(ixROM_CNTL, rom_cntl);
378 	return r;
379 }
380 static struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
381 	{mmGB_MACROTILE_MODE7, true},
382 };
383 
384 static struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
385 	{mmGB_TILE_MODE7, true},
386 	{mmGB_TILE_MODE12, true},
387 	{mmGB_TILE_MODE17, true},
388 	{mmGB_TILE_MODE23, true},
389 	{mmGB_MACROTILE_MODE7, true},
390 };
391 
392 static struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
393 	{mmGRBM_STATUS, false},
394 	{mmGRBM_STATUS2, false},
395 	{mmGRBM_STATUS_SE0, false},
396 	{mmGRBM_STATUS_SE1, false},
397 	{mmGRBM_STATUS_SE2, false},
398 	{mmGRBM_STATUS_SE3, false},
399 	{mmSRBM_STATUS, false},
400 	{mmSRBM_STATUS2, false},
401 	{mmSRBM_STATUS3, false},
402 	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET, false},
403 	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET, false},
404 	{mmCP_STAT, false},
405 	{mmCP_STALLED_STAT1, false},
406 	{mmCP_STALLED_STAT2, false},
407 	{mmCP_STALLED_STAT3, false},
408 	{mmCP_CPF_BUSY_STAT, false},
409 	{mmCP_CPF_STALLED_STAT1, false},
410 	{mmCP_CPF_STATUS, false},
411 	{mmCP_CPC_BUSY_STAT, false},
412 	{mmCP_CPC_STALLED_STAT1, false},
413 	{mmCP_CPC_STATUS, false},
414 	{mmGB_ADDR_CONFIG, false},
415 	{mmMC_ARB_RAMCFG, false},
416 	{mmGB_TILE_MODE0, false},
417 	{mmGB_TILE_MODE1, false},
418 	{mmGB_TILE_MODE2, false},
419 	{mmGB_TILE_MODE3, false},
420 	{mmGB_TILE_MODE4, false},
421 	{mmGB_TILE_MODE5, false},
422 	{mmGB_TILE_MODE6, false},
423 	{mmGB_TILE_MODE7, false},
424 	{mmGB_TILE_MODE8, false},
425 	{mmGB_TILE_MODE9, false},
426 	{mmGB_TILE_MODE10, false},
427 	{mmGB_TILE_MODE11, false},
428 	{mmGB_TILE_MODE12, false},
429 	{mmGB_TILE_MODE13, false},
430 	{mmGB_TILE_MODE14, false},
431 	{mmGB_TILE_MODE15, false},
432 	{mmGB_TILE_MODE16, false},
433 	{mmGB_TILE_MODE17, false},
434 	{mmGB_TILE_MODE18, false},
435 	{mmGB_TILE_MODE19, false},
436 	{mmGB_TILE_MODE20, false},
437 	{mmGB_TILE_MODE21, false},
438 	{mmGB_TILE_MODE22, false},
439 	{mmGB_TILE_MODE23, false},
440 	{mmGB_TILE_MODE24, false},
441 	{mmGB_TILE_MODE25, false},
442 	{mmGB_TILE_MODE26, false},
443 	{mmGB_TILE_MODE27, false},
444 	{mmGB_TILE_MODE28, false},
445 	{mmGB_TILE_MODE29, false},
446 	{mmGB_TILE_MODE30, false},
447 	{mmGB_TILE_MODE31, false},
448 	{mmGB_MACROTILE_MODE0, false},
449 	{mmGB_MACROTILE_MODE1, false},
450 	{mmGB_MACROTILE_MODE2, false},
451 	{mmGB_MACROTILE_MODE3, false},
452 	{mmGB_MACROTILE_MODE4, false},
453 	{mmGB_MACROTILE_MODE5, false},
454 	{mmGB_MACROTILE_MODE6, false},
455 	{mmGB_MACROTILE_MODE7, false},
456 	{mmGB_MACROTILE_MODE8, false},
457 	{mmGB_MACROTILE_MODE9, false},
458 	{mmGB_MACROTILE_MODE10, false},
459 	{mmGB_MACROTILE_MODE11, false},
460 	{mmGB_MACROTILE_MODE12, false},
461 	{mmGB_MACROTILE_MODE13, false},
462 	{mmGB_MACROTILE_MODE14, false},
463 	{mmGB_MACROTILE_MODE15, false},
464 	{mmCC_RB_BACKEND_DISABLE, false, true},
465 	{mmGC_USER_RB_BACKEND_DISABLE, false, true},
466 	{mmGB_BACKEND_MAP, false, false},
467 	{mmPA_SC_RASTER_CONFIG, false, true},
468 	{mmPA_SC_RASTER_CONFIG_1, false, true},
469 };
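/*
 * The allowed-register tables above whitelist what vi_read_register() may
 * hand back: "untouched" entries report 0 instead of a live read, and
 * "grbm_indexed" entries go through vi_read_indexed_register() so the
 * requested SE/SH instance is selected before the read.
 */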
470 
471 static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
472 					 u32 sh_num, u32 reg_offset)
473 {
474 	uint32_t val;
475 
476 	mutex_lock(&adev->grbm_idx_mutex);
477 	if (se_num != 0xffffffff || sh_num != 0xffffffff)
478 		gfx_v8_0_select_se_sh(adev, se_num, sh_num);
479 
480 	val = RREG32(reg_offset);
481 
482 	if (se_num != 0xffffffff || sh_num != 0xffffffff)
483 		gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
484 	mutex_unlock(&adev->grbm_idx_mutex);
485 	return val;
486 }
487 
488 static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
489 			    u32 sh_num, u32 reg_offset, u32 *value)
490 {
491 	struct amdgpu_allowed_register_entry *asic_register_table = NULL;
492 	struct amdgpu_allowed_register_entry *asic_register_entry;
493 	uint32_t size, i;
494 
495 	*value = 0;
496 	switch (adev->asic_type) {
497 	case CHIP_TOPAZ:
498 		asic_register_table = tonga_allowed_read_registers;
499 		size = ARRAY_SIZE(tonga_allowed_read_registers);
500 		break;
501 	case CHIP_FIJI:
502 	case CHIP_TONGA:
503 	case CHIP_CARRIZO:
504 	case CHIP_STONEY:
505 		asic_register_table = cz_allowed_read_registers;
506 		size = ARRAY_SIZE(cz_allowed_read_registers);
507 		break;
508 	default:
509 		return -EINVAL;
510 	}
511 
512 	if (asic_register_table) {
513 		for (i = 0; i < size; i++) {
514 			asic_register_entry = asic_register_table + i;
515 			if (reg_offset != asic_register_entry->reg_offset)
516 				continue;
517 			if (!asic_register_entry->untouched)
518 				*value = asic_register_entry->grbm_indexed ?
519 					vi_read_indexed_register(adev, se_num,
520 								 sh_num, reg_offset) :
521 					RREG32(reg_offset);
522 			return 0;
523 		}
524 	}
525 
526 	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
527 		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
528 			continue;
529 
530 		if (!vi_allowed_read_registers[i].untouched)
531 			*value = vi_allowed_read_registers[i].grbm_indexed ?
532 				vi_read_indexed_register(adev, se_num,
533 							 sh_num, reg_offset) :
534 				RREG32(reg_offset);
535 		return 0;
536 	}
537 	return -EINVAL;
538 }
539 
540 static void vi_print_gpu_status_regs(struct amdgpu_device *adev)
541 {
542 	dev_info(adev->dev, "  GRBM_STATUS=0x%08X\n",
543 		RREG32(mmGRBM_STATUS));
544 	dev_info(adev->dev, "  GRBM_STATUS2=0x%08X\n",
545 		RREG32(mmGRBM_STATUS2));
546 	dev_info(adev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
547 		RREG32(mmGRBM_STATUS_SE0));
548 	dev_info(adev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
549 		RREG32(mmGRBM_STATUS_SE1));
550 	dev_info(adev->dev, "  GRBM_STATUS_SE2=0x%08X\n",
551 		RREG32(mmGRBM_STATUS_SE2));
552 	dev_info(adev->dev, "  GRBM_STATUS_SE3=0x%08X\n",
553 		RREG32(mmGRBM_STATUS_SE3));
554 	dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
555 		RREG32(mmSRBM_STATUS));
556 	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
557 		RREG32(mmSRBM_STATUS2));
558 	dev_info(adev->dev, "  SDMA0_STATUS_REG   = 0x%08X\n",
559 		RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
560 	if (adev->sdma.num_instances > 1) {
561 		dev_info(adev->dev, "  SDMA1_STATUS_REG   = 0x%08X\n",
562 			RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
563 	}
564 	dev_info(adev->dev, "  CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
565 	dev_info(adev->dev, "  CP_STALLED_STAT1 = 0x%08x\n",
566 		 RREG32(mmCP_STALLED_STAT1));
567 	dev_info(adev->dev, "  CP_STALLED_STAT2 = 0x%08x\n",
568 		 RREG32(mmCP_STALLED_STAT2));
569 	dev_info(adev->dev, "  CP_STALLED_STAT3 = 0x%08x\n",
570 		 RREG32(mmCP_STALLED_STAT3));
571 	dev_info(adev->dev, "  CP_CPF_BUSY_STAT = 0x%08x\n",
572 		 RREG32(mmCP_CPF_BUSY_STAT));
573 	dev_info(adev->dev, "  CP_CPF_STALLED_STAT1 = 0x%08x\n",
574 		 RREG32(mmCP_CPF_STALLED_STAT1));
575 	dev_info(adev->dev, "  CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
576 	dev_info(adev->dev, "  CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
577 	dev_info(adev->dev, "  CP_CPC_STALLED_STAT1 = 0x%08x\n",
578 		 RREG32(mmCP_CPC_STALLED_STAT1));
579 	dev_info(adev->dev, "  CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));
580 }
581 
582 /**
583  * vi_gpu_check_soft_reset - check which blocks are busy
584  *
585  * @adev: amdgpu_device pointer
586  *
587  * Check which blocks are busy and return the relevant reset
588  * mask to be used by vi_gpu_soft_reset().
589  * Returns a mask of the blocks to be reset.
590  */
591 u32 vi_gpu_check_soft_reset(struct amdgpu_device *adev)
592 {
593 	u32 reset_mask = 0;
594 	u32 tmp;
595 
596 	/* GRBM_STATUS */
597 	tmp = RREG32(mmGRBM_STATUS);
598 	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
599 		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
600 		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
601 		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
602 		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
603 		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
604 		reset_mask |= AMDGPU_RESET_GFX;
605 
606 	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK))
607 		reset_mask |= AMDGPU_RESET_CP;
608 
609 	/* GRBM_STATUS2 */
610 	tmp = RREG32(mmGRBM_STATUS2);
611 	if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
612 		reset_mask |= AMDGPU_RESET_RLC;
613 
614 	if (tmp & (GRBM_STATUS2__CPF_BUSY_MASK |
615 		   GRBM_STATUS2__CPC_BUSY_MASK |
616 		   GRBM_STATUS2__CPG_BUSY_MASK))
617 		reset_mask |= AMDGPU_RESET_CP;
618 
619 	/* SRBM_STATUS2 */
620 	tmp = RREG32(mmSRBM_STATUS2);
621 	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK)
622 		reset_mask |= AMDGPU_RESET_DMA;
623 
624 	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)
625 		reset_mask |= AMDGPU_RESET_DMA1;
626 
627 	/* SRBM_STATUS */
628 	tmp = RREG32(mmSRBM_STATUS);
629 
630 	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
631 		reset_mask |= AMDGPU_RESET_IH;
632 
633 	if (tmp & SRBM_STATUS__SEM_BUSY_MASK)
634 		reset_mask |= AMDGPU_RESET_SEM;
635 
636 	if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
637 		reset_mask |= AMDGPU_RESET_GRBM;
638 
639 	if (adev->asic_type != CHIP_TOPAZ) {
640 		if (tmp & (SRBM_STATUS__UVD_RQ_PENDING_MASK |
641 			   SRBM_STATUS__UVD_BUSY_MASK))
642 			reset_mask |= AMDGPU_RESET_UVD;
643 	}
644 
645 	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
646 		reset_mask |= AMDGPU_RESET_VMC;
647 
648 	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
649 		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK))
650 		reset_mask |= AMDGPU_RESET_MC;
651 
652 	/* SDMA0_STATUS_REG */
653 	tmp = RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET);
654 	if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
655 		reset_mask |= AMDGPU_RESET_DMA;
656 
657 	/* SDMA1_STATUS_REG */
658 	if (adev->sdma.num_instances > 1) {
659 		tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
660 		if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
661 			reset_mask |= AMDGPU_RESET_DMA1;
662 	}
663 #if 0
664 	/* VCE_STATUS */
665 	if (adev->asic_type != CHIP_TOPAZ) {
666 		tmp = RREG32(mmVCE_STATUS);
667 		if (tmp & VCE_STATUS__VCPU_REPORT_RB0_BUSY_MASK)
668 			reset_mask |= AMDGPU_RESET_VCE;
669 		if (tmp & VCE_STATUS__VCPU_REPORT_RB1_BUSY_MASK)
670 			reset_mask |= AMDGPU_RESET_VCE1;
671 
672 	}
673 
674 	if (adev->asic_type != CHIP_TOPAZ) {
675 		if (amdgpu_display_is_display_hung(adev))
676 			reset_mask |= AMDGPU_RESET_DISPLAY;
677 	}
678 #endif
679 
680 	/* Skip MC reset as it's most likely not hung, just busy */
681 	if (reset_mask & AMDGPU_RESET_MC) {
682 		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
683 		reset_mask &= ~AMDGPU_RESET_MC;
684 	}
685 
686 	return reset_mask;
687 }
688 
689 /**
690  * vi_gpu_soft_reset - soft reset GPU
691  *
692  * @adev: amdgpu_device pointer
693  * @reset_mask: mask of which blocks to reset
694  *
695  * Soft reset the blocks specified in @reset_mask.
696  */
697 static void vi_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask)
698 {
699 	struct amdgpu_mode_mc_save save;
700 	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
701 	u32 tmp;
702 
703 	if (reset_mask == 0)
704 		return;
705 
706 	dev_info(adev->dev, "GPU softreset: 0x%08X\n", reset_mask);
707 
708 	vi_print_gpu_status_regs(adev);
709 	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
710 		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
711 	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
712 		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
713 
714 	/* disable CG/PG */
715 
716 	/* stop the rlc */
717 	//XXX
718 	//gfx_v8_0_rlc_stop(adev);
719 
720 	/* Disable GFX parsing/prefetching */
721 	tmp = RREG32(mmCP_ME_CNTL);
722 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
723 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
724 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
725 	WREG32(mmCP_ME_CNTL, tmp);
726 
727 	/* Disable MEC parsing/prefetching */
728 	tmp = RREG32(mmCP_MEC_CNTL);
729 	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
730 	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
731 	WREG32(mmCP_MEC_CNTL, tmp);
732 
733 	if (reset_mask & AMDGPU_RESET_DMA) {
734 		/* sdma0 */
735 		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
736 		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
737 		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
738 	}
739 	if (reset_mask & AMDGPU_RESET_DMA1) {
740 		/* sdma1 */
741 		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
742 		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
743 		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
744 	}
745 
746 	gmc_v8_0_mc_stop(adev, &save);
747 	if (amdgpu_asic_wait_for_mc_idle(adev)) {
748 		dev_warn(adev->dev, "Wait for MC idle timed out !\n");
749 	}
750 
751 	if (reset_mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_COMPUTE | AMDGPU_RESET_CP)) {
752 		grbm_soft_reset =
753 			REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
754 		grbm_soft_reset =
755 			REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
756 	}
757 
758 	if (reset_mask & AMDGPU_RESET_CP) {
759 		grbm_soft_reset =
760 			REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
761 		srbm_soft_reset =
762 			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
763 	}
764 
765 	if (reset_mask & AMDGPU_RESET_DMA)
766 		srbm_soft_reset =
767 			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA, 1);
768 
769 	if (reset_mask & AMDGPU_RESET_DMA1)
770 		srbm_soft_reset =
771 			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1, 1);
772 
773 	if (reset_mask & AMDGPU_RESET_DISPLAY)
774 		srbm_soft_reset =
775 			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_DC, 1);
776 
777 	if (reset_mask & AMDGPU_RESET_RLC)
778 		grbm_soft_reset =
779 			REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
780 
781 	if (reset_mask & AMDGPU_RESET_SEM)
782 		srbm_soft_reset =
783 			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);
784 
785 	if (reset_mask & AMDGPU_RESET_IH)
786 		srbm_soft_reset =
787 			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_IH, 1);
788 
789 	if (reset_mask & AMDGPU_RESET_GRBM)
790 		srbm_soft_reset =
791 			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
792 
793 	if (reset_mask & AMDGPU_RESET_VMC)
794 		srbm_soft_reset =
795 			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
796 
797 	if (reset_mask & AMDGPU_RESET_UVD)
798 		srbm_soft_reset =
799 			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
800 
801 	if (reset_mask & AMDGPU_RESET_VCE)
802 		srbm_soft_reset =
803 			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
804 
805 	if (reset_mask & AMDGPU_RESET_VCE1)
806 		srbm_soft_reset =
807 			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
808 
809 	if (!(adev->flags & AMD_IS_APU)) {
810 		if (reset_mask & AMDGPU_RESET_MC)
811 			srbm_soft_reset =
812 				REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
813 	}
814 
815 	if (grbm_soft_reset) {
816 		tmp = RREG32(mmGRBM_SOFT_RESET);
817 		tmp |= grbm_soft_reset;
818 		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
819 		WREG32(mmGRBM_SOFT_RESET, tmp);
820 		tmp = RREG32(mmGRBM_SOFT_RESET);
821 
822 		udelay(50);
823 
824 		tmp &= ~grbm_soft_reset;
825 		WREG32(mmGRBM_SOFT_RESET, tmp);
826 		tmp = RREG32(mmGRBM_SOFT_RESET);
827 	}
828 
829 	if (srbm_soft_reset) {
830 		tmp = RREG32(mmSRBM_SOFT_RESET);
831 		tmp |= srbm_soft_reset;
832 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
833 		WREG32(mmSRBM_SOFT_RESET, tmp);
834 		tmp = RREG32(mmSRBM_SOFT_RESET);
835 
836 		udelay(50);
837 
838 		tmp &= ~srbm_soft_reset;
839 		WREG32(mmSRBM_SOFT_RESET, tmp);
840 		tmp = RREG32(mmSRBM_SOFT_RESET);
841 	}
842 
843 	/* Wait a little for things to settle down */
844 	udelay(50);
845 
846 	gmc_v8_0_mc_resume(adev, &save);
847 	udelay(50);
848 
849 	vi_print_gpu_status_regs(adev);
850 }
851 
852 static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
853 {
854 	struct amdgpu_mode_mc_save save;
855 	u32 tmp, i;
856 
857 	dev_info(adev->dev, "GPU pci config reset\n");
858 
859 	/* disable dpm? */
860 
861 	/* disable cg/pg */
862 
863 	/* Disable GFX parsing/prefetching */
864 	tmp = RREG32(mmCP_ME_CNTL);
865 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
866 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
867 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
868 	WREG32(mmCP_ME_CNTL, tmp);
869 
870 	/* Disable MEC parsing/prefetching */
871 	tmp = RREG32(mmCP_MEC_CNTL);
872 	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
873 	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
874 	WREG32(mmCP_MEC_CNTL, tmp);
875 
876 	/* Disable GFX parsing/prefetching */
877 	WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK |
878 		CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);
879 
880 	/* Disable MEC parsing/prefetching */
881 	WREG32(mmCP_MEC_CNTL,
882 			CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);
883 
884 	/* sdma0 */
885 	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
886 	tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
887 	WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
888 
889 	/* sdma1 */
890 	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
891 	tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
892 	WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
893 
894 	/* XXX other engines? */
895 
896 	/* halt the rlc, disable cp internal ints */
897 	//XXX
898 	//gfx_v8_0_rlc_stop(adev);
899 
900 	udelay(50);
901 
902 	/* disable mem access */
903 	gmc_v8_0_mc_stop(adev, &save);
904 	if (amdgpu_asic_wait_for_mc_idle(adev)) {
905 		dev_warn(adev->dev, "Wait for MC idle timed out !\n");
906 	}
907 
908 	/* disable BM */
909 	pci_clear_master(adev->pdev);
910 	/* reset */
911 	amdgpu_pci_config_reset(adev);
912 
913 	udelay(100);
914 
915 	/* wait for asic to come out of reset */
916 	for (i = 0; i < adev->usec_timeout; i++) {
917 		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
918 			break;
919 		udelay(1);
920 	}
921 
922 }
923 
924 static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
925 {
926 	u32 tmp = RREG32(mmBIOS_SCRATCH_3);
927 
928 	if (hung)
929 		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
930 	else
931 		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
932 
933 	WREG32(mmBIOS_SCRATCH_3, tmp);
934 }
935 
936 /**
937  * vi_asic_reset - soft reset GPU
938  *
939  * @adev: amdgpu_device pointer
940  *
941  * Look up which blocks are hung and attempt
942  * to reset them.
943  * Returns 0 for success.
944  */
945 static int vi_asic_reset(struct amdgpu_device *adev)
946 {
947 	u32 reset_mask;
948 
949 	reset_mask = vi_gpu_check_soft_reset(adev);
950 
951 	if (reset_mask)
952 		vi_set_bios_scratch_engine_hung(adev, true);
953 
954 	/* try soft reset */
955 	vi_gpu_soft_reset(adev, reset_mask);
956 
957 	reset_mask = vi_gpu_check_soft_reset(adev);
958 
959 	/* try pci config reset */
960 	if (reset_mask && amdgpu_hard_reset)
961 		vi_gpu_pci_config_reset(adev);
962 
963 	reset_mask = vi_gpu_check_soft_reset(adev);
964 
965 	if (!reset_mask)
966 		vi_set_bios_scratch_engine_hung(adev, false);
967 
968 	return 0;
969 }
970 
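/*
 * Program one UVD clock: ask the AtomBIOS clock divider tables for a post
 * divider matching the requested frequency, write it into the given
 * CG_*CLK_CNTL register, then poll the matching status register (up to
 * roughly one second) until the new divider has taken effect.
 */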
971 static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
972 			u32 cntl_reg, u32 status_reg)
973 {
974 	int r, i;
975 	struct atom_clock_dividers dividers;
976 	uint32_t tmp;
977 
978 	r = amdgpu_atombios_get_clock_dividers(adev,
979 					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
980 					       clock, false, &dividers);
981 	if (r)
982 		return r;
983 
984 	tmp = RREG32_SMC(cntl_reg);
985 	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
986 		CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
987 	tmp |= dividers.post_divider;
988 	WREG32_SMC(cntl_reg, tmp);
989 
990 	for (i = 0; i < 100; i++) {
991 		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
992 			break;
993 		mdelay(10);
994 	}
995 	if (i == 100)
996 		return -ETIMEDOUT;
997 
998 	return 0;
999 }
1000 
1001 static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
1002 {
1003 	int r;
1004 
1005 	r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
1006 	if (r)
1007 		return r;
1008 
1009 	r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
1010 
1011 	return r;
1012 }
1013 
1014 static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
1015 {
1016 	/* todo */
1017 
1018 	return 0;
1019 }
1020 
1021 static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
1022 {
1023 	u32 mask;
1024 	int ret;
1025 
1026 	if (pci_is_root_bus(adev->pdev->bus))
1027 		return;
1028 
1029 	if (amdgpu_pcie_gen2 == 0)
1030 		return;
1031 
1032 	if (adev->flags & AMD_IS_APU)
1033 		return;
1034 
1035 	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
1036 	if (ret != 0)
1037 		return;
1038 
1039 	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
1040 		return;
1041 
1042 	/* todo */
1043 }
1044 
1045 static void vi_program_aspm(struct amdgpu_device *adev)
1046 {
1047 
1048 	if (amdgpu_aspm == 0)
1049 		return;
1050 
1051 	/* todo */
1052 }
1053 
1054 static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
1055 					bool enable)
1056 {
1057 	u32 tmp;
1058 
1059 	/* not necessary on CZ */
1060 	if (adev->flags & AMD_IS_APU)
1061 		return;
1062 
1063 	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
1064 	if (enable)
1065 		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
1066 	else
1067 		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);
1068 
1069 	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
1070 }
1071 
1072 /* topaz has no DCE, UVD, VCE */
1073 static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
1074 {
1075 	/* ORDER MATTERS! */
1076 	{
1077 		.type = AMD_IP_BLOCK_TYPE_COMMON,
1078 		.major = 2,
1079 		.minor = 0,
1080 		.rev = 0,
1081 		.funcs = &vi_common_ip_funcs,
1082 	},
1083 	{
1084 		.type = AMD_IP_BLOCK_TYPE_GMC,
1085 		.major = 7,
1086 		.minor = 4,
1087 		.rev = 0,
1088 		.funcs = &gmc_v7_0_ip_funcs,
1089 	},
1090 	{
1091 		.type = AMD_IP_BLOCK_TYPE_IH,
1092 		.major = 2,
1093 		.minor = 4,
1094 		.rev = 0,
1095 		.funcs = &iceland_ih_ip_funcs,
1096 	},
1097 	{
1098 		.type = AMD_IP_BLOCK_TYPE_SMC,
1099 		.major = 7,
1100 		.minor = 1,
1101 		.rev = 0,
1102 		.funcs = &iceland_dpm_ip_funcs,
1103 	},
1104 	{
1105 		.type = AMD_IP_BLOCK_TYPE_GFX,
1106 		.major = 8,
1107 		.minor = 0,
1108 		.rev = 0,
1109 		.funcs = &gfx_v8_0_ip_funcs,
1110 	},
1111 	{
1112 		.type = AMD_IP_BLOCK_TYPE_SDMA,
1113 		.major = 2,
1114 		.minor = 4,
1115 		.rev = 0,
1116 		.funcs = &sdma_v2_4_ip_funcs,
1117 	},
1118 };
1119 
1120 static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
1121 {
1122 	/* ORDER MATTERS! */
1123 	{
1124 		.type = AMD_IP_BLOCK_TYPE_COMMON,
1125 		.major = 2,
1126 		.minor = 0,
1127 		.rev = 0,
1128 		.funcs = &vi_common_ip_funcs,
1129 	},
1130 	{
1131 		.type = AMD_IP_BLOCK_TYPE_GMC,
1132 		.major = 8,
1133 		.minor = 0,
1134 		.rev = 0,
1135 		.funcs = &gmc_v8_0_ip_funcs,
1136 	},
1137 	{
1138 		.type = AMD_IP_BLOCK_TYPE_IH,
1139 		.major = 3,
1140 		.minor = 0,
1141 		.rev = 0,
1142 		.funcs = &tonga_ih_ip_funcs,
1143 	},
1144 	{
1145 		.type = AMD_IP_BLOCK_TYPE_SMC,
1146 		.major = 7,
1147 		.minor = 1,
1148 		.rev = 0,
1149 		.funcs = &tonga_dpm_ip_funcs,
1150 	},
1151 	{
1152 		.type = AMD_IP_BLOCK_TYPE_DCE,
1153 		.major = 10,
1154 		.minor = 0,
1155 		.rev = 0,
1156 		.funcs = &dce_v10_0_ip_funcs,
1157 	},
1158 	{
1159 		.type = AMD_IP_BLOCK_TYPE_GFX,
1160 		.major = 8,
1161 		.minor = 0,
1162 		.rev = 0,
1163 		.funcs = &gfx_v8_0_ip_funcs,
1164 	},
1165 	{
1166 		.type = AMD_IP_BLOCK_TYPE_SDMA,
1167 		.major = 3,
1168 		.minor = 0,
1169 		.rev = 0,
1170 		.funcs = &sdma_v3_0_ip_funcs,
1171 	},
1172 	{
1173 		.type = AMD_IP_BLOCK_TYPE_UVD,
1174 		.major = 5,
1175 		.minor = 0,
1176 		.rev = 0,
1177 		.funcs = &uvd_v5_0_ip_funcs,
1178 	},
1179 	{
1180 		.type = AMD_IP_BLOCK_TYPE_VCE,
1181 		.major = 3,
1182 		.minor = 0,
1183 		.rev = 0,
1184 		.funcs = &vce_v3_0_ip_funcs,
1185 	},
1186 };
1187 
1188 static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
1189 {
1190 	/* ORDER MATTERS! */
1191 	{
1192 		.type = AMD_IP_BLOCK_TYPE_COMMON,
1193 		.major = 2,
1194 		.minor = 0,
1195 		.rev = 0,
1196 		.funcs = &vi_common_ip_funcs,
1197 	},
1198 	{
1199 		.type = AMD_IP_BLOCK_TYPE_GMC,
1200 		.major = 8,
1201 		.minor = 5,
1202 		.rev = 0,
1203 		.funcs = &gmc_v8_0_ip_funcs,
1204 	},
1205 	{
1206 		.type = AMD_IP_BLOCK_TYPE_IH,
1207 		.major = 3,
1208 		.minor = 0,
1209 		.rev = 0,
1210 		.funcs = &tonga_ih_ip_funcs,
1211 	},
1212 	{
1213 		.type = AMD_IP_BLOCK_TYPE_SMC,
1214 		.major = 7,
1215 		.minor = 1,
1216 		.rev = 0,
1217 		.funcs = &fiji_dpm_ip_funcs,
1218 	},
1219 	{
1220 		.type = AMD_IP_BLOCK_TYPE_DCE,
1221 		.major = 10,
1222 		.minor = 1,
1223 		.rev = 0,
1224 		.funcs = &dce_v10_0_ip_funcs,
1225 	},
1226 	{
1227 		.type = AMD_IP_BLOCK_TYPE_GFX,
1228 		.major = 8,
1229 		.minor = 0,
1230 		.rev = 0,
1231 		.funcs = &gfx_v8_0_ip_funcs,
1232 	},
1233 	{
1234 		.type = AMD_IP_BLOCK_TYPE_SDMA,
1235 		.major = 3,
1236 		.minor = 0,
1237 		.rev = 0,
1238 		.funcs = &sdma_v3_0_ip_funcs,
1239 	},
1240 	{
1241 		.type = AMD_IP_BLOCK_TYPE_UVD,
1242 		.major = 6,
1243 		.minor = 0,
1244 		.rev = 0,
1245 		.funcs = &uvd_v6_0_ip_funcs,
1246 	},
1247 	{
1248 		.type = AMD_IP_BLOCK_TYPE_VCE,
1249 		.major = 3,
1250 		.minor = 0,
1251 		.rev = 0,
1252 		.funcs = &vce_v3_0_ip_funcs,
1253 	},
1254 };
1255 
1256 static const struct amdgpu_ip_block_version cz_ip_blocks[] =
1257 {
1258 	/* ORDER MATTERS! */
1259 	{
1260 		.type = AMD_IP_BLOCK_TYPE_COMMON,
1261 		.major = 2,
1262 		.minor = 0,
1263 		.rev = 0,
1264 		.funcs = &vi_common_ip_funcs,
1265 	},
1266 	{
1267 		.type = AMD_IP_BLOCK_TYPE_GMC,
1268 		.major = 8,
1269 		.minor = 0,
1270 		.rev = 0,
1271 		.funcs = &gmc_v8_0_ip_funcs,
1272 	},
1273 	{
1274 		.type = AMD_IP_BLOCK_TYPE_IH,
1275 		.major = 3,
1276 		.minor = 0,
1277 		.rev = 0,
1278 		.funcs = &cz_ih_ip_funcs,
1279 	},
1280 	{
1281 		.type = AMD_IP_BLOCK_TYPE_SMC,
1282 		.major = 8,
1283 		.minor = 0,
1284 		.rev = 0,
1285 		.funcs = &cz_dpm_ip_funcs,
1286 	},
1287 	{
1288 		.type = AMD_IP_BLOCK_TYPE_DCE,
1289 		.major = 11,
1290 		.minor = 0,
1291 		.rev = 0,
1292 		.funcs = &dce_v11_0_ip_funcs,
1293 	},
1294 	{
1295 		.type = AMD_IP_BLOCK_TYPE_GFX,
1296 		.major = 8,
1297 		.minor = 0,
1298 		.rev = 0,
1299 		.funcs = &gfx_v8_0_ip_funcs,
1300 	},
1301 	{
1302 		.type = AMD_IP_BLOCK_TYPE_SDMA,
1303 		.major = 3,
1304 		.minor = 0,
1305 		.rev = 0,
1306 		.funcs = &sdma_v3_0_ip_funcs,
1307 	},
1308 	{
1309 		.type = AMD_IP_BLOCK_TYPE_UVD,
1310 		.major = 6,
1311 		.minor = 0,
1312 		.rev = 0,
1313 		.funcs = &uvd_v6_0_ip_funcs,
1314 	},
1315 	{
1316 		.type = AMD_IP_BLOCK_TYPE_VCE,
1317 		.major = 3,
1318 		.minor = 0,
1319 		.rev = 0,
1320 		.funcs = &vce_v3_0_ip_funcs,
1321 	},
1322 };
1323 
1324 int vi_set_ip_blocks(struct amdgpu_device *adev)
1325 {
1326 	switch (adev->asic_type) {
1327 	case CHIP_TOPAZ:
1328 		adev->ip_blocks = topaz_ip_blocks;
1329 		adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
1330 		break;
1331 	case CHIP_FIJI:
1332 		adev->ip_blocks = fiji_ip_blocks;
1333 		adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
1334 		break;
1335 	case CHIP_TONGA:
1336 		adev->ip_blocks = tonga_ip_blocks;
1337 		adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
1338 		break;
1339 	case CHIP_CARRIZO:
1340 	case CHIP_STONEY:
1341 		adev->ip_blocks = cz_ip_blocks;
1342 		adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
1343 		break;
1344 	default:
1345 		/* FIXME: not supported yet */
1346 		return -EINVAL;
1347 	}
1348 
1349 	return 0;
1350 }
1351 
1352 #define ATI_REV_ID_FUSE_MACRO__ADDRESS      0xC0014044
1353 #define ATI_REV_ID_FUSE_MACRO__SHIFT        9
1354 #define ATI_REV_ID_FUSE_MACRO__MASK         0x00001E00
1355 
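/*
 * The internal revision id comes from a different strap register per part:
 * Topaz reads it from the PCIE_EFUSE4 strap, APUs from the SMC fuse macro
 * defined above, and all other dGPUs from CC_DRM_ID_STRAPS.
 */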
1356 static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
1357 {
1358 	if (adev->asic_type == CHIP_TOPAZ)
1359 		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
1360 			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
1361 	else if (adev->flags & AMD_IS_APU)
1362 		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
1363 			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
1364 	else
1365 		return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
1366 			>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
1367 }
1368 
1369 static const struct amdgpu_asic_funcs vi_asic_funcs =
1370 {
1371 	.read_disabled_bios = &vi_read_disabled_bios,
1372 	.read_register = &vi_read_register,
1373 	.reset = &vi_asic_reset,
1374 	.set_vga_state = &vi_vga_set_state,
1375 	.get_xclk = &vi_get_xclk,
1376 	.set_uvd_clocks = &vi_set_uvd_clocks,
1377 	.set_vce_clocks = &vi_set_vce_clocks,
1378 	.get_cu_info = &gfx_v8_0_get_cu_info,
1379 	/* these should be moved to their own ip modules */
1380 	.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
1381 	.wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
1382 };
1383 
1384 static int vi_common_early_init(void *handle)
1385 {
1386 	bool smc_enabled = false;
1387 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1388 
1389 	if (adev->flags & AMD_IS_APU) {
1390 		adev->smc_rreg = &cz_smc_rreg;
1391 		adev->smc_wreg = &cz_smc_wreg;
1392 	} else {
1393 		adev->smc_rreg = &vi_smc_rreg;
1394 		adev->smc_wreg = &vi_smc_wreg;
1395 	}
1396 	adev->pcie_rreg = &vi_pcie_rreg;
1397 	adev->pcie_wreg = &vi_pcie_wreg;
1398 	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
1399 	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
1400 	adev->didt_rreg = &vi_didt_rreg;
1401 	adev->didt_wreg = &vi_didt_wreg;
1402 
1403 	adev->asic_funcs = &vi_asic_funcs;
1404 
1405 	if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
1406 		(amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
1407 		smc_enabled = true;
1408 
1409 	adev->rev_id = vi_get_rev_id(adev);
1410 	adev->external_rev_id = 0xFF;
1411 	switch (adev->asic_type) {
1412 	case CHIP_TOPAZ:
1413 		adev->has_uvd = false;
1414 		adev->cg_flags = 0;
1415 		adev->pg_flags = 0;
1416 		adev->external_rev_id = 0x1;
1417 		break;
1418 	case CHIP_FIJI:
1419 		adev->has_uvd = true;
1420 		adev->cg_flags = 0;
1421 		adev->pg_flags = 0;
1422 		adev->external_rev_id = adev->rev_id + 0x3c;
1423 		break;
1424 	case CHIP_TONGA:
1425 		adev->has_uvd = true;
1426 		adev->cg_flags = 0;
1427 		adev->pg_flags = 0;
1428 		adev->external_rev_id = adev->rev_id + 0x14;
1429 		break;
1430 	case CHIP_CARRIZO:
1431 	case CHIP_STONEY:
1432 		adev->has_uvd = true;
1433 		adev->cg_flags = 0;
1434 		/* Disable UVD pg */
1435 		adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE;
1436 		adev->external_rev_id = adev->rev_id + 0x1;
1437 		break;
1438 	default:
1439 		/* FIXME: not supported yet */
1440 		return -EINVAL;
1441 	}
1442 
1443 	if (amdgpu_smc_load_fw && smc_enabled)
1444 		adev->firmware.smu_load = true;
1445 
1446 	return 0;
1447 }
1448 
1449 static int vi_common_sw_init(void *handle)
1450 {
1451 	return 0;
1452 }
1453 
1454 static int vi_common_sw_fini(void *handle)
1455 {
1456 	return 0;
1457 }
1458 
1459 static int vi_common_hw_init(void *handle)
1460 {
1461 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1462 
1463 	/* move the golden regs per IP block */
1464 	vi_init_golden_registers(adev);
1465 	/* enable pcie gen2/3 link */
1466 	vi_pcie_gen3_enable(adev);
1467 	/* enable aspm */
1468 	vi_program_aspm(adev);
1469 	/* enable the doorbell aperture */
1470 	vi_enable_doorbell_aperture(adev, true);
1471 
1472 	return 0;
1473 }
1474 
1475 static int vi_common_hw_fini(void *handle)
1476 {
1477 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1478 
1479 	/* enable the doorbell aperture */
1480 	vi_enable_doorbell_aperture(adev, false);
1481 
1482 	return 0;
1483 }
1484 
1485 static int vi_common_suspend(void *handle)
1486 {
1487 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1488 
1489 	return vi_common_hw_fini(adev);
1490 }
1491 
1492 static int vi_common_resume(void *handle)
1493 {
1494 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1495 
1496 	return vi_common_hw_init(adev);
1497 }
1498 
1499 static bool vi_common_is_idle(void *handle)
1500 {
1501 	return true;
1502 }
1503 
1504 static int vi_common_wait_for_idle(void *handle)
1505 {
1506 	return 0;
1507 }
1508 
1509 static void vi_common_print_status(void *handle)
1510 {
1511 	return;
1512 }
1513 
1514 static int vi_common_soft_reset(void *handle)
1515 {
1516 	return 0;
1517 }
1518 
1519 static int vi_common_set_clockgating_state(void *handle,
1520 					    enum amd_clockgating_state state)
1521 {
1522 	return 0;
1523 }
1524 
1525 static int vi_common_set_powergating_state(void *handle,
1526 					    enum amd_powergating_state state)
1527 {
1528 	return 0;
1529 }
1530 
1531 const struct amd_ip_funcs vi_common_ip_funcs = {
1532 	.early_init = vi_common_early_init,
1533 	.late_init = NULL,
1534 	.sw_init = vi_common_sw_init,
1535 	.sw_fini = vi_common_sw_fini,
1536 	.hw_init = vi_common_hw_init,
1537 	.hw_fini = vi_common_hw_fini,
1538 	.suspend = vi_common_suspend,
1539 	.resume = vi_common_resume,
1540 	.is_idle = vi_common_is_idle,
1541 	.wait_for_idle = vi_common_wait_for_idle,
1542 	.soft_reset = vi_common_soft_reset,
1543 	.print_status = vi_common_print_status,
1544 	.set_clockgating_state = vi_common_set_clockgating_state,
1545 	.set_powergating_state = vi_common_set_powergating_state,
1546 };
1547 
1548