1 /*
2  * Copyright 2023 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/firmware.h>
25 #include "amdgpu.h"
26 #include "amdgpu_vcn.h"
27 #include "amdgpu_pm.h"
28 #include "soc15.h"
29 #include "soc15d.h"
30 #include "soc15_hw_ip.h"
31 #include "vcn_v2_0.h"
32 
33 #include "vcn/vcn_5_0_0_offset.h"
34 #include "vcn/vcn_5_0_0_sh_mask.h"
35 #include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"
36 #include "vcn_v5_0_0.h"
37 
38 #include <drm/drm_drv.h>
39 
40 static const struct amdgpu_hwip_reg_entry vcn_reg_list_5_0[] = {
41 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
42 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
43 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
44 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
45 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
46 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
47 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
48 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
49 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
50 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
51 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
52 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
53 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
54 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
55 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
56 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
57 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
58 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
59 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
60 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
61 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
62 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
63 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
64 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
65 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
66 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
67 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
68 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
69 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
70 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
71 	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
72 };
73 
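/* IH client IDs used when registering interrupt sources, indexed by VCN instance */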
74 static int amdgpu_ih_clientid_vcns[] = {
75 	SOC15_IH_CLIENTID_VCN,
76 	SOC15_IH_CLIENTID_VCN1
77 };
78 
79 static void vcn_v5_0_0_set_unified_ring_funcs(struct amdgpu_device *adev);
80 static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev);
81 static int vcn_v5_0_0_set_powergating_state(void *handle,
82 		enum amd_powergating_state state);
83 static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_device *adev,
84 		int inst_idx, struct dpg_pause_state *new_state);
85 static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring);
86 
87 /**
88  * vcn_v5_0_0_early_init - set function pointers and load microcode
89  *
90  * @handle: amdgpu_device pointer
91  *
92  * Set ring and irq function pointers
93  * Load microcode from filesystem
94  */
95 static int vcn_v5_0_0_early_init(void *handle)
96 {
97 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
98 
99 	/* re-use enc ring as unified ring */
100 	adev->vcn.num_enc_rings = 1;
101 
102 	vcn_v5_0_0_set_unified_ring_funcs(adev);
103 	vcn_v5_0_0_set_irq_funcs(adev);
104 
105 	return amdgpu_vcn_early_init(adev);
106 }
107 
108 /**
109  * vcn_v5_0_0_sw_init - sw init for VCN block
110  *
111  * @handle: amdgpu_device pointer
112  *
113  * Load firmware and sw initialization
114  */
115 static int vcn_v5_0_0_sw_init(void *handle)
116 {
117 	struct amdgpu_ring *ring;
118 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
119 	int i, r;
120 	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
121 	uint32_t *ptr;
122 
123 	r = amdgpu_vcn_sw_init(adev);
124 	if (r)
125 		return r;
126 
127 	amdgpu_vcn_setup_ucode(adev);
128 
129 	r = amdgpu_vcn_resume(adev);
130 	if (r)
131 		return r;
132 
133 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
134 		volatile struct amdgpu_vcn5_fw_shared *fw_shared;
135 
136 		if (adev->vcn.harvest_config & (1 << i))
137 			continue;
138 
139 		atomic_set(&adev->vcn.inst[i].sched_score, 0);
140 
141 		/* VCN UNIFIED TRAP */
142 		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
143 				VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
144 		if (r)
145 			return r;
146 
147 		/* VCN POISON TRAP */
148 		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
149 				VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq);
150 		if (r)
151 			return r;
152 
153 		ring = &adev->vcn.inst[i].ring_enc[0];
154 		ring->use_doorbell = true;
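		/* each instance gets an 8-doorbell range (programmed in hw_init below); the unified ring uses slot 2 within that range */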
155 		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;
156 
157 		ring->vm_hub = AMDGPU_MMHUB0(0);
158 		sprintf(ring->name, "vcn_unified_%d", i);
159 
160 		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
161 						AMDGPU_RING_PRIO_0, &adev->vcn.inst[i].sched_score);
162 		if (r)
163 			return r;
164 
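		/* advertise the unified queue to firmware through the shared memory region */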
165 		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
166 		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
167 		fw_shared->sq.is_enabled = 1;
168 
169 		if (amdgpu_vcnfw_log)
170 			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
171 	}
172 
173 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
174 		adev->vcn.pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode;
175 
176 	/* Allocate memory for VCN IP Dump buffer */
177 	ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
178 	if (!ptr) {
179 		DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
180 		adev->vcn.ip_dump = NULL;
181 	} else {
182 		adev->vcn.ip_dump = ptr;
183 	}
184 	return 0;
185 }
186 
187 /**
188  * vcn_v5_0_0_sw_fini - sw fini for VCN block
189  *
190  * @handle: amdgpu_device pointer
191  *
192  * VCN suspend and free up sw allocation
193  */
194 static int vcn_v5_0_0_sw_fini(void *handle)
195 {
196 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
197 	int i, r, idx;
198 
199 	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
200 		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
201 			volatile struct amdgpu_vcn5_fw_shared *fw_shared;
202 
203 			if (adev->vcn.harvest_config & (1 << i))
204 				continue;
205 
206 			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
207 			fw_shared->present_flag_0 = 0;
208 			fw_shared->sq.is_enabled = 0;
209 		}
210 
211 		drm_dev_exit(idx);
212 	}
213 
214 	r = amdgpu_vcn_suspend(adev);
215 	if (r)
216 		return r;
217 
218 	r = amdgpu_vcn_sw_fini(adev);
219 
220 	kfree(adev->vcn.ip_dump);
221 
222 	return r;
223 }
224 
225 /**
226  * vcn_v5_0_0_hw_init - start and test VCN block
227  *
228  * @handle: amdgpu_device pointer
229  *
230  * Initialize the hardware, boot up the VCPU and do some testing
231  */
232 static int vcn_v5_0_0_hw_init(void *handle)
233 {
234 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
235 	struct amdgpu_ring *ring;
236 	int i, r;
237 
238 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
239 		if (adev->vcn.harvest_config & (1 << i))
240 			continue;
241 
242 		ring = &adev->vcn.inst[i].ring_enc[0];
243 
244 		adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
245 			((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i);
246 
247 		r = amdgpu_ring_test_helper(ring);
248 		if (r)
249 			return r;
250 	}
251 
252 	return 0;
253 }
254 
255 /**
256  * vcn_v5_0_0_hw_fini - stop the hardware block
257  *
258  * @handle: amdgpu_device pointer
259  *
260  * Stop the VCN block, mark ring as not ready any more
261  */
262 static int vcn_v5_0_0_hw_fini(void *handle)
263 {
264 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
265 	int i;
266 
267 	cancel_delayed_work_sync(&adev->vcn.idle_work);
268 
269 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
270 		if (adev->vcn.harvest_config & (1 << i))
271 			continue;
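		/* power gate on teardown: always when DPG is supported, otherwise only if the instance was left running */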
272 		if (!amdgpu_sriov_vf(adev)) {
273 			if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
274 				(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
275 				RREG32_SOC15(VCN, i, regUVD_STATUS))) {
276 				vcn_v5_0_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
277 			}
278 		}
279 	}
280 
281 	return 0;
282 }
283 
284 /**
285  * vcn_v5_0_0_suspend - suspend VCN block
286  *
287  * @handle: amdgpu_device pointer
288  *
289  * HW fini and suspend VCN block
290  */
291 static int vcn_v5_0_0_suspend(void *handle)
292 {
293 	int r;
294 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
295 
296 	r = vcn_v5_0_0_hw_fini(adev);
297 	if (r)
298 		return r;
299 
300 	r = amdgpu_vcn_suspend(adev);
301 
302 	return r;
303 }
304 
305 /**
306  * vcn_v5_0_0_resume - resume VCN block
307  *
308  * @handle: amdgpu_device pointer
309  *
310  * Resume firmware and hw init VCN block
311  */
312 static int vcn_v5_0_0_resume(void *handle)
313 {
314 	int r;
315 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
316 
317 	r = amdgpu_vcn_resume(adev);
318 	if (r)
319 		return r;
320 
321 	r = vcn_v5_0_0_hw_init(adev);
322 
323 	return r;
324 }
325 
326 /**
327  * vcn_v5_0_0_mc_resume - memory controller programming
328  *
329  * @adev: amdgpu_device pointer
330  * @inst: instance number
331  *
332  * Let the VCN memory controller know its offsets
333  */
334 static void vcn_v5_0_0_mc_resume(struct amdgpu_device *adev, int inst)
335 {
336 	uint32_t offset, size;
337 	const struct common_firmware_header *hdr;
338 
339 	hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
340 	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
341 
342 	/* cache window 0: fw */
343 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
344 		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
345 			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
346 		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
347 			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
348 		WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, 0);
349 		offset = 0;
350 	} else {
351 		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
352 			lower_32_bits(adev->vcn.inst[inst].gpu_addr));
353 		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
354 			upper_32_bits(adev->vcn.inst[inst].gpu_addr));
355 		offset = size;
356 		WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
357 	}
358 	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE0, size);
359 
360 	/* cache window 1: stack */
361 	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
362 		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
363 	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
364 		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
365 	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET1, 0);
366 	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
367 
368 	/* cache window 2: context */
369 	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
370 		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
371 	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
372 		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
373 	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET2, 0);
374 	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
375 
376 	/* non-cache window */
377 	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
378 		lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
379 	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
380 		upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
381 	WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
382 	WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_SIZE0,
383 		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)));
384 }
385 
386 /**
387  * vcn_v5_0_0_mc_resume_dpg_mode - memory controller programming for dpg mode
388  *
389  * @adev: amdgpu_device pointer
390  * @inst_idx: instance number index
391  * @indirect: indirectly write sram
392  *
393  * Let the VCN memory controller know its offsets with dpg mode
394  */
395 static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
396 {
397 	uint32_t offset, size;
398 	const struct common_firmware_header *hdr;
399 
400 	hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
401 	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
402 
403 	/* cache window 0: fw */
404 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
405 		if (!indirect) {
406 			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
407 				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
408 				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
409 			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
410 				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
411 				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
412 			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
413 				VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
414 		} else {
415 			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
416 				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
417 			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
418 				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
419 			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
420 				VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
421 		}
422 		offset = 0;
423 	} else {
424 		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
425 			VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
426 			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
427 		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
428 			VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
429 			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
430 		offset = size;
431 		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
432 			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0),
433 			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
434 	}
435 
436 	if (!indirect)
437 		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
438 			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
439 	else
440 		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
441 			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
442 
443 	/* cache window 1: stack */
444 	if (!indirect) {
445 		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
446 			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
447 			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
448 		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
449 			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
450 			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
451 		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
452 			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
453 	} else {
454 		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
455 			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
456 		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
457 			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
458 		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
459 			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
460 	}
461 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
462 		VCN, inst_idx, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
463 
464 	/* cache window 2: context */
465 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
466 		VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
467 		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
468 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
469 		VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
470 		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
471 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
472 		VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
473 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
474 		VCN, inst_idx, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
475 
476 	/* non-cache window */
477 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
478 		VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
479 		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
480 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
481 		VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
482 		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
483 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
484 		VCN, inst_idx, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
485 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
486 		VCN, inst_idx, regUVD_VCPU_NONCACHE_SIZE0),
487 		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)), 0, indirect);
488 
489 	/* VCN global tiling registers */
490 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
491 		VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
492 		adev->gfx.config.gb_addr_config, 0, indirect);
493 
494 	return;
495 }
496 
497 /**
498  * vcn_v5_0_0_disable_static_power_gating - disable VCN static power gating
499  *
500  * @adev: amdgpu_device pointer
501  * @inst: instance number
502  *
503  * Disable static power gating for VCN block
504  */
505 static void vcn_v5_0_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
506 {
507 	uint32_t data = 0;
508 
509 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
510 		data = 1 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT;
511 		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
512 		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
513 				UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);
514 
515 		data = 2 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT;
516 		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
517 		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
518 				1 << UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS__SHIFT,
519 				UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);
520 
521 		data = 2 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT;
522 		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
523 		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
524 				1 << UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS__SHIFT,
525 				UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);
526 
527 		data = 2 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT;
528 		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
529 		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
530 				1 << UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS__SHIFT,
531 				UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);
532 	} else {
533 		data = 1 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT;
534 		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
535 		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
536 				UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);
537 
538 		data = 1 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT;
539 		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
540 		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
541 				UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);
542 
543 		data = 1 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT;
544 		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
545 		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
546 				UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);
547 
548 		data = 1 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT;
549 		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
550 		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
551 				UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);
552 	}
553 
554 	data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
555 	data &= ~0x103;
556 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
557 		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
558 			UVD_POWER_STATUS__UVD_PG_EN_MASK;
559 
560 	WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);
561 	return;
562 }
563 
564 /**
565  * vcn_v5_0_0_enable_static_power_gating - enable VCN static power gating
566  *
567  * @adev: amdgpu_device pointer
568  * @inst: instance number
569  *
570  * Enable static power gating for VCN block
571  */
572 static void vcn_v5_0_0_enable_static_power_gating(struct amdgpu_device *adev, int inst)
573 {
574 	uint32_t data;
575 
576 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
577 		/* Before power off, this indicator has to be turned on */
578 		data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
579 		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
580 		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
581 		WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);
582 
583 		data = 2 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT;
584 		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
585 		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
586 				1 << UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS__SHIFT,
587 				UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);
588 
589 		data = 2 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT;
590 		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
591 		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
592 				1 << UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS__SHIFT,
593 				UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);
594 
595 		data = 2 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT;
596 		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
597 		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
598 				1 << UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS__SHIFT,
599 				UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);
600 
601 		data = 2 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT;
602 		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
603 		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
604 				1 << UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS__SHIFT,
605 				UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);
606 	}
607 	return;
608 }
609 
610 /**
611  * vcn_v5_0_0_disable_clock_gating - disable VCN clock gating
612  *
613  * @adev: amdgpu_device pointer
614  * @inst: instance number
615  *
616  * Disable clock gating for VCN block
617  */
618 static void vcn_v5_0_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
619 {
620 	return;
621 }
622 
623 #if 0
624 /**
625  * vcn_v5_0_0_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
626  *
627  * @adev: amdgpu_device pointer
628  * @sram_sel: sram select
629  * @inst_idx: instance number index
630  * @indirect: indirectly write sram
631  *
632  * Disable clock gating for VCN block with dpg mode
633  */
634 static void vcn_v5_0_0_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
635 	int inst_idx, uint8_t indirect)
636 {
637 	return;
638 }
639 #endif
640 
641 /**
642  * vcn_v5_0_0_enable_clock_gating - enable VCN clock gating
643  *
644  * @adev: amdgpu_device pointer
645  * @inst: instance number
646  *
647  * Enable clock gating for VCN block
648  */
649 static void vcn_v5_0_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
650 {
651 	return;
652 }
653 
654 /**
655  * vcn_v5_0_0_start_dpg_mode - VCN start with dpg mode
656  *
657  * @adev: amdgpu_device pointer
658  * @inst_idx: instance number index
659  * @indirect: indirectly write sram
660  *
661  * Start VCN block with dpg mode
662  */
663 static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
664 {
665 	volatile struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
666 	struct amdgpu_ring *ring;
667 	uint32_t tmp;
668 
669 	/* disable register anti-hang mechanism */
670 	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
671 		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
672 
673 	/* enable dynamic power gating mode */
674 	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS);
675 	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
676 	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
677 	WREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS, tmp);
678 
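	/* for indirect programming, stage the register writes in the DPG scratch SRAM and commit them through PSP further down */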
679 	if (indirect)
680 		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
681 
682 	/* enable VCPU clock */
683 	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
684 	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK;
685 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
686 		VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);
687 
688 	/* disable master interrupt */
689 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
690 		VCN, inst_idx, regUVD_MASTINT_EN), 0, 0, indirect);
691 
692 	/* setup regUVD_LMI_CTRL */
693 	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
694 		UVD_LMI_CTRL__REQ_MODE_MASK |
695 		UVD_LMI_CTRL__CRC_RESET_MASK |
696 		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
697 		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
698 		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
699 		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
700 		0x00100000L);
701 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
702 		VCN, inst_idx, regUVD_LMI_CTRL), tmp, 0, indirect);
703 
704 	vcn_v5_0_0_mc_resume_dpg_mode(adev, inst_idx, indirect);
705 
706 	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
707 	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
708 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
709 		VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);
710 
711 	/* enable LMI MC and UMC channels */
712 	tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
713 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
714 		VCN, inst_idx, regUVD_LMI_CTRL2), tmp, 0, indirect);
715 
716 	/* enable master interrupt */
717 	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
718 		VCN, inst_idx, regUVD_MASTINT_EN),
719 		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
720 
721 	if (indirect)
722 		amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
723 
724 	ring = &adev->vcn.inst[inst_idx].ring_enc[0];
725 
726 	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_LO, ring->gpu_addr);
727 	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
728 	WREG32_SOC15(VCN, inst_idx, regUVD_RB_SIZE, ring->ring_size / 4);
729 
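	/* ring reset sequence: disable RB1, zero rptr/wptr, sync wptr to rptr, then re-enable and clear the RING_RESET/DPG_HOLD_OFF queue-mode flags */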
730 	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
731 	tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
732 	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
733 	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
734 	WREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR, 0);
735 	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, 0);
736 
737 	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR);
738 	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, tmp);
739 	ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);
740 
741 	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
742 	tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
743 	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
744 	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
745 
746 	WREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL,
747 		ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
748 		VCN_RB1_DB_CTRL__EN_MASK);
749 
750 	return 0;
751 }
752 
753 /**
754  * vcn_v5_0_0_start - VCN start
755  *
756  * @adev: amdgpu_device pointer
757  *
758  * Start VCN block
759  */
760 static int vcn_v5_0_0_start(struct amdgpu_device *adev)
761 {
762 	volatile struct amdgpu_vcn5_fw_shared *fw_shared;
763 	struct amdgpu_ring *ring;
764 	uint32_t tmp;
765 	int i, j, k, r;
766 
767 	if (adev->pm.dpm_enabled)
768 		amdgpu_dpm_enable_uvd(adev, true);
769 
770 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
771 		if (adev->vcn.harvest_config & (1 << i))
772 			continue;
773 
774 		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
775 
776 		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
777 			r = vcn_v5_0_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
778 			continue;
779 		}
780 
781 		/* disable VCN power gating */
782 		vcn_v5_0_0_disable_static_power_gating(adev, i);
783 
784 		/* set VCN status busy */
785 		tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
786 		WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);
787 
788 		/* enable VCPU clock */
789 		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
790 			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
791 
792 		/* disable master interrupt */
793 		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
794 			~UVD_MASTINT_EN__VCPU_EN_MASK);
795 
796 		/* enable LMI MC and UMC channels */
797 		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
798 			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
799 
800 		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
801 		tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
802 		tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
803 		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
804 
805 		/* setup regUVD_LMI_CTRL */
806 		tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
807 		WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
808 			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
809 			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
810 			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
811 			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
812 
813 		vcn_v5_0_0_mc_resume(adev, i);
814 
815 		/* VCN global tiling registers */
816 		WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
817 			adev->gfx.config.gb_addr_config);
818 
819 		/* unblock VCPU register access */
820 		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
821 			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
822 
823 		/* release VCPU reset to boot */
824 		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
825 			~UVD_VCPU_CNTL__BLK_RST_MASK);
826 
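		/* poll UVD_STATUS for the VCPU report bit; on real hardware retry up to 10 times, pulsing the VCPU block reset between attempts */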
827 		for (j = 0; j < 10; ++j) {
828 			uint32_t status;
829 
830 			for (k = 0; k < 100; ++k) {
831 				status = RREG32_SOC15(VCN, i, regUVD_STATUS);
832 				if (status & 2)
833 					break;
834 				mdelay(10);
835 				if (amdgpu_emu_mode == 1)
836 					msleep(1);
837 			}
838 
839 			if (amdgpu_emu_mode == 1) {
840 				r = -1;
841 				if (status & 2) {
842 					r = 0;
843 					break;
844 				}
845 			} else {
846 				r = 0;
847 				if (status & 2)
848 					break;
849 
850 				dev_err(adev->dev,
851 					"VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
852 				WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
853 							UVD_VCPU_CNTL__BLK_RST_MASK,
854 							~UVD_VCPU_CNTL__BLK_RST_MASK);
855 				mdelay(10);
856 				WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
857 							~UVD_VCPU_CNTL__BLK_RST_MASK);
858 
859 				mdelay(10);
860 				r = -1;
861 			}
862 		}
863 
864 		if (r) {
865 			dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
866 			return r;
867 		}
868 
869 		/* enable master interrupt */
870 		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
871 				UVD_MASTINT_EN__VCPU_EN_MASK,
872 				~UVD_MASTINT_EN__VCPU_EN_MASK);
873 
874 		/* clear the busy bit of VCN_STATUS */
875 		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
876 			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
877 
878 		ring = &adev->vcn.inst[i].ring_enc[0];
879 		WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
880 			ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
881 			VCN_RB1_DB_CTRL__EN_MASK);
882 
883 		WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
884 		WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
885 		WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);
886 
887 		tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
888 		tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
889 		WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
890 		fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
891 		WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
892 		WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);
893 
894 		tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
895 		WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
896 		ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);
897 
898 		tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
899 		tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
900 		WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
901 		fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
902 	}
903 
904 	return 0;
905 }
906 
907 /**
908  * vcn_v5_0_0_stop_dpg_mode - VCN stop with dpg mode
909  *
910  * @adev: amdgpu_device pointer
911  * @inst_idx: instance number index
912  *
913  * Stop VCN block with dpg mode
914  */
915 static void vcn_v5_0_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
916 {
917 	struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
918 	uint32_t tmp;
919 
920 	vcn_v5_0_0_pause_dpg_mode(adev, inst_idx, &state);
921 
922 	/* Wait for power status to be 1 */
923 	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
924 		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
925 
926 	/* wait for read ptr to be equal to write ptr */
927 	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);
928 	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);
929 
930 	/* disable dynamic power gating mode */
931 	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
932 		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
933 
934 	return;
935 }
936 
937 /**
938  * vcn_v5_0_0_stop - VCN stop
939  *
940  * @adev: amdgpu_device pointer
941  *
942  * Stop VCN block
943  */
944 static int vcn_v5_0_0_stop(struct amdgpu_device *adev)
945 {
946 	volatile struct amdgpu_vcn5_fw_shared *fw_shared;
947 	uint32_t tmp;
948 	int i, r = 0;
949 
950 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
951 		if (adev->vcn.harvest_config & (1 << i))
952 			continue;
953 
954 		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
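		/* set FW_QUEUE_DPG_HOLD_OFF so firmware holds off DPG activity during teardown (per the flag name) */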
955 		fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
956 
957 		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
958 			vcn_v5_0_0_stop_dpg_mode(adev, i);
959 			continue;
960 		}
961 
962 		/* wait for vcn idle */
963 		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
964 		if (r)
965 			return r;
966 
967 		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
968 		      UVD_LMI_STATUS__READ_CLEAN_MASK |
969 		      UVD_LMI_STATUS__WRITE_CLEAN_MASK |
970 		      UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
971 		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
972 		if (r)
973 			return r;
974 
975 		/* disable LMI UMC channel */
976 		tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
977 		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
978 		WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
979 		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
980 		      UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
981 		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
982 		if (r)
983 			return r;
984 
985 		/* block VCPU register access */
986 		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
987 			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
988 			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
989 
990 		/* reset VCPU */
991 		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
992 			UVD_VCPU_CNTL__BLK_RST_MASK,
993 			~UVD_VCPU_CNTL__BLK_RST_MASK);
994 
995 		/* disable VCPU clock */
996 		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
997 			~(UVD_VCPU_CNTL__CLK_EN_MASK));
998 
999 		/* apply soft reset */
1000 		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
1001 		tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
1002 		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
1003 		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
1004 		tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
1005 		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
1006 
1007 		/* clear status */
1008 		WREG32_SOC15(VCN, i, regUVD_STATUS, 0);
1009 
1010 		/* enable VCN power gating */
1011 		vcn_v5_0_0_enable_static_power_gating(adev, i);
1012 	}
1013 
1014 	if (adev->pm.dpm_enabled)
1015 		amdgpu_dpm_enable_uvd(adev, false);
1016 
1017 	return 0;
1018 }
1019 
1020 /**
1021  * vcn_v5_0_0_pause_dpg_mode - VCN pause with dpg mode
1022  *
1023  * @adev: amdgpu_device pointer
1024  * @inst_idx: instance number index
1025  * @new_state: pause state
1026  *
1027  * Pause dpg mode for VCN block
1028  */
1029 static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
1030 	struct dpg_pause_state *new_state)
1031 {
1032 	uint32_t reg_data = 0;
1033 	int ret_code;
1034 
1035 	/* pause/unpause if state is changed */
1036 	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
1037 		DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d",
1038 			adev->vcn.inst[inst_idx].pause_state.fw_based,  new_state->fw_based);
1039 		reg_data = RREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE) &
1040 			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
1041 
1042 		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
1043 			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 0x1,
1044 					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1045 
1046 			if (!ret_code) {
1047 				/* pause DPG */
1048 				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1049 				WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);
1050 
1051 				/* wait for ACK */
1052 				SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_DPG_PAUSE,
1053 					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
1054 					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
1055 			}
1056 		} else {
1057 			/* unpause dpg, no need to wait */
1058 			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1059 			WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);
1060 		}
1061 		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
1062 	}
1063 
1064 	return 0;
1065 }
1066 
1067 /**
1068  * vcn_v5_0_0_unified_ring_get_rptr - get unified read pointer
1069  *
1070  * @ring: amdgpu_ring pointer
1071  *
1072  * Returns the current hardware unified read pointer
1073  */
1074 static uint64_t vcn_v5_0_0_unified_ring_get_rptr(struct amdgpu_ring *ring)
1075 {
1076 	struct amdgpu_device *adev = ring->adev;
1077 
1078 	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
1079 		DRM_ERROR("wrong ring id is identified in %s", __func__);
1080 
1081 	return RREG32_SOC15(VCN, ring->me, regUVD_RB_RPTR);
1082 }
1083 
1084 /**
1085  * vcn_v5_0_0_unified_ring_get_wptr - get unified write pointer
1086  *
1087  * @ring: amdgpu_ring pointer
1088  *
1089  * Returns the current hardware unified write pointer
1090  */
1091 static uint64_t vcn_v5_0_0_unified_ring_get_wptr(struct amdgpu_ring *ring)
1092 {
1093 	struct amdgpu_device *adev = ring->adev;
1094 
1095 	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
1096 		DRM_ERROR("wrong ring id is identified in %s", __func__);
1097 
1098 	if (ring->use_doorbell)
1099 		return *ring->wptr_cpu_addr;
1100 	else
1101 		return RREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR);
1102 }
1103 
1104 /**
1105  * vcn_v5_0_0_unified_ring_set_wptr - set enc write pointer
1106  *
1107  * @ring: amdgpu_ring pointer
1108  *
1109  * Commits the enc write pointer to the hardware
1110  */
1111 static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring)
1112 {
1113 	struct amdgpu_device *adev = ring->adev;
1114 
1115 	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
1116 		DRM_ERROR("wrong ring id is identified in %s", __func__);
1117 
1118 	if (ring->use_doorbell) {
1119 		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
1120 		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1121 	} else {
1122 		WREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR, lower_32_bits(ring->wptr));
1123 	}
1124 }
1125 
1126 static const struct amdgpu_ring_funcs vcn_v5_0_0_unified_ring_vm_funcs = {
1127 	.type = AMDGPU_RING_TYPE_VCN_ENC,
1128 	.align_mask = 0x3f,
1129 	.nop = VCN_ENC_CMD_NO_OP,
1130 	.get_rptr = vcn_v5_0_0_unified_ring_get_rptr,
1131 	.get_wptr = vcn_v5_0_0_unified_ring_get_wptr,
1132 	.set_wptr = vcn_v5_0_0_unified_ring_set_wptr,
1133 	.emit_frame_size =
1134 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1135 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1136 		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
1137 		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
1138 		1, /* vcn_v2_0_enc_ring_insert_end */
1139 	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
1140 	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
1141 	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
1142 	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
1143 	.test_ring = amdgpu_vcn_enc_ring_test_ring,
1144 	.test_ib = amdgpu_vcn_unified_ring_test_ib,
1145 	.insert_nop = amdgpu_ring_insert_nop,
1146 	.insert_end = vcn_v2_0_enc_ring_insert_end,
1147 	.pad_ib = amdgpu_ring_generic_pad_ib,
1148 	.begin_use = amdgpu_vcn_ring_begin_use,
1149 	.end_use = amdgpu_vcn_ring_end_use,
1150 	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
1151 	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
1152 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1153 };
1154 
1155 /**
1156  * vcn_v5_0_0_set_unified_ring_funcs - set unified ring functions
1157  *
1158  * @adev: amdgpu_device pointer
1159  *
1160  * Set unified ring functions
1161  */
1162 static void vcn_v5_0_0_set_unified_ring_funcs(struct amdgpu_device *adev)
1163 {
1164 	int i;
1165 
1166 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1167 		if (adev->vcn.harvest_config & (1 << i))
1168 			continue;
1169 
1170 		adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v5_0_0_unified_ring_vm_funcs;
1171 		adev->vcn.inst[i].ring_enc[0].me = i;
1172 	}
1173 }
1174 
1175 /**
1176  * vcn_v5_0_0_is_idle - check VCN block is idle
1177  *
1178  * @handle: amdgpu_device pointer
1179  *
1180  * Check whether VCN block is idle
1181  */
1182 static bool vcn_v5_0_0_is_idle(void *handle)
1183 {
1184 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1185 	int i, ret = 1;
1186 
1187 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1188 		if (adev->vcn.harvest_config & (1 << i))
1189 			continue;
1190 
1191 		ret &= (RREG32_SOC15(VCN, i, regUVD_STATUS) == UVD_STATUS__IDLE);
1192 	}
1193 
1194 	return ret;
1195 }
1196 
1197 /**
1198  * vcn_v5_0_0_wait_for_idle - wait for VCN block idle
1199  *
1200  * @handle: amdgpu_device pointer
1201  *
1202  * Wait for VCN block idle
1203  */
1204 static int vcn_v5_0_0_wait_for_idle(void *handle)
1205 {
1206 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1207 	int i, ret = 0;
1208 
1209 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1210 		if (adev->vcn.harvest_config & (1 << i))
1211 			continue;
1212 
1213 		ret = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE,
1214 			UVD_STATUS__IDLE);
1215 		if (ret)
1216 			return ret;
1217 	}
1218 
1219 	return ret;
1220 }
1221 
1222 /**
1223  * vcn_v5_0_0_set_clockgating_state - set VCN block clockgating state
1224  *
1225  * @handle: amdgpu_device pointer
1226  * @state: clock gating state
1227  *
1228  * Set VCN block clockgating state
1229  */
1230 static int vcn_v5_0_0_set_clockgating_state(void *handle, enum amd_clockgating_state state)
1231 {
1232 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1233 	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
1234 	int i;
1235 
1236 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1237 		if (adev->vcn.harvest_config & (1 << i))
1238 			continue;
1239 
1240 		if (enable) {
1241 			if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE)
1242 				return -EBUSY;
1243 			vcn_v5_0_0_enable_clock_gating(adev, i);
1244 		} else {
1245 			vcn_v5_0_0_disable_clock_gating(adev, i);
1246 		}
1247 	}
1248 
1249 	return 0;
1250 }
1251 
1252 /**
1253  * vcn_v5_0_0_set_powergating_state - set VCN block powergating state
1254  *
1255  * @handle: amdgpu_device pointer
1256  * @state: power gating state
1257  *
1258  * Set VCN block powergating state
1259  */
1260 static int vcn_v5_0_0_set_powergating_state(void *handle, enum amd_powergating_state state)
1261 {
1262 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1263 	int ret;
1264 
1265 	if (state == adev->vcn.cur_state)
1266 		return 0;
1267 
1268 	if (state == AMD_PG_STATE_GATE)
1269 		ret = vcn_v5_0_0_stop(adev);
1270 	else
1271 		ret = vcn_v5_0_0_start(adev);
1272 
1273 	if (!ret)
1274 		adev->vcn.cur_state = state;
1275 
1276 	return ret;
1277 }
1278 
1279 /**
1280  * vcn_v5_0_0_process_interrupt - process VCN block interrupt
1281  *
1282  * @adev: amdgpu_device pointer
1283  * @source: interrupt sources
1284  * @entry: interrupt entry from clients and sources
1285  *
1286  * Process VCN block interrupt
1287  */
1288 static int vcn_v5_0_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
1289 	struct amdgpu_iv_entry *entry)
1290 {
1291 	uint32_t ip_instance;
1292 
1293 	switch (entry->client_id) {
1294 	case SOC15_IH_CLIENTID_VCN:
1295 		ip_instance = 0;
1296 		break;
1297 	case SOC15_IH_CLIENTID_VCN1:
1298 		ip_instance = 1;
1299 		break;
1300 	default:
1301 		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
1302 		return 0;
1303 	}
1304 
1305 	DRM_DEBUG("IH: VCN TRAP\n");
1306 
1307 	switch (entry->src_id) {
1308 	case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
1309 		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
1310 		break;
1311 	case VCN_4_0__SRCID_UVD_POISON:
1312 		amdgpu_vcn_process_poison_irq(adev, source, entry);
1313 		break;
1314 	default:
1315 		DRM_ERROR("Unhandled interrupt: %d %d\n",
1316 			  entry->src_id, entry->src_data[0]);
1317 		break;
1318 	}
1319 
1320 	return 0;
1321 }
1322 
1323 static const struct amdgpu_irq_src_funcs vcn_v5_0_0_irq_funcs = {
1324 	.process = vcn_v5_0_0_process_interrupt,
1325 };
1326 
1327 /**
1328  * vcn_v5_0_0_set_irq_funcs - set VCN block interrupt irq functions
1329  *
1330  * @adev: amdgpu_device pointer
1331  *
1332  * Set VCN block interrupt irq functions
1333  */
1334 static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev)
1335 {
1336 	int i;
1337 
1338 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1339 		if (adev->vcn.harvest_config & (1 << i))
1340 			continue;
1341 
1342 		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
1343 		adev->vcn.inst[i].irq.funcs = &vcn_v5_0_0_irq_funcs;
1344 	}
1345 }
1346 
1347 static void vcn_v5_0_print_ip_state(void *handle, struct drm_printer *p)
1348 {
1349 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1350 	int i, j;
1351 	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
1352 	uint32_t inst_off, is_powered;
1353 
1354 	if (!adev->vcn.ip_dump)
1355 		return;
1356 
1357 	drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
1358 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1359 		if (adev->vcn.harvest_config & (1 << i)) {
1360 			drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
1361 			continue;
1362 		}
1363 
1364 		inst_off = i * reg_count;
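		/* UVD_POWER_STATUS is the first register in the cached dump; a status field of 1 (tiles off) means the instance is power gated */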
1365 		is_powered = (adev->vcn.ip_dump[inst_off] &
1366 				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
1367 
1368 		if (is_powered) {
1369 			drm_printf(p, "\nActive Instance:VCN%d\n", i);
1370 			for (j = 0; j < reg_count; j++)
1371 				drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_5_0[j].reg_name,
1372 					   adev->vcn.ip_dump[inst_off + j]);
1373 		} else {
1374 			drm_printf(p, "\nInactive Instance:VCN%d\n", i);
1375 		}
1376 	}
1377 }
1378 
1379 static void vcn_v5_0_dump_ip_state(void *handle)
1380 {
1381 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1382 	int i, j;
1383 	bool is_powered;
1384 	uint32_t inst_off;
1385 	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
1386 
1387 	if (!adev->vcn.ip_dump)
1388 		return;
1389 
1390 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1391 		if (adev->vcn.harvest_config & (1 << i))
1392 			continue;
1393 
1394 		inst_off = i * reg_count;
1395 		/* mmUVD_POWER_STATUS is always readable and is first element of the array */
1396 		adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
1397 		is_powered = (adev->vcn.ip_dump[inst_off] &
1398 				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
1399 
1400 		if (is_powered)
1401 			for (j = 1; j < reg_count; j++)
1402 				adev->vcn.ip_dump[inst_off + j] =
1403 					RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_5_0[j], i));
1404 	}
1405 }
1406 
1407 static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
1408 	.name = "vcn_v5_0_0",
1409 	.early_init = vcn_v5_0_0_early_init,
1410 	.late_init = NULL,
1411 	.sw_init = vcn_v5_0_0_sw_init,
1412 	.sw_fini = vcn_v5_0_0_sw_fini,
1413 	.hw_init = vcn_v5_0_0_hw_init,
1414 	.hw_fini = vcn_v5_0_0_hw_fini,
1415 	.suspend = vcn_v5_0_0_suspend,
1416 	.resume = vcn_v5_0_0_resume,
1417 	.is_idle = vcn_v5_0_0_is_idle,
1418 	.wait_for_idle = vcn_v5_0_0_wait_for_idle,
1419 	.check_soft_reset = NULL,
1420 	.pre_soft_reset = NULL,
1421 	.soft_reset = NULL,
1422 	.post_soft_reset = NULL,
1423 	.set_clockgating_state = vcn_v5_0_0_set_clockgating_state,
1424 	.set_powergating_state = vcn_v5_0_0_set_powergating_state,
1425 	.dump_ip_state = vcn_v5_0_dump_ip_state,
1426 	.print_ip_state = vcn_v5_0_print_ip_state,
1427 };
1428 
1429 const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block = {
1430 	.type = AMD_IP_BLOCK_TYPE_VCN,
1431 	.major = 5,
1432 	.minor = 0,
1433 	.rev = 0,
1434 	.funcs = &vcn_v5_0_0_ip_funcs,
1435 };
1436