/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
#include "mmsch_v1_0.h"

#include "vcn/vcn_2_5_offset.h"
#include "vcn/vcn_2_5_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

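/*
 * The *_INTERNAL_OFFSET defines below are VCPU-relative register indices
 * written through the decode ring (see the vcn_v2_0 ring emit helpers)
 * rather than the SOC15 MMIO offsets used for direct register access.
 */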
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x0f
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x10
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x11
#define mmUVD_NO_OP_INTERNAL_OFFSET				0x29
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x66
#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x431
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x3b4
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 	0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x25c

#define VCN25_MAX_HW_INSTANCES_ARCTURUS			2

static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_5_set_powergating_state(void *handle,
				enum amd_powergating_state state);
static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state);
static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);

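/* IH client IDs indexed by VCN instance, used when registering IRQ sources. */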
static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

/**
 * vcn_v2_5_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v2_5_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.num_vcn_inst = 2;
		adev->vcn.harvest_config = 0;
		adev->vcn.num_enc_rings = 1;
	} else {
		u32 harvest;
		int i;

		adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
			if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
				adev->vcn.harvest_config |= 1 << i;
		}
		if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
					AMDGPU_VCN_HARVEST_VCN1))
			/* both instances are harvested, disable the block */
			return -ENOENT;

		adev->vcn.num_enc_rings = 2;
	}

	vcn_v2_5_set_dec_ring_funcs(adev);
	vcn_v2_5_set_enc_ring_funcs(adev);
	vcn_v2_5_set_irq_funcs(adev);

	return 0;
}

/**
 * vcn_v2_5_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v2_5_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		/* VCN DEC TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq);
		if (r)
			return r;

		/* VCN ENC TRAP */
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

		if (adev->vcn.num_vcn_inst == VCN25_MAX_HW_INSTANCES_ARCTURUS) {
			adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1;
			adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
	}

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		volatile struct amdgpu_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << j))
			continue;
		adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
		adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
		adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

		adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9);
		adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA0);
		adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA1);
		adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_CMD);
		adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(VCN, j, mmUVD_NO_OP);

		ring = &adev->vcn.inst[j].ring_dec;
		ring->use_doorbell = true;

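		/*
		 * Doorbell layout: under SR-IOV each instance gets two
		 * doorbells (dec + one enc ring); on bare metal eight are
		 * reserved per instance (dec at +0, enc rings at +2/+3).
		 */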
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
				(amdgpu_sriov_vf(adev) ? 2*j : 8*j);
		sprintf(ring->name, "vcn_dec_%d", j);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq,
				     0, AMDGPU_RING_PRIO_DEFAULT, NULL);
		if (r)
			return r;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			ring = &adev->vcn.inst[j].ring_enc[i];
			ring->use_doorbell = true;

			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
					(amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));

			sprintf(ring->name, "vcn_enc_%d.%d", j, i);
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->vcn.inst[j].irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;
		}

		fw_shared = adev->vcn.inst[j].fw_shared_cpu_addr;
		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode;

	return 0;
}

/**
 * vcn_v2_5_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v2_5_sw_fini(void *handle)
{
	int i, r, idx;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	volatile struct amdgpu_fw_shared *fw_shared;

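	/*
	 * Only touch the fw_shared CPU mapping if the device is still
	 * present; drm_dev_enter() guards against a concurrent unplug.
	 */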
	if (drm_dev_enter(&adev->ddev, &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;
			fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
			fw_shared->present_flag_0 = 0;
		}
		drm_dev_exit(idx);
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v2_5_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v2_5_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j, r = 0;

	if (amdgpu_sriov_vf(adev))
		r = vcn_v2_5_sriov_start(adev);

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		if (amdgpu_sriov_vf(adev)) {
			adev->vcn.inst[j].ring_enc[0].sched.ready = true;
			adev->vcn.inst[j].ring_enc[1].sched.ready = false;
			adev->vcn.inst[j].ring_enc[2].sched.ready = false;
			adev->vcn.inst[j].ring_dec.sched.ready = true;
		} else {
			ring = &adev->vcn.inst[j].ring_dec;

			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
						ring->doorbell_index, j);

			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;

			for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
				ring = &adev->vcn.inst[j].ring_enc[i];
				r = amdgpu_ring_test_helper(ring);
				if (r)
					goto done;
			}
		}
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}

/**
 * vcn_v2_5_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v2_5_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

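		/*
		 * Gate the instance if DPG is in use, or if it was left
		 * running (not yet power gated and UVD_STATUS still set).
		 */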
		if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
		    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
		     RREG32_SOC15(VCN, i, mmUVD_STATUS)))
			vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
	}

	return 0;
}

/**
 * vcn_v2_5_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v2_5_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v2_5_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v2_5_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v2_5_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v2_5_hw_init(adev);

	return r;
}

/**
 * vcn_v2_5_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* cache window 0: fw */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
			WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
			offset = 0;
		} else {
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = size;
			WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0,
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE0, size);

		/* cache window 1: stack */
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

		/* cache window 2: context */
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

		/* non-cache window */
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].fw_shared_gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].fw_shared_gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
		WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_SIZE0,
			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
	}
}

static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

/**
 * vcn_v2_5_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* UVD disable CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_GATE);
		data &= ~(UVD_CGC_GATE__SYS_MASK
			| UVD_CGC_GATE__UDEC_MASK
			| UVD_CGC_GATE__MPEG2_MASK
			| UVD_CGC_GATE__REGS_MASK
			| UVD_CGC_GATE__RBC_MASK
			| UVD_CGC_GATE__LMI_MC_MASK
			| UVD_CGC_GATE__LMI_UMC_MASK
			| UVD_CGC_GATE__IDCT_MASK
			| UVD_CGC_GATE__MPRD_MASK
			| UVD_CGC_GATE__MPC_MASK
			| UVD_CGC_GATE__LBSI_MASK
			| UVD_CGC_GATE__LRBBM_MASK
			| UVD_CGC_GATE__UDEC_RE_MASK
			| UVD_CGC_GATE__UDEC_CM_MASK
			| UVD_CGC_GATE__UDEC_IT_MASK
			| UVD_CGC_GATE__UDEC_DB_MASK
			| UVD_CGC_GATE__UDEC_MP_MASK
			| UVD_CGC_GATE__WCB_MASK
			| UVD_CGC_GATE__VCPU_MASK
			| UVD_CGC_GATE__MMSCH_MASK);

		WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);

		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK
			| UVD_CGC_CTRL__MMSCH_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		/* turn on SUVD clock gating */
		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE);
		data |= (UVD_SUVD_CGC_GATE__SRE_MASK
			| UVD_SUVD_CGC_GATE__SIT_MASK
			| UVD_SUVD_CGC_GATE__SMP_MASK
			| UVD_SUVD_CGC_GATE__SCM_MASK
			| UVD_SUVD_CGC_GATE__SDB_MASK
			| UVD_SUVD_CGC_GATE__SRE_H264_MASK
			| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SIT_H264_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCM_H264_MASK
			| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SDB_H264_MASK
			| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCLR_MASK
			| UVD_SUVD_CGC_GATE__UVD_SC_MASK
			| UVD_SUVD_CGC_GATE__ENT_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
			| UVD_SUVD_CGC_GATE__SITE_MASK
			| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
			| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
			| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
			| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
			| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}

static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
		uint8_t sram_sel, int inst_idx, uint8_t indirect)
{
	uint32_t reg_data = 0;

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		 UVD_CGC_CTRL__SYS_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MODE_MASK |
		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
		 UVD_CGC_CTRL__REGS_MODE_MASK |
		 UVD_CGC_CTRL__RBC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		 UVD_CGC_CTRL__IDCT_MODE_MASK |
		 UVD_CGC_CTRL__MPRD_MODE_MASK |
		 UVD_CGC_CTRL__MPC_MODE_MASK |
		 UVD_CGC_CTRL__LBSI_MODE_MASK |
		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
		 UVD_CGC_CTRL__WCB_MODE_MASK |
		 UVD_CGC_CTRL__VCPU_MODE_MASK |
		 UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v2_5_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable UVD CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}

static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);

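	/*
	 * In indirect mode the DPG register writes below are staged in the
	 * dpg_sram buffer and only committed to the hardware later via
	 * psp_update_vcn_sram() near the end of this function.
	 */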
	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

	/* enable clock gating */
	vcn_v2_5_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
	tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);

	/* unblock VCPU register access */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
				    (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
					       (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));

	ring = &adev->vcn.inst[inst_idx].ring_dec;
	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);

	/* Stall DPG before WPTR/RPTR reset */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
		~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
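	/*
	 * Flag the ring reset to the firmware through shared memory; the
	 * flag is cleared again once the pointers below are programmed.
	 */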
	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;

	/* set the write pointer delay */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
	/* Unstall DPG */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

	return 0;
}

static int vcn_v2_5_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	int i, j, k, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			continue;
		}

		/* disable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

		/* set uvd status busy */
		tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return 0;

	/* SW clock gating */
	vcn_v2_5_disable_clock_gating(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* setup mmUVD_LMI_CTRL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
		tmp &= ~0xff;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp | 0x8 |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup mmUVD_MPC_CNTL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup mmUVD_MPC_SET_MUX */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
	}

	vcn_v2_5_mc_resume(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;

		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* VCN global tiling registers */
		WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
		WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

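		/*
		 * Poll for the VCPU to report boot-up (bit 1 of UVD_STATUS);
		 * if it never does, retry with a VCPU block reset, up to
		 * ten times in total.
		 */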
		for (k = 0; k < 10; ++k) {
			uint32_t status;

			for (j = 0; j < 100; ++j) {
				status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
				if (status & 2)
					break;
				if (amdgpu_emu_mode == 1)
					msleep(500);
				else
					mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
				UVD_VCPU_CNTL__BLK_RST_MASK,
				~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
				~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("VCN decode not responding, giving up!!!\n");
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);

		fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);

		ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
				lower_32_bits(ring->wptr));
		fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;

		fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
		ring = &adev->vcn.inst[i].ring_enc[0];
		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
		fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

		fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
		ring = &adev->vcn.inst[i].ring_enc[1];
		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
		fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
	}

	return 0;
}

static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop = 0, size = 0;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v1_1_init_header *header = NULL;

	header = (struct mmsch_v1_1_init_header *)table->cpu_addr;
	size = header->total_size;

	/*
	 * 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of
	 * memory descriptor location
	 */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
	data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);

	/*
	 * 5, kick off the initialization and wait until
	 * MMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);

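	/* MMSCH acks with 0x10000002 in the mailbox response register on
	 * success; poll it for up to 10 * 100us before giving up. */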
	data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
	loop = 10;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(100);
		data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		dev_err(adev->dev,
			"failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = %x\n",
			data);
		return -EBUSY;
	}

	return 0;
}

static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t offset, size, tmp, i, rb_bufsz;
	uint32_t table_size = 0;
	struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
	struct mmsch_v1_0_cmd_end end = { { 0 } };
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	header->version = MMSCH_VERSION;
	header->total_size = sizeof(struct mmsch_v1_1_init_header) >> 2;
	init_table += header->total_size;

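	/* Build one register-programming table per engine directly behind
	 * the header; MMSCH replays these writes on behalf of the VF. */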
	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		header->eng[i].table_offset = header->total_size;
		header->eng[i].init_status = 0;
		header->eng[i].table_size = 0;

		table_size = 0;

		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
		/* mc resume */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
			offset = 0;
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
		} else {
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = size;
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0),
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE0),
			size);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET1),
			0);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE1),
			AMDGPU_VCN_STACK_SIZE);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET2),
			0);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE2),
			AMDGPU_VCN_CONTEXT_SIZE);

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->wptr = 0;

		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_LO),
			lower_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_HI),
			upper_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_SIZE),
			ring->ring_size / 4);

		ring = &adev->vcn.inst[i].ring_dec;
		ring->wptr = 0;
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
			lower_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
			upper_32_bits(ring->gpu_addr));

		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RBC_RB_CNTL), tmp);

		/* add end packet */
		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
		init_table += sizeof(struct mmsch_v1_0_cmd_end) / 4;

		/* refine header */
		header->eng[i].table_size = table_size;
		header->total_size += table_size;
	}

	return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table);
}

static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t tmp;

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
			~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}

static int vcn_v2_5_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int i, r = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v2_5_stop_dpg_mode(adev, i);
			continue;
		}

		/* wait for vcn idle */
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
		if (r)
			return r;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__READ_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* block LMI UMC channel */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);

		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* clear status */
		WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);

		vcn_v2_5_enable_clock_gating(adev);

		/* enable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS),
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	}

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}

static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state)
{
	struct amdgpu_ring *ring;
	uint32_t reg_data = 0;
	int ret_code = 0;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;

				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				/* Stall DPG before WPTR/RPTR reset */
				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
					UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
					~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				/* Restore */
				fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst[inst_idx].ring_enc[0];
				ring->wptr = 0;
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

				fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst[inst_idx].ring_enc[1];
				ring->wptr = 0;
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;

				/* Unstall DPG */
				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
					0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
			SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}

/**
 * vcn_v2_5_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v2_5_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v2_5_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = vcn_v2_5_dec_ring_get_rptr,
	.get_wptr = vcn_v2_5_dec_ring_get_wptr,
	.set_wptr = vcn_v2_5_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = vcn_v2_0_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

1555 static const struct amdgpu_ring_funcs vcn_v2_6_dec_ring_vm_funcs = {
1556 .type = AMDGPU_RING_TYPE_VCN_DEC,
1557 .align_mask = 0xf,
1558 .vmhub = AMDGPU_MMHUB_0,
1559 .get_rptr = vcn_v2_5_dec_ring_get_rptr,
1560 .get_wptr = vcn_v2_5_dec_ring_get_wptr,
1561 .set_wptr = vcn_v2_5_dec_ring_set_wptr,
1562 .emit_frame_size =
1563 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1564 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
1565 8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
1566 14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
1567 6,
1568 .emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
1569 .emit_ib = vcn_v2_0_dec_ring_emit_ib,
1570 .emit_fence = vcn_v2_0_dec_ring_emit_fence,
1571 .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
1572 .test_ring = vcn_v2_0_dec_ring_test_ring,
1573 .test_ib = amdgpu_vcn_dec_ring_test_ib,
1574 .insert_nop = vcn_v2_0_dec_ring_insert_nop,
1575 .insert_start = vcn_v2_0_dec_ring_insert_start,
1576 .insert_end = vcn_v2_0_dec_ring_insert_end,
1577 .pad_ib = amdgpu_ring_generic_pad_ib,
1578 .begin_use = amdgpu_vcn_ring_begin_use,
1579 .end_use = amdgpu_vcn_ring_end_use,
1580 .emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
1581 .emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
1582 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1583 };
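
/*
 * The v2.5 and v2.6 decode tables above are identical except for
 * .vmhub: Arcturus (VCN 2.5) routes through AMDGPU_MMHUB_1, while
 * Aldebaran (VCN 2.6) uses AMDGPU_MMHUB_0. The encode tables below
 * differ in the same single field.
 */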

/**
 * vcn_v2_5_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
}

/**
 * vcn_v2_5_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
	} else {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
	}
}

/**
 * vcn_v2_5_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		}
	} else {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		}
	}
}
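
/*
 * Encode ring 0 uses the UVD_RB_{RPTR,WPTR} register pair and encode
 * ring 1 the UVD_RB_{RPTR2,WPTR2} pair; with doorbells enabled the two
 * branches above are identical, since the doorbell index already
 * distinguishes the rings.
 */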

static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = vcn_v2_5_enc_ring_get_rptr,
	.get_wptr = vcn_v2_5_enc_ring_get_wptr,
	.set_wptr = vcn_v2_5_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs vcn_v2_6_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v2_5_enc_ring_get_rptr,
	.get_wptr = vcn_v2_5_enc_ring_get_wptr,
	.set_wptr = vcn_v2_5_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

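/**
 * vcn_v2_5_set_dec_ring_funcs - set dec ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Hook up the decode ring function table for each powered-on VCN
 * instance, picking the v2.5 or v2.6 variant by ASIC type
 */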
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->asic_type == CHIP_ARCTURUS)
			adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
		else /* CHIP_ALDEBARAN */
			adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_6_dec_ring_vm_funcs;
		adev->vcn.inst[i].ring_dec.me = i;
		DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i);
	}
}

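/**
 * vcn_v2_5_set_enc_ring_funcs - set enc ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Hook up the encode ring function table for every encode ring of each
 * powered-on VCN instance, picking the v2.5 or v2.6 variant by ASIC type
 */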
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			if (adev->asic_type == CHIP_ARCTURUS)
				adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
			else /* CHIP_ALDEBARAN */
				adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_6_enc_ring_vm_funcs;
			adev->vcn.inst[j].ring_enc[i].me = j;
		}
		DRM_INFO("VCN(%d) encode is enabled in VM mode\n", j);
	}
}

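/**
 * vcn_v2_5_is_idle - check VCN block is idle
 *
 * @handle: amdgpu_device pointer
 *
 * Returns true only if every unharvested instance reports
 * UVD_STATUS__IDLE
 */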
static bool vcn_v2_5_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}

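/**
 * vcn_v2_5_wait_for_idle - wait for VCN block idle
 *
 * @handle: amdgpu_device pointer
 *
 * Polls UVD_STATUS of each unharvested instance until it reads
 * UVD_STATUS__IDLE; returns the SOC15_WAIT_ON_RREG error (typically
 * -ETIMEDOUT) if an instance never goes idle
 */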
static int vcn_v2_5_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}

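/**
 * vcn_v2_5_set_clockgating_state - set VCN clock gating state
 *
 * @handle: amdgpu_device pointer
 * @state: clock gating state
 *
 * Enables or disables VCN clock gating; gating is refused with -EBUSY
 * while the block is still busy, and is left to the host under SR-IOV
 */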
static int vcn_v2_5_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (enable) {
		if (!vcn_v2_5_is_idle(handle))
			return -EBUSY;
		vcn_v2_5_enable_clock_gating(adev);
	} else {
		vcn_v2_5_disable_clock_gating(adev);
	}

	return 0;
}

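/**
 * vcn_v2_5_set_powergating_state - set VCN power gating state
 *
 * @handle: amdgpu_device pointer
 * @state: power gating state
 *
 * Gating powers the block down via vcn_v2_5_stop(), ungating brings it
 * back up via vcn_v2_5_start(); the cached cur_state short-circuits
 * redundant transitions
 */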
static int vcn_v2_5_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v2_5_stop(adev);
	else
		ret = vcn_v2_5_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

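/**
 * vcn_v2_5_process_interrupt - process VCN block interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Maps the IH client id to a VCN instance and the source id to one of
 * its rings, then lets the fence code process the completed fences
 */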
static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
	.set = vcn_v2_5_set_interrupt_state,
	.process = vcn_v2_5_process_interrupt,
};

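/**
 * vcn_v2_5_set_irq_funcs - set VCN irq functions
 *
 * @adev: amdgpu_device pointer
 *
 * Registers vcn_v2_5_irq_funcs for each powered-on instance; the
 * interrupt type count is one decode ring plus num_enc_rings
 */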
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
		adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
	}
}

static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
	.name = "vcn_v2_5",
	.early_init = vcn_v2_5_early_init,
	.late_init = NULL,
	.sw_init = vcn_v2_5_sw_init,
	.sw_fini = vcn_v2_5_sw_fini,
	.hw_init = vcn_v2_5_hw_init,
	.hw_fini = vcn_v2_5_hw_fini,
	.suspend = vcn_v2_5_suspend,
	.resume = vcn_v2_5_resume,
	.is_idle = vcn_v2_5_is_idle,
	.wait_for_idle = vcn_v2_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
	.set_powergating_state = vcn_v2_5_set_powergating_state,
};

static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
	.name = "vcn_v2_6",
	.early_init = vcn_v2_5_early_init,
	.late_init = NULL,
	.sw_init = vcn_v2_5_sw_init,
	.sw_fini = vcn_v2_5_sw_fini,
	.hw_init = vcn_v2_5_hw_init,
	.hw_fini = vcn_v2_5_hw_fini,
	.suspend = vcn_v2_5_suspend,
	.resume = vcn_v2_5_resume,
	.is_idle = vcn_v2_5_is_idle,
	.wait_for_idle = vcn_v2_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
	.set_powergating_state = vcn_v2_5_set_powergating_state,
};

const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 2,
	.minor = 5,
	.rev = 0,
	.funcs = &vcn_v2_5_ip_funcs,
};

const struct amdgpu_ip_block_version vcn_v2_6_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 2,
	.minor = 6,
	.rev = 0,
	.funcs = &vcn_v2_6_ip_funcs,
};

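/*
 * A sketch of how these blocks are consumed, assuming the usual SoC
 * wiring in soc15.c (not part of this file):
 *
 *	amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
 *
 * Both versions share the v2.5 implementation: the two amd_ip_funcs
 * tables differ only in .name, and the ip_block structs only in the
 * reported minor revision.
 */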