/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Sonny Jiang <sonny.jiang@amd.com>
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "sid.h"

#include "uvd/uvd_3_1_d.h"
#include "uvd/uvd_3_1_sh_mask.h"

#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"

/**
 * uvd_v3_1_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v3_1_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v3_1_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v3_1_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v3_1_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v3_1_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * uvd_v3_1_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job the IB belongs to (unused here)
 * @ib: indirect buffer to execute
 * @flags: flags associated with the IB (unused here)
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v3_1_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
	amdgpu_ring_write(ring, ib->gpu_addr);
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v3_1_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address to write the fence sequence number to
 * @seq: sequence number to emit
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v3_1_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

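	/*
	 * Write the sequence number and the fence address; GPCOM command 0
	 * latches the fence write (command encoding carried over from the
	 * radeon-era UVD code).
	 */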
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

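	/*
	 * GPCOM command 2 emits the trap that lets the fence raise an
	 * interrupt (same radeon-era command encoding).
	 */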
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v3_1_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v3_1_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

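	/* seed the register with a token, then ask the ring to overwrite it */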
	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

static void uvd_v3_1_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

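	/* each NOP is a two-dword register write, so the count must stay even */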
	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}

static const struct amdgpu_ring_funcs uvd_v3_1_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v3_1_ring_get_rptr,
	.get_wptr = uvd_v3_1_ring_get_wptr,
	.set_wptr = uvd_v3_1_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		14, /* uvd_v3_1_ring_emit_fence x1 no user fence */
	.emit_ib_size = 4, /* uvd_v3_1_ring_emit_ib */
	.emit_ib = uvd_v3_1_ring_emit_ib,
	.emit_fence = uvd_v3_1_ring_emit_fence,
	.test_ring = uvd_v3_1_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v3_1_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};
static void uvd_v3_1_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->ring.funcs = &uvd_v3_1_ring_funcs;
}

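/**
 * uvd_v3_1_set_dcm - configure the UVD dynamic clock mode
 *
 * @adev: amdgpu_device pointer
 * @sw_mode: true selects the software (ramped) clock mode; semantics
 *           inferred from the CGC_CTRL2 ramp enables programmed below
 */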
static void uvd_v3_1_set_dcm(struct amdgpu_device *adev,
			     bool sw_mode)
{
	u32 tmp, tmp2;

	WREG32_FIELD(UVD_CGC_GATE, REGS, 0);

	tmp = RREG32(mmUVD_CGC_CTRL);
	tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
	tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT) |
		(4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT);

	if (sw_mode) {
		tmp &= ~0x7ffff800;
		tmp2 = UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
			UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK |
			(7 << UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT);
	} else {
		tmp |= 0x7ffff800;
		tmp2 = 0;
	}

	WREG32(mmUVD_CGC_CTRL, tmp);
	WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
}

/**
 * uvd_v3_1_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v3_1_mc_resume(struct amdgpu_device *adev)
{
	uint64_t addr;
	uint32_t size;

	/* program the VCPU memory controller bits 0-27 */
	addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
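	/* cache offsets and sizes are programmed in 8-byte units, hence the >> 3 */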
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	addr += size;
	size = AMDGPU_UVD_HEAP_SIZE >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	addr += size;
	size = (AMDGPU_UVD_STACK_SIZE +
		(AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	/* bits 28-31 */
	addr = (adev->uvd.inst->gpu_addr >> 28) & 0xF;
	WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));

	/* bits 32-39 */
	addr = (adev->uvd.inst->gpu_addr >> 32) & 0xFF;
	WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

/**
 * uvd_v3_1_fw_validate - FW validation operation
 *
 * @adev: amdgpu_device pointer
 *
 * Initiate the UVD firmware validation and check the result.
 */
static int uvd_v3_1_fw_validate(struct amdgpu_device *adev)
{
	int i;
	uint32_t keysel = adev->uvd.keyselect;

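	/* writing the key select kicks off the VCPU's firmware validation */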
	WREG32(mmUVD_FW_START, keysel);

	for (i = 0; i < 10; ++i) {
		mdelay(10);
		if (RREG32(mmUVD_FW_STATUS) & UVD_FW_STATUS__DONE_MASK)
			break;
	}

	if (i == 10)
		return -ETIMEDOUT;

	if (!(RREG32(mmUVD_FW_STATUS) & UVD_FW_STATUS__PASS_MASK))
		return -EINVAL;

	for (i = 0; i < 10; ++i) {
		mdelay(10);
		if (!(RREG32(mmUVD_FW_STATUS) & UVD_FW_STATUS__BUSY_MASK))
			break;
	}

	if (i == 10)
		return -ETIMEDOUT;

	return 0;
}

/**
 * uvd_v3_1_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v3_1_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz;
	int i, j, r;
	u32 tmp;
	/* disable byte swapping */
	u32 lmi_swap_cntl = 0;
	u32 mp_swap_cntl = 0;

	/* set UVD busy */
	WREG32_P(mmUVD_STATUS, 1<<2, ~(1<<2));

	uvd_v3_1_set_dcm(adev, true);
	WREG32(mmUVD_CGC_GATE, 0);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* disable interrupts */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	/* initialize UVD memory controller */
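	/*
	 * 0x40 programs the write-clean timer; the remaining bits appear to
	 * match the timer-enable / request-mode / data-coherency enables used
	 * by later UVD generations (bit meanings inferred, not documented here).
	 */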
	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
		(1 << 21) | (1 << 9) | (1 << 20));

	tmp = RREG32(mmUVD_MPC_CNTL);
	WREG32(mmUVD_MPC_CNTL, tmp | 0x10);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	tmp = RREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL);
	WREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL, tmp & (~0x10));

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	mdelay(10);

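	/*
	 * Give the VCPU several reset attempts to come up; UVD_STATUS bit 1
	 * is taken here to mean the firmware is up and running.
	 */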
	for (i = 0; i < 10; ++i) {
		uint32_t status;
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable interrupts */
	WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1));

	WREG32_P(mmUVD_STATUS, 0, ~(1<<2));

	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* program the 4GB memory segment for rptr and ring buffer */
	WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
		(0x7 << 16) | (0x1 << 31));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0x0);

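	/* start wptr at rptr so the ring comes up empty */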
	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	/* set the ring address */
	WREG32(mmUVD_RBC_RB_BASE, ring->gpu_addr);

	/* Set ring buffer size */
	rb_bufsz = order_base_2(ring->ring_size);
	rb_bufsz = (0x1 << 8) | rb_bufsz;
	WREG32_P(mmUVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);

	return 0;
}

/**
 * uvd_v3_1_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v3_1_stop(struct amdgpu_device *adev)
{
	uint32_t i, j;
	uint32_t status;

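	/* force RBC into idle state */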
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(1);
		}
		if (status & 2)
			break;
	}

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_LMI_STATUS);
			if (status & 0xf)
				break;
			mdelay(1);
		}
		if (status & 0xf)
			break;
	}

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_LMI_STATUS);
			if (status & 0x240)
				break;
			mdelay(1);
		}
		if (status & 0x240)
			break;
	}

	WREG32_P(0x3D49, 0, ~(1 << 2));

	WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9));

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	WREG32(mmUVD_STATUS, 0);

	uvd_v3_1_set_dcm(adev, false);
}

static int uvd_v3_1_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
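	/* interrupt masking does not appear to be programmable on this generation */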
	return 0;
}

static int uvd_v3_1_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.inst->ring);
	return 0;
}

static const struct amdgpu_irq_src_funcs uvd_v3_1_irq_funcs = {
	.set = uvd_v3_1_set_interrupt_state,
	.process = uvd_v3_1_process_interrupt,
};

static void uvd_v3_1_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->irq.num_types = 1;
	adev->uvd.inst->irq.funcs = &uvd_v3_1_irq_funcs;
}

static int uvd_v3_1_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->uvd.num_uvd_inst = 1;

	uvd_v3_1_set_ring_funcs(adev);
	uvd_v3_1_set_irq_funcs(adev);

	return 0;
}

static int uvd_v3_1_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;
	void *ptr;
	uint32_t ucode_len;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	ring = &adev->uvd.inst->ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

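	/*
	 * Assumed firmware layout: a 192 + 16 byte header, a 4-byte ucode
	 * length, the ucode image itself, then the key select right after it.
	 */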
	/* Retrieve the firmware validation key */
	ptr = adev->uvd.inst[0].cpu_addr;
	ptr += 192 + 16;
	memcpy(&ucode_len, ptr, 4);
	ptr += ucode_len;
	memcpy(&adev->uvd.keyselect, ptr, 4);

	r = amdgpu_uvd_entity_init(adev);

	return r;
}

static int uvd_v3_1_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}

static void uvd_v3_1_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

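	/* 0x3fff is taken to cover all of the gate-enable bits in UVD_CGC_MEM_CTRL */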
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0x3fff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0x3fff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

/**
 * uvd_v3_1_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v3_1_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int r;

	uvd_v3_1_mc_resume(adev);

	r = uvd_v3_1_fw_validate(adev);
	if (r) {
		DRM_ERROR("amdgpu: UVD firmware validation failed (%d).\n", r);
		return r;
	}

	uvd_v3_1_enable_mgcg(adev, true);
	amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);

	uvd_v3_1_start(adev);

	r = amdgpu_ring_test_helper(ring);
	if (r) {
		DRM_ERROR("amdgpu: UVD ring test failed (%d).\n", r);
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: failed to lock UVD ring (%d).\n", r);
		goto done;
	}

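	/* program the semaphore timeout controls (0xFFFFF looks like the field maximum) */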
	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v3_1_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block if it is running and mark the ring as not ready any more
 */
static int uvd_v3_1_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v3_1_stop(adev);

	return 0;
}

static int uvd_v3_1_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v3_1_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v3_1_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v3_1_hw_init(adev);
}

static bool uvd_v3_1_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v3_1_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int uvd_v3_1_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v3_1_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
		 ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v3_1_start(adev);
}

static int uvd_v3_1_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int uvd_v3_1_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs uvd_v3_1_ip_funcs = {
	.name = "uvd_v3_1",
	.early_init = uvd_v3_1_early_init,
	.late_init = NULL,
	.sw_init = uvd_v3_1_sw_init,
	.sw_fini = uvd_v3_1_sw_fini,
	.hw_init = uvd_v3_1_hw_init,
	.hw_fini = uvd_v3_1_hw_fini,
	.suspend = uvd_v3_1_suspend,
	.resume = uvd_v3_1_resume,
	.is_idle = uvd_v3_1_is_idle,
	.wait_for_idle = uvd_v3_1_wait_for_idle,
	.soft_reset = uvd_v3_1_soft_reset,
	.set_clockgating_state = uvd_v3_1_set_clockgating_state,
	.set_powergating_state = uvd_v3_1_set_powergating_state,
};

const struct amdgpu_ip_block_version uvd_v3_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 3,
	.minor = 1,
	.rev = 0,
	.funcs = &uvd_v3_1_ip_funcs,
};