/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "bif/bif_5_1_d.h"
#include "gmc/gmc_8_1_d.h"
#include "vi.h"
#include "ivsrcid/ivsrcid_vislands30.h"

/* Polaris10/11/12 firmware version */
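/* encoded as (major << 24) | (minor << 16) | (revision << 8); checked in uvd_v6_0_enc_support() */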
#define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8))

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev);

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v6_0_enc_support - get encode support status
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the current hardware encode support status
 */
static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
{
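	/*
	 * A zero fw_version means the UVD firmware has not been loaded
	 * yet; report encode as supported until the real version is known.
	 */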
	return ((adev->asic_type >= CHIP_POLARIS10) &&
			(adev->asic_type <= CHIP_VEGAM) &&
			(!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16));
}

/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v6_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		return RREG32(mmUVD_RB_RPTR);
	else
		return RREG32(mmUVD_RB_RPTR2);
}

/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v6_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		return RREG32(mmUVD_RB_WPTR);
	else
		return RREG32(mmUVD_RB_WPTR2);
}

/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * uvd_v6_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		WREG32(mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32(mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}

/**
 * uvd_v6_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 *
 */
static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v6_0_enc_get_create_msg - generate a UVD ENC create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				       struct amdgpu_bo *bo,
				       struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
					AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

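	/*
	 * Each ENC command starts with its total size in bytes followed by
	 * the command type: 0x18 bytes of session info, 0x14 of task info,
	 * then the 8-byte initialize op.
	 */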
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v6_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
					uint32_t handle,
					struct amdgpu_bo *bo,
					struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
					AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v6_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 */
static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = NULL;
	long r;

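	/* Scratch BO handed to the create/destroy messages as the session buffer. */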
	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, NULL);
	if (r)
		return r;

	r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

static int uvd_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->uvd.num_uvd_inst = 1;

	if (!(adev->flags & AMD_IS_APU) &&
	    (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
		return -ENOENT;

	uvd_v6_0_set_ring_funcs(adev);

	if (uvd_v6_0_enc_support(adev)) {
		adev->uvd.num_enc_rings = 2;
		uvd_v6_0_set_enc_ring_funcs(adev);
	}

	uvd_v6_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v6_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
	if (r)
		return r;

	/* UVD ENC TRAP */
	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	if (!uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			adev->uvd.inst->ring_enc[i].funcs = NULL;

		adev->uvd.inst->irq.num_types = 1;
		adev->uvd.num_enc_rings = 0;

		DRM_INFO("UVD ENC is disabled\n");
	}

	ring = &adev->uvd.inst->ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst->ring_enc[i];
			sprintf(ring->name, "uvd_enc%d", i);
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->uvd.inst->irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_entity_init(adev);

	return r;
}

static int uvd_v6_0_sw_fini(void *handle)
{
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
	}

	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v6_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int i, r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v6_0_enable_mgcg(adev, true);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

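	/* Program the semaphore wait/signal timeouts as register/value pairs via PACKET0. */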
	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst->ring_enc[i];
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;
		}
	}

done:
	if (!r) {
		if (uvd_v6_0_enc_support(adev))
			DRM_INFO("UVD and UVD ENC initialized successfully.\n");
		else
			DRM_INFO("UVD initialized successfully.\n");
	}

	return r;
}

/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v6_0_stop(adev);

	return 0;
}

static int uvd_v6_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * Proper cleanups before halting the HW engine:
	 *   - cancel the delayed idle work
	 *   - enable powergating
	 *   - enable clockgating
	 *   - disable dpm
	 *
	 * TODO: to align with the VCN implementation, move the
	 * jobs for clockgating/powergating/dpm setting to
	 * ->set_powergating_state().
	 */
	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_uvd(adev, false);
	} else {
		amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		/* shutdown the UVD block */
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_CG_STATE_GATE);
	}

	r = uvd_v6_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v6_0_hw_init(adev);
}

/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

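	/*
	 * The VCPU cache windows are laid out back to back: firmware image,
	 * heap, then stack plus per-session state.  Offsets are programmed
	 * in 8-byte units, hence the ">> 3" below.
	 */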
	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.inst->gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.inst->gpu_addr));

	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);

	WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}

#if 0
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
		bool enable)
{
	u32 data, data1;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	if (enable) {
		data |= UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK;
		data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK |
				UVD_SUVD_CGC_GATE__SRE_H264_MASK |
				UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SIT_H264_MASK |
				UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SCM_H264_MASK |
				UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SDB_H264_MASK |
				UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
	} else {
		data &= ~(UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__LMI_UMC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK);
		data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK |
				UVD_SUVD_CGC_GATE__SRE_H264_MASK |
				UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SIT_H264_MASK |
				UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SCM_H264_MASK |
				UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SDB_H264_MASK |
				UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
	}
	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v6_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);

	/* stall UMC and register bus before resetting VCPU */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET,
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL,
		(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

	/* enable UMC */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

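	/* Give the VCPU up to ten boot attempts, polling UVD_STATUS bit 1 for the running indication. */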
	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN,
		(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
		~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);

	if (uvd_v6_0_enc_support(adev)) {
		ring = &adev->uvd.inst->ring_enc[0];
		WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->uvd.inst->ring_enc[1];
		WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32(mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32(mmUVD_RB_SIZE2, ring->ring_size / 4);
	}

	return 0;
}

/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32(mmUVD_STATUS, 0);
}

/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

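	/* A second GPCOM group with command 2 emits the trap. */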
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v6_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
			u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}

/**
 * uvd_v6_0_ring_emit_hdp_flush - skip HDP flushing
 *
 * @ring: amdgpu_ring pointer
 */
static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	/* The firmware doesn't seem to like touching registers at this point. */
}

/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v6_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
					struct amdgpu_job *job,
					struct amdgpu_ib *ib,
					uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);
}

static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

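	/* GPCOM command 0xC appears to poll VM_INVALIDATE_REQUEST until the bit for this vmid clears. */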
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 1 << vmid); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xC);
}

static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

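	/* GPCOM command 0xE seems to wait until the fence location reaches seq, mirroring HEVC_ENC_CMD_WAIT_GE on the ENC rings. */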
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xE);
}

static void uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}

static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, HEVC_ENC_CMD_WAIT_GE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
}

static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}

static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, pd_addr >> 12);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
	amdgpu_ring_write(ring, vmid);
}

static bool uvd_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v6_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v6_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
static bool uvd_v6_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.inst->srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.inst->srbm_soft_reset = 0;
		return false;
	}
}

static int uvd_v6_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;

	uvd_v6_0_stop(adev);
	return 0;
}

static int uvd_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.inst->srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int uvd_v6_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v6_0_start(adev);
}

static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	// TODO
	return 0;
}

static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	bool int_handled = true;

	DRM_DEBUG("IH: UVD TRAP\n");

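	/*
	 * src_ids match the VISLANDS30_IV_SRCID_UVD_* values registered in
	 * sw_init(): 124 is the system message trap, 119/120 the two ENC
	 * ring traps.
	 */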
	switch (entry->src_id) {
	case 124:
		amdgpu_fence_process(&adev->uvd.inst->ring);
		break;
	case 119:
		if (likely(uvd_v6_0_enc_support(adev)))
			amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]);
		else
			int_handled = false;
		break;
	case 120:
		if (likely(uvd_v6_0_enc_support(adev)))
			amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]);
		else
			int_handled = false;
		break;
	}

	if (!int_handled)
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);

	return 0;
}

static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK |
		     UVD_SUVD_CGC_GATE__SRE_H264_MASK |
		     UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
		     UVD_SUVD_CGC_GATE__SIT_H264_MASK |
		     UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
		     UVD_SUVD_CGC_GATE__SCM_H264_MASK |
		     UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
		     UVD_SUVD_CGC_GATE__SDB_H264_MASK |
		     UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK       |
			UVD_CGC_GATE__UDEC_MASK      |
			UVD_CGC_GATE__MPEG2_MASK     |
			UVD_CGC_GATE__RBC_MASK       |
			UVD_CGC_GATE__LMI_MC_MASK    |
			UVD_CGC_GATE__LMI_UMC_MASK   |
			UVD_CGC_GATE__IDCT_MASK      |
			UVD_CGC_GATE__MPRD_MASK      |
			UVD_CGC_GATE__MPC_MASK       |
			UVD_CGC_GATE__LBSI_MASK      |
			UVD_CGC_GATE__LRBBM_MASK     |
			UVD_CGC_GATE__UDEC_RE_MASK   |
			UVD_CGC_GATE__UDEC_CM_MASK   |
			UVD_CGC_GATE__UDEC_IT_MASK   |
			UVD_CGC_GATE__UDEC_DB_MASK   |
			UVD_CGC_GATE__UDEC_MP_MASK   |
			UVD_CGC_GATE__WCB_MASK       |
			UVD_CGC_GATE__JPEG_MASK      |
			UVD_CGC_GATE__SCPU_MASK      |
			UVD_CGC_GATE__JPEG2_MASK);
		/* only when PG is enabled can we gate the clock to the VCPU */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;

		data3 &= ~UVD_CGC_GATE__REGS_MASK;
	} else {
		data3 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}

static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
			UVD_CGC_CTRL__SYS_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MODE_MASK |
			UVD_CGC_CTRL__MPEG2_MODE_MASK |
			UVD_CGC_CTRL__REGS_MODE_MASK |
			UVD_CGC_CTRL__RBC_MODE_MASK |
			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
			UVD_CGC_CTRL__IDCT_MODE_MASK |
			UVD_CGC_CTRL__MPRD_MODE_MASK |
			UVD_CGC_CTRL__MPC_MODE_MASK |
			UVD_CGC_CTRL__LBSI_MODE_MASK |
			UVD_CGC_CTRL__LRBBM_MODE_MASK |
			UVD_CGC_CTRL__WCB_MODE_MASK |
			UVD_CGC_CTRL__VCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG_MODE_MASK |
			UVD_CGC_CTRL__SCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG2_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		UVD_CGC_GATE__UDEC_MASK |
		UVD_CGC_GATE__MPEG2_MASK |
		UVD_CGC_GATE__RBC_MASK |
		UVD_CGC_GATE__LMI_MC_MASK |
		UVD_CGC_GATE__IDCT_MASK |
		UVD_CGC_GATE__MPRD_MASK |
		UVD_CGC_GATE__MPC_MASK |
		UVD_CGC_GATE__LBSI_MASK |
		UVD_CGC_GATE__LRBBM_MASK |
		UVD_CGC_GATE__UDEC_RE_MASK |
		UVD_CGC_GATE__UDEC_CM_MASK |
		UVD_CGC_GATE__UDEC_IT_MASK |
		UVD_CGC_GATE__UDEC_DB_MASK |
		UVD_CGC_GATE__UDEC_MP_MASK |
		UVD_CGC_GATE__WCB_MASK |
		UVD_CGC_GATE__VCPU_MASK |
		UVD_CGC_GATE__SCPU_MASK |
		UVD_CGC_GATE__JPEG_MASK |
		UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v6_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v6_0_enable_clock_gating(adev, true);
		/* enable HW gates because UVD is idle */
/*		uvd_v6_0_set_hw_clock_gating(adev); */
	} else {
		/* disable HW gating and enable Sw gating */
		uvd_v6_0_enable_clock_gating(adev, false);
	}
	uvd_v6_0_set_sw_clock_gating(adev);
	return 0;
}

static int uvd_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v6_0_stop(adev);
	} else {
		ret = uvd_v6_0_start(adev);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (adev->flags & AMD_IS_APU)
		data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
	else
		data = RREG32_SMC(ixCURRENT_PG_STATUS);

	if (data & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
	.name = "uvd_v6_0",
	.early_init = uvd_v6_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v6_0_sw_init,
	.sw_fini = uvd_v6_0_sw_fini,
	.hw_init = uvd_v6_0_hw_init,
	.hw_fini = uvd_v6_0_hw_fini,
	.suspend = uvd_v6_0_suspend,
	.resume = uvd_v6_0_resume,
	.is_idle = uvd_v6_0_is_idle,
	.wait_for_idle = uvd_v6_0_wait_for_idle,
	.check_soft_reset = uvd_v6_0_check_soft_reset,
	.pre_soft_reset = uvd_v6_0_pre_soft_reset,
	.soft_reset = uvd_v6_0_soft_reset,
	.post_soft_reset = uvd_v6_0_post_soft_reset,
	.set_clockgating_state = uvd_v6_0_set_clockgating_state,
	.set_powergating_state = uvd_v6_0_set_powergating_state,
	.get_clockgating_state = uvd_v6_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		6 + /* hdp invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v6_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v6_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.emit_frame_size =
		6 + /* hdp invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v6_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v6_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD_ENC,
	.align_mask = 0x3f,
	.nop = HEVC_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_enc_ring_get_rptr,
	.get_wptr = uvd_v6_0_enc_ring_get_wptr,
	.set_wptr = uvd_v6_0_enc_ring_set_wptr,
	.emit_frame_size =
		4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
		5 + /* uvd_v6_0_enc_ring_emit_vm_flush */
		5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
		1, /* uvd_v6_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
	.emit_ib = uvd_v6_0_enc_ring_emit_ib,
	.emit_fence = uvd_v6_0_enc_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_enc_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_enc_ring_emit_pipeline_sync,
	.test_ring = uvd_v6_0_enc_ring_test_ring,
	.test_ib = uvd_v6_0_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = uvd_v6_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
	if (adev->asic_type >= CHIP_POLARIS10) {
		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs;
		DRM_INFO("UVD is enabled in VM mode\n");
	} else {
		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs;
		DRM_INFO("UVD is enabled in physical mode\n");
	}
}

static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_enc_rings; ++i)
		adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;

	DRM_INFO("UVD ENC is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
	.set = uvd_v6_0_set_interrupt_state,
	.process = uvd_v6_0_process_interrupt,
};

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	if (uvd_v6_0_enc_support(adev))
		adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1;
	else
		adev->uvd.inst->irq.num_types = 1;

	adev->uvd.inst->irq.funcs = &uvd_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 2,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 3,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
};