/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <linux/firmware.h>
#include <linux/module.h>

#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_uvd.h"
#include "cikd.h"
#include "uvd/uvd_4_2_d.h"

#include "amdgpu_ras.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware versions for VI */
#define FW_1_65_10	((1 << 24) | (65 << 16) | (10 << 8))
#define FW_1_87_11	((1 << 24) | (87 << 16) | (11 << 8))
#define FW_1_87_12	((1 << 24) | (87 << 16) | (12 << 8))
#define FW_1_37_15	((1 << 24) | (37 << 16) | (15 << 8))

/* Polaris10/11 firmware version */
#define FW_1_66_16	((1 << 24) | (66 << 16) | (16 << 8))

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_SI
#define FIRMWARE_TAHITI		"amdgpu/tahiti_uvd.bin"
#define FIRMWARE_VERDE		"amdgpu/verde_uvd.bin"
#define FIRMWARE_PITCAIRN	"amdgpu/pitcairn_uvd.bin"
#define FIRMWARE_OLAND		"amdgpu/oland_uvd.bin"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"amdgpu/bonaire_uvd.bin"
#define FIRMWARE_KABINI		"amdgpu/kabini_uvd.bin"
#define FIRMWARE_KAVERI		"amdgpu/kaveri_uvd.bin"
#define FIRMWARE_HAWAII		"amdgpu/hawaii_uvd.bin"
#define FIRMWARE_MULLINS	"amdgpu/mullins_uvd.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_uvd.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_uvd.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_uvd.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_uvd.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_uvd.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_uvd.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_uvd.bin"
#define FIRMWARE_VEGAM		"amdgpu/vegam_uvd.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_uvd.bin"
#define FIRMWARE_VEGA12		"amdgpu/vega12_uvd.bin"
#define FIRMWARE_VEGA20		"amdgpu/vega20_uvd.bin"

/* These are common relative offsets for all asics, from uvd_7_0_offset.h */
#define UVD_GPCOM_VCPU_CMD	0x03c3
#define UVD_GPCOM_VCPU_DATA0	0x03c4
#define UVD_GPCOM_VCPU_DATA1	0x03c5
#define UVD_NO_OP		0x03ff
#define UVD_BASE_SI		0x3800

/**
 * amdgpu_uvd_cs_ctx - Command submission parser context
 *
 * Used for emulating virtual memory support on UVD 4.2.
 */
struct amdgpu_uvd_cs_ctx {
	struct amdgpu_cs_parser *parser;
	unsigned reg, count;
	unsigned data0, data1;
	unsigned idx;
	unsigned ib_idx;

	/* does the IB have a msg command */
	bool has_msg_cmd;

	/* minimum buffer sizes */
	unsigned *buf_sizes;
};

#ifdef CONFIG_DRM_AMDGPU_SI
MODULE_FIRMWARE(FIRMWARE_TAHITI);
MODULE_FIRMWARE(FIRMWARE_VERDE);
MODULE_FIRMWARE(FIRMWARE_PITCAIRN);
MODULE_FIRMWARE(FIRMWARE_OLAND);
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);

static void amdgpu_uvd_idle_work_handler(struct work_struct *work);

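/**
 * amdgpu_uvd_sw_init - software init, fetch and validate UVD firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Pick the per-ASIC firmware image, request and validate it, size the
 * handle table from the firmware version, and allocate the VCPU buffer
 * object for each UVD instance.
 */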
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned family_id;
	int i, j, r;

	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_TAHITI:
		fw_name = FIRMWARE_TAHITI;
		break;
	case CHIP_VERDE:
		fw_name = FIRMWARE_VERDE;
		break;
	case CHIP_PITCAIRN:
		fw_name = FIRMWARE_PITCAIRN;
		break;
	case CHIP_OLAND:
		fw_name = FIRMWARE_OLAND;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;
	case CHIP_VEGA10:
		fw_name = FIRMWARE_VEGA10;
		break;
	case CHIP_VEGA12:
		fw_name = FIRMWARE_VEGA12;
		break;
	case CHIP_VEGAM:
		fw_name = FIRMWARE_VEGAM;
		break;
	case CHIP_VEGA20:
		fw_name = FIRMWARE_VEGA20;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->uvd.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->uvd.fw);
		adev->uvd.fw = NULL;
		return r;
	}

	/* Set the default UVD handles that the firmware can handle */
	adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES;

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;

	if (adev->asic_type < CHIP_VEGA20) {
		unsigned version_major, version_minor;

		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found UVD firmware Version: %u.%u Family ID: %u\n",
			 version_major, version_minor, family_id);

		/*
		 * Limit the number of UVD handles depending on microcode major
		 * and minor versions. The firmware version which has 40 UVD
		 * instances support is 1.80. So all subsequent versions should
		 * also have the same support.
		 */
		if ((version_major > 0x01) ||
		    ((version_major == 0x01) && (version_minor >= 0x50)))
			adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;

		adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
					(family_id << 8));

		if ((adev->asic_type == CHIP_POLARIS10 ||
		     adev->asic_type == CHIP_POLARIS11) &&
		    (adev->uvd.fw_version < FW_1_66_16))
			DRM_ERROR("POLARIS10/11 UVD firmware version %u.%u is too old.\n",
				  version_major, version_minor);
	} else {
		unsigned int enc_major, enc_minor, dec_minor;

		dec_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 24) & 0x3f;
		enc_major = (le32_to_cpu(hdr->ucode_version) >> 30) & 0x3;
		DRM_INFO("Found UVD firmware ENC: %u.%u DEC: .%u Family ID: %u\n",
			 enc_major, enc_minor, dec_minor, family_id);

		adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;

		adev->uvd.fw_version = le32_to_cpu(hdr->ucode_version);
	}

	bo_size = AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
		  + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
					    &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
			return r;
		}
	}

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		atomic_set(&adev->uvd.handles[i], 0);
		adev->uvd.filp[i] = NULL;
	}

	/* from UVD v5.0 onward, HW addressing capacity increased to 64 bits */
	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
		adev->uvd.address_64_bit = true;

	switch (adev->asic_type) {
	case CHIP_TONGA:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
		break;
	case CHIP_CARRIZO:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11;
		break;
	case CHIP_FIJI:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12;
		break;
	case CHIP_STONEY:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15;
		break;
	default:
		adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10;
	}

	return 0;
}

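/**
 * amdgpu_uvd_sw_fini - software fini, free UVD resources
 *
 * @adev: amdgpu_device pointer
 *
 * Destroy the scheduler entity, free the saved VCPU state and buffer
 * objects, tear down the rings and release the firmware.
 */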
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
	int i, j;

	cancel_delayed_work_sync(&adev->uvd.idle_work);
	drm_sched_entity_destroy(&adev->uvd.entity);

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		kvfree(adev->uvd.inst[j].saved_bo);

		amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
				      &adev->uvd.inst[j].gpu_addr,
				      (void **)&adev->uvd.inst[j].cpu_addr);

		amdgpu_ring_fini(&adev->uvd.inst[j].ring);

		for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
	}
	release_firmware(adev->uvd.fw);

	return 0;
}

/**
 * amdgpu_uvd_entity_init - init entity
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the scheduler entity used to submit UVD jobs from the kernel.
 */
int amdgpu_uvd_entity_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct drm_gpu_scheduler *sched;
	int r;

	ring = &adev->uvd.inst[0].ring;
	sched = &ring->sched;
	r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL,
				  &sched, 1, NULL);
	if (r) {
		DRM_ERROR("Failed setting up UVD kernel entity.\n");
		return r;
	}

	return 0;
}

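/**
 * amdgpu_uvd_suspend - save UVD VCPU state for suspend
 *
 * @adev: amdgpu_device pointer
 *
 * Cancel the idle work and copy each instance's VCPU buffer into system
 * memory so it can be restored by amdgpu_uvd_resume().
 */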
int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i, j;
	bool in_ras_intr = amdgpu_ras_intr_triggered();

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	/* only valid for physical mode */
	if (adev->asic_type < CHIP_POLARIS10) {
		for (i = 0; i < adev->uvd.max_handles; ++i)
			if (atomic_read(&adev->uvd.handles[i]))
				break;

		if (i == adev->uvd.max_handles)
			return 0;
	}

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		if (adev->uvd.inst[j].vcpu_bo == NULL)
			continue;

		size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
		ptr = adev->uvd.inst[j].cpu_addr;

		adev->uvd.inst[j].saved_bo = kvmalloc(size, GFP_KERNEL);
		if (!adev->uvd.inst[j].saved_bo)
			return -ENOMEM;

		/* re-write 0 since err_event_athub will corrupt VCPU buffer */
		if (in_ras_intr)
			memset(adev->uvd.inst[j].saved_bo, 0, size);
		else
			memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
	}

	if (in_ras_intr)
		DRM_WARN("UVD VCPU state may be lost due to RAS ERREVENT_ATHUB_INTERRUPT\n");

	return 0;
}

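/**
 * amdgpu_uvd_resume - restore UVD VCPU state after resume
 *
 * @adev: amdgpu_device pointer
 *
 * Write the saved VCPU state back into the buffer object, or reload the
 * firmware image and clear the rest of the buffer if no state was saved.
 */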
int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		if (adev->uvd.inst[i].vcpu_bo == NULL)
			return -EINVAL;

		size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo);
		ptr = adev->uvd.inst[i].cpu_addr;

		if (adev->uvd.inst[i].saved_bo != NULL) {
			memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
			kvfree(adev->uvd.inst[i].saved_bo);
			adev->uvd.inst[i].saved_bo = NULL;
		} else {
			const struct common_firmware_header *hdr;
			unsigned offset;

			hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
				memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset,
					    le32_to_cpu(hdr->ucode_size_bytes));
				size -= le32_to_cpu(hdr->ucode_size_bytes);
				ptr += le32_to_cpu(hdr->ucode_size_bytes);
			}
			memset_io(ptr, 0, size);
			/* to restore uvd fence seq */
			amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring);
		}
	}
	return 0;
}

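/**
 * amdgpu_uvd_free_handles - destroy all session handles owned by a file
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file the handles belong to
 *
 * Send a destroy message for every open handle that was created through
 * @filp, e.g. when the file is closed without tearing down its sessions.
 */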
void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->uvd.inst[0].ring;
	int i, r;

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		uint32_t handle = atomic_read(&adev->uvd.handles[i]);

		if (handle != 0 && adev->uvd.filp[i] == filp) {
			struct dma_fence *fence;

			r = amdgpu_uvd_get_destroy_msg(ring, handle, false,
						       &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD %d!\n", r);
				continue;
			}

			dma_fence_wait(fence, false);
			dma_fence_put(fence);

			adev->uvd.filp[i] = NULL;
			atomic_set(&adev->uvd.handles[i], 0);
		}
	}
}

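/* Limit all placements to the first 256MB of VRAM, so that buffers stay
 * inside the single 256MB segment that UVD without 64-bit addressing
 * can reach.
 */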
static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
{
	int i;

	for (i = 0; i < abo->placement.num_placement; ++i) {
		abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
		abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
	}
}

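/* Assemble the 64-bit buffer address for the current command from the
 * DATA0/DATA1 dwords recorded in the parser context.
 */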
static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
{
	uint32_t lo, hi;
	uint64_t addr;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	return addr;
}

/**
 * amdgpu_uvd_cs_pass1 - first parsing round
 *
 * @ctx: UVD parser context
 *
 * Make sure UVD message and feedback buffers are in VRAM and
 * nobody is violating a 256MB boundary.
 */
static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct ttm_operation_ctx tctx = { false, false };
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd;
	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
	int r = 0;

	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return r;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		/* check if it's a message or feedback command */
		cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
		if (cmd == 0x0 || cmd == 0x3) {
			/* yes, force it into VRAM */
			uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;

			amdgpu_bo_placement_from_domain(bo, domain);
		}
		amdgpu_uvd_force_into_uvd_segment(bo);

		r = ttm_bo_validate(&bo->tbo, &bo->placement, &tctx);
	}

	return r;
}

/**
 * amdgpu_uvd_cs_msg_decode - handle UVD decode message
 *
 * @adev: amdgpu_device pointer
 * @msg: pointer to message structure
 * @buf_sizes: returned buffer sizes
 *
 * Peek into the decode message and calculate the necessary buffer sizes.
 */
static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
				    unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];
	unsigned level = msg[57];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);
	unsigned fs_in_mb = width_in_mb * height_in_mb;

	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
	unsigned min_ctx_size = ~0;

	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);

	switch (stream_type) {
	case 0: /* H264 */
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	case 7: /* H264 Perf */
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		if (!adev->uvd.use_ctx_buf) {
			/* macroblock context buffer */
			min_dpb_size +=
				width_in_mb * height_in_mb * num_dpb_buffer * 192;

			/* IT surface buffer */
			min_dpb_size += width_in_mb * height_in_mb * 32;
		} else {
			/* macroblock context buffer */
			min_ctx_size =
				width_in_mb * height_in_mb * num_dpb_buffer * 192;
		}
		break;

	case 8: /* MJPEG */
		min_dpb_size = 0;
		break;

	case 16: /* H265 */
		image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
		image_size = ALIGN(image_size, 256);

		num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
		min_dpb_size = image_size * num_dpb_buffer;
		min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
					   * 16 * num_dpb_buffer + 52 * 1024;
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	buf_sizes[0x4] = min_ctx_size;
	/* store image width to adjust nb memory pstate */
	adev->uvd.decode_image_width = width;
	return 0;
}

/**
 * amdgpu_uvd_cs_msg - handle UVD message
 *
 * @ctx: UVD parser context
 * @bo: buffer object containing the message
 * @offset: offset into the buffer object
 *
 * Peek into the UVD message and extract the session id.
 * Make sure that we don't open up too many sessions.
 */
static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
			     struct amdgpu_bo *bo, unsigned offset)
{
	struct amdgpu_device *adev = ctx->parser->adev;
	int32_t *msg, msg_type, handle;
	void *ptr;
	long r;
	int i;

	if (offset & 0x3F) {
		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
		return -EINVAL;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
		return r;
	}

	msg = ptr + offset;

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		amdgpu_bo_kunmap(bo);
		DRM_ERROR("Invalid UVD handle!\n");
		return -EINVAL;
	}

	switch (msg_type) {
	case 0:
		/* it's a create msg, calc image size (width * height) */
		amdgpu_bo_kunmap(bo);

		/* try to alloc a new handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.handles[i]) == handle) {
				DRM_ERROR("Handle 0x%x already in use!\n",
					  handle);
				return -EINVAL;
			}

			if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
				adev->uvd.filp[i] = ctx->parser->filp;
				return 0;
			}
		}

		DRM_ERROR("No more free UVD handles!\n");
		return -ENOSPC;

	case 1:
		/* it's a decode msg, calc buffer sizes */
		r = amdgpu_uvd_cs_msg_decode(adev, msg, ctx->buf_sizes);
		amdgpu_bo_kunmap(bo);
		if (r)
			return r;

		/* validate the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.handles[i]) == handle) {
				if (adev->uvd.filp[i] != ctx->parser->filp) {
					DRM_ERROR("UVD handle collision detected!\n");
					return -EINVAL;
				}
				return 0;
			}
		}

		DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
		return -ENOENT;

	case 2:
		/* it's a destroy msg, free the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i)
			atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
		amdgpu_bo_kunmap(bo);
		return 0;

	default:
		amdgpu_bo_kunmap(bo);
		DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
		return -EINVAL;
	}
	BUG();
	return -EINVAL;
}

/**
 * amdgpu_uvd_cs_pass2 - second parsing round
 *
 * @ctx: UVD parser context
 *
 * Patch buffer addresses, make sure buffer sizes are correct.
 */
static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd;
	uint64_t start, end;
	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
	int r;

	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return r;
	}

	start = amdgpu_bo_gpu_offset(bo);

	end = (mapping->last + 1 - mapping->start);
	end = end * AMDGPU_GPU_PAGE_SIZE + start;

	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	start += addr;

	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0,
			    lower_32_bits(start));
	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data1,
			    upper_32_bits(start));

	cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
	if (cmd < 0x4) {
		if ((end - start) < ctx->buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd == 0x206) {
		if ((end - start) < ctx->buf_sizes[4]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[4]);
			return -EINVAL;
		}
	} else if ((cmd != 0x100) && (cmd != 0x204)) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		if ((start >> 28) != ((end - 1) >> 28)) {
			DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
				  start, end);
			return -EINVAL;
		}

		if ((cmd == 0 || cmd == 0x3) &&
		    (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) {
			DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
				  start, end);
			return -EINVAL;
		}
	}

	if (cmd == 0) {
		ctx->has_msg_cmd = true;
		r = amdgpu_uvd_cs_msg(ctx, bo, addr);
		if (r)
			return r;
	} else if (!ctx->has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are sent!\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * amdgpu_uvd_cs_reg - parse register writes
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the register writes, call cb on each complete command.
 */
static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
			     int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
	int i, r;

	ctx->idx++;
	for (i = 0; i <= ctx->count; ++i) {
		unsigned reg = ctx->reg + i;

		if (ctx->idx >= ib->length_dw) {
			DRM_ERROR("Register command after end of CS!\n");
			return -EINVAL;
		}

		switch (reg) {
		case mmUVD_GPCOM_VCPU_DATA0:
			ctx->data0 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_DATA1:
			ctx->data1 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_CMD:
			r = cb(ctx);
			if (r)
				return r;
			break;
		case mmUVD_ENGINE_CNTL:
		case mmUVD_NO_OP:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n", reg);
			return -EINVAL;
		}
		ctx->idx++;
	}
	return 0;
}

/**
 * amdgpu_uvd_cs_packets - parse UVD packets
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the command stream packets.
 */
static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
				 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
	int r;

	for (ctx->idx = 0; ctx->idx < ib->length_dw; ) {
		uint32_t cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx);
		unsigned type = CP_PACKET_GET_TYPE(cmd);

		switch (type) {
		case PACKET_TYPE0:
			ctx->reg = CP_PACKET0_GET_REG(cmd);
			ctx->count = CP_PACKET_GET_COUNT(cmd);
			r = amdgpu_uvd_cs_reg(ctx, cb);
			if (r)
				return r;
			break;
		case PACKET_TYPE2:
			++ctx->idx;
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", type);
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * amdgpu_uvd_ring_parse_cs - UVD command submission parser
 *
 * @parser: Command submission parser context
 * @ib_idx: index of the IB to parse
 *
 * Parse the command stream, patch in addresses as necessary.
 */
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
{
	struct amdgpu_uvd_cs_ctx ctx = {};
	unsigned buf_sizes[] = {
		[0x00000000] = 2048,
		[0x00000001] = 0xFFFFFFFF,
		[0x00000002] = 0xFFFFFFFF,
		[0x00000003] = 2048,
		[0x00000004] = 0xFFFFFFFF,
	};
	struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
	int r;

	parser->job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

	if (ib->length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  ib->length_dw);
		return -EINVAL;
	}

	ctx.parser = parser;
	ctx.buf_sizes = buf_sizes;
	ctx.ib_idx = ib_idx;

	/* first round only required on chips without UVD 64 bit address support */
	if (!parser->adev->uvd.address_64_bit) {
		/* first round, make sure the buffers are actually in the UVD segment */
		r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
		if (r)
			return r;
	}

	/* second round, patch buffer addresses into the command stream */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
	if (r)
		return r;

	if (!ctx.has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	return 0;
}

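/**
 * amdgpu_uvd_send_msg - submit a message buffer to the UVD VCPU
 *
 * @ring: UVD ring to submit on
 * @bo: reserved buffer object containing the message
 * @direct: submit directly to the ring instead of the scheduler entity
 * @fence: optional fence returned for the submission
 *
 * Build a small IB pointing the VCPU at the message buffer, submit it,
 * then fence and release the buffer object.
 */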
static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
			       bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint32_t data[4];
	uint64_t addr;
	long r;
	int i;
	unsigned offset_idx = 0;
	unsigned offset[3] = { UVD_BASE_SI, 0, 0 };

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);

	if (!ring->adev->uvd.address_64_bit) {
		struct ttm_operation_ctx ctx = { true, false };

		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
		amdgpu_uvd_force_into_uvd_segment(bo);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (r)
			goto err;
	}

	r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
				     AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		goto err;

	if (adev->asic_type >= CHIP_VEGA10) {
		offset_idx = 1 + ring->me;
		offset[1] = adev->reg_offset[UVD_HWIP][0][1];
		offset[2] = adev->reg_offset[UVD_HWIP][1][1];
	}

	data[0] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA0, 0);
	data[1] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA1, 0);
	data[2] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_CMD, 0);
	data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = data[0];
	ib->ptr[1] = addr;
	ib->ptr[2] = data[1];
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = data[2];
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = data[3];
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	if (direct) {
		r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
					      true, false,
					      msecs_to_jiffies(10));
		if (r == 0)
			r = -ETIMEDOUT;
		if (r < 0)
			goto err_free;

		r = amdgpu_job_submit_direct(job, ring, &f);
		if (r)
			goto err_free;
	} else {
		r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
				     AMDGPU_SYNC_ALWAYS,
				     AMDGPU_FENCE_OWNER_UNDEFINED);
		if (r)
			goto err_free;

		r = amdgpu_job_submit(job, &adev->uvd.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err_free;
	}

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

/* multiple fence commands without any stream commands in between can
 * crash the vcpu so just try to emit a dummy create/destroy msg to
 * avoid this
 */
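/**
 * amdgpu_uvd_get_create_msg - issue a dummy session create message
 *
 * @ring: UVD ring to submit on
 * @handle: session handle the message refers to
 * @fence: optional fence returned for the submission
 */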
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	/* stitch together a UVD create msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_uvd_send_msg(ring, bo, true, fence);
}

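/**
 * amdgpu_uvd_get_destroy_msg - issue a session destroy message
 *
 * @ring: UVD ring to submit on
 * @handle: session handle the message refers to
 * @direct: submit directly to the ring instead of the scheduler entity
 * @fence: optional fence returned for the submission
 */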
int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	/* stitch together a UVD destroy msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_uvd_send_msg(ring, bo, direct, fence);
}

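/* Gate UVD clocks and power once all decode and encode rings have gone
 * idle; rescheduled as long as fences are still outstanding.
 */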
static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, uvd.idle_work.work);
	unsigned fences = 0, i, j;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
		for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
			fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
		}
	}

	if (fences == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, false);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
			/* shutdown the UVD block */
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
	}
}

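/**
 * amdgpu_uvd_ring_begin_use - power up UVD before use
 *
 * @ring: UVD ring about to be used
 *
 * Cancel the idle work; if none was pending the block may already be
 * gated, so ungate UVD clocks and power. No-op under SR-IOV.
 */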
void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

	if (amdgpu_sriov_vf(adev))
		return;

	set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, true);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_PG_STATE_UNGATE);
		}
	}
}

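/**
 * amdgpu_uvd_ring_end_use - schedule UVD power down after use
 *
 * @ring: UVD ring that was used
 *
 * Re-arm the idle work so the block is gated again after
 * UVD_IDLE_TIMEOUT of inactivity. No-op under SR-IOV.
 */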
void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
{
	if (!amdgpu_sriov_vf(ring->adev))
		schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
}

/**
 * amdgpu_uvd_ring_test_ib - test ib execution
 *
 * @ring: amdgpu_ring pointer
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Test if we can successfully execute an IB
 */
int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);

error:
	return r;
}

/**
 * amdgpu_uvd_used_handles - returns used UVD handles
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the number of UVD handles in use
 */
uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
{
	unsigned i;
	uint32_t used_handles = 0;

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		/*
		 * Handles can be freed in any order, and not
		 * necessarily linear. So we need to count
		 * all non-zero handles.
		 */
		if (atomic_read(&adev->uvd.handles[i]))
			used_handles++;
	}

	return used_handles;
}