/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_uvd.h"
#include "cikd.h"
#include "uvd/uvd_4_2_d.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_uvd.bin"
#define FIRMWARE_KABINI		"radeon/kabini_uvd.bin"
#define FIRMWARE_KAVERI		"radeon/kaveri_uvd.bin"
#define FIRMWARE_HAWAII		"radeon/hawaii_uvd.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_uvd.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_uvd.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_uvd.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_uvd.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_uvd.bin"

/**
 * amdgpu_uvd_cs_ctx - Command submission parser context
 *
 * Used for emulating virtual memory support on UVD 4.2.
 */
struct amdgpu_uvd_cs_ctx {
	struct amdgpu_cs_parser *parser;
	unsigned reg, count;
	unsigned data0, data1;
	unsigned idx;
	unsigned ib_idx;

	/* does the IB have a msg command */
	bool has_msg_cmd;

	/* minimum buffer sizes */
	unsigned *buf_sizes;
};

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);

static void amdgpu_uvd_note_usage(struct amdgpu_device *adev);
static void amdgpu_uvd_idle_work_handler(struct work_struct *work);

int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned version_major, version_minor, family_id;
	int i, r;

	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->uvd.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->uvd.fw);
		adev->uvd.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
	version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
	DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
		version_major, version_minor, family_id);

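	/*
	 * Pack the version for user space queries: major in bits 31:24,
	 * minor in bits 23:16, family id in bits 15:8.
	 */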
	adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
				(family_id << 8));

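	/*
	 * The VCPU BO holds the firmware image (plus 8 bytes of padding),
	 * followed by the UVD stack and heap.
	 */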
	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
		 + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
	r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &adev->uvd.vcpu_bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
		return r;
	}

	r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
	if (r) {
		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to reserve UVD bo\n", r);
		return r;
	}

	r = amdgpu_bo_pin(adev->uvd.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
			  &adev->uvd.gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
		dev_err(adev->dev, "(%d) UVD bo pin failed\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->uvd.vcpu_bo, &adev->uvd.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) UVD map failed\n", r);
		return r;
	}

	amdgpu_bo_unreserve(adev->uvd.vcpu_bo);

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		atomic_set(&adev->uvd.handles[i], 0);
		adev->uvd.filp[i] = NULL;
	}

	/* from UVD v5.0 on, HW addressing capacity increased to 64 bits */
	if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
		adev->uvd.address_64_bit = true;

	return 0;
}

int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->uvd.vcpu_bo == NULL)
		return 0;

	r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
	if (!r) {
		amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
		amdgpu_bo_unpin(adev->uvd.vcpu_bo);
		amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
	}

	amdgpu_bo_unref(&adev->uvd.vcpu_bo);

	amdgpu_ring_fini(&adev->uvd.ring);

	release_firmware(adev->uvd.fw);

	return 0;
}

int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	int i, r;

	if (adev->uvd.vcpu_bo == NULL)
		return 0;

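	/* destroy any sessions still open, so the block is idle when we suspend */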
	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
		if (handle != 0) {
			struct fence *fence;

			amdgpu_uvd_note_usage(adev);

			r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			fence_wait(fence, false);
			fence_put(fence);

			adev->uvd.filp[i] = NULL;
			atomic_set(&adev->uvd.handles[i], 0);
		}
	}

	return 0;
}

int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	const struct common_firmware_header *hdr;
	unsigned offset;

	if (adev->uvd.vcpu_bo == NULL)
		return -EINVAL;

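	/* re-upload the firmware image, then clear the stack/heap area behind it */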
	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
		(adev->uvd.fw->size) - offset);

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
	size -= le32_to_cpu(hdr->ucode_size_bytes);
	ptr = adev->uvd.cpu_addr;
	ptr += le32_to_cpu(hdr->ucode_size_bytes);

	memset(ptr, 0, size);

	return 0;
}

void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	int i, r;

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
		if (handle != 0 && adev->uvd.filp[i] == filp) {
			struct fence *fence;

			amdgpu_uvd_note_usage(adev);

			r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			fence_wait(fence, false);
			fence_put(fence);

			adev->uvd.filp[i] = NULL;
			atomic_set(&adev->uvd.handles[i], 0);
		}
	}
}

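/**
 * amdgpu_uvd_force_into_uvd_segment - restrict buffer placement
 *
 * @rbo: buffer object to restrict
 *
 * Limit every placement to the first 256MB, since UVD 4.2 (which lacks
 * 64-bit addressing) can only access buffers inside that segment.
 */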
static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *rbo)
{
	int i;

	for (i = 0; i < rbo->placement.num_placement; ++i) {
		rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
		rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
	}
}

/**
 * amdgpu_uvd_cs_pass1 - first parsing round
 *
 * @ctx: UVD parser context
 *
 * Make sure UVD message and feedback buffers are in VRAM and
 * nobody is violating a 256MB boundary.
 */
static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd, lo, hi;
	uint64_t addr;
	int r = 0;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		/* check if it's a message or feedback command */
		cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
		if (cmd == 0x0 || cmd == 0x3) {
			/* yes, force it into VRAM */
			uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
			amdgpu_ttm_placement_from_domain(bo, domain);
		}
		amdgpu_uvd_force_into_uvd_segment(bo);

		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	}

	return r;
}

/**
 * amdgpu_uvd_cs_msg_decode - handle UVD decode message
 *
 * @msg: pointer to message structure
 * @buf_sizes: returned buffer sizes
 *
 * Peek into the decode message and calculate the necessary buffer sizes.
 */
static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];
	unsigned level = msg[57];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);
	unsigned fs_in_mb = width_in_mb * height_in_mb;

	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
	unsigned min_ctx_size = 0;

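	/* NV12 frame: full-size luma plane plus a half-size chroma plane */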
	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);

	switch (stream_type) {
	case 0: /* H264 */
	case 7: /* H264 Perf */
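		/*
		 * The constants below appear to be the MaxDpbMbs limits from
		 * H.264 Annex A; dividing by the frame size in macroblocks
		 * gives the maximum number of reference frames for the level.
		 */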
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	case 16: /* H265 */
		image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
		image_size = ALIGN(image_size, 256);

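		/* the DPB frame count comes from the message itself (low byte
		 * of msg[59]), plus two extra buffers */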
		num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
		min_dpb_size = image_size * num_dpb_buffer;
		min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
					   * 16 * num_dpb_buffer + 52 * 1024;
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	buf_sizes[0x4] = min_ctx_size;
	return 0;
}

/**
 * amdgpu_uvd_cs_msg - handle UVD message
 *
 * @ctx: UVD parser context
 * @bo: buffer object containing the message
 * @offset: offset into the buffer object
 *
 * Peek into the UVD message and extract the session id.
 * Make sure that we don't open up too many sessions.
 */
static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
			     struct amdgpu_bo *bo, unsigned offset)
{
	struct amdgpu_device *adev = ctx->parser->adev;
	int32_t *msg, msg_type, handle;
	void *ptr;
	long r;
	int i;

	if (offset & 0x3F) {
		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
		return -EINVAL;
	}

	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
						MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		DRM_ERROR("Failed waiting for UVD message (%ld)!\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
		return r;
	}

	msg = ptr + offset;

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		amdgpu_bo_kunmap(bo);
		DRM_ERROR("Invalid UVD handle!\n");
		return -EINVAL;
	}

	switch (msg_type) {
	case 0:
		/* it's a create msg, we don't need to parse it further */
		amdgpu_bo_kunmap(bo);

		/* try to alloc a new handle */
		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
			if (atomic_read(&adev->uvd.handles[i]) == handle) {
				DRM_ERROR("Handle 0x%x already in use!\n", handle);
				return -EINVAL;
			}

			if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
				adev->uvd.filp[i] = ctx->parser->filp;
				return 0;
			}
		}

		DRM_ERROR("No more free UVD handles!\n");
		return -EINVAL;

	case 1:
		/* it's a decode msg, calc buffer sizes */
		r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes);
		amdgpu_bo_kunmap(bo);
		if (r)
			return r;

		/* validate the handle */
		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
			if (atomic_read(&adev->uvd.handles[i]) == handle) {
				if (adev->uvd.filp[i] != ctx->parser->filp) {
					DRM_ERROR("UVD handle collision detected!\n");
					return -EINVAL;
				}
				return 0;
			}
		}

		DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
		return -ENOENT;

	case 2:
		/* it's a destroy msg, free the handle */
		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
			atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
		amdgpu_bo_kunmap(bo);
		return 0;

	default:
		amdgpu_bo_kunmap(bo);
		DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
		return -EINVAL;
	}
	BUG();
	return -EINVAL;
}

/**
 * amdgpu_uvd_cs_pass2 - second parsing round
 *
 * @ctx: UVD parser context
 *
 * Patch buffer addresses, make sure buffer sizes are correct.
 */
static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	struct amdgpu_ib *ib;
	uint32_t cmd, lo, hi;
	uint64_t start, end;
	uint64_t addr;
	int r;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
	if (mapping == NULL)
		return -EINVAL;

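	/*
	 * Translate the virtual address into the GPU address range backing
	 * the mapping and patch it into the command stream in place of the
	 * original DATA0/DATA1 values.
	 */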
	start = amdgpu_bo_gpu_offset(bo);

	end = (mapping->it.last + 1 - mapping->it.start);
	end = end * AMDGPU_GPU_PAGE_SIZE + start;

	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
	start += addr;

	ib = &ctx->parser->ibs[ctx->ib_idx];
	ib->ptr[ctx->data0] = start & 0xFFFFFFFF;
	ib->ptr[ctx->data1] = start >> 32;

	cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
	if (cmd < 0x4) {
		if ((end - start) < ctx->buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd == 0x206) {
		if ((end - start) < ctx->buf_sizes[4]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
					  (unsigned)(end - start),
					  ctx->buf_sizes[4]);
			return -EINVAL;
		}
	} else if ((cmd != 0x100) && (cmd != 0x204)) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		if ((start >> 28) != ((end - 1) >> 28)) {
			DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
				  start, end);
			return -EINVAL;
		}

		if ((cmd == 0 || cmd == 0x3) &&
		    (start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) {
			DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
				  start, end);
			return -EINVAL;
		}
	}

	if (cmd == 0) {
		ctx->has_msg_cmd = true;
		r = amdgpu_uvd_cs_msg(ctx, bo, addr);
		if (r)
			return r;
	} else if (!ctx->has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are sent!\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * amdgpu_uvd_cs_reg - parse register writes
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the register writes, call cb on each complete command.
 */
static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
			     int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->ibs[ctx->ib_idx];
	int i, r;

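	/*
	 * A command is issued by writing DATA0, DATA1 and finally CMD;
	 * remember where the data dwords live so the callback can patch
	 * them when the CMD write arrives.
	 */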
	ctx->idx++;
	for (i = 0; i <= ctx->count; ++i) {
		unsigned reg = ctx->reg + i;

		if (ctx->idx >= ib->length_dw) {
			DRM_ERROR("Register command after end of CS!\n");
			return -EINVAL;
		}

		switch (reg) {
		case mmUVD_GPCOM_VCPU_DATA0:
			ctx->data0 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_DATA1:
			ctx->data1 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_CMD:
			r = cb(ctx);
			if (r)
				return r;
			break;
		case mmUVD_ENGINE_CNTL:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n", reg);
			return -EINVAL;
		}
		ctx->idx++;
	}
	return 0;
}

/**
 * amdgpu_uvd_cs_packets - parse UVD packets
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the command stream packets.
 */
static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
				 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->ibs[ctx->ib_idx];
	int r;

	for (ctx->idx = 0; ctx->idx < ib->length_dw; ) {
		uint32_t cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx);
		unsigned type = CP_PACKET_GET_TYPE(cmd);

		switch (type) {
		case PACKET_TYPE0:
			ctx->reg = CP_PACKET0_GET_REG(cmd);
			ctx->count = CP_PACKET_GET_COUNT(cmd);
			r = amdgpu_uvd_cs_reg(ctx, cb);
			if (r)
				return r;
			break;
		case PACKET_TYPE2:
			++ctx->idx;
			break;
		default:
			DRM_ERROR("Unknown packet type %d!\n", type);
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * amdgpu_uvd_ring_parse_cs - UVD command submission parser
 *
 * @parser: Command submission parser context
 * @ib_idx: index of the IB to parse
 *
 * Parse the command stream, patch in addresses as necessary.
 */
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
{
	struct amdgpu_uvd_cs_ctx ctx = {};
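	/*
	 * Minimum buffer sizes, indexed by command type: 0 = message,
	 * 1 = DPB, 2 = decoding target, 3 = feedback, 4 = session context.
	 * The 0xFFFFFFFF entries are replaced with real minimums once the
	 * decode message has been parsed.
	 */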
	unsigned buf_sizes[] = {
		[0x00000000]	=	2048,
		[0x00000001]	=	0xFFFFFFFF,
		[0x00000002]	=	0xFFFFFFFF,
		[0x00000003]	=	2048,
		[0x00000004]	=	0xFFFFFFFF,
	};
	struct amdgpu_ib *ib = &parser->ibs[ib_idx];
	int r;

	if (ib->length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  ib->length_dw);
		return -EINVAL;
	}

	ctx.parser = parser;
	ctx.buf_sizes = buf_sizes;
	ctx.ib_idx = ib_idx;

	/* first round, make sure the buffers are actually in the UVD segment */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
	if (r)
		return r;

	/* second round, patch buffer addresses into the command stream */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
	if (r)
		return r;

	if (!ctx.has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	amdgpu_uvd_note_usage(ctx.parser->adev);

	return 0;
}

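/* free the temporary IB that carried a UVD message once the job has run */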
static int amdgpu_uvd_free_job(struct amdgpu_job *job)
{
	amdgpu_ib_free(job->adev, job->ibs);
	kfree(job->ibs);
	return 0;
}

static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct fence **fence)
{
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head head;
	struct amdgpu_ib *ib = NULL;
	struct fence *f = NULL;
	struct amdgpu_device *adev = ring->adev;
	uint64_t addr;
	int i, r;

	memset(&tv, 0, sizeof(tv));
	tv.bo = &bo->tbo;

	INIT_LIST_HEAD(&head);
	list_add(&tv.head, &head);

	r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
	if (r)
		return r;

	if (!bo->adev->uvd.address_64_bit) {
		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
		amdgpu_uvd_force_into_uvd_segment(bo);
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto err;

	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib) {
		r = -ENOMEM;
		goto err;
	}

	r = amdgpu_ib_get(ring, NULL, 64, ib);
	if (r)
		goto err1;

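	/*
	 * Hand the message buffer to the VCPU: write its address through the
	 * GPCOM DATA0/DATA1 registers, trigger it with a CMD write and pad
	 * the IB to 16 dwords with type-2 NOP packets.
	 */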
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; ++i)
		ib->ptr[i] = PACKET2(0);
	ib->length_dw = 16;

	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 &amdgpu_uvd_free_job,
						 AMDGPU_FENCE_OWNER_UNDEFINED,
						 &f);
	if (r)
		goto err2;

	ttm_eu_fence_buffer_objects(&ticket, &head, f);

	if (fence)
		*fence = fence_get(f);
	amdgpu_bo_unref(&bo);
	fence_put(f);
	if (amdgpu_enable_scheduler)
		return 0;

	amdgpu_ib_free(ring->adev, ib);
	kfree(ib);
	return 0;
err2:
	amdgpu_ib_free(ring->adev, ib);
err1:
	kfree(ib);
err:
	ttm_eu_backoff_reservation(&ticket, &head);
	return r;
}

/*
 * Multiple fence commands without any stream commands in between can
 * crash the vcpu, so just try to emit a dummy create/destroy msg to
 * avoid this.
 */
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	/* stitch together a UVD create msg */
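	/*
	 * Dword 1 is the message type (0 = create) and dword 2 the session
	 * handle, matching what amdgpu_uvd_cs_msg() parses; 0x780 x 0x440
	 * describes a dummy 1920x1088 stream.
	 */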
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_uvd_send_msg(ring, bo, fence);
}

int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       struct fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	/* stitch together a UVD destroy msg */
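	/* message type 2 = destroy; dword 2 selects the session to tear down */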
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_uvd_send_msg(ring, bo, fence);
}

static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, uvd.idle_work.work);
	unsigned i, fences, handles = 0;

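	/* the block is idle only if no fences are outstanding and no
	 * sessions are open; otherwise re-arm the timer */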
	fences = amdgpu_fence_count_emitted(&adev->uvd.ring);

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
		if (atomic_read(&adev->uvd.handles[i]))
			++handles;

	if (fences == 0 && handles == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, false);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		}
	} else {
		schedule_delayed_work(&adev->uvd.idle_work,
				      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
	}
}

static void amdgpu_uvd_note_usage(struct amdgpu_device *adev)
{
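	/*
	 * If the idle work was not pending, UVD has likely been powered
	 * down in the meantime, so the clocks have to be brought back up.
	 */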
	bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
	set_clocks &= schedule_delayed_work(&adev->uvd.idle_work,
					    msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));

	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, true);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
		}
	}
}