• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2017 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22 */
23 
24 #include <stdio.h>
25 #include <inttypes.h>
26 
27 #include "CUnit/Basic.h"
28 
29 #include "util_math.h"
30 
31 #include "amdgpu_test.h"
32 #include "amdgpu_drm.h"
33 #include "amdgpu_internal.h"
34 #include "frame.h"
35 #include "uve_ib.h"
36 
37 #define IB_SIZE		4096
38 #define MAX_RESOURCES	16
39 
/* One GPU buffer object together with its VA-mapping bookkeeping. */
struct amdgpu_uvd_enc_bo {
	amdgpu_bo_handle handle;	/* buffer object handle */
	amdgpu_va_handle va_handle;	/* GPU VA range reservation backing addr */
	uint64_t addr;			/* GPU virtual address the BO is mapped at */
	uint64_t size;			/* mapped size in bytes (page aligned) */
	uint8_t *ptr;			/* CPU pointer; valid only while CPU-mapped */
};
47 
/* All per-session state for one UVD ENC encode run. */
struct amdgpu_uvd_enc {
	unsigned width;			/* frame width in pixels */
	unsigned height;		/* frame height in pixels */
	struct amdgpu_uvd_enc_bo session;	/* firmware session buffer */
	struct amdgpu_uvd_enc_bo vbuf;		/* video input (source frame) buffer */
	struct amdgpu_uvd_enc_bo bs;		/* output bitstream buffer */
	struct amdgpu_uvd_enc_bo fb;		/* feedback buffer (encode results) */
	struct amdgpu_uvd_enc_bo cpb;		/* coded picture buffer (reference frames) */
};
57 
/* Device opened in suite_uvd_enc_tests_init() and shared by all tests. */
static amdgpu_device_handle device_handle;
static uint32_t major_version;	/* libdrm_amdgpu interface major version */
static uint32_t minor_version;	/* libdrm_amdgpu interface minor version */
static uint32_t family_id;	/* ASIC family, selects pitch alignment */

static amdgpu_context_handle context_handle;	/* GPU submission context */
static amdgpu_bo_handle ib_handle;		/* indirect buffer BO */
static amdgpu_va_handle ib_va_handle;		/* VA reservation for the IB */
static uint64_t ib_mc_address;			/* GPU address of the IB */
static uint32_t *ib_cpu;			/* CPU mapping of the IB */

static struct amdgpu_uvd_enc enc;		/* state shared across the tests */
/* BO list handed to each submission; rebuilt by every test. */
static amdgpu_bo_handle resources[MAX_RESOURCES];
static unsigned num_resources;
72 
/* Forward declarations for the CUnit test callbacks registered below. */
static void amdgpu_cs_uvd_enc_create(void);
static void amdgpu_cs_uvd_enc_session_init(void);
static void amdgpu_cs_uvd_enc_encode(void);
static void amdgpu_cs_uvd_enc_destroy(void);


/* Test table; entries run in order since each test builds on the previous
 * one (create -> session init -> encode -> destroy). */
CU_TestInfo uvd_enc_tests[] = {
	{ "UVD ENC create",  amdgpu_cs_uvd_enc_create },
	{ "UVD ENC session init",  amdgpu_cs_uvd_enc_session_init },
	{ "UVD ENC encode",  amdgpu_cs_uvd_enc_encode },
	{ "UVD ENC destroy",  amdgpu_cs_uvd_enc_destroy },
	CU_TEST_INFO_NULL,
};
86 
suite_uvd_enc_tests_enable(void)87 CU_BOOL suite_uvd_enc_tests_enable(void)
88 {
89 	int r;
90 	struct drm_amdgpu_info_hw_ip info;
91 
92 	if (amdgpu_device_initialize(drm_amdgpu[0], &major_version,
93 					     &minor_version, &device_handle))
94 		return CU_FALSE;
95 
96 	r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_UVD_ENC, 0, &info);
97 
98 	if (amdgpu_device_deinitialize(device_handle))
99 		return CU_FALSE;
100 
101 	if (!info.available_rings)
102 		printf("\n\nThe ASIC NOT support UVD ENC, suite disabled.\n");
103 
104 	return (r == 0 && (info.available_rings ? CU_TRUE : CU_FALSE));
105 }
106 
107 
suite_uvd_enc_tests_init(void)108 int suite_uvd_enc_tests_init(void)
109 {
110 	int r;
111 
112 	r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
113 				     &minor_version, &device_handle);
114 	if (r)
115 		return CUE_SINIT_FAILED;
116 
117 	family_id = device_handle->info.family_id;
118 
119 	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
120 	if (r)
121 		return CUE_SINIT_FAILED;
122 
123 	r = amdgpu_bo_alloc_and_map(device_handle, IB_SIZE, 4096,
124 				    AMDGPU_GEM_DOMAIN_GTT, 0,
125 				    &ib_handle, (void**)&ib_cpu,
126 				    &ib_mc_address, &ib_va_handle);
127 	if (r)
128 		return CUE_SINIT_FAILED;
129 
130 	return CUE_SUCCESS;
131 }
132 
suite_uvd_enc_tests_clean(void)133 int suite_uvd_enc_tests_clean(void)
134 {
135 	int r;
136 
137 	r = amdgpu_bo_unmap_and_free(ib_handle, ib_va_handle,
138 				     ib_mc_address, IB_SIZE);
139 	if (r)
140 		return CUE_SCLEAN_FAILED;
141 
142 	r = amdgpu_cs_ctx_free(context_handle);
143 	if (r)
144 		return CUE_SCLEAN_FAILED;
145 
146 	r = amdgpu_device_deinitialize(device_handle);
147 	if (r)
148 		return CUE_SCLEAN_FAILED;
149 
150 	return CUE_SUCCESS;
151 }
152 
submit(unsigned ndw,unsigned ip)153 static int submit(unsigned ndw, unsigned ip)
154 {
155 	struct amdgpu_cs_request ibs_request = {0};
156 	struct amdgpu_cs_ib_info ib_info = {0};
157 	struct amdgpu_cs_fence fence_status = {0};
158 	uint32_t expired;
159 	int r;
160 
161 	ib_info.ib_mc_address = ib_mc_address;
162 	ib_info.size = ndw;
163 
164 	ibs_request.ip_type = ip;
165 
166 	r = amdgpu_bo_list_create(device_handle, num_resources, resources,
167 				  NULL, &ibs_request.resources);
168 	if (r)
169 		return r;
170 
171 	ibs_request.number_of_ibs = 1;
172 	ibs_request.ibs = &ib_info;
173 	ibs_request.fence_info.handle = NULL;
174 
175 	r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
176 	if (r)
177 		return r;
178 
179 	r = amdgpu_bo_list_destroy(ibs_request.resources);
180 	if (r)
181 		return r;
182 
183 	fence_status.context = context_handle;
184 	fence_status.ip_type = ip;
185 	fence_status.fence = ibs_request.seq_no;
186 
187 	r = amdgpu_cs_query_fence_status(&fence_status,
188 					 AMDGPU_TIMEOUT_INFINITE,
189 					 0, &expired);
190 	if (r)
191 		return r;
192 
193 	return 0;
194 }
195 
/*
 * Allocate a BO in the given heap, map it into the GPU VA space and zero
 * the caller-requested portion through a temporary CPU mapping.
 *
 * On return uvd_enc_bo holds the BO handle, VA handle, GPU address and the
 * page-aligned size.  Errors are reported with CU_ASSERT_EQUAL and
 * execution continues (standard pattern for this test suite).
 */
static void alloc_resource(struct amdgpu_uvd_enc_bo *uvd_enc_bo,
			unsigned size, unsigned domain)
{
	struct amdgpu_bo_alloc_request req = {0};
	amdgpu_bo_handle buf_handle;
	amdgpu_va_handle va_handle;
	uint64_t va = 0;
	int r;

	/* Allocate in whole 4 KiB pages. */
	req.alloc_size = ALIGN(size, 4096);
	req.preferred_heap = domain;
	r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
	CU_ASSERT_EQUAL(r, 0);
	/* Reserve a GPU virtual address range... */
	r = amdgpu_va_range_alloc(device_handle,
				  amdgpu_gpu_va_range_general,
				  req.alloc_size, 1, 0, &va,
				  &va_handle, 0);
	CU_ASSERT_EQUAL(r, 0);
	/* ...and map the BO at that address. */
	r = amdgpu_bo_va_op(buf_handle, 0, req.alloc_size, va, 0,
			    AMDGPU_VA_OP_MAP);
	CU_ASSERT_EQUAL(r, 0);
	uvd_enc_bo->addr = va;
	uvd_enc_bo->handle = buf_handle;
	uvd_enc_bo->size = req.alloc_size;
	uvd_enc_bo->va_handle = va_handle;
	/* CPU-map only long enough to clear the requested size. */
	r = amdgpu_bo_cpu_map(uvd_enc_bo->handle, (void **)&uvd_enc_bo->ptr);
	CU_ASSERT_EQUAL(r, 0);
	memset(uvd_enc_bo->ptr, 0, size);
	r = amdgpu_bo_cpu_unmap(uvd_enc_bo->handle);
	CU_ASSERT_EQUAL(r, 0);
}
227 
free_resource(struct amdgpu_uvd_enc_bo * uvd_enc_bo)228 static void free_resource(struct amdgpu_uvd_enc_bo *uvd_enc_bo)
229 {
230 	int r;
231 
232 	r = amdgpu_bo_va_op(uvd_enc_bo->handle, 0, uvd_enc_bo->size,
233 			    uvd_enc_bo->addr, 0, AMDGPU_VA_OP_UNMAP);
234 	CU_ASSERT_EQUAL(r, 0);
235 
236 	r = amdgpu_va_range_free(uvd_enc_bo->va_handle);
237 	CU_ASSERT_EQUAL(r, 0);
238 
239 	r = amdgpu_bo_free(uvd_enc_bo->handle);
240 	CU_ASSERT_EQUAL(r, 0);
241 	memset(uvd_enc_bo, 0, sizeof(*uvd_enc_bo));
242 }
243 
/*
 * "UVD ENC create" test: set the test frame dimensions and allocate the
 * session buffer used by all subsequent submissions.  The resources[]
 * list is rebuilt here for the session-init submission.
 */
static void amdgpu_cs_uvd_enc_create(void)
{
	enc.width = 160;
	enc.height = 128;

	num_resources  = 0;
	alloc_resource(&enc.session, 128 * 1024, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = enc.session.handle;
	resources[num_resources++] = ib_handle;
}
254 
/*
 * Verify the bitstream produced by amdgpu_cs_uvd_enc_encode().
 *
 * Reads the written-bitstream size from dword 6 of the feedback buffer
 * (assumed to be the "bitstream size" field of the UVD ENC feedback
 * layout -- TODO confirm against the firmware interface), then byte-sums
 * the bitstream and compares it with the golden checksum for the known
 * 160x128 reference frame.
 */
static void check_result(struct amdgpu_uvd_enc *enc)
{
	uint64_t sum;
	uint32_t s = 175602;	/* golden byte-sum for the reference frame */
	uint32_t *ptr, size;
	int j, r;

	r = amdgpu_bo_cpu_map(enc->fb.handle, (void **)&enc->fb.ptr);
	CU_ASSERT_EQUAL(r, 0);
	ptr = (uint32_t *)enc->fb.ptr;
	size = ptr[6];
	r = amdgpu_bo_cpu_unmap(enc->fb.handle);
	CU_ASSERT_EQUAL(r, 0);
	r = amdgpu_bo_cpu_map(enc->bs.handle, (void **)&enc->bs.ptr);
	CU_ASSERT_EQUAL(r, 0);
	for (j = 0, sum = 0; j < size; ++j)
		sum += enc->bs.ptr[j];
	CU_ASSERT_EQUAL(sum, s);
	r = amdgpu_bo_cpu_unmap(enc->bs.handle);
	CU_ASSERT_EQUAL(r, 0);

}
277 
/*
 * "UVD ENC session init" test: assemble the session-initialization IB
 * from the command templates in uve_ib.h and submit it on the UVD ENC
 * ring.  The dword layout is fixed by the firmware interface; `len` is
 * the running IB length in dwords (hence the `/ 4` on byte sizes).
 */
static void amdgpu_cs_uvd_enc_session_init(void)
{
	int len, r;

	len = 0;
	/* Session info, patched with the GPU address of the session buffer
	 * (high dword first). */
	memcpy((ib_cpu + len), uve_session_info, sizeof(uve_session_info));
	len += sizeof(uve_session_info) / 4;
	ib_cpu[len++] = enc.session.addr >> 32;
	ib_cpu[len++] = enc.session.addr;

	/* Task info; the three trailing dwords are template-specific values
	 * (presumably total task size and task ids -- see uve_ib.h). */
	memcpy((ib_cpu + len), uve_task_info, sizeof(uve_task_info));
	len += sizeof(uve_task_info) / 4;
	ib_cpu[len++] = 0x000000d8;
	ib_cpu[len++] = 0x00000000;
	ib_cpu[len++] = 0x00000000;

	/* The remaining parameter blocks are copied verbatim, in the order
	 * the firmware expects them. */
	memcpy((ib_cpu + len), uve_op_init, sizeof(uve_op_init));
	len += sizeof(uve_op_init) / 4;

	memcpy((ib_cpu + len), uve_session_init, sizeof(uve_session_init));
	len += sizeof(uve_session_init) / 4;

	memcpy((ib_cpu + len), uve_layer_ctrl, sizeof(uve_layer_ctrl));
	len += sizeof(uve_layer_ctrl) / 4;

	memcpy((ib_cpu + len), uve_slice_ctrl, sizeof(uve_slice_ctrl));
	len += sizeof(uve_slice_ctrl) / 4;

	memcpy((ib_cpu + len), uve_spec_misc, sizeof(uve_spec_misc));
	len += sizeof(uve_spec_misc) / 4;

	memcpy((ib_cpu + len), uve_rc_session_init, sizeof(uve_rc_session_init));
	len += sizeof(uve_rc_session_init) / 4;

	memcpy((ib_cpu + len), uve_deblocking_filter, sizeof(uve_deblocking_filter));
	len += sizeof(uve_deblocking_filter) / 4;

	memcpy((ib_cpu + len), uve_quality_params, sizeof(uve_quality_params));
	len += sizeof(uve_quality_params) / 4;

	memcpy((ib_cpu + len), uve_op_init_rc, sizeof(uve_op_init_rc));
	len += sizeof(uve_op_init_rc) / 4;

	memcpy((ib_cpu + len), uve_op_init_rc_vbv_level, sizeof(uve_op_init_rc_vbv_level));
	len += sizeof(uve_op_init_rc_vbv_level) / 4;

	r = submit(len, AMDGPU_HW_IP_UVD_ENC);
	CU_ASSERT_EQUAL(r, 0);
}
327 
/*
 * "UVD ENC encode" test: upload one 160x128 frame and encode it.
 *
 * Allocates the feedback, bitstream, video-input and coded-picture
 * buffers, copies the reference frame into the pitch-aligned input
 * buffer, assembles the encode IB from the uve_ib.h templates, submits
 * it, verifies the bitstream with check_result() and frees all
 * per-encode resources.
 */
static void amdgpu_cs_uvd_enc_encode(void)
{
	int len, r, i;
	uint64_t luma_offset, chroma_offset;
	uint32_t vbuf_size, bs_size = 0x003f4800, cpb_size;
	/* Vega (FAMILY_AI) and newer need a 256-byte pitch; older ASICs 16. */
	unsigned align = (family_id >= AMDGPU_FAMILY_AI) ? 256 : 16;
	/* Luma plane plus half-size chroma -> 1.5 bytes per pixel. */
	vbuf_size = ALIGN(enc.width, align) * ALIGN(enc.height, 16) * 1.5;
	cpb_size = vbuf_size * 10;


	/* Rebuild the resource list with every buffer this submission uses. */
	num_resources  = 0;
	alloc_resource(&enc.fb, 4096, AMDGPU_GEM_DOMAIN_VRAM);
	resources[num_resources++] = enc.fb.handle;
	alloc_resource(&enc.bs, bs_size, AMDGPU_GEM_DOMAIN_VRAM);
	resources[num_resources++] = enc.bs.handle;
	alloc_resource(&enc.vbuf, vbuf_size, AMDGPU_GEM_DOMAIN_VRAM);
	resources[num_resources++] = enc.vbuf.handle;
	alloc_resource(&enc.cpb, cpb_size, AMDGPU_GEM_DOMAIN_VRAM);
	resources[num_resources++] = enc.cpb.handle;
	resources[num_resources++] = ib_handle;

	/* Copy the reference frame into the input buffer, row by row, so
	 * each line starts on the pitch-aligned boundary. */
	r = amdgpu_bo_cpu_map(enc.vbuf.handle, (void **)&enc.vbuf.ptr);
	CU_ASSERT_EQUAL(r, 0);

	memset(enc.vbuf.ptr, 0, vbuf_size);
	for (i = 0; i < enc.height; ++i) {
		memcpy(enc.vbuf.ptr, (frame + i * enc.width), enc.width);
		enc.vbuf.ptr += ALIGN(enc.width, align);
	}
	/* Chroma plane follows the luma data in frame[] and has half the rows. */
	for (i = 0; i < enc.height / 2; ++i) {
		memcpy(enc.vbuf.ptr, ((frame + enc.height * enc.width) + i * enc.width), enc.width);
		enc.vbuf.ptr += ALIGN(enc.width, align);
	}

	r = amdgpu_bo_cpu_unmap(enc.vbuf.handle);
	CU_ASSERT_EQUAL(r, 0);

	/* Assemble the encode IB; len counts dwords (byte sizes / 4). */
	len = 0;
	memcpy((ib_cpu + len), uve_session_info, sizeof(uve_session_info));
	len += sizeof(uve_session_info) / 4;
	ib_cpu[len++] = enc.session.addr >> 32;
	ib_cpu[len++] = enc.session.addr;

	memcpy((ib_cpu + len), uve_task_info, sizeof(uve_task_info));
	len += sizeof(uve_task_info) / 4;
	ib_cpu[len++] = 0x000005e0;
	ib_cpu[len++] = 0x00000001;
	ib_cpu[len++] = 0x00000001;

	/* Stream headers (VPS/SPS/PPS-style NALU templates) and slice header. */
	memcpy((ib_cpu + len), uve_nalu_buffer_1, sizeof(uve_nalu_buffer_1));
	len += sizeof(uve_nalu_buffer_1) / 4;

	memcpy((ib_cpu + len), uve_nalu_buffer_2, sizeof(uve_nalu_buffer_2));
	len += sizeof(uve_nalu_buffer_2) / 4;

	memcpy((ib_cpu + len), uve_nalu_buffer_3, sizeof(uve_nalu_buffer_3));
	len += sizeof(uve_nalu_buffer_3) / 4;

	memcpy((ib_cpu + len), uve_nalu_buffer_4, sizeof(uve_nalu_buffer_4));
	len += sizeof(uve_nalu_buffer_4) / 4;

	memcpy((ib_cpu + len), uve_slice_header, sizeof(uve_slice_header));
	len += sizeof(uve_slice_header) / 4;

	/* Context buffer command, patched with the CPB GPU address. */
	ib_cpu[len++] = 0x00000254;
	ib_cpu[len++] = 0x00000010;
	ib_cpu[len++] = enc.cpb.addr >> 32;
	ib_cpu[len++] = enc.cpb.addr;
	memcpy((ib_cpu + len), uve_ctx_buffer, sizeof(uve_ctx_buffer));
	len += sizeof(uve_ctx_buffer) / 4;

	/* Bitstream buffer: address and size (matches bs_size). */
	memcpy((ib_cpu + len), uve_bitstream_buffer, sizeof(uve_bitstream_buffer));
	len += sizeof(uve_bitstream_buffer) / 4;
	ib_cpu[len++] = 0x00000000;
	ib_cpu[len++] = enc.bs.addr >> 32;
	ib_cpu[len++] = enc.bs.addr;
	ib_cpu[len++] = 0x003f4800;
	ib_cpu[len++] = 0x00000000;

	/* Feedback buffer: address plus template-specific offset/size words. */
	memcpy((ib_cpu + len), uve_feedback_buffer, sizeof(uve_feedback_buffer));
	len += sizeof(uve_feedback_buffer) / 4;
	ib_cpu[len++] = enc.fb.addr >> 32;
	ib_cpu[len++] = enc.fb.addr;
	ib_cpu[len++] = 0x00000010;
	ib_cpu[len++] = 0x00000028;

	memcpy((ib_cpu + len), uve_feedback_buffer_additional, sizeof(uve_feedback_buffer_additional));
	len += sizeof(uve_feedback_buffer_additional) / 4;

	memcpy((ib_cpu + len), uve_intra_refresh, sizeof(uve_intra_refresh));
	len += sizeof(uve_intra_refresh) / 4;

	/* Rate-control blocks; uve_layer_select precedes each layer command. */
	memcpy((ib_cpu + len), uve_layer_select, sizeof(uve_layer_select));
	len += sizeof(uve_layer_select) / 4;

	memcpy((ib_cpu + len), uve_rc_layer_init, sizeof(uve_rc_layer_init));
	len += sizeof(uve_rc_layer_init) / 4;

	memcpy((ib_cpu + len), uve_layer_select, sizeof(uve_layer_select));
	len += sizeof(uve_layer_select) / 4;

	memcpy((ib_cpu + len), uve_rc_per_pic, sizeof(uve_rc_per_pic));
	len += sizeof(uve_rc_per_pic) / 4;

	/* Input picture command: plane addresses within the input buffer. */
	unsigned luma_size = ALIGN(enc.width, align) * ALIGN(enc.height, 16);
	luma_offset = enc.vbuf.addr;
	chroma_offset = luma_offset + luma_size;
	ib_cpu[len++] = 0x00000054;
	ib_cpu[len++] = 0x0000000c;
	ib_cpu[len++] = 0x00000002;
	ib_cpu[len++] = 0x003f4800;
	ib_cpu[len++] = luma_offset >> 32;
	ib_cpu[len++] = luma_offset;
	ib_cpu[len++] = chroma_offset >> 32;
	ib_cpu[len++] = chroma_offset;
	/* Encode parameters; the first two dwords are patched with the
	 * aligned pitch before advancing len past the template. */
	memcpy((ib_cpu + len), uve_encode_param, sizeof(uve_encode_param));
	ib_cpu[len] = ALIGN(enc.width, align);
	ib_cpu[len + 1] = ALIGN(enc.width, align);
	len += sizeof(uve_encode_param) / 4;

	memcpy((ib_cpu + len), uve_op_speed_enc_mode, sizeof(uve_op_speed_enc_mode));
	len += sizeof(uve_op_speed_enc_mode) / 4;

	memcpy((ib_cpu + len), uve_op_encode, sizeof(uve_op_encode));
	len += sizeof(uve_op_encode) / 4;

	r = submit(len, AMDGPU_HW_IP_UVD_ENC);
	CU_ASSERT_EQUAL(r, 0);

	check_result(&enc);

	free_resource(&enc.fb);
	free_resource(&enc.bs);
	free_resource(&enc.vbuf);
	free_resource(&enc.cpb);
}
464 
/*
 * "UVD ENC destroy" test: submit the session close command and free the
 * session buffer allocated by amdgpu_cs_uvd_enc_create().
 */
static void amdgpu_cs_uvd_enc_destroy(void)
{
	int len, r;

	/* Only the IB itself is referenced by this submission. */
	num_resources  = 0;
	resources[num_resources++] = ib_handle;

	len = 0;
	/* Session info, patched with the session buffer GPU address. */
	memcpy((ib_cpu + len), uve_session_info, sizeof(uve_session_info));
	len += sizeof(uve_session_info) / 4;
	ib_cpu[len++] = enc.session.addr >> 32;
	ib_cpu[len++] = enc.session.addr;

	/* Task info; trailing dwords are template-specific values. */
	memcpy((ib_cpu + len), uve_task_info, sizeof(uve_task_info));
	len += sizeof(uve_task_info) / 4;
	ib_cpu[len++] = 0xffffffff;
	ib_cpu[len++] = 0x00000002;
	ib_cpu[len++] = 0x00000000;

	memcpy((ib_cpu + len), uve_op_close, sizeof(uve_op_close));
	len += sizeof(uve_op_close) / 4;

	r = submit(len, AMDGPU_HW_IP_UVD_ENC);
	CU_ASSERT_EQUAL(r, 0);

	free_resource(&enc.session);
}
492