/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>	/* memset(), strerror() */
#include <errno.h>	/* errno, EACCES */
#include <unistd.h>
#if HAVE_ALLOCA_H
# include <alloca.h>
#endif

#include "CUnit/Basic.h"

#include "amdgpu_test.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"

#include <pthread.h>

/*
 * Delay in ms after which the memory location polled for comparison against
 * the reference value is written to, unblocking the command processor.
 */
#define WRITE_MEM_ADDRESS_DELAY_MS 100

#define PACKET_TYPE3 3

#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
			(((op) & 0xFF) << 8) | \
			((n) & 0x3FFF) << 16)

#define PACKET3_WAIT_REG_MEM 0x3C
#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0)
/* 0 - always
 * 1 - <
 * 2 - <=
 * 3 - ==
 * 4 - !=
 * 5 - >=
 * 6 - >
 */
#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4)
/* 0 - reg
 * 1 - mem
 */
#define WAIT_REG_MEM_OPERATION(x) ((x) << 6)
/* 0 - wait_reg_mem
 * 1 - wr_wait_wr_reg
 */
#define WAIT_REG_MEM_ENGINE(x) ((x) << 8)
/* 0 - me
 * 1 - pfp
 */

#define PACKET3_WRITE_DATA 0x37
#define WRITE_DATA_DST_SEL(x) ((x) << 8)
/* 0 - register
 * 1 - memory (sync - via GRBM)
 * 2 - gl2
 * 3 - gds
 * 4 - reserved
 * 5 - memory (async - direct)
 */
#define WR_ONE_ADDR (1 << 16)
#define WR_CONFIRM (1 << 20)
#define WRITE_DATA_CACHE_POLICY(x) ((x) << 25)
/* 0 - LRU
 * 1 - Stream
 */
#define WRITE_DATA_ENGINE_SEL(x) ((x) << 30)
/* 0 - me
 * 1 - pfp
 * 2 - ce
 */

#define mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x54f

#define SDMA_PKT_HEADER_OP(x) ((x) & 0xff)
#define SDMA_OP_POLL_REGMEM 8

static amdgpu_device_handle device_handle;
static uint32_t major_version;
static uint32_t minor_version;

static pthread_t stress_thread;
static uint32_t *ptr;

static uint32_t family_id;
static uint32_t chip_rev;
static uint32_t chip_id;

int use_uc_mtype = 0;

static void amdgpu_deadlock_helper(unsigned ip_type);
static void amdgpu_deadlock_gfx(void);
static void amdgpu_deadlock_compute(void);
static void amdgpu_illegal_reg_access(void);
static void amdgpu_illegal_mem_access(void);
static void amdgpu_deadlock_sdma(void);
static void amdgpu_dispatch_hang_gfx(void);
static void amdgpu_dispatch_hang_compute(void);
static void amdgpu_dispatch_hang_slow_gfx(void);
static void amdgpu_dispatch_hang_slow_compute(void);
static void amdgpu_draw_hang_gfx(void);
static void amdgpu_draw_hang_slow_gfx(void);

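/*
 * Runs before the suite is registered: probe the device and decide whether
 * the deadlock/hang tests are safe to run. GPU reset must be supported and
 * enabled by default on this ASIC, otherwise a blocked ring would wedge the
 * machine instead of recovering.
 */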
CU_BOOL suite_deadlock_tests_enable(void)
{
	CU_BOOL enable = CU_TRUE;

	if (amdgpu_device_initialize(drm_amdgpu[0], &major_version,
				     &minor_version, &device_handle))
		return CU_FALSE;

	family_id = device_handle->info.family_id;
	chip_id = device_handle->info.chip_external_rev;
	chip_rev = device_handle->info.chip_rev;

	/*
	 * Only enable for ASICs supporting GPU reset and for which it's enabled
	 * by default (currently GFX8+ dGPUs and GFX9+ APUs). Note that Raven1
	 * did not support GPU reset, but newer variants do.
	 */
	if (family_id == AMDGPU_FAMILY_SI ||
	    family_id == AMDGPU_FAMILY_KV ||
	    family_id == AMDGPU_FAMILY_CZ ||
	    family_id == AMDGPU_FAMILY_RV) {
		printf("\n\nGPU reset is not enabled for the ASIC, deadlock suite disabled\n");
		enable = CU_FALSE;
	}

	if (asic_is_gfx_pipe_removed(family_id, chip_id, chip_rev)) {
		if (amdgpu_set_test_active("Deadlock Tests",
					   "gfx ring block test (set amdgpu.lockup_timeout=50)",
					   CU_FALSE))
			fprintf(stderr, "test deactivation failed - %s\n",
				CU_get_error_msg());
	}

	if (device_handle->info.family_id >= AMDGPU_FAMILY_AI)
		use_uc_mtype = 1;

	if (amdgpu_device_deinitialize(device_handle))
		return CU_FALSE;

	return enable;
}

int suite_deadlock_tests_init(void)
{
	int r;

	r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
				     &minor_version, &device_handle);

	if (r) {
		if ((r == -EACCES) && (errno == EACCES))
			printf("\n\nError: %s. "
			       "Hint: try running this test program as root.",
			       strerror(errno));
		return CUE_SINIT_FAILED;
	}

	return CUE_SUCCESS;
}

int suite_deadlock_tests_clean(void)
{
	int r = amdgpu_device_deinitialize(device_handle);

	if (r == 0)
		return CUE_SUCCESS;
	else
		return CUE_SCLEAN_FAILED;
}

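/*
 * Suite table. The parenthesized hints name the amdgpu module parameters
 * each test expects: a short lockup timeout so ring resets fire quickly,
 * and vm_fault_stop=2 for the illegal memory access test.
 */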
CU_TestInfo deadlock_tests[] = {
	{ "gfx ring block test (set amdgpu.lockup_timeout=50)", amdgpu_deadlock_gfx },
	{ "compute ring block test (set amdgpu.lockup_timeout=50)", amdgpu_deadlock_compute },
	{ "sdma ring block test (set amdgpu.lockup_timeout=50)", amdgpu_deadlock_sdma },
	{ "illegal reg access test", amdgpu_illegal_reg_access },
	{ "illegal mem access test (set amdgpu.vm_fault_stop=2)", amdgpu_illegal_mem_access },
	{ "gfx ring bad dispatch test (set amdgpu.lockup_timeout=50)", amdgpu_dispatch_hang_gfx },
	{ "compute ring bad dispatch test (set amdgpu.lockup_timeout=50,50)", amdgpu_dispatch_hang_compute },
	{ "gfx ring bad slow dispatch test (set amdgpu.lockup_timeout=50)", amdgpu_dispatch_hang_slow_gfx },
	{ "compute ring bad slow dispatch test (set amdgpu.lockup_timeout=50,50)", amdgpu_dispatch_hang_slow_compute },
	{ "gfx ring bad draw test (set amdgpu.lockup_timeout=50)", amdgpu_draw_hang_gfx },
	{ "gfx ring slow bad draw test (set amdgpu.lockup_timeout=50)", amdgpu_draw_hang_slow_gfx },
	CU_TEST_INFO_NULL,
};

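/*
 * Stress-thread entry point: sleep for WRITE_MEM_ADDRESS_DELAY_MS, then
 * write the dword that the in-flight WAIT_REG_MEM / POLL_REGMEM packet is
 * polling, releasing the blocked ring.
 */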
static void *write_mem_address(void *data)
{
	int i;

	/* useconds_t range is [0, 1,000,000] so use a loop for waits > 1s */
	for (i = 0; i < WRITE_MEM_ADDRESS_DELAY_MS; i++)
		usleep(1000);

	ptr[256] = 0x1;

	return NULL;
}

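/* Run the WAIT_REG_MEM blocking scenario on the gfx and compute rings. */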
static void amdgpu_deadlock_gfx(void)
{
	amdgpu_deadlock_helper(AMDGPU_HW_IP_GFX);
}

static void amdgpu_deadlock_compute(void)
{
	amdgpu_deadlock_helper(AMDGPU_HW_IP_COMPUTE);
}

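/*
 * Core blocking scenario: build a 16-dword IB whose first packet is a
 * WAIT_REG_MEM polling ptr[256] until it differs from 0, submit it 200
 * times, then wait on the last fence. The ring stays blocked until the
 * stress thread writes ptr[256]; if the kernel's lockup timeout fires
 * first and resets the GPU, submissions and the fence wait may return
 * -ECANCELED, which the asserts below accept.
 */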
static void amdgpu_deadlock_helper(unsigned ip_type)
{
	amdgpu_context_handle context_handle;
	amdgpu_bo_handle ib_result_handle;
	void *ib_result_cpu;
	uint64_t ib_result_mc_address;
	struct amdgpu_cs_request ibs_request;
	struct amdgpu_cs_ib_info ib_info;
	struct amdgpu_cs_fence fence_status;
	uint32_t expired;
	int i, r;
	amdgpu_bo_list_handle bo_list;
	amdgpu_va_handle va_handle;

	r = pthread_create(&stress_thread, NULL, write_mem_address, NULL);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_alloc_and_map_raw(device_handle, 4096, 4096,
			AMDGPU_GEM_DOMAIN_GTT, 0, use_uc_mtype ? AMDGPU_VM_MTYPE_UC : 0,
			&ib_result_handle, &ib_result_cpu,
			&ib_result_mc_address, &va_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
			       &bo_list);
	CU_ASSERT_EQUAL(r, 0);

	ptr = ib_result_cpu;

	ptr[0] = PACKET3(PACKET3_WAIT_REG_MEM, 5);
	ptr[1] = (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
		  WAIT_REG_MEM_FUNCTION(4) | /* != */
		  WAIT_REG_MEM_ENGINE(0));   /* me */
	ptr[2] = (ib_result_mc_address + 256*4) & 0xfffffffc;
	ptr[3] = ((ib_result_mc_address + 256*4) >> 32) & 0xffffffff;
	ptr[4] = 0x00000000; /* reference value */
	ptr[5] = 0xffffffff; /* and mask */
	ptr[6] = 0x00000004; /* poll interval */

	/* pad the rest of the IB with type-3 NOP packets */
	for (i = 7; i < 16; ++i)
		ptr[i] = 0xffff1000;

	ptr[256] = 0x0; /* the memory we wait on to change */

	memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
	ib_info.ib_mc_address = ib_result_mc_address;
	ib_info.size = 16;

	memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
	ibs_request.ip_type = ip_type;
	ibs_request.ring = 0;
	ibs_request.number_of_ibs = 1;
	ibs_request.ibs = &ib_info;
	ibs_request.resources = bo_list;
	ibs_request.fence_info.handle = NULL;

	for (i = 0; i < 200; i++) {
		r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
		CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);
	}

	memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
	fence_status.context = context_handle;
	fence_status.ip_type = ip_type;
	fence_status.ip_instance = 0;
	fence_status.ring = 0;
	fence_status.fence = ibs_request.seq_no;

	r = amdgpu_cs_query_fence_status(&fence_status,
					 AMDGPU_TIMEOUT_INFINITE, 0, &expired);
	CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);

	pthread_join(stress_thread, NULL);

	r = amdgpu_bo_list_destroy(bo_list);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
				     ib_result_mc_address, 4096);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_cs_ctx_free(context_handle);
	CU_ASSERT_EQUAL(r, 0);
}

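/*
 * Same scenario as amdgpu_deadlock_helper(), but built from the SDMA
 * POLL_REGMEM packet and repeated on every available SDMA ring.
 */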
static void amdgpu_deadlock_sdma(void)
{
	amdgpu_context_handle context_handle;
	amdgpu_bo_handle ib_result_handle;
	void *ib_result_cpu;
	uint64_t ib_result_mc_address;
	struct amdgpu_cs_request ibs_request;
	struct amdgpu_cs_ib_info ib_info;
	struct amdgpu_cs_fence fence_status;
	uint32_t expired;
	int i, r;
	amdgpu_bo_list_handle bo_list;
	amdgpu_va_handle va_handle;
	struct drm_amdgpu_info_hw_ip info;
	uint32_t ring_id;

	r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_DMA, 0, &info);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
	CU_ASSERT_EQUAL(r, 0);

	for (ring_id = 0; (1 << ring_id) & info.available_rings; ring_id++) {
		r = pthread_create(&stress_thread, NULL, write_mem_address, NULL);
		CU_ASSERT_EQUAL(r, 0);

		r = amdgpu_bo_alloc_and_map_raw(device_handle, 4096, 4096,
				AMDGPU_GEM_DOMAIN_GTT, 0, use_uc_mtype ? AMDGPU_VM_MTYPE_UC : 0,
				&ib_result_handle, &ib_result_cpu,
				&ib_result_mc_address, &va_handle);
		CU_ASSERT_EQUAL(r, 0);

		r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
				       &bo_list);
		CU_ASSERT_EQUAL(r, 0);

		ptr = ib_result_cpu;
		i = 0;

		ptr[i++] = SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			   (0 << 26) | /* WAIT_REG_MEM */
			   (4 << 28) | /* != */
			   (1u << 31); /* memory */
		ptr[i++] = (ib_result_mc_address + 256*4) & 0xfffffffc;
		ptr[i++] = ((ib_result_mc_address + 256*4) >> 32) & 0xffffffff;
		ptr[i++] = 0x00000000; /* reference value */
		ptr[i++] = 0xffffffff; /* and mask */
		ptr[i++] = 4 | /* poll interval */
			   (0xfff << 16); /* retry count */

		for (; i < 16; i++)
			ptr[i] = 0;

		ptr[256] = 0x0; /* the memory we wait on to change */

		memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
		ib_info.ib_mc_address = ib_result_mc_address;
		ib_info.size = 16;

		memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
		ibs_request.ip_type = AMDGPU_HW_IP_DMA;
		ibs_request.ring = ring_id;
		ibs_request.number_of_ibs = 1;
		ibs_request.ibs = &ib_info;
		ibs_request.resources = bo_list;
		ibs_request.fence_info.handle = NULL;

		for (i = 0; i < 200; i++) {
			r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
			CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);
		}

		memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
		fence_status.context = context_handle;
		fence_status.ip_type = AMDGPU_HW_IP_DMA;
		fence_status.ip_instance = 0;
		fence_status.ring = ring_id;
		fence_status.fence = ibs_request.seq_no;

		r = amdgpu_cs_query_fence_status(&fence_status,
						 AMDGPU_TIMEOUT_INFINITE, 0, &expired);
		CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);

		pthread_join(stress_thread, NULL);

		r = amdgpu_bo_list_destroy(bo_list);
		CU_ASSERT_EQUAL(r, 0);

		r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
					     ib_result_mc_address, 4096);
		CU_ASSERT_EQUAL(r, 0);
	}
	r = amdgpu_cs_ctx_free(context_handle);
	CU_ASSERT_EQUAL(r, 0);
}

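/*
 * Submit a single WRITE_DATA packet aimed at either a privileged register
 * (reg_access != 0) or an unmapped GPU address, expecting the kernel to
 * reject the write or cancel the submission instead of hanging the ring.
 */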
static void bad_access_helper(int reg_access)
{
	amdgpu_context_handle context_handle;
	amdgpu_bo_handle ib_result_handle;
	void *ib_result_cpu;
	uint64_t ib_result_mc_address;
	struct amdgpu_cs_request ibs_request;
	struct amdgpu_cs_ib_info ib_info;
	struct amdgpu_cs_fence fence_status;
	uint32_t expired;
	int i, r;
	amdgpu_bo_list_handle bo_list;
	amdgpu_va_handle va_handle;

	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_alloc_and_map_raw(device_handle, 4096, 4096,
			AMDGPU_GEM_DOMAIN_GTT, 0, 0,
			&ib_result_handle, &ib_result_cpu,
			&ib_result_mc_address, &va_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
			       &bo_list);
	CU_ASSERT_EQUAL(r, 0);

	ptr = ib_result_cpu;
	i = 0;

	ptr[i++] = PACKET3(PACKET3_WRITE_DATA, 3);
	ptr[i++] = (reg_access ? WRITE_DATA_DST_SEL(0) : WRITE_DATA_DST_SEL(5)) | WR_CONFIRM;
	ptr[i++] = reg_access ? mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR : 0xdeadbee0;
	ptr[i++] = 0;
	ptr[i++] = 0xdeadbeef;

	/* pad the rest of the IB with type-3 NOP packets */
	for (; i < 16; ++i)
		ptr[i] = 0xffff1000;

	memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
	ib_info.ib_mc_address = ib_result_mc_address;
	ib_info.size = 16;

	memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
	ibs_request.ip_type = AMDGPU_HW_IP_GFX;
	ibs_request.ring = 0;
	ibs_request.number_of_ibs = 1;
	ibs_request.ibs = &ib_info;
	ibs_request.resources = bo_list;
	ibs_request.fence_info.handle = NULL;

	r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
	CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);

	memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
	fence_status.context = context_handle;
	fence_status.ip_type = AMDGPU_HW_IP_GFX;
	fence_status.ip_instance = 0;
	fence_status.ring = 0;
	fence_status.fence = ibs_request.seq_no;

	r = amdgpu_cs_query_fence_status(&fence_status,
					 AMDGPU_TIMEOUT_INFINITE, 0, &expired);
	CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);

	r = amdgpu_bo_list_destroy(bo_list);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
				     ib_result_mc_address, 4096);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_cs_ctx_free(context_handle);
	CU_ASSERT_EQUAL(r, 0);
}

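/*
 * Wrappers selecting the illegal target: the VM_CONTEXT0 page table base
 * register, or the unmapped address 0xdeadbee0.
 */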
static void amdgpu_illegal_reg_access(void)
{
	bad_access_helper(1);
}

static void amdgpu_illegal_mem_access(void)
{
	bad_access_helper(0);
}

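/*
 * Bad-dispatch tests: the shared amdgpu_dispatch_hang_helper() and
 * amdgpu_dispatch_hang_slow_helper() routines from the test framework
 * submit dispatches that are expected to hang the ring, then verify it
 * recovers after the lockup-triggered reset.
 */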
static void amdgpu_dispatch_hang_gfx(void)
{
	amdgpu_dispatch_hang_helper(device_handle, AMDGPU_HW_IP_GFX);
}

static void amdgpu_dispatch_hang_compute(void)
{
	amdgpu_dispatch_hang_helper(device_handle, AMDGPU_HW_IP_COMPUTE);
}

static void amdgpu_dispatch_hang_slow_gfx(void)
{
	amdgpu_dispatch_hang_slow_helper(device_handle, AMDGPU_HW_IP_GFX);
}

static void amdgpu_dispatch_hang_slow_compute(void)
{
	amdgpu_dispatch_hang_slow_helper(device_handle, AMDGPU_HW_IP_COMPUTE);
}

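/*
 * Bad-draw test: on every gfx ring, interleave a known-good memcpy draw,
 * a hanging draw (the middle call passes 1 to request the hang variant),
 * and another good draw, so the final draw proves the ring came back
 * after the reset.
 */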
static void amdgpu_draw_hang_gfx(void)
{
	int r;
	struct drm_amdgpu_info_hw_ip info;
	uint32_t ring_id, version;

	r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_GFX, 0, &info);
	CU_ASSERT_EQUAL(r, 0);
	if (!info.available_rings) {
		printf("SKIP ... as there's no graphics ring\n");
		return;
	}

	version = info.hw_ip_version_major;
	if (version != 9 && version != 10) {
		printf("SKIP ... unsupported gfx version %d\n", version);
		return;
	}

	for (ring_id = 0; (1 << ring_id) & info.available_rings; ring_id++) {
		amdgpu_memcpy_draw_test(device_handle, ring_id, version, 0);
		amdgpu_memcpy_draw_test(device_handle, ring_id, version, 1);
		amdgpu_memcpy_draw_test(device_handle, ring_id, version, 0);
	}
}

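/*
 * Slow bad-draw variant: the middle submission uses the dedicated
 * hang-slow helper rather than the hang flag.
 */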
static void amdgpu_draw_hang_slow_gfx(void)
{
	struct drm_amdgpu_info_hw_ip info;
	uint32_t ring_id, version;
	int r;

	r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_GFX, 0, &info);
	CU_ASSERT_EQUAL(r, 0);

	version = info.hw_ip_version_major;
	if (version != 9 && version != 10) {
		printf("SKIP ... unsupported gfx version %d\n", version);
		return;
	}

	for (ring_id = 0; (1 << ring_id) & info.available_rings; ring_id++) {
		amdgpu_memcpy_draw_test(device_handle, ring_id, version, 0);
		amdgpu_memcpy_draw_hang_slow_test(device_handle, ring_id, version);
		amdgpu_memcpy_draw_test(device_handle, ring_id, version, 0);
	}
}