1 /*
2 * Copyright © 2022 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Capture the hanging application with INTEL_DEBUG=capture-all
24 *
25 * Turn the error state into a replay file with :
26 * $ intel_error2hangdump error_state
27 *
28 * Replay with :
29 * $ intel_hang_replay -d error_state.dmp
30 */
31
32 #include <fcntl.h>
33 #include <getopt.h>
34 #include <inttypes.h>
35 #include <stdbool.h>
36 #include <stdio.h>
37 #include <stdlib.h>
38 #include <sys/mman.h>
39 #include <sys/stat.h>
40 #include <sys/types.h>
41 #include <unistd.h>
42
43 #include <xf86drm.h>
44
45 #include "common/intel_gem.h"
46 #include "common/i915/intel_gem.h"
47 #include "common/intel_hang_dump.h"
48 #include "dev/intel_device_info.h"
49
50 #include "drm-uapi/i915_drm.h"
51
52 #include "util/u_dynarray.h"
53 #include "util/u_math.h"
54
55 #include "intel_tools.h"
56
57 static uint32_t
gem_create(int drm_fd,uint64_t size)58 gem_create(int drm_fd, uint64_t size)
59 {
60 struct drm_i915_gem_create gem_create = {
61 .size = size,
62 };
63
64 int ret = intel_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
65 if (ret != 0) {
66 /* FIXME: What do we do if this fails? */
67 return 0;
68 }
69
70 return gem_create.handle;
71 }
72
73 static uint32_t
gem_context_create(int drm_fd)74 gem_context_create(int drm_fd)
75 {
76 /* TODO: add additional information in the intel_hang_dump_block_exec &
77 * intel_hang_dump_block_hw_image structures to specify the engine and use
78 * the correct engine here.
79 */
80 I915_DEFINE_CONTEXT_PARAM_ENGINES(engines_param, 1) = { };
81 struct drm_i915_gem_context_create_ext_setparam set_engines = {
82 .param = {
83 .param = I915_CONTEXT_PARAM_ENGINES,
84 .value = (uintptr_t)&engines_param,
85 .size = sizeof(engines_param),
86 }
87 };
88 struct drm_i915_gem_context_create_ext_setparam recoverable_param = {
89 .param = {
90 .param = I915_CONTEXT_PARAM_RECOVERABLE,
91 .value = 0,
92 },
93 };
94 struct drm_i915_gem_context_create_ext create = {
95 .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
96 };
97
98 intel_i915_gem_add_ext(&create.extensions,
99 I915_CONTEXT_CREATE_EXT_SETPARAM,
100 &set_engines.base);
101 intel_i915_gem_add_ext(&create.extensions,
102 I915_CONTEXT_CREATE_EXT_SETPARAM,
103 &recoverable_param.base);
104
105 if (intel_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create) == -1)
106 return false;
107
108 return create.ctx_id;
109 }
110
111 static bool
gem_context_set_hw_image(int drm_fd,uint32_t ctx_id,const void * hw_img_data,uint32_t img_size)112 gem_context_set_hw_image(int drm_fd, uint32_t ctx_id,
113 const void *hw_img_data, uint32_t img_size)
114 {
115 /* TODO: add additional information in the intel_hang_dump_block_exec &
116 * intel_hang_dump_block_hw_image structures to specify the engine and use
117 * the correct engine here.
118 */
119 struct i915_gem_context_param_context_image img_param = {
120 .engine = {
121 .engine_class = 0,
122 .engine_instance = 0,
123 },
124 .flags = I915_CONTEXT_IMAGE_FLAG_ENGINE_INDEX,
125 .size = img_size,
126 .image = (uint64_t)(uintptr_t)hw_img_data,
127 };
128 struct drm_i915_gem_context_param param = {
129 .ctx_id = ctx_id,
130 .param = I915_CONTEXT_PARAM_CONTEXT_IMAGE,
131 };
132 uint64_t val = 0;
133 int ret;
134
135 param.ctx_id = ctx_id;
136 param.param = I915_CONTEXT_PARAM_RECOVERABLE;
137 param.value = (uint64_t)(uintptr_t)&val;
138
139 /* Force i915 to convert the "proto" context to be a "live" context, since
140 * the I915_CONTEXT_PARAM_CONTEXT_IMAGE parameter cannot be set on a "proto"
141 * context. See kernel docs for i915_gem_proto_context.
142 */
143 ret = intel_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, ¶m);
144 if (ret)
145 return false;
146
147 param.param = I915_CONTEXT_PARAM_CONTEXT_IMAGE;
148 param.size = sizeof(img_param);
149 param.value = (uint64_t)(uintptr_t)&img_param;
150
151 return intel_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, ¶m) == 0;
152 }
153
154 static void*
gem_mmap_offset(int drm_fd,uint32_t gem_handle,uint64_t offset,uint64_t size,uint32_t flags)155 gem_mmap_offset(int drm_fd,
156 uint32_t gem_handle,
157 uint64_t offset,
158 uint64_t size,
159 uint32_t flags)
160 {
161 struct drm_i915_gem_mmap_offset gem_mmap = {
162 .handle = gem_handle,
163 .flags = I915_MMAP_OFFSET_WB,
164 };
165 assert(offset == 0);
166
167 /* Get the fake offset back */
168 int ret = intel_ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &gem_mmap);
169 if (ret != 0 && gem_mmap.flags == I915_MMAP_OFFSET_FIXED) {
170 gem_mmap.flags =
171 (flags & I915_MMAP_WC) ? I915_MMAP_OFFSET_WC : I915_MMAP_OFFSET_WB,
172 ret = intel_ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &gem_mmap);
173 }
174
175 if (ret != 0)
176 return MAP_FAILED;
177
178 /* And map it */
179 void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
180 drm_fd, gem_mmap.offset);
181 return map;
182 }
183
static void
write_malloc_data(void *out_data,
                  int file_fd,
                  size_t size)
{
   /* Fill out_data with exactly `size` bytes read from file_fd starting at
    * the current file offset. Aborts via assert() on a short read.
    */
   char *dst = out_data;
   size_t done = 0;

   while (done < size) {
      ssize_t n = read(file_fd, dst + done, size - done);
      if (n <= 0)
         break;
      done += (size_t)n;
   }

   assert(done == size);
}
197
198 static void
write_gem_bo_data(int drm_fd,uint32_t gem_handle,int file_fd,size_t size)199 write_gem_bo_data(int drm_fd,
200 uint32_t gem_handle,
201 int file_fd,
202 size_t size)
203 {
204 void *map = gem_mmap_offset(drm_fd, gem_handle, 0, size, I915_MMAP_OFFSET_WB);
205 assert(map != MAP_FAILED);
206
207 size_t total_read_len = 0;
208 ssize_t read_len;
209 while (total_read_len < size &&
210 (read_len = read(file_fd, map + total_read_len, size - total_read_len)) > 0) {
211 total_read_len += read_len;
212 }
213 munmap(map, size);
214
215 assert(total_read_len == size);
216 }
217
static void
skip_data(int file_fd, size_t size)
{
   /* Advance the file cursor past `size` bytes without reading them. */
   off_t to_skip = (off_t)size;
   lseek(file_fd, to_skip, SEEK_CUR);
}
223
224 static int
get_drm_device(struct intel_device_info * devinfo)225 get_drm_device(struct intel_device_info *devinfo)
226 {
227 drmDevicePtr devices[8];
228 int max_devices = drmGetDevices2(0, devices, 8);
229
230 int i, fd = -1;
231 for (i = 0; i < max_devices; i++) {
232 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
233 devices[i]->bustype == DRM_BUS_PCI &&
234 devices[i]->deviceinfo.pci->vendor_id == 0x8086) {
235 fd = open(devices[i]->nodes[DRM_NODE_RENDER], O_RDWR | O_CLOEXEC);
236 if (fd < 0)
237 continue;
238
239 if (!intel_get_device_info_from_fd(fd, devinfo, -1, -1) ||
240 devinfo->ver < 8) {
241 close(fd);
242 fd = -1;
243 continue;
244 }
245
246 /* Found a device! */
247 break;
248 }
249 }
250
251 return fd;
252 }
253
/* One buffer parsed from the hang dump file: either a data-carrying BO, a
 * data-less VMA reservation (MAP block), or a HW context image. */
struct gem_bo {
   off_t file_offset;   /* byte offset of the payload in the dump file; 0 for MAP blocks (no data) */
   uint32_t gem_handle; /* GEM handle, filled in at replay time by gem_create() */
   uint64_t offset;     /* GPU virtual address of the buffer */
   uint64_t size;       /* size in bytes */
   bool hw_img;         /* true for a HW context image (kept in host memory, not a BO) */
};
261
262 static int
compare_bos(const void * b1,const void * b2)263 compare_bos(const void *b1, const void *b2)
264 {
265 const struct gem_bo *gem_b1 = b1, *gem_b2 = b2;
266
267 return gem_b2->size > gem_b1->size;
268 }
269
static void
print_help(const char *filename, FILE *f)
{
   /* Print the command-line usage summary to stream f. */
   static const char *const option_lines[] = {
      " -d, --dump FILE hang file to replay\n",
      " -l, --list list content of hang file (no replay)\n",
      " -s, --shader ADDR print shader at ADDR\n",
      " -h, --help print this screen\n",
      " -a, --address ADDR Find BO containing ADDR\n",
   };

   fprintf(f, "%s: %s [options]...\n", filename, filename);
   for (size_t i = 0; i < sizeof(option_lines) / sizeof(option_lines[0]); i++)
      fputs(option_lines[i], f);
}
280
281 static int
execbuffer(int drm_fd,uint32_t context_id,struct util_dynarray * execbuffer_bos,struct gem_bo * exec_bo,uint64_t exec_offset)282 execbuffer(int drm_fd,
283 uint32_t context_id,
284 struct util_dynarray *execbuffer_bos,
285 struct gem_bo *exec_bo, uint64_t exec_offset)
286 {
287 struct drm_i915_gem_execbuffer2 execbuf = {
288 .buffers_ptr = (uintptr_t)(void *)util_dynarray_begin(execbuffer_bos),
289 .buffer_count = util_dynarray_num_elements(execbuffer_bos,
290 struct drm_i915_gem_exec_object2),
291 .batch_start_offset = exec_offset - exec_bo->offset,
292 .batch_len = exec_bo->size,
293 .flags = I915_EXEC_HANDLE_LUT,
294 .rsvd1 = context_id,
295 };
296
297 int ret = intel_ioctl(drm_fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, &execbuf);
298 if (ret == 0) {
299 struct drm_i915_gem_wait gem_wait = {
300 .bo_handle = exec_bo->gem_handle,
301 .timeout_ns = INT64_MAX,
302 };
303 ret = intel_ioctl(drm_fd, DRM_IOCTL_I915_GEM_WAIT, &gem_wait);
304 if (ret)
305 fprintf(stderr, "wait failed: %m\n");
306 } else {
307 fprintf(stderr, "execbuffer failed: %m\n");
308 }
309
310 return ret;
311 }
312
/* Entry point: parse command-line options, index the hang dump file, and
 * either list its contents (-l), disassemble shaders (-s), look up an
 * address (-a), or replay the captured workload on the first Intel GPU.
 */
int
main(int argc, char *argv[])
{
   bool help = false, list = false;
   const struct option aubinator_opts[] = {
      { "address", required_argument, NULL, 'a' },
      { "dump", required_argument, NULL, 'd' },
      { "shader", required_argument, NULL, 's' },
      { "list", no_argument, NULL, 'l' },
      { "help", no_argument, NULL, 'h' },
      { NULL, 0, NULL, 0 },
   };

   void *mem_ctx = ralloc_context(NULL);

   /* GPU addresses of shaders to disassemble (-s may be repeated). */
   struct util_dynarray shader_addresses;

   util_dynarray_init(&shader_addresses, mem_ctx);

   const char *file = NULL;
   uint64_t check_addr = -1; /* -a: address to locate; -1 means "not requested" */
   int c, i;
   while ((c = getopt_long(argc, argv, "a:d:hls:", aubinator_opts, &i)) != -1) {
      switch (c) {
      case 'a':
         check_addr = strtol(optarg, NULL, 0);
         break;
      case 'd':
         file = optarg;
         break;
      case 's': {
         uint64_t *addr = util_dynarray_grow(&shader_addresses, uint64_t, 1);
         *addr = strtol(optarg, NULL, 0);
         fprintf(stderr, "shader addr=0x%016"PRIx64"\n", *addr);
         break;
      }
      case 'h':
         help = true;
         break;
      case 'l':
         list = true;
         break;
      default:
         break;
      }
   }

   if (help) {
      print_help(argv[0], stderr);
      exit(EXIT_SUCCESS);
   }

   /* NOTE(review): if -d was omitted, `file` is NULL here and open(NULL,...)
    * is undefined behavior — consider an explicit usage error. */
   int file_fd = open(file, O_RDONLY);
   if (file_fd < 0)
      exit(EXIT_FAILURE);

   /* File size is needed later to bound the shader-disassembly mmap(). */
   struct stat file_stats;
   if (fstat(file_fd, &file_stats) != 0)
      exit(EXIT_FAILURE);

   struct intel_device_info devinfo;
   int drm_fd = get_drm_device(&devinfo);
   if (drm_fd < 0)
      exit(EXIT_FAILURE);

   /* All BO/map/hw-image records parsed from the dump file. */
   struct util_dynarray buffers;
   uint64_t total_vma = 0;

   util_dynarray_init(&buffers, mem_ctx);

   union intel_hang_dump_block_all block_header;
   /* offset == -1 marks "not seen yet" for both batch descriptors. */
   struct intel_hang_dump_block_exec init = {
      .offset = -1,
   }, exec = {
      .offset = -1,
   };

   /* First pass: walk the dump file block by block, recording buffer
    * metadata and the init/exec batch offsets. Payloads are skipped here
    * and loaded only in replay mode below. */
   while (read(file_fd, &block_header.base, sizeof(block_header.base)) ==
          sizeof(block_header.base)) {

      /* Total on-disk size of each block type's fixed header. */
      static const size_t block_size[] = {
         [INTEL_HANG_DUMP_BLOCK_TYPE_HEADER] = sizeof(struct intel_hang_dump_block_header),
         [INTEL_HANG_DUMP_BLOCK_TYPE_BO] = sizeof(struct intel_hang_dump_block_bo),
         [INTEL_HANG_DUMP_BLOCK_TYPE_MAP] = sizeof(struct intel_hang_dump_block_map),
         [INTEL_HANG_DUMP_BLOCK_TYPE_EXEC] = sizeof(struct intel_hang_dump_block_exec),
         [INTEL_HANG_DUMP_BLOCK_TYPE_HW_IMAGE] = sizeof(struct intel_hang_dump_block_hw_image),
      };

      assert(block_header.base.type < ARRAY_SIZE(block_size));

      /* Read the remainder of the block now that the type is known. */
      size_t remaining_size = block_size[block_header.base.type] - sizeof(block_header.base);
      ssize_t ret = read(file_fd, &block_header.base + 1, remaining_size);
      /* NOTE(review): has_hw_image is re-declared (and reset to false) on
       * every loop iteration, so the `!has_hw_image` test in the EXEC case
       * can never observe a HW image parsed in an earlier iteration —
       * looks like it was meant to live outside the loop; confirm against
       * the dump writer. */
      bool has_hw_image = false;
      assert(ret == remaining_size);

      switch (block_header.base.type) {
      case INTEL_HANG_DUMP_BLOCK_TYPE_HEADER:
         /* Validate magic/version before trusting any later block. */
         assert(block_header.header.magic == INTEL_HANG_DUMP_MAGIC);
         assert(block_header.header.version == INTEL_HANG_DUMP_VERSION);
         break;

      case INTEL_HANG_DUMP_BLOCK_TYPE_BO: {
         struct gem_bo *bo = util_dynarray_grow(&buffers, struct gem_bo, 1);
         *bo = (struct gem_bo) {
            /* Payload immediately follows the header: remember where. */
            .file_offset = lseek(file_fd, 0, SEEK_CUR),
            .offset = block_header.bo.offset,
            .size = block_header.bo.size,
         };
         total_vma += bo->size;
         skip_data(file_fd, bo->size);
         if (list) {
            fprintf(stderr, "buffer: offset=0x%016"PRIx64" size=0x%016"PRIx64" name=%s\n",
                    bo->offset, bo->size, block_header.bo.name);
         }
         break;
      }

      case INTEL_HANG_DUMP_BLOCK_TYPE_HW_IMAGE: {
         /* HW context image: recorded like a BO but flagged so replay
          * routes it to the kernel via a context parameter. */
         struct gem_bo *bo = util_dynarray_grow(&buffers, struct gem_bo, 1);
         *bo = (struct gem_bo) {
            .file_offset = lseek(file_fd, 0, SEEK_CUR),
            .offset = 0,
            .size = block_header.hw_img.size,
            .hw_img = true,
         };
         total_vma += bo->size;
         skip_data(file_fd, bo->size);
         if (list) {
            fprintf(stderr, "buffer: offset=0x%016"PRIx64" size=0x%016"PRIx64" name=hw_img\n",
                    bo->offset, bo->size);
         }
         has_hw_image = true;
         break;
      }

      case INTEL_HANG_DUMP_BLOCK_TYPE_MAP: {
         /* VMA reservation with no backing data (file_offset == 0). */
         struct gem_bo *bo = util_dynarray_grow(&buffers, struct gem_bo, 1);
         *bo = (struct gem_bo) {
            .file_offset = 0,
            .offset = block_header.map.offset,
            .size = block_header.map.size,
         };
         total_vma += bo->size;
         if (list) {
            fprintf(stderr, "map : offset=0x%016"PRIx64" size=0x%016"PRIx64" name=%s\n",
                    bo->offset, bo->size, block_header.map.name);
         }
         break;
      }

      case INTEL_HANG_DUMP_BLOCK_TYPE_EXEC: {
         /* NOTE(review): `init.offset` is initialized to -1 above but
          * compared against 0 here, so this branch is dead and the first
          * EXEC block always lands in `exec` — verify whether the intended
          * test was `init.offset == -1`. */
         if (init.offset == 0 && !has_hw_image) {
            if (list)
               fprintf(stderr, "init : offset=0x%016"PRIx64"\n", block_header.exec.offset);
            init = block_header.exec;
         } else {
            if (list)
               fprintf(stderr, "exec : offset=0x%016"PRIx64"\n", block_header.exec.offset);
            exec = block_header.exec;
         }
         break;
      }

      default:
         unreachable("Invalid block type");
      }
   }

   fprintf(stderr, "total_vma: 0x%016"PRIx64"\n", total_vma);

   /* -a: report which buffer (if any) contains the requested address. */
   if (check_addr != -1) {
      struct gem_bo *check_bo = NULL;
      util_dynarray_foreach(&buffers, struct gem_bo, bo) {
         if (check_addr >= bo->offset && check_addr < (bo->offset + bo->size)) {
            check_bo = bo;
            break;
         }
      }

      if (check_bo) {
         fprintf(stderr, "address=0x%016"PRIx64" found in buffer 0x%016"PRIx64" size=0x%016"PRIx64"\n",
                 check_addr, check_bo->offset, check_bo->size);
      } else {
         fprintf(stderr, "address=0x%016"PRIx64" not found in buffer list\n", check_addr);
      }
   }

   /* -s: disassemble each requested shader address by mmap()ing the
    * containing BO's payload straight out of the dump file. */
   util_dynarray_foreach(&shader_addresses, uint64_t, addr) {
      bool found = false;
      util_dynarray_foreach(&buffers, struct gem_bo, bo) {
         if (*addr < bo->offset || *addr >= (bo->offset + bo->size))
            continue;
         /* MAP blocks carry no data — nothing to disassemble. */
         if (!bo->file_offset)
            break;

         /* mmap() requires a page-aligned file offset; map from the
          * aligned-down position through the end of the file. */
         uint64_t aligned_offset = ROUND_DOWN_TO(bo->file_offset, 4096);
         uint64_t remaining_length = file_stats.st_size - aligned_offset;
         void *map = mmap(NULL, remaining_length, PROT_READ, MAP_PRIVATE,
                          file_fd, aligned_offset);
         if (map == MAP_FAILED)
            break;

         found = true;
         /* NOTE(review): "file_offset=0%016" looks like a typo for
          * "file_offset=0x%016" — confirm before changing tool output. */
         fprintf(stderr, "shader at 0x%016"PRIx64" file_offset=0%016"PRIx64" addr_offset=%016"PRIx64":\n", *addr,
                 (bo->file_offset - aligned_offset), (*addr - bo->offset));
         intel_disassemble(&devinfo, map + (bo->file_offset - aligned_offset) + (*addr - bo->offset),
                           0, stderr);
         munmap(map, remaining_length);
      }

      if (!found)
         fprintf(stderr, "shader at 0x%016"PRIx64" not found\n", *addr);
   }

   /* Replay mode: only when neither -l nor -s was requested. */
   if (!list && util_dynarray_num_elements(&shader_addresses, uint64_t) == 0) {
      /* Sort buffers by size */
      qsort(util_dynarray_begin(&buffers),
            util_dynarray_num_elements(&buffers, struct gem_bo),
            sizeof(struct gem_bo),
            compare_bos);

      /* HW context image is kept in host memory and installed through a
       * context parameter, not submitted as a BO. */
      void *hw_img = NULL;
      uint32_t hw_img_size = 0;

      /* Allocate BOs populate them */
      uint64_t gem_allocated = 0;
      util_dynarray_foreach(&buffers, struct gem_bo, bo) {
         lseek(file_fd, bo->file_offset, SEEK_SET);
         if (bo->hw_img) {
            hw_img = malloc(bo->size);
            write_malloc_data(hw_img, file_fd, bo->size);
            hw_img_size = bo->size;
         } else {
            bo->gem_handle = gem_create(drm_fd, bo->size);
            write_gem_bo_data(drm_fd, bo->gem_handle, file_fd, bo->size);
         }

         gem_allocated += bo->size;
      }

      uint32_t ctx_id = gem_context_create(drm_fd);
      if (ctx_id == 0) {
         fprintf(stderr, "fail to create context: %s\n", strerror(errno));
         return EXIT_FAILURE;
      }

      if (hw_img != NULL) {
         if (!gem_context_set_hw_image(drm_fd, ctx_id, hw_img, hw_img_size)) {
            fprintf(stderr, "fail to set context hw img: %s\n", strerror(errno));
            return EXIT_FAILURE;
         }
      }

      struct util_dynarray execbuffer_bos;
      util_dynarray_init(&execbuffer_bos, mem_ctx);

      /* Build the exec-object list, holding back the init & batch BOs so
       * each can be appended as the last (batch) object of its submission. */
      struct gem_bo *init_bo = NULL, *batch_bo = NULL;
      util_dynarray_foreach(&buffers, struct gem_bo, bo) {
         if (bo->offset <= init.offset &&
             (bo->offset + bo->size) > init.offset) {
            init_bo = bo;
            continue;
         }

         if (bo->offset <= exec.offset &&
             (bo->offset + bo->size) > exec.offset) {
            batch_bo = bo;
            continue;
         }

         /* HW image is not a BO; skip it. */
         if (bo->hw_img)
            continue;

         struct drm_i915_gem_exec_object2 *execbuf_bo =
            util_dynarray_grow(&execbuffer_bos, struct drm_i915_gem_exec_object2, 1);
         *execbuf_bo = (struct drm_i915_gem_exec_object2) {
            .handle = bo->gem_handle,
            .relocation_count = 0,
            .relocs_ptr = 0,
            .flags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS |
                     EXEC_OBJECT_PINNED |
                     EXEC_OBJECT_CAPTURE,
            .offset = intel_canonical_address(bo->offset),
         };
      }

      assert(batch_bo != NULL);

      /* One extra slot, reused for the init BO first, then the batch BO. */
      struct drm_i915_gem_exec_object2 *execbuf_bo =
         util_dynarray_grow(&execbuffer_bos, struct drm_i915_gem_exec_object2, 1);

      int ret;

      if (init_bo) {
         fprintf(stderr, "init: 0x%016"PRIx64"\n", init_bo->offset);
         *execbuf_bo = (struct drm_i915_gem_exec_object2) {
            .handle = init_bo->gem_handle,
            .relocation_count = 0,
            .relocs_ptr = 0,
            .flags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS |
                     EXEC_OBJECT_PINNED |
                     EXEC_OBJECT_CAPTURE,
            .offset = intel_canonical_address(init_bo->offset),
         };
         ret = execbuffer(drm_fd, ctx_id, &execbuffer_bos, init_bo, init.offset);
         if (ret != 0) {
            fprintf(stderr, "initialization buffer failed to execute errno=%i\n", errno);
            exit(-1);
         }
      } else {
         fprintf(stderr, "no init BO\n");
      }

      if (batch_bo) {
         fprintf(stderr, "exec: 0x%016"PRIx64" aperture=%.2fMb\n", batch_bo->offset,
                 gem_allocated / 1024.0 / 1024.0);
         *execbuf_bo = (struct drm_i915_gem_exec_object2) {
            .handle = batch_bo->gem_handle,
            .relocation_count = 0,
            .relocs_ptr = 0,
            .flags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS |
                     EXEC_OBJECT_PINNED |
                     EXEC_OBJECT_CAPTURE,
            .offset = intel_canonical_address(batch_bo->offset),
         };
         ret = execbuffer(drm_fd, ctx_id, &execbuffer_bos, batch_bo, exec.offset);
         if (ret != 0) {
            fprintf(stderr, "replayed buffer failed to execute errno=%i\n", errno);
            exit(-1);
         } else {
            fprintf(stderr, "exec completed successfully\n");
         }
      } else {
         fprintf(stderr, "no exec BO\n");
      }
   }

   close(drm_fd);
   close(file_fd);

   ralloc_free(mem_ctx);

   return EXIT_SUCCESS;
}
657