1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <stdlib.h>
25 #include <stdio.h>
26 #include <string.h>
27 #include <stdint.h>
28 #include <stdbool.h>
29 #include <signal.h>
30 #include <stdarg.h>
31 #include <fcntl.h>
32 #include <sys/types.h>
33 #include <sys/sysmacros.h>
34 #include <sys/stat.h>
35 #include <sys/ioctl.h>
36 #include <unistd.h>
37 #include <errno.h>
38 #include <sys/mman.h>
39 #include <dlfcn.h>
40 #include "drm-uapi/i915_drm.h"
41 #include <inttypes.h>
42
43 #include "intel_aub.h"
44 #include "aub_write.h"
45
46 #include "c11/threads.h"
47 #include "dev/intel_debug.h"
48 #include "dev/intel_device_info.h"
49 #include "common/intel_gem.h"
50 #include "util/macros.h"
51 #include "util/u_math.h"
52
/* Entry points used until the real libc symbols have been resolved with
 * dlsym() in init(); each helper performs the lookup and then forwards
 * the call.
 */
static int close_init_helper(int fd);
static int ioctl_init_helper(int fd, unsigned long request, ...);
static int munmap_init_helper(void *addr, size_t length);

static int (*libc_close)(int fd) = close_init_helper;
static int (*libc_ioctl)(int fd, unsigned long request, ...) = ioctl_init_helper;
static int (*libc_munmap)(void *addr, size_t length) = munmap_init_helper;

/* File descriptor of the intercepted i915 DRM device (-1 until detected). */
static int drm_fd = -1;
/* Path and stream of the aub capture, from the "file" config key. */
static char *output_filename = NULL;
static FILE *output_file = NULL;
/* 0 = quiet, 1 = progress messages, 2 = also decode the aub to stdout. */
static int verbose = 0;
/* True when the "device"/"platform" config keys force a chipset; most
 * kernel queries are then answered locally instead of forwarded.
 */
static bool device_override = false;
/* Only dump buffers flagged EXEC_OBJECT_CAPTURE ("capture_only" key). */
static bool capture_only = false;
/* When >= 0, only capture submissions belonging to this frame ("frame"). */
static int64_t frame_id = -1;
/* Set once the requested frame has been fully written out. */
static bool capture_finished = false;

/* Dimensions of the flat fd x handle buffer-object lookup table. */
#define MAX_FD_COUNT 64
#define MAX_BO_COUNT 64 * 1024
72
/* Shadow state tracked for one GEM buffer object. */
struct bo {
   uint32_t size;
   /* GTT/PPGTT address assigned for the aub capture. */
   uint64_t offset;
   /* CPU mapping; bit 0 may carry USERPTR_FLAG (see GET_PTR()). */
   void *map;
   /* Whether the buffer has been positioned in the GTT already. */
   bool gtt_mapped : 1;
   /* Tracks userspace mmapping of the buffer */
   bool user_mapped : 1;
   /* Using the i915-gem mmapping ioctl & execbuffer ioctl, track whether a
    * buffer has been updated.
    */
   bool dirty : 1;
};

/* Flat table of MAX_FD_COUNT * MAX_BO_COUNT entries, indexed by
 * handle + fd * MAX_BO_COUNT (see get_bo()); allocated in maybe_init().
 */
static struct bo *bos;
88
/* Major device number of DRM character devices on Linux. */
#define DRM_MAJOR 226

/* We set bit 0 in the map pointer for userptr BOs so we know not to
 * munmap them on DRM_IOCTL_GEM_CLOSE.
 */
#define USERPTR_FLAG 1
#define IS_USERPTR(p) ((uintptr_t) (p) & USERPTR_FLAG)
/* Parenthesize the argument (the original expanded `p` bare, which breaks
 * for compound expressions) and mask with USERPTR_FLAG instead of a
 * magic 1 for consistency.
 */
#define GET_PTR(p) ((void *) ((uintptr_t) (p) & ~(uintptr_t) USERPTR_FLAG))

#define fail_if(cond, ...) _fail_if(cond, "intel_dump_gpu", __VA_ARGS__)
99
100 static struct bo *
get_bo(unsigned fd,uint32_t handle)101 get_bo(unsigned fd, uint32_t handle)
102 {
103 struct bo *bo;
104
105 fail_if(handle >= MAX_BO_COUNT, "bo handle too large\n");
106 fail_if(fd >= MAX_FD_COUNT, "bo fd too large\n");
107 bo = &bos[handle + fd * MAX_BO_COUNT];
108
109 return bo;
110 }
111
/* Chipset description, filled lazily by ensure_device_info(). */
static struct intel_device_info devinfo = {0};
/* PCI device id; 0 until known from the config override or the kernel. */
static int device = 0;
/* The aub stream being written; aub_file.file is NULL until initialized. */
static struct aub_file aub_file;
115
116 static void
ensure_device_info(int fd)117 ensure_device_info(int fd)
118 {
119 /* We can't do this at open time as we're not yet authenticated. */
120 if (device == 0) {
121 fail_if(!intel_get_device_info_from_fd(fd, &devinfo, -1, -1),
122 "failed to identify chipset.\n");
123 device = devinfo.pci_device_id;
124 } else if (devinfo.ver == 0) {
125 fail_if(!intel_get_device_info_from_pci_id(device, &devinfo),
126 "failed to identify chipset.\n");
127 }
128 }
129
130 static void *
relocate_bo(int fd,struct bo * bo,const struct drm_i915_gem_execbuffer2 * execbuffer2,const struct drm_i915_gem_exec_object2 * obj)131 relocate_bo(int fd, struct bo *bo, const struct drm_i915_gem_execbuffer2 *execbuffer2,
132 const struct drm_i915_gem_exec_object2 *obj)
133 {
134 const struct drm_i915_gem_exec_object2 *exec_objects =
135 (struct drm_i915_gem_exec_object2 *) (uintptr_t) execbuffer2->buffers_ptr;
136 const struct drm_i915_gem_relocation_entry *relocs =
137 (const struct drm_i915_gem_relocation_entry *) (uintptr_t) obj->relocs_ptr;
138 void *relocated;
139 int handle;
140
141 relocated = malloc(bo->size);
142 fail_if(relocated == NULL, "out of memory\n");
143 memcpy(relocated, GET_PTR(bo->map), bo->size);
144 for (size_t i = 0; i < obj->relocation_count; i++) {
145 fail_if(relocs[i].offset >= bo->size, "reloc outside bo\n");
146
147 if (execbuffer2->flags & I915_EXEC_HANDLE_LUT)
148 handle = exec_objects[relocs[i].target_handle].handle;
149 else
150 handle = relocs[i].target_handle;
151
152 aub_write_reloc(&devinfo, ((char *)relocated) + relocs[i].offset,
153 get_bo(fd, handle)->offset + relocs[i].delta);
154 }
155
156 return relocated;
157 }
158
/* Forward an ioctl to libc, retrying while it fails with EINTR/EAGAIN. */
static int
gem_ioctl(int fd, unsigned long request, void *argp)
{
   int ret = libc_ioctl(fd, request, argp);

   while (ret == -1 && (errno == EINTR || errno == EAGAIN))
      ret = libc_ioctl(fd, request, argp);

   return ret;
}
170
171 static void *
gem_mmap(int fd,uint32_t handle,uint64_t offset,uint64_t size)172 gem_mmap(int fd, uint32_t handle, uint64_t offset, uint64_t size)
173 {
174 struct drm_i915_gem_mmap mmap = {
175 .handle = handle,
176 .offset = offset,
177 .size = size
178 };
179
180 if (gem_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap) == -1)
181 return MAP_FAILED;
182
183 return (void *)(uintptr_t) mmap.addr_ptr;
184 }
185
186 static enum intel_engine_class
engine_class_from_ring_flag(uint32_t ring_flag)187 engine_class_from_ring_flag(uint32_t ring_flag)
188 {
189 switch (ring_flag) {
190 case I915_EXEC_DEFAULT:
191 case I915_EXEC_RENDER:
192 return INTEL_ENGINE_CLASS_RENDER;
193 case I915_EXEC_BSD:
194 return INTEL_ENGINE_CLASS_VIDEO;
195 case I915_EXEC_BLT:
196 return INTEL_ENGINE_CLASS_COPY;
197 case I915_EXEC_VEBOX:
198 return INTEL_ENGINE_CLASS_VIDEO_ENHANCE;
199 default:
200 return INTEL_ENGINE_CLASS_INVALID;
201 }
202 }
203
/* Core capture path: record one execbuffer2 submission into the aub file.
 *
 * Assigns capture addresses to the BOs, CPU-maps them, applies the
 * frame_id filter, maps the BOs into the PPGTT when using execlists,
 * writes dirty buffer contents (relocated when needed) and finally emits
 * the batch execution itself.
 */
static void
dump_execbuffer2(int fd, struct drm_i915_gem_execbuffer2 *execbuffer2)
{
   struct drm_i915_gem_exec_object2 *exec_objects =
      (struct drm_i915_gem_exec_object2 *) (uintptr_t) execbuffer2->buffers_ptr;
   uint32_t ring_flag = execbuffer2->flags & I915_EXEC_RING_MASK;
   uint32_t offset;
   struct drm_i915_gem_exec_object2 *obj;
   struct bo *bo, *batch_bo;
   int batch_index;
   void *data;

   ensure_device_info(fd);

   if (capture_finished)
      return;

   /* Lazily open the aub stream on the first submission (maybe_init()
    * may have run before the device id was known).
    */
   if (!aub_file.file) {
      aub_file_init(&aub_file, output_file,
                    verbose == 2 ? stdout : NULL,
                    device, program_invocation_short_name);
      aub_write_default_setup(&aub_file);

      if (verbose)
         printf("[running, output file %s, chipset id 0x%04x, gen %d]\n",
                output_filename, device, devinfo.ver);
   }

   if (aub_use_execlists(&aub_file))
      offset = 0x1000;
   else
      offset = aub_gtt_size(&aub_file);

   /* Pass 1: settle an address for every BO and make sure it is
    * CPU-mapped so its contents can be copied out.
    */
   for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
      obj = &exec_objects[i];
      bo = get_bo(fd, obj->handle);

      /* If bo->size == 0, this means they passed us an invalid
       * buffer. The kernel will reject it and so should we.
       */
      if (bo->size == 0) {
         if (verbose)
            printf("BO #%d is invalid!\n", obj->handle);
         return;
      }

      if (obj->flags & EXEC_OBJECT_PINNED) {
         /* Softpinned: honor the address userspace chose; if it moved,
          * the PPGTT mapping must be redone below.
          */
         if (bo->offset != obj->offset)
            bo->gtt_mapped = false;
         bo->offset = obj->offset;
      } else {
         /* Relocatable: hand out the next free page-aligned slot. */
         if (obj->alignment != 0)
            offset = align(offset, obj->alignment);
         bo->offset = offset;
         offset = align(offset + bo->size + 4095, 4096);
      }

      if (bo->map == NULL && bo->size > 0)
         bo->map = gem_mmap(fd, obj->handle, 0, bo->size);
      fail_if(bo->map == MAP_FAILED, "bo mmap failed\n");
   }

   /* Look for an intel_debug identifier block among the BOs to learn
    * which application frame this submission belongs to.
    */
   uint64_t current_frame_id = 0;
   if (frame_id >= 0) {
      for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
         obj = &exec_objects[i];
         bo = get_bo(fd, obj->handle);

         /* Check against frame_id requirements. */
         if (memcmp(bo->map, intel_debug_identifier(),
                    intel_debug_identifier_size()) == 0) {
            const struct intel_debug_block_frame *frame_desc =
               intel_debug_get_identifier_block(bo->map, bo->size,
                                                INTEL_DEBUG_BLOCK_TYPE_FRAME);

            current_frame_id = frame_desc ? frame_desc->frame_id : 0;
            break;
         }
      }
   }

   if (verbose)
      printf("Dumping execbuffer2 (frame_id=%"PRIu64", buffers=%u):\n",
             current_frame_id, execbuffer2->buffer_count);

   /* Check whether we can stop right now. */
   if (frame_id >= 0) {
      if (current_frame_id < frame_id)
         return;

      if (current_frame_id > frame_id) {
         /* The frame of interest is done: close the aub and ignore all
          * further submissions.
          */
         aub_file_finish(&aub_file);
         capture_finished = true;
         return;
      }
   }


   /* Map buffers into the PPGTT. */
   for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
      obj = &exec_objects[i];
      bo = get_bo(fd, obj->handle);

      if (verbose) {
         printf("BO #%d (%dB) @ 0x%" PRIx64 "\n",
                obj->handle, bo->size, bo->offset);
      }

      if (aub_use_execlists(&aub_file) && !bo->gtt_mapped) {
         aub_map_ppgtt(&aub_file, bo->offset, bo->size);
         bo->gtt_mapped = true;
      }
   }

   /* Write the buffer content into the Aub. */
   batch_index = (execbuffer2->flags & I915_EXEC_BATCH_FIRST) ? 0 :
      execbuffer2->buffer_count - 1;
   batch_bo = get_bo(fd, exec_objects[batch_index].handle);
   for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
      obj = &exec_objects[i];
      bo = get_bo(fd, obj->handle);

      if (obj->relocation_count > 0)
         data = relocate_bo(fd, bo, execbuffer2, obj);
      else
         data = bo->map;

      /* With capture_only, skip buffers not flagged for capture. */
      bool write = !capture_only || (obj->flags & EXEC_OBJECT_CAPTURE);

      if (write && bo->dirty) {
         if (bo == batch_bo) {
            aub_write_trace_block(&aub_file, AUB_TRACE_TYPE_BATCH,
                                  GET_PTR(data), bo->size, bo->offset);
         } else {
            aub_write_trace_block(&aub_file, AUB_TRACE_TYPE_NOTYPE,
                                  GET_PTR(data), bo->size, bo->offset);
         }

         /* A buffer still mapped by userspace can change again behind
          * our back, so it stays dirty.
          */
         if (!bo->user_mapped)
            bo->dirty = false;
      }

      if (data != bo->map)
         free(data);
   }

   uint32_t ctx_id = execbuffer2->rsvd1;

   aub_write_exec(&aub_file, ctx_id,
                  batch_bo->offset + execbuffer2->batch_start_offset,
                  offset, engine_class_from_ring_flag(ring_flag));

   /* Without a real kernel execution, signal any out-fences the caller
    * may wait on so it can make progress.
    */
   if (device_override &&
       (execbuffer2->flags & I915_EXEC_FENCE_ARRAY) != 0) {
      struct drm_i915_gem_exec_fence *fences =
         (void*)(uintptr_t)execbuffer2->cliprects_ptr;
      for (uint32_t i = 0; i < execbuffer2->num_cliprects; i++) {
         if ((fences[i].flags & I915_EXEC_FENCE_SIGNAL) != 0) {
            struct drm_syncobj_array arg = {
               .handles = (uintptr_t)&fences[i].handle,
               .count_handles = 1,
               .pad = 0,
            };
            libc_ioctl(fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &arg);
         }
      }
   }
}
372
373 static void
add_new_bo(unsigned fd,int handle,uint64_t size,void * map)374 add_new_bo(unsigned fd, int handle, uint64_t size, void *map)
375 {
376 struct bo *bo = &bos[handle + fd * MAX_BO_COUNT];
377
378 fail_if(handle >= MAX_BO_COUNT, "bo handle out of range\n");
379 fail_if(fd >= MAX_FD_COUNT, "bo fd out of range\n");
380 fail_if(size == 0, "bo size is invalid\n");
381
382 bo->size = size;
383 bo->map = map;
384 bo->user_mapped = false;
385 bo->gtt_mapped = false;
386 }
387
388 static void
remove_bo(int fd,int handle)389 remove_bo(int fd, int handle)
390 {
391 struct bo *bo = get_bo(fd, handle);
392
393 if (bo->map && !IS_USERPTR(bo->map))
394 munmap(bo->map, bo->size);
395 memset(bo, 0, sizeof(*bo));
396 }
397
398 __attribute__ ((visibility ("default"))) int
close(int fd)399 close(int fd)
400 {
401 if (fd == drm_fd)
402 drm_fd = -1;
403
404 return libc_close(fd);
405 }
406
407 static int
get_pci_id(int fd,int * pci_id)408 get_pci_id(int fd, int *pci_id)
409 {
410 if (device_override) {
411 *pci_id = device;
412 return 0;
413 }
414
415 return intel_gem_get_param(fd, I915_PARAM_CHIPSET_ID, pci_id) ? 0 : -1;
416 }
417
418 static void
maybe_init(int fd)419 maybe_init(int fd)
420 {
421 static bool initialized = false;
422 FILE *config;
423 char *key, *value;
424
425 if (initialized)
426 return;
427
428 initialized = true;
429
430 const char *config_path = getenv("INTEL_DUMP_GPU_CONFIG");
431 fail_if(config_path == NULL, "INTEL_DUMP_GPU_CONFIG is not set\n");
432
433 config = fopen(config_path, "r");
434 fail_if(config == NULL, "failed to open file %s\n", config_path);
435
436 while (fscanf(config, "%m[^=]=%m[^\n]\n", &key, &value) != EOF) {
437 if (!strcmp(key, "verbose")) {
438 if (!strcmp(value, "1")) {
439 verbose = 1;
440 } else if (!strcmp(value, "2")) {
441 verbose = 2;
442 }
443 } else if (!strcmp(key, "device")) {
444 fail_if(device != 0, "Device/Platform override specified multiple times.\n");
445 fail_if(sscanf(value, "%i", &device) != 1,
446 "failed to parse device id '%s'\n",
447 value);
448 device_override = true;
449 } else if (!strcmp(key, "platform")) {
450 fail_if(device != 0, "Device/Platform override specified multiple times.\n");
451 device = intel_device_name_to_pci_device_id(value);
452 fail_if(device == -1, "Unknown platform '%s'\n", value);
453 device_override = true;
454 } else if (!strcmp(key, "file")) {
455 free(output_filename);
456 if (output_file)
457 fclose(output_file);
458 output_filename = strdup(value);
459 output_file = fopen(output_filename, "w+");
460 fail_if(output_file == NULL,
461 "failed to open file '%s'\n",
462 output_filename);
463 } else if (!strcmp(key, "capture_only")) {
464 capture_only = atoi(value);
465 } else if (!strcmp(key, "frame")) {
466 frame_id = atol(value);
467 } else {
468 fprintf(stderr, "unknown option '%s'\n", key);
469 }
470
471 free(key);
472 free(value);
473 }
474 fclose(config);
475
476 bos = calloc(MAX_FD_COUNT * MAX_BO_COUNT, sizeof(bos[0]));
477 fail_if(bos == NULL, "out of memory\n");
478
479 ASSERTED int ret = get_pci_id(fd, &device);
480 assert(ret == 0);
481
482 aub_file_init(&aub_file, output_file,
483 verbose == 2 ? stdout : NULL,
484 device, program_invocation_short_name);
485 aub_write_default_setup(&aub_file);
486
487 if (verbose)
488 printf("[running, output file %s, chipset id 0x%04x, gen %d]\n",
489 output_filename, device, devinfo.ver);
490 }
491
/* Dispatch one ioctl seen on (or newly identified as) the DRM fd.
 * BO creation/mmap/close ioctls update the shadow table, execbuffer2 is
 * captured into the aub, and with a device override active several
 * query/wait ioctls are answered locally without reaching the kernel.
 */
static int
intercept_ioctl(int fd, unsigned long request, ...)
{
   va_list args;
   void *argp;
   int ret;
   struct stat buf;

   va_start(args, request);
   argp = va_arg(args, void *);
   va_end(args);

   /* Detect the DRM device: any DRM ioctl issued on a character device
    * with the DRM major number marks that fd as the one to capture.
    */
   if (_IOC_TYPE(request) == DRM_IOCTL_BASE &&
       drm_fd != fd && fstat(fd, &buf) == 0 &&
       (buf.st_mode & S_IFMT) == S_IFCHR && major(buf.st_rdev) == DRM_MAJOR) {
      drm_fd = fd;
      if (verbose)
         printf("[intercept drm ioctl on fd %d]\n", fd);
   }

   if (fd == drm_fd) {
      maybe_init(fd);

      switch (request) {
      case DRM_IOCTL_SYNCOBJ_WAIT:
      case DRM_IOCTL_I915_GEM_WAIT: {
         /* Under an override nothing executes for real, so waits would
          * never complete; pretend they already did.
          */
         if (device_override)
            return 0;
         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GET_RESET_STATS: {
         if (device_override) {
            struct drm_i915_reset_stats *stats = argp;

            stats->reset_count = 0;
            stats->batch_active = 0;
            stats->batch_pending = 0;
            return 0;
         }
         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GETPARAM: {
         struct drm_i915_getparam *getparam = argp;

         ensure_device_info(fd);

         /* The chipset id is always answered here so the override (when
          * set) is what the application sees.
          */
         if (getparam->param == I915_PARAM_CHIPSET_ID)
            return get_pci_id(fd, getparam->value);

         if (device_override) {
            switch (getparam->param) {
            case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
               *getparam->value = devinfo.timestamp_frequency;
               return 0;

            case I915_PARAM_HAS_WAIT_TIMEOUT:
            case I915_PARAM_HAS_EXECBUF2:
            case I915_PARAM_MMAP_VERSION:
            case I915_PARAM_HAS_EXEC_ASYNC:
            case I915_PARAM_HAS_EXEC_FENCE:
            case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
               *getparam->value = 1;
               return 0;

            case I915_PARAM_HAS_EXEC_SOFTPIN:
               /* Softpin exists on gen8+ except Cherryview. */
               *getparam->value = devinfo.ver >= 8 && devinfo.platform != INTEL_PLATFORM_CHV;
               return 0;

            default:
               return -1;
            }
         }

         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM: {
         struct drm_i915_gem_context_param *getparam = argp;

         ensure_device_info(fd);

         if (device_override) {
            switch (getparam->param) {
            case I915_CONTEXT_PARAM_GTT_SIZE:
               /* Address-space size: 36b on EHL, 48b on gen8+ (except
                * CHV), 31b otherwise.
                */
               if (devinfo.platform == INTEL_PLATFORM_EHL)
                  getparam->value = 1ull << 36;
               else if (devinfo.ver >= 8 && devinfo.platform != INTEL_PLATFORM_CHV)
                  getparam->value = 1ull << 48;
               else
                  getparam->value = 1ull << 31;
               return 0;

            default:
               return -1;
            }
         }

         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GEM_EXECBUFFER: {
         /* Legacy execbuffer is not captured; warn once and forward. */
         static bool once;
         if (!once) {
            fprintf(stderr,
                    "application uses DRM_IOCTL_I915_GEM_EXECBUFFER, not handled\n");
            once = true;
         }
         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GEM_EXECBUFFER2:
      case DRM_IOCTL_I915_GEM_EXECBUFFER2_WR: {
         dump_execbuffer2(fd, argp);
         if (device_override)
            return 0;

         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GEM_CONTEXT_CREATE: {
         uint32_t *ctx_id = NULL;
         struct drm_i915_gem_context_create *create = argp;
         ret = 0;
         if (!device_override) {
            ret = libc_ioctl(fd, request, argp);
            ctx_id = &create->ctx_id;
         }

         /* Mirror the context in the aub; with an override (ctx_id ==
          * NULL) the aub writer also picks the id handed back to the
          * application.
          */
         if (ret == 0)
            create->ctx_id = aub_write_context_create(&aub_file, ctx_id);

         return ret;
      }

      case DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT: {
         uint32_t *ctx_id = NULL;
         struct drm_i915_gem_context_create_ext *create = argp;
         ret = 0;
         if (!device_override) {
            ret = libc_ioctl(fd, request, argp);
            ctx_id = &create->ctx_id;
         }

         if (ret == 0)
            create->ctx_id = aub_write_context_create(&aub_file, ctx_id);

         return ret;
      }

      case DRM_IOCTL_I915_GEM_CREATE: {
         struct drm_i915_gem_create *create = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0)
            add_new_bo(fd, create->handle, create->size, NULL);

         return ret;
      }

      case DRM_IOCTL_I915_GEM_CREATE_EXT: {
         struct drm_i915_gem_create_ext *create = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0)
            add_new_bo(fd, create->handle, create->size, NULL);

         return ret;
      }

      case DRM_IOCTL_I915_GEM_USERPTR: {
         struct drm_i915_gem_userptr *userptr = argp;

         ret = libc_ioctl(fd, request, argp);
         /* Tag the pointer so GEM_CLOSE knows not to munmap it (see
          * USERPTR_FLAG).
          */
         if (ret == 0)
            add_new_bo(fd, userptr->handle, userptr->user_size,
                       (void *) (uintptr_t) (userptr->user_ptr | USERPTR_FLAG));

         return ret;
      }

      case DRM_IOCTL_GEM_CLOSE: {
         struct drm_gem_close *close = argp;

         remove_bo(fd, close->handle);

         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_GEM_OPEN: {
         struct drm_gem_open *open = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0)
            add_new_bo(fd, open->handle, open->size, NULL);

         return ret;
      }

      case DRM_IOCTL_PRIME_FD_TO_HANDLE: {
         struct drm_prime_handle *prime = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0) {
            off_t size;

            /* The size of the imported dma-buf fd is the BO size. */
            size = lseek(prime->fd, 0, SEEK_END);
            fail_if(size == -1, "failed to get prime bo size\n");
            add_new_bo(fd, prime->handle, size, NULL);

         }

         return ret;
      }

      case DRM_IOCTL_I915_GEM_MMAP: {
         ret = libc_ioctl(fd, request, argp);
         /* Userspace can now write the BO at any time: keep it dirty so
          * each submission re-captures its contents.
          */
         if (ret == 0) {
            struct drm_i915_gem_mmap *mmap = argp;
            struct bo *bo = get_bo(fd, mmap->handle);
            bo->user_mapped = true;
            bo->dirty = true;
         }
         return ret;
      }

      case DRM_IOCTL_I915_GEM_MMAP_OFFSET: {
         ret = libc_ioctl(fd, request, argp);
         if (ret == 0) {
            struct drm_i915_gem_mmap_offset *mmap = argp;
            struct bo *bo = get_bo(fd, mmap->handle);
            bo->user_mapped = true;
            bo->dirty = true;
         }
         return ret;
      }

      default:
         return libc_ioctl(fd, request, argp);
      }
   } else {
      return libc_ioctl(fd, request, argp);
   }
}
737
738 __attribute__ ((visibility ("default"))) int
ioctl(int fd,unsigned long request,...)739 ioctl(int fd, unsigned long request, ...)
740 {
741 static thread_local bool entered = false;
742 va_list args;
743 void *argp;
744 int ret;
745
746 va_start(args, request);
747 argp = va_arg(args, void *);
748 va_end(args);
749
750 /* Some of the functions called by intercept_ioctl call ioctls of their
751 * own. These need to go to the libc ioctl instead of being passed back to
752 * intercept_ioctl to avoid a stack overflow. */
753 if (entered) {
754 return libc_ioctl(fd, request, argp);
755 } else {
756 entered = true;
757 ret = intercept_ioctl(fd, request, argp);
758 entered = false;
759 return ret;
760 }
761 }
762
763 static void
init(void)764 init(void)
765 {
766 libc_close = dlsym(RTLD_NEXT, "close");
767 libc_ioctl = dlsym(RTLD_NEXT, "ioctl");
768 libc_munmap = dlsym(RTLD_NEXT, "munmap");
769 fail_if(libc_close == NULL || libc_ioctl == NULL,
770 "failed to get libc ioctl or close\n");
771 }
772
/* First close() before symbol resolution: run init(), then forward to
 * the freshly resolved libc close.
 */
static int
close_init_helper(int fd)
{
   init();

   return libc_close(fd);
}
779
/* First ioctl() before symbol resolution: run init(), then forward to
 * the freshly resolved libc ioctl.
 */
static int
ioctl_init_helper(int fd, unsigned long request, ...)
{
   void *argp;
   va_list ap;

   va_start(ap, request);
   argp = va_arg(ap, void *);
   va_end(ap);

   init();

   return libc_ioctl(fd, request, argp);
}
793
794 static int
munmap_init_helper(void * addr,size_t length)795 munmap_init_helper(void *addr, size_t length)
796 {
797 init();
798 for (uint32_t i = 0; i < MAX_FD_COUNT * MAX_BO_COUNT; i++) {
799 struct bo *bo = &bos[i];
800 if (bo->map == addr) {
801 bo->user_mapped = false;
802 break;
803 }
804 }
805 return libc_munmap(addr, length);
806 }
807
808 static void __attribute__ ((destructor))
fini(void)809 fini(void)
810 {
811 if (devinfo.ver != 0) {
812 free(output_filename);
813 if (!capture_finished)
814 aub_file_finish(&aub_file);
815 free(bos);
816 }
817 }
818