/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>	/* PRIx64, used by the verbose debug output */
#include <stdbool.h>
#include <signal.h>
#include <stdarg.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <errno.h>
#include <sys/mman.h>
#include <dlfcn.h>
#include <i915_drm.h>

#include "intel_aub.h"
#include "intel_chipset.h"

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))
#endif

#ifndef ALIGN
#define ALIGN(x, y) (((x) + (y)-1) & ~((y)-1))
#endif

#define min(a, b) ({			\
	typeof(a) _a = (a);		\
	typeof(b) _b = (b);		\
	_a < _b ? _a : _b;		\
})

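/*
 * MMIO register offsets for the three command streamers submissions are
 * emitted on: render (RCS, 0x02xxx), video (VCS0, 0x12xxx) and blitter
 * (BCS, 0x22xxx).  HWS_PGA holds the hardware status page address, GFX_MODE
 * carries the execlist enable bit, and the EXECLIST_* registers are used to
 * submit context descriptors and poll for completion.
 */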
#define HWS_PGA_RCSUNIT      0x02080
#define HWS_PGA_VCSUNIT0     0x12080
#define HWS_PGA_BCSUNIT      0x22080

#define GFX_MODE_RCSUNIT     0x0229c
#define GFX_MODE_VCSUNIT0    0x1229c
#define GFX_MODE_BCSUNIT     0x2229c

#define EXECLIST_SUBMITPORT_RCSUNIT   0x02230
#define EXECLIST_SUBMITPORT_VCSUNIT0  0x12230
#define EXECLIST_SUBMITPORT_BCSUNIT   0x22230

#define EXECLIST_STATUS_RCSUNIT       0x02234
#define EXECLIST_STATUS_VCSUNIT0      0x12234
#define EXECLIST_STATUS_BCSUNIT       0x22234

#define EXECLIST_SQ_CONTENTS0_RCSUNIT   0x02510
#define EXECLIST_SQ_CONTENTS0_VCSUNIT0  0x12510
#define EXECLIST_SQ_CONTENTS0_BCSUNIT   0x22510

#define EXECLIST_CONTROL_RCSUNIT      0x02550
#define EXECLIST_CONTROL_VCSUNIT0     0x12550
#define EXECLIST_CONTROL_BCSUNIT      0x22550

#define MEMORY_MAP_SIZE (64 /* MiB */ * 1024 * 1024)

#define PTE_SIZE 4
#define GEN8_PTE_SIZE 8

#define NUM_PT_ENTRIES (ALIGN(MEMORY_MAP_SIZE, 4096) / 4096)
#define PT_SIZE ALIGN(NUM_PT_ENTRIES * GEN8_PTE_SIZE, 4096)

#define RING_SIZE (1 * 4096)
#define PPHWSP_SIZE (1 * 4096)
#define GEN10_LR_CONTEXT_RENDER_SIZE (19 * 4096)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * 4096)

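/*
 * Fixed GGTT layout used by the execlist path (gen10+): for each engine a
 * ring buffer, a per-process HWSP and the logical ring context image are
 * placed back-to-back, starting at GGTT offset 0.  Buffers submitted by the
 * application are placed after STATIC_GGTT_MAP_END.
 */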
#define STATIC_GGTT_MAP_START 0

#define RENDER_RING_ADDR STATIC_GGTT_MAP_START
#define RENDER_CONTEXT_ADDR (RENDER_RING_ADDR + RING_SIZE)

#define BLITTER_RING_ADDR (RENDER_CONTEXT_ADDR + PPHWSP_SIZE + GEN10_LR_CONTEXT_RENDER_SIZE)
#define BLITTER_CONTEXT_ADDR (BLITTER_RING_ADDR + RING_SIZE)

#define VIDEO_RING_ADDR (BLITTER_CONTEXT_ADDR + PPHWSP_SIZE + GEN8_LR_CONTEXT_OTHER_SIZE)
#define VIDEO_CONTEXT_ADDR (VIDEO_RING_ADDR + RING_SIZE)

#define STATIC_GGTT_MAP_END (VIDEO_CONTEXT_ADDR + PPHWSP_SIZE + GEN8_LR_CONTEXT_OTHER_SIZE)
#define STATIC_GGTT_MAP_SIZE (STATIC_GGTT_MAP_END - STATIC_GGTT_MAP_START)

#define CONTEXT_FLAGS (0x229)	/* Normal Priority | L3-LLC Coherency |
				   Legacy Context with no 64 bit VA support | Valid */

#define RENDER_CONTEXT_DESCRIPTOR  ((uint64_t)1 << 32 | RENDER_CONTEXT_ADDR  | CONTEXT_FLAGS)
#define BLITTER_CONTEXT_DESCRIPTOR ((uint64_t)2 << 32 | BLITTER_CONTEXT_ADDR | CONTEXT_FLAGS)
#define VIDEO_CONTEXT_DESCRIPTOR   ((uint64_t)3 << 32 | VIDEO_CONTEXT_ADDR   | CONTEXT_FLAGS)

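/*
 * Initial logical ring context images.  Each image is a sequence of
 * MI_LOAD_REGISTER_IMM register/value pairs (context control, ring
 * head/tail, ring buffer start and control, batch buffer state, PDPs, ...)
 * terminated by MI_BATCH_BUFFER_END.  They are written verbatim into the
 * AUB stream right after each engine's PPHWSP.
 */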
static const uint32_t render_context_init[GEN10_LR_CONTEXT_RENDER_SIZE /
					  sizeof(uint32_t)] = {
	0 /* MI_NOOP */,
	0x1100101B /* MI_LOAD_REGISTER_IMM */,
	0x2244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
	0x2034 /* RING_HEAD */, 0,
	0x2030 /* RING_TAIL */, 0,
	0x2038 /* RING_BUFFER_START */, RENDER_RING_ADDR,
	0x203C /* RING_BUFFER_CONTROL */, (RING_SIZE - 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
	0x2168 /* BB_HEAD_U */, 0,
	0x2140 /* BB_HEAD_L */, 0,
	0x2110 /* BB_STATE */, 0,
	0x211C /* SECOND_BB_HEAD_U */, 0,
	0x2114 /* SECOND_BB_HEAD_L */, 0,
	0x2118 /* SECOND_BB_STATE */, 0,
	0x21C0 /* BB_PER_CTX_PTR */, 0,
	0x21C4 /* RCS_INDIRECT_CTX */, 0,
	0x21C8 /* RCS_INDIRECT_CTX_OFFSET */, 0,
	/* MI_NOOP */
	0, 0,

	0 /* MI_NOOP */,
	0x11001011 /* MI_LOAD_REGISTER_IMM */,
	0x23A8 /* CTX_TIMESTAMP */, 0,
	0x228C /* PDP3_UDW */, 0,
	0x2288 /* PDP3_LDW */, 0,
	0x2284 /* PDP2_UDW */, 0,
	0x2280 /* PDP2_LDW */, 0,
	0x227C /* PDP1_UDW */, 0,
	0x2278 /* PDP1_LDW */, 0,
	0x2274 /* PDP0_UDW */, 0,
	0x2270 /* PDP0_LDW */, 0,
	/* MI_NOOP */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

	0 /* MI_NOOP */,
	0x11000001 /* MI_LOAD_REGISTER_IMM */,
	0x20C8 /* R_PWR_CLK_STATE */, 0x7FFFFFFF,
	0x05000001 /* MI_BATCH_BUFFER_END */
};

static const uint32_t blitter_context_init[GEN8_LR_CONTEXT_OTHER_SIZE /
					   sizeof(uint32_t)] = {
	0 /* MI_NOOP */,
	0x11001015 /* MI_LOAD_REGISTER_IMM */,
	0x22244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
	0x22034 /* RING_HEAD */, 0,
	0x22030 /* RING_TAIL */, 0,
	0x22038 /* RING_BUFFER_START */, BLITTER_RING_ADDR,
	0x2203C /* RING_BUFFER_CONTROL */, (RING_SIZE - 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
	0x22168 /* BB_HEAD_U */, 0,
	0x22140 /* BB_HEAD_L */, 0,
	0x22110 /* BB_STATE */, 0,
	0x2211C /* SECOND_BB_HEAD_U */, 0,
	0x22114 /* SECOND_BB_HEAD_L */, 0,
	0x22118 /* SECOND_BB_STATE */, 0,
	/* MI_NOOP */
	0, 0, 0, 0, 0, 0, 0, 0,

	0 /* MI_NOOP */,
	0x11001011,
	0x223A8 /* CTX_TIMESTAMP */, 0,
	0x2228C /* PDP3_UDW */, 0,
	0x22288 /* PDP3_LDW */, 0,
	0x22284 /* PDP2_UDW */, 0,
	0x22280 /* PDP2_LDW */, 0,
	0x2227C /* PDP1_UDW */, 0,
	0x22278 /* PDP1_LDW */, 0,
	0x22274 /* PDP0_UDW */, 0,
	0x22270 /* PDP0_LDW */, 0,
	/* MI_NOOP */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

	0x05000001 /* MI_BATCH_BUFFER_END */
};

static const uint32_t video_context_init[GEN8_LR_CONTEXT_OTHER_SIZE /
					 sizeof(uint32_t)] = {
	0 /* MI_NOOP */,
	0x11001015 /* MI_LOAD_REGISTER_IMM */,
	0x1C244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
	0x1C034 /* RING_HEAD */, 0,
	0x1C030 /* RING_TAIL */, 0,
	0x1C038 /* RING_BUFFER_START */, VIDEO_RING_ADDR,
	0x1C03C /* RING_BUFFER_CONTROL */, (RING_SIZE - 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
	0x1C168 /* BB_HEAD_U */, 0,
	0x1C140 /* BB_HEAD_L */, 0,
	0x1C110 /* BB_STATE */, 0,
	0x1C11C /* SECOND_BB_HEAD_U */, 0,
	0x1C114 /* SECOND_BB_HEAD_L */, 0,
	0x1C118 /* SECOND_BB_STATE */, 0,
	/* MI_NOOP */
	0, 0, 0, 0, 0, 0, 0, 0,

	0 /* MI_NOOP */,
	0x11001011,
	0x1C3A8 /* CTX_TIMESTAMP */, 0,
	0x1C28C /* PDP3_UDW */, 0,
	0x1C288 /* PDP3_LDW */, 0,
	0x1C284 /* PDP2_UDW */, 0,
	0x1C280 /* PDP2_LDW */, 0,
	0x1C27C /* PDP1_UDW */, 0,
	0x1C278 /* PDP1_LDW */, 0,
	0x1C274 /* PDP0_UDW */, 0,
	0x1C270 /* PDP0_LDW */, 0,
	/* MI_NOOP */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

	0x05000001 /* MI_BATCH_BUFFER_END */
};

static int close_init_helper(int fd);
static int ioctl_init_helper(int fd, unsigned long request, ...);

static int (*libc_close)(int fd) = close_init_helper;
static int (*libc_ioctl)(int fd, unsigned long request, ...) = ioctl_init_helper;

static int drm_fd = -1;
static char *filename = NULL;
static FILE *files[2] = { NULL, NULL };
static int gen = 0;
static int verbose = 0;
static bool device_override;
static uint32_t device;
static int addr_bits = 0;

#define MAX_BO_COUNT 64 * 1024

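/*
 * Per-BO bookkeeping, indexed directly by GEM handle (see get_bo()).  `map`
 * is the CPU mapping of the object's contents; for userptr objects bit 0 of
 * the pointer is used as a tag (see USERPTR_FLAG below).
 */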
struct bo {
	uint32_t size;
	uint64_t offset;
	void *map;
};

static struct bo *bos;

#define DRM_MAJOR 226

#ifndef DRM_I915_GEM_USERPTR

#define DRM_I915_GEM_USERPTR 0x33
#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)

struct drm_i915_gem_userptr {
	__u64 user_ptr;
	__u64 user_size;
	__u32 flags;
#define I915_USERPTR_READ_ONLY 0x1
#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
	/**
	 * Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
};

#endif

/* We set bit 0 in the map pointer for userptr BOs so we know not to
 * munmap them on DRM_IOCTL_GEM_CLOSE.
 */
#define USERPTR_FLAG 1
#define IS_USERPTR(p) ((uintptr_t) (p) & USERPTR_FLAG)
#define GET_PTR(p) ( (void *) ((uintptr_t) p & ~(uintptr_t) 1) )

#ifndef I915_EXEC_BATCH_FIRST
#define I915_EXEC_BATCH_FIRST (1 << 18)
#endif

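/*
 * Report a fatal condition.  Note that this raises SIGTRAP rather than
 * calling abort(), so under a debugger you stop right at the failure point;
 * without one the default signal action still terminates the process.
 */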
static void __attribute__ ((format(__printf__, 2, 3)))
fail_if(int cond, const char *format, ...)
{
	va_list args;

	if (!cond)
		return;

	va_start(args, format);
	vfprintf(stderr, format, args);
	va_end(args);

	raise(SIGTRAP);
}

static struct bo *
get_bo(uint32_t handle)
{
	struct bo *bo;

	fail_if(handle >= MAX_BO_COUNT, "bo handle too large\n");
	bo = &bos[handle];

	return bo;
}

static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
	return (v + a - 1) & ~(a - 1);
}

static inline uint64_t
align_u64(uint64_t v, uint64_t a)
{
	return (v + a - 1) & ~(a - 1);
}

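/*
 * All AUB output goes through dword_out()/data_out(), which duplicate every
 * write to each configured stream: the output file ("file" option) and the
 * pipe to a child command ("command" option), whichever are open.
 */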
static void
dword_out(uint32_t data)
{
	for (int i = 0; i < ARRAY_SIZE (files); i++) {
		if (files[i] == NULL)
			continue;

		fail_if(fwrite(&data, 1, 4, files[i]) == 0,
			"Writing to output failed\n");
	}
}

static void
data_out(const void *data, size_t size)
{
	if (size == 0)
		return;

	for (int i = 0; i < ARRAY_SIZE (files); i++) {
		if (files[i] == NULL)
			continue;

		fail_if(fwrite(data, 1, size, files[i]) == 0,
			"Writing to output failed\n");
	}
}

static uint32_t
gtt_size(void)
{
	return NUM_PT_ENTRIES * (addr_bits > 32 ? GEN8_PTE_SIZE : PTE_SIZE);
}

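/*
 * Emit the 5-dword header of a MEM_TRACE_MEMORY_WRITE packet: opcode plus a
 * length field covering header and payload dwords, address low/high, the
 * target address space and the payload length in bytes.  The caller follows
 * up with `len` bytes of payload via dword_out()/data_out().
 */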
static void
mem_trace_memory_write_header_out(uint64_t addr, uint32_t len,
				  uint32_t addr_space)
{
	uint32_t dwords = ALIGN(len, sizeof(uint32_t)) / sizeof(uint32_t);

	dword_out(CMD_MEM_TRACE_MEMORY_WRITE | (5 + dwords - 1));
	dword_out(addr & 0xFFFFFFFF);	/* addr lo */
	dword_out(addr >> 32);		/* addr hi */
	dword_out(addr_space);		/* gtt */
	dword_out(len);
}

static void
register_write_out(uint32_t addr, uint32_t value)
{
	uint32_t dwords = 1;

	dword_out(CMD_MEM_TRACE_REGISTER_WRITE | (5 + dwords - 1));
	dword_out(addr);
	dword_out(AUB_MEM_TRACE_REGISTER_SIZE_DWORD |
		  AUB_MEM_TRACE_REGISTER_SPACE_MMIO);
	dword_out(0xFFFFFFFF);	/* mask lo */
	dword_out(0x00000000);	/* mask hi */
	dword_out(value);
}

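/*
 * Emit 64-bit GGTT PTEs covering [start, end).  The entries are written in
 * chunks that stay within one 2 MiB-aligned group of pages (page_num is the
 * address >> 21), and every page is mapped 1:1 to the same physical address
 * with the read/write and present bits set.
 */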
static void
gen8_emit_ggtt_pte_for_range(uint64_t start, uint64_t end)
{
	uint64_t entry_addr;
	uint64_t page_num;
	uint64_t end_aligned = align_u64(end, 4096);

	if (start >= end || end > (1ull << 32))
		return;

	entry_addr = start & ~(4096 - 1);
	do {
		uint64_t last_page_entry, num_entries;

		page_num = entry_addr >> 21;
		last_page_entry = min((page_num + 1) << 21, end_aligned);
		num_entries = (last_page_entry - entry_addr) >> 12;
		mem_trace_memory_write_header_out(
			entry_addr >> 9, num_entries * GEN8_PTE_SIZE,
			AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT_ENTRY);
		while (num_entries-- > 0) {
			dword_out((entry_addr & ~(4096 - 1)) |
				  3 /* read/write | present */);
			dword_out(entry_addr >> 32);
			entry_addr += 4096;
		}
	} while (entry_addr < end);
}

/**
 * Sets bits `start` through `end` - 1 in the bitmap array.
 */
static void
set_bitmap_range(uint32_t *bitmap, uint32_t start, uint32_t end)
{
	uint32_t pos = start;
	while (pos < end) {
		const uint32_t bit = 1 << (pos & 0x1f);
		if (bit == 1 && (end - pos) > 32) {
			bitmap[pos >> 5] = 0xffffffff;
			pos += 32;
		} else {
			bitmap[pos >> 5] |= bit;
			pos++;
		}
	}
}

/**
 * Finds the next `set` (or clear) bit in the bitmap array.
 *
 * The search starts at `*start` and only checks until `end` - 1.
 *
 * If found, returns true, and the found bit index in `*start`.
 */
static bool
find_bitmap_bit(uint32_t *bitmap, bool set, uint32_t *start, uint32_t end)
{
	uint32_t pos = *start;
	const uint32_t neg_dw = set ? 0 : -1;
	while (pos < end) {
		const uint32_t dw = bitmap[pos >> 5];
		const uint32_t bit = 1 << (pos & 0x1f);
		if (!!(dw & bit) == set) {
			*start = pos;
			return true;
		} else if (bit == 1 && dw == neg_dw)
			pos += 32;
		else
			pos++;
	}
	return false;
}

/**
 * Finds a range of clear bits within the bitmap array.
 *
 * The search starts at `*start` and only checks until `*end` - 1.
 *
 * If found, returns true, and `*start` and `*end` are set for the
 * range of clear bits.
 */
static bool
find_bitmap_clear_bit_range(uint32_t *bitmap, uint32_t *start, uint32_t *end)
{
	if (find_bitmap_bit(bitmap, false, start, *end)) {
		uint32_t found_end = *start;
		if (find_bitmap_bit(bitmap, true, &found_end, *end))
			*end = found_end;
		return true;
	}
	return false;
}

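/*
 * Map [start, end) in the GGTT, emitting PTEs only for pages that have not
 * been mapped before.  A lazily allocated bitmap with one bit per 4 KiB page
 * of the 4 GiB GGTT records what is already mapped, and PTEs are emitted
 * only for the still-clear ranges.
 */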
static void
gen8_map_ggtt_range(uint64_t start, uint64_t end)
{
	uint32_t pos1, pos2, end_pos;
	static uint32_t *bitmap = NULL;
	if (bitmap == NULL) {
		/* 4GiB (32-bits) of 4KiB pages (12-bits) in dwords (5-bits) */
		bitmap = calloc(1 << (32 - 12 - 5), sizeof(*bitmap));
		if (bitmap == NULL)
			return;
	}

	pos1 = start >> 12;
	end_pos = (end + 4096 - 1) >> 12;
	while (pos1 < end_pos) {
		pos2 = end_pos;
		if (!find_bitmap_clear_bit_range(bitmap, &pos1, &pos2))
			break;

		if (verbose)
			printf("MAPPING 0x%08"PRIx64"-0x%08"PRIx64"\n",
			       (uint64_t)pos1 << 12, (uint64_t)pos2 << 12);
		gen8_emit_ggtt_pte_for_range((uint64_t)pos1 << 12,
					     (uint64_t)pos2 << 12);
		set_bitmap_range(bitmap, (uint64_t)pos1, (uint64_t)pos2);
		pos1 = pos2;
	}
}

static void
gen8_map_base_size(uint64_t base, uint64_t size)
{
	gen8_map_ggtt_range(base, base + size);
}

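/*
 * Write the gen10+ (memory-trace) AUB file header: a version packet carrying
 * the PCI-ID and application name, then the static GGTT mappings plus the
 * zeroed ring, PPHWSP and context image for the render, blitter and video
 * engines, and finally the register writes that point HWS_PGA at each
 * context and enable execlist mode on each engine.
 */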
static void
gen10_write_header(void)
{
	char app_name[8 * 4];
	int app_name_len, dwords;

	app_name_len =
		snprintf(app_name, sizeof(app_name), "PCI-ID=0x%X %s", device,
			 program_invocation_short_name);
	app_name_len = ALIGN(app_name_len, sizeof(uint32_t));

	dwords = 5 + app_name_len / sizeof(uint32_t);
	dword_out(CMD_MEM_TRACE_VERSION | (dwords - 1));
	dword_out(AUB_MEM_TRACE_VERSION_FILE_VERSION);
	dword_out(AUB_MEM_TRACE_VERSION_DEVICE_CNL |
		  AUB_MEM_TRACE_VERSION_METHOD_PHY);
	dword_out(0);	/* version */
	dword_out(0);	/* version */
	data_out(app_name, app_name_len);

	/* RENDER_RING */
	gen8_map_base_size(RENDER_RING_ADDR, RING_SIZE);
	mem_trace_memory_write_header_out(RENDER_RING_ADDR, RING_SIZE,
					  AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL);
	for (uint32_t i = 0; i < RING_SIZE; i += sizeof(uint32_t))
		dword_out(0);

	/* RENDER_PPHWSP */
	gen8_map_base_size(RENDER_CONTEXT_ADDR,
			   PPHWSP_SIZE + sizeof(render_context_init));
	mem_trace_memory_write_header_out(RENDER_CONTEXT_ADDR,
					  PPHWSP_SIZE +
					  sizeof(render_context_init),
					  AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL);
	for (uint32_t i = 0; i < PPHWSP_SIZE; i += sizeof(uint32_t))
		dword_out(0);

	/* RENDER_CONTEXT */
	data_out(render_context_init, sizeof(render_context_init));

	/* BLITTER_RING */
	gen8_map_base_size(BLITTER_RING_ADDR, RING_SIZE);
	mem_trace_memory_write_header_out(BLITTER_RING_ADDR, RING_SIZE,
					  AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL);
	for (uint32_t i = 0; i < RING_SIZE; i += sizeof(uint32_t))
		dword_out(0);

	/* BLITTER_PPHWSP */
	gen8_map_base_size(BLITTER_CONTEXT_ADDR,
			   PPHWSP_SIZE + sizeof(blitter_context_init));
	mem_trace_memory_write_header_out(BLITTER_CONTEXT_ADDR,
					  PPHWSP_SIZE +
					  sizeof(blitter_context_init),
					  AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL);
	for (uint32_t i = 0; i < PPHWSP_SIZE; i += sizeof(uint32_t))
		dword_out(0);

	/* BLITTER_CONTEXT */
	data_out(blitter_context_init, sizeof(blitter_context_init));

	/* VIDEO_RING */
	gen8_map_base_size(VIDEO_RING_ADDR, RING_SIZE);
	mem_trace_memory_write_header_out(VIDEO_RING_ADDR, RING_SIZE,
					  AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL);
	for (uint32_t i = 0; i < RING_SIZE; i += sizeof(uint32_t))
		dword_out(0);

	/* VIDEO_PPHWSP */
	gen8_map_base_size(VIDEO_CONTEXT_ADDR,
			   PPHWSP_SIZE + sizeof(video_context_init));
	mem_trace_memory_write_header_out(VIDEO_CONTEXT_ADDR,
					  PPHWSP_SIZE +
					  sizeof(video_context_init),
					  AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL);
	for (uint32_t i = 0; i < PPHWSP_SIZE; i += sizeof(uint32_t))
		dword_out(0);

	/* VIDEO_CONTEXT */
	data_out(video_context_init, sizeof(video_context_init));

	register_write_out(HWS_PGA_RCSUNIT, RENDER_CONTEXT_ADDR);
	register_write_out(HWS_PGA_VCSUNIT0, VIDEO_CONTEXT_ADDR);
	register_write_out(HWS_PGA_BCSUNIT, BLITTER_CONTEXT_ADDR);

	register_write_out(GFX_MODE_RCSUNIT, 0x80008000 /* execlist enable */);
	register_write_out(GFX_MODE_VCSUNIT0, 0x80008000 /* execlist enable */);
	register_write_out(GFX_MODE_BCSUNIT, 0x80008000 /* execlist enable */);
}

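/*
 * Write the legacy (pre-gen10) AUB header: version packet, a 32-byte
 * application name, a PCI-ID comment, and a flat GTT covering
 * MEMORY_MAP_SIZE whose entries point each GTT page at consecutive physical
 * pages starting at 0x200000, marked present and writable.
 */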
static void write_header(void)
{
	char app_name[8 * 4];
	char comment[16];
	int comment_len, comment_dwords, dwords;
	uint32_t entry = 0x200003;

	comment_len = snprintf(comment, sizeof(comment), "PCI-ID=0x%x", device);
	comment_dwords = ((comment_len + 3) / 4);

	/* Start with a (required) version packet. */
	dwords = 13 + comment_dwords;
	dword_out(CMD_AUB_HEADER | (dwords - 2));
	dword_out((4 << AUB_HEADER_MAJOR_SHIFT) |
		  (0 << AUB_HEADER_MINOR_SHIFT));

	/* Next comes a 32-byte application name. */
	strncpy(app_name, program_invocation_short_name, sizeof(app_name));
	app_name[sizeof(app_name) - 1] = 0;
	data_out(app_name, sizeof(app_name));

	dword_out(0); /* timestamp */
	dword_out(0); /* timestamp */
	dword_out(comment_len);
	data_out(comment, comment_dwords * 4);

	/* Set up the GTT. The max we can handle is 64M */
	dword_out(CMD_AUB_TRACE_HEADER_BLOCK | ((addr_bits > 32 ? 6 : 5) - 2));
	dword_out(AUB_TRACE_MEMTYPE_GTT_ENTRY |
		  AUB_TRACE_TYPE_NOTYPE | AUB_TRACE_OP_DATA_WRITE);
	dword_out(0); /* subtype */
	dword_out(0); /* offset */
	dword_out(gtt_size()); /* size */
	if (addr_bits > 32)
		dword_out(0);
	for (uint32_t i = 0; i < NUM_PT_ENTRIES; i++) {
		dword_out(entry + 0x1000 * i);
		if (addr_bits > 32)
			dword_out(0);
	}
}

/**
 * Break up large objects into multiple writes. Otherwise a 128kb VBO
 * would overflow the 16 bits of size field in the packet header and
 * everything goes badly after that.
 */
static void
aub_write_trace_block(uint32_t type, void *virtual, uint32_t size, uint64_t gtt_offset)
{
	uint32_t block_size;
	uint32_t subtype = 0;
	static const char null_block[8 * 4096];

	for (uint32_t offset = 0; offset < size; offset += block_size) {
		block_size = size - offset;

		if (block_size > 8 * 4096)
			block_size = 8 * 4096;

		if (gen >= 10) {
			mem_trace_memory_write_header_out(gtt_offset + offset,
							  block_size,
							  AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL);
		} else {
			dword_out(CMD_AUB_TRACE_HEADER_BLOCK |
				  ((addr_bits > 32 ? 6 : 5) - 2));
			dword_out(AUB_TRACE_MEMTYPE_GTT |
				  type | AUB_TRACE_OP_DATA_WRITE);
			dword_out(subtype);
			dword_out(gtt_offset + offset);
			dword_out(align_u32(block_size, 4));
			if (addr_bits > 32)
				dword_out((gtt_offset + offset) >> 32);
		}

		if (virtual)
			data_out(((char *) GET_PTR(virtual)) + offset, block_size);
		else
			data_out(null_block, block_size);

		/* Pad to a multiple of 4 bytes. */
		data_out(null_block, -block_size & 3);
	}
}

static void
write_reloc(void *p, uint64_t v)
{
	if (addr_bits > 32) {
		/* From the Broadwell PRM Vol. 2a,
		 * MI_LOAD_REGISTER_MEM::MemoryAddress:
		 *
		 *	"This field specifies the address of the memory
		 *	location where the register value specified in the
		 *	DWord above will read from. The address specifies
		 *	the DWord location of the data. Range =
		 *	GraphicsVirtualAddress[63:2] for a DWord register
		 *	GraphicsAddress [63:48] are ignored by the HW and
		 *	assumed to be in correct canonical form [63:48] ==
		 *	[47]."
		 *
		 * In practice, this will always mean the top bits are zero
		 * because of the GTT size limitation of the aubdump tool.
		 */
		const int shift = 63 - 47;
		*(uint64_t *)p = (((int64_t)v) << shift) >> shift;
	} else {
		*(uint32_t *)p = v;
	}
}

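/*
 * Submit a batch on the execlist path (gen10+): write an
 * MI_BATCH_BUFFER_START into the engine's static ring, update the ring
 * head/tail stored in the context image, then submit the context descriptor
 * through the ELSP port (or ELSQ plus the control register on gen11+), and
 * emit a poll on the execlist status register, presumably so playback waits
 * for the submission to drain before continuing.
 */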
static void
aub_dump_execlist(uint64_t batch_offset, int ring_flag)
{
	uint32_t ring_addr;
	uint64_t descriptor;
	uint32_t elsp_reg;
	uint32_t elsq_reg;
	uint32_t status_reg;
	uint32_t control_reg;

	switch (ring_flag) {
	case I915_EXEC_DEFAULT:
	case I915_EXEC_RENDER:
		ring_addr = RENDER_RING_ADDR;
		descriptor = RENDER_CONTEXT_DESCRIPTOR;
		elsp_reg = EXECLIST_SUBMITPORT_RCSUNIT;
		elsq_reg = EXECLIST_SQ_CONTENTS0_RCSUNIT;
		status_reg = EXECLIST_STATUS_RCSUNIT;
		control_reg = EXECLIST_CONTROL_RCSUNIT;
		break;
	case I915_EXEC_BSD:
		ring_addr = VIDEO_RING_ADDR;
		descriptor = VIDEO_CONTEXT_DESCRIPTOR;
		elsp_reg = EXECLIST_SUBMITPORT_VCSUNIT0;
		elsq_reg = EXECLIST_SQ_CONTENTS0_VCSUNIT0;
		status_reg = EXECLIST_STATUS_VCSUNIT0;
		control_reg = EXECLIST_CONTROL_VCSUNIT0;
		break;
	case I915_EXEC_BLT:
		ring_addr = BLITTER_RING_ADDR;
		descriptor = BLITTER_CONTEXT_DESCRIPTOR;
		elsp_reg = EXECLIST_SUBMITPORT_BCSUNIT;
		elsq_reg = EXECLIST_SQ_CONTENTS0_BCSUNIT;
		status_reg = EXECLIST_STATUS_BCSUNIT;
		control_reg = EXECLIST_CONTROL_BCSUNIT;
		break;
	}

	mem_trace_memory_write_header_out(ring_addr, 16,
					  AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL);
	dword_out(AUB_MI_BATCH_BUFFER_START | (3 - 2));
	dword_out(batch_offset & 0xFFFFFFFF);
	dword_out(batch_offset >> 32);
	dword_out(0 /* MI_NOOP */);

	mem_trace_memory_write_header_out(ring_addr + 8192 + 20, 4,
					  AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL);
	dword_out(0); /* RING_BUFFER_HEAD */
	mem_trace_memory_write_header_out(ring_addr + 8192 + 28, 4,
					  AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL);
	dword_out(16); /* RING_BUFFER_TAIL */

	if (gen >= 11) {
		register_write_out(elsq_reg, descriptor & 0xFFFFFFFF);
		register_write_out(elsq_reg + sizeof(uint32_t), descriptor >> 32);
		register_write_out(control_reg, 1);
	} else {
		register_write_out(elsp_reg, 0);
		register_write_out(elsp_reg, 0);
		register_write_out(elsp_reg, descriptor >> 32);
		register_write_out(elsp_reg, descriptor & 0xFFFFFFFF);
	}

	dword_out(CMD_MEM_TRACE_REGISTER_POLL | (5 + 1 - 1));
	dword_out(status_reg);
	dword_out(AUB_MEM_TRACE_REGISTER_SIZE_DWORD |
		  AUB_MEM_TRACE_REGISTER_SPACE_MMIO);
	if (gen >= 11) {
		dword_out(0x00000001);	/* mask lo */
		dword_out(0x00000000);	/* mask hi */
		dword_out(0x00000001);
	} else {
		dword_out(0x00000010);	/* mask lo */
		dword_out(0x00000000);	/* mask hi */
		dword_out(0x00000000);
	}
}

static void
aub_dump_ringbuffer(uint64_t batch_offset, uint64_t offset, int ring_flag)
{
	uint32_t ringbuffer[4096];
	unsigned aub_mi_bbs_len;
	int ring = AUB_TRACE_TYPE_RING_PRB0; /* The default ring */
	int ring_count = 0;

	if (ring_flag == I915_EXEC_BSD)
		ring = AUB_TRACE_TYPE_RING_PRB1;
	else if (ring_flag == I915_EXEC_BLT)
		ring = AUB_TRACE_TYPE_RING_PRB2;

	/* Make a ring buffer to execute our batchbuffer. */
	memset(ringbuffer, 0, sizeof(ringbuffer));

	aub_mi_bbs_len = addr_bits > 32 ? 3 : 2;
	ringbuffer[ring_count] = AUB_MI_BATCH_BUFFER_START | (aub_mi_bbs_len - 2);
	write_reloc(&ringbuffer[ring_count + 1], batch_offset);
	ring_count += aub_mi_bbs_len;

	/* Write out the ring. This appears to trigger execution of
	 * the ring in the simulator.
	 */
	dword_out(CMD_AUB_TRACE_HEADER_BLOCK |
		  ((addr_bits > 32 ? 6 : 5) - 2));
	dword_out(AUB_TRACE_MEMTYPE_GTT | ring | AUB_TRACE_OP_COMMAND_WRITE);
	dword_out(0); /* general/surface subtype */
	dword_out(offset);
	dword_out(ring_count * 4);
	if (addr_bits > 32)
		dword_out(offset >> 32);

	data_out(ringbuffer, ring_count * 4);
}

static void *
relocate_bo(struct bo *bo, const struct drm_i915_gem_execbuffer2 *execbuffer2,
	    const struct drm_i915_gem_exec_object2 *obj)
{
	const struct drm_i915_gem_exec_object2 *exec_objects =
		(struct drm_i915_gem_exec_object2 *) (uintptr_t) execbuffer2->buffers_ptr;
	const struct drm_i915_gem_relocation_entry *relocs =
		(const struct drm_i915_gem_relocation_entry *) (uintptr_t) obj->relocs_ptr;
	void *relocated;
	int handle;

	relocated = malloc(bo->size);
	fail_if(relocated == NULL, "intel_aubdump: out of memory\n");
	memcpy(relocated, GET_PTR(bo->map), bo->size);
	for (size_t i = 0; i < obj->relocation_count; i++) {
		fail_if(relocs[i].offset >= bo->size, "intel_aubdump: reloc outside bo\n");

		if (execbuffer2->flags & I915_EXEC_HANDLE_LUT)
			handle = exec_objects[relocs[i].target_handle].handle;
		else
			handle = relocs[i].target_handle;

		write_reloc(((char *)relocated) + relocs[i].offset,
			    get_bo(handle)->offset + relocs[i].delta);
	}

	return relocated;
}

static int
gem_ioctl(int fd, unsigned long request, void *argp)
{
	int ret;

	do {
		ret = libc_ioctl(fd, request, argp);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));

	return ret;
}

static void *
gem_mmap(int fd, uint32_t handle, uint64_t offset, uint64_t size)
{
	struct drm_i915_gem_mmap mmap = {
		.handle = handle,
		.offset = offset,
		.size = size
	};

	if (gem_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap) == -1)
		return MAP_FAILED;

	return (void *)(uintptr_t) mmap.addr_ptr;
}

static int
gem_get_param(int fd, uint32_t param)
{
	int value;
	drm_i915_getparam_t gp = {
		.param = param,
		.value = &value
	};

	if (gem_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == -1)
		return 0;

	return value;
}

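/*
 * Main interception point, called for every execbuffer2 the application
 * submits.  On the first call it identifies the chipset and writes the AUB
 * header.  It then assigns GGTT offsets to all buffers (honoring
 * EXEC_OBJECT_PINNED), mmaps and, where needed, relocates their contents,
 * writes every buffer into the AUB stream, and finally emits the execlist
 * submission (gen10+) or the legacy ring write for the batch.
 */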
static void
dump_execbuffer2(int fd, struct drm_i915_gem_execbuffer2 *execbuffer2)
{
	struct drm_i915_gem_exec_object2 *exec_objects =
		(struct drm_i915_gem_exec_object2 *) (uintptr_t) execbuffer2->buffers_ptr;
	uint32_t ring_flag = execbuffer2->flags & I915_EXEC_RING_MASK;
	uint32_t offset;
	struct drm_i915_gem_exec_object2 *obj;
	struct bo *bo, *batch_bo;
	int batch_index;
	void *data;

	/* We can't do this at open time as we're not yet authenticated. */
	if (device == 0) {
		device = gem_get_param(fd, I915_PARAM_CHIPSET_ID);
		fail_if(device == 0 || gen == -1, "failed to identify chipset\n");
	}
	if (gen == 0) {
		gen = intel_gen(device);

		/* If we don't know the device gen, then it probably is a
		 * newer device. Set gen to some arbitrarily high number.
		 */
		if (gen == 0)
			gen = 9999;

		addr_bits = gen >= 8 ? 48 : 32;

		if (gen >= 10)
			gen10_write_header();
		else
			write_header();

		if (verbose)
			printf("[intel_aubdump: running, "
			       "output file %s, chipset id 0x%04x, gen %d]\n",
			       filename, device, gen);
	}

	if (gen >= 10)
		offset = STATIC_GGTT_MAP_END;
	else
		offset = gtt_size();

	if (verbose)
		printf("Dumping execbuffer2:\n");

	for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
		obj = &exec_objects[i];
		bo = get_bo(obj->handle);

		/* If bo->size == 0, this means they passed us an invalid
		 * buffer. The kernel will reject it and so should we.
		 */
		if (bo->size == 0) {
			if (verbose)
				printf("BO #%d is invalid!\n", obj->handle);
			return;
		}

		if (obj->flags & EXEC_OBJECT_PINNED) {
			bo->offset = obj->offset;
			if (verbose)
				printf("BO #%d (%dB) pinned @ 0x%"PRIx64"\n",
				       obj->handle, bo->size, bo->offset);
		} else {
			if (obj->alignment != 0)
				offset = align_u32(offset, obj->alignment);
			bo->offset = offset;
			if (verbose)
				printf("BO #%d (%dB) @ 0x%"PRIx64"\n", obj->handle,
				       bo->size, bo->offset);
			offset = align_u32(offset + bo->size + 4095, 4096);
		}

		if (bo->map == NULL && bo->size > 0)
			bo->map = gem_mmap(fd, obj->handle, 0, bo->size);
		fail_if(bo->map == MAP_FAILED, "intel_aubdump: bo mmap failed\n");

		if (gen >= 10)
			gen8_map_ggtt_range(bo->offset, bo->offset + bo->size);
	}

	batch_index = (execbuffer2->flags & I915_EXEC_BATCH_FIRST) ? 0 :
		execbuffer2->buffer_count - 1;
	batch_bo = get_bo(exec_objects[batch_index].handle);
	for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
		obj = &exec_objects[i];
		bo = get_bo(obj->handle);

		if (obj->relocation_count > 0)
			data = relocate_bo(bo, execbuffer2, obj);
		else
			data = bo->map;

		if (bo == batch_bo) {
			aub_write_trace_block(AUB_TRACE_TYPE_BATCH,
					      data, bo->size, bo->offset);
		} else {
			aub_write_trace_block(AUB_TRACE_TYPE_NOTYPE,
					      data, bo->size, bo->offset);
		}
		if (data != bo->map)
			free(data);
	}

	if (gen >= 10) {
		aub_dump_execlist(batch_bo->offset +
				  execbuffer2->batch_start_offset, ring_flag);
	} else {
		/* Dump ring buffer */
		aub_dump_ringbuffer(batch_bo->offset +
				    execbuffer2->batch_start_offset, offset,
				    ring_flag);
	}

	for (int i = 0; i < ARRAY_SIZE(files); i++) {
		if (files[i] != NULL)
			fflush(files[i]);
	}

	if (device_override &&
	    (execbuffer2->flags & I915_EXEC_FENCE_ARRAY) != 0) {
		struct drm_i915_gem_exec_fence *fences =
			(void*)(uintptr_t)execbuffer2->cliprects_ptr;
		for (uint32_t i = 0; i < execbuffer2->num_cliprects; i++) {
			if ((fences[i].flags & I915_EXEC_FENCE_SIGNAL) != 0) {
				struct drm_syncobj_array arg = {
					.handles = (uintptr_t)&fences[i].handle,
					.count_handles = 1,
					.pad = 0,
				};
				libc_ioctl(fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &arg);
			}
		}
	}
}

static void
add_new_bo(int handle, uint64_t size, void *map)
{
	struct bo *bo;

	/* Validate the handle and size before touching the bos array. */
	fail_if(handle >= MAX_BO_COUNT, "intel_aubdump: bo handle out of range\n");
	fail_if(size == 0, "intel_aubdump: bo size is invalid\n");

	bo = &bos[handle];
	bo->size = size;
	bo->map = map;
}

static void
remove_bo(int handle)
{
	struct bo *bo = get_bo(handle);

	if (bo->map && !IS_USERPTR(bo->map))
		munmap(bo->map, bo->size);
	bo->size = 0;
	bo->map = NULL;
}

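/*
 * Exported override of close(2).  Together with the exported ioctl() below,
 * this is how the interposed library (typically loaded via LD_PRELOAD by the
 * intel_aubdump wrapper) hooks into the application: here we only need to
 * notice when the intercepted DRM fd goes away.
 */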
int
close(int fd)
{
	if (fd == drm_fd)
		drm_fd = -1;

	return libc_close(fd);
}

static FILE *
launch_command(char *command)
{
	int i = 0, fds[2];
	/* One slot per possible comma-separated argument plus a NULL
	 * terminator; calloc() leaves the unused slots zeroed. */
	char **args = calloc(strlen(command) + 1, sizeof(char *));
	char *iter = command;

	args[i++] = iter = command;

	while ((iter = strstr(iter, ",")) != NULL) {
		*iter = '\0';
		iter += 1;
		args[i++] = iter;
	}

	if (pipe(fds) == -1)
		return NULL;

	switch (fork()) {
	case 0:
		dup2(fds[0], 0);
		fail_if(execvp(args[0], args) == -1,
			"intel_aubdump: failed to launch child command\n");
		return NULL;

	default:
		free(args);
		return fdopen(fds[1], "w");

	case -1:
		return NULL;
	}
}

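/*
 * Lazy one-time initialization.  The launching wrapper passes its
 * configuration on file descriptor 3 as "key=value" lines (verbose, device,
 * file, command); parse it here and allocate the BO table.
 */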
static void
maybe_init(void)
{
	static bool initialized = false;
	FILE *config;
	char *key, *value;

	if (initialized)
		return;

	initialized = true;

	config = fdopen(3, "r");
	while (fscanf(config, "%m[^=]=%m[^\n]\n", &key, &value) != EOF) {
		if (!strcmp(key, "verbose")) {
			verbose = 1;
		} else if (!strcmp(key, "device")) {
			fail_if(sscanf(value, "%i", &device) != 1,
				"intel_aubdump: failed to parse device id '%s'",
				value);
			device_override = true;
		} else if (!strcmp(key, "file")) {
			filename = strdup(value);
			files[0] = fopen(filename, "w+");
			fail_if(files[0] == NULL,
				"intel_aubdump: failed to open file '%s'\n",
				filename);
		} else if (!strcmp(key, "command")) {
			files[1] = launch_command(value);
			fail_if(files[1] == NULL,
				"intel_aubdump: failed to launch command '%s'\n",
				value);
		} else {
			fprintf(stderr, "intel_aubdump: unknown option '%s'\n", key);
		}

		free(key);
		free(value);
	}
	fclose(config);

	bos = calloc(MAX_BO_COUNT, sizeof(bos[0]));
	fail_if(bos == NULL, "intel_aubdump: out of memory\n");
}

#define LOCAL_IOCTL_I915_GEM_EXECBUFFER2_WR \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)

int
ioctl(int fd, unsigned long request, ...)
{
	va_list args;
	void *argp;
	int ret;
	struct stat buf;

	va_start(args, request);
	argp = va_arg(args, void *);
	va_end(args);

	if (_IOC_TYPE(request) == DRM_IOCTL_BASE &&
	    drm_fd != fd && fstat(fd, &buf) == 0 &&
	    (buf.st_mode & S_IFMT) == S_IFCHR && major(buf.st_rdev) == DRM_MAJOR) {
		drm_fd = fd;
		if (verbose)
			printf("[intel_aubdump: intercept drm ioctl on fd %d]\n", fd);
	}

	if (fd == drm_fd) {
		maybe_init();

		switch (request) {
		case DRM_IOCTL_I915_GETPARAM: {
			struct drm_i915_getparam *getparam = argp;

			if (device_override && getparam->param == I915_PARAM_CHIPSET_ID) {
				*getparam->value = device;
				return 0;
			}

			ret = libc_ioctl(fd, request, argp);

			/* If the application looks up chipset_id
			 * (they typically do), we'll piggy-back on
			 * their ioctl and store the id for later
			 * use. */
			if (getparam->param == I915_PARAM_CHIPSET_ID)
				device = *getparam->value;

			return ret;
		}

		case DRM_IOCTL_I915_GEM_EXECBUFFER: {
			static bool once;
			if (!once) {
				fprintf(stderr, "intel_aubdump: "
					"application uses DRM_IOCTL_I915_GEM_EXECBUFFER, not handled\n");
				once = true;
			}
			return libc_ioctl(fd, request, argp);
		}

		case DRM_IOCTL_I915_GEM_EXECBUFFER2:
		case LOCAL_IOCTL_I915_GEM_EXECBUFFER2_WR: {
			dump_execbuffer2(fd, argp);
			if (device_override)
				return 0;

			return libc_ioctl(fd, request, argp);
		}

		case DRM_IOCTL_I915_GEM_CREATE: {
			struct drm_i915_gem_create *create = argp;

			ret = libc_ioctl(fd, request, argp);
			if (ret == 0)
				add_new_bo(create->handle, create->size, NULL);

			return ret;
		}

		case DRM_IOCTL_I915_GEM_USERPTR: {
			struct drm_i915_gem_userptr *userptr = argp;

			ret = libc_ioctl(fd, request, argp);
			if (ret == 0)
				add_new_bo(userptr->handle, userptr->user_size,
					   (void *) (uintptr_t) (userptr->user_ptr | USERPTR_FLAG));
			return ret;
		}

		case DRM_IOCTL_GEM_CLOSE: {
			struct drm_gem_close *close = argp;

			remove_bo(close->handle);

			return libc_ioctl(fd, request, argp);
		}

		case DRM_IOCTL_GEM_OPEN: {
			struct drm_gem_open *open = argp;

			ret = libc_ioctl(fd, request, argp);
			if (ret == 0)
				add_new_bo(open->handle, open->size, NULL);

			return ret;
		}

		case DRM_IOCTL_PRIME_FD_TO_HANDLE: {
			struct drm_prime_handle *prime = argp;

			ret = libc_ioctl(fd, request, argp);
			if (ret == 0) {
				off_t size;

				size = lseek(prime->fd, 0, SEEK_END);
				fail_if(size == -1, "intel_aubdump: failed to get prime bo size\n");
				add_new_bo(prime->handle, size, NULL);
			}

			return ret;
		}

		default:
			return libc_ioctl(fd, request, argp);
		}
	} else {
		return libc_ioctl(fd, request, argp);
	}
}

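/*
 * The libc_close/libc_ioctl pointers initially point at the *_init_helper
 * functions below, which resolve the real libc symbols with
 * dlsym(RTLD_NEXT, ...) on first use and then forward the call, so no
 * constructor is needed to set up the hooks.
 */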
static void
init(void)
{
	libc_close = dlsym(RTLD_NEXT, "close");
	libc_ioctl = dlsym(RTLD_NEXT, "ioctl");
	fail_if(libc_close == NULL || libc_ioctl == NULL,
		"intel_aubdump: failed to get libc ioctl or close\n");
}

static int
close_init_helper(int fd)
{
	init();
	return libc_close(fd);
}

static int
ioctl_init_helper(int fd, unsigned long request, ...)
{
	va_list args;
	void *argp;

	va_start(args, request);
	argp = va_arg(args, void *);
	va_end(args);

	init();
	return libc_ioctl(fd, request, argp);
}

static void __attribute__ ((destructor))
fini(void)
{
	free(filename);
	for (int i = 0; i < ARRAY_SIZE(files); i++) {
		if (files[i] != NULL)
			fclose(files[i]);
	}
	free(bos);
}