1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * SPDX-License-Identifier: MIT
5 */
6
7 #include "ac_debug.h"
8 #include "ac_rtld.h"
9 #include "driver_ddebug/dd_util.h"
10 #include "si_pipe.h"
11 #include "sid.h"
12 #include "sid_tables.h"
13 #include "tgsi/tgsi_from_mesa.h"
14 #include "util/u_dump.h"
15 #include "util/u_log.h"
16 #include "util/u_memory.h"
17 #include "util/u_process.h"
18 #include "util/u_string.h"
19
20 static void si_dump_bo_list(struct si_context *sctx, const struct radeon_saved_cs *saved, FILE *f);
21
22 DEBUG_GET_ONCE_OPTION(replace_shaders, "RADEON_REPLACE_SHADERS", NULL)
23
24 /**
25 * Store a linearized copy of all chunks of \p cs together with the buffer
26 * list in \p saved.
27 */
si_save_cs(struct radeon_winsys * ws,struct radeon_cmdbuf * cs,struct radeon_saved_cs * saved,bool get_buffer_list)28 void si_save_cs(struct radeon_winsys *ws, struct radeon_cmdbuf *cs, struct radeon_saved_cs *saved,
29 bool get_buffer_list)
30 {
31 uint32_t *buf;
32 unsigned i;
33
34 /* Save the IB chunks. */
35 saved->num_dw = cs->prev_dw + cs->current.cdw;
36 saved->ib = MALLOC(4 * saved->num_dw);
37 if (!saved->ib)
38 goto oom;
39
40 buf = saved->ib;
41 for (i = 0; i < cs->num_prev; ++i) {
42 memcpy(buf, cs->prev[i].buf, cs->prev[i].cdw * 4);
43 buf += cs->prev[i].cdw;
44 }
45 memcpy(buf, cs->current.buf, cs->current.cdw * 4);
46
47 if (!get_buffer_list)
48 return;
49
50 /* Save the buffer list. */
51 saved->bo_count = ws->cs_get_buffer_list(cs, NULL);
52 saved->bo_list = CALLOC(saved->bo_count, sizeof(saved->bo_list[0]));
53 if (!saved->bo_list) {
54 FREE(saved->ib);
55 goto oom;
56 }
57 ws->cs_get_buffer_list(cs, saved->bo_list);
58
59 return;
60
61 oom:
62 fprintf(stderr, "%s: out of memory\n", __func__);
63 memset(saved, 0, sizeof(*saved));
64 }
65
si_clear_saved_cs(struct radeon_saved_cs * saved)66 void si_clear_saved_cs(struct radeon_saved_cs *saved)
67 {
68 FREE(saved->ib);
69 FREE(saved->bo_list);
70
71 memset(saved, 0, sizeof(*saved));
72 }
73
si_destroy_saved_cs(struct si_saved_cs * scs)74 void si_destroy_saved_cs(struct si_saved_cs *scs)
75 {
76 si_clear_saved_cs(&scs->gfx);
77 si_resource_reference(&scs->trace_buf, NULL);
78 free(scs);
79 }
80
si_dump_shader(struct si_screen * sscreen,struct si_shader * shader,FILE * f)81 static void si_dump_shader(struct si_screen *sscreen, struct si_shader *shader, FILE *f)
82 {
83 if (shader->shader_log)
84 fwrite(shader->shader_log, shader->shader_log_size, 1, f);
85 else
86 si_shader_dump(sscreen, shader, NULL, f, false);
87
88 if (shader->bo && sscreen->options.dump_shader_binary) {
89 unsigned size = shader->bo->b.b.width0;
90 fprintf(f, "BO: VA=%" PRIx64 " Size=%u\n", shader->bo->gpu_address, size);
91
92 const char *mapped = sscreen->ws->buffer_map(sscreen->ws,
93 shader->bo->buf, NULL,
94 PIPE_MAP_UNSYNCHRONIZED | PIPE_MAP_READ | RADEON_MAP_TEMPORARY);
95
96 for (unsigned i = 0; i < size; i += 4) {
97 fprintf(f, " %4x: %08x\n", i, *(uint32_t *)(mapped + i));
98 }
99
100 sscreen->ws->buffer_unmap(sscreen->ws, shader->bo->buf);
101
102 fprintf(f, "\n");
103 }
104 }
105
/* Log chunk that keeps one shader (and the object that owns it) alive so
 * its disassembly can be printed later, when the log page is flushed. */
struct si_log_chunk_shader {
   /* The shader destroy code assumes a current context for unlinking of
    * PM4 packets etc.
    *
    * While we should be able to destroy shaders without a context, doing
    * so would happen only very rarely and be therefore likely to fail
    * just when you're trying to debug something. Let's just remember the
    * current context in the chunk.
    */
   struct si_context *ctx;
   struct si_shader *shader;

   /* For keep-alive reference counts; which one is set depends on whether
    * the chunk holds a gfx shader or a compute program. */
   struct si_shader_selector *sel;
   struct si_compute *program;
};
122
si_log_chunk_shader_destroy(void * data)123 static void si_log_chunk_shader_destroy(void *data)
124 {
125 struct si_log_chunk_shader *chunk = data;
126 si_shader_selector_reference(chunk->ctx, &chunk->sel, NULL);
127 si_compute_reference(&chunk->program, NULL);
128 FREE(chunk);
129 }
130
si_log_chunk_shader_print(void * data,FILE * f)131 static void si_log_chunk_shader_print(void *data, FILE *f)
132 {
133 struct si_log_chunk_shader *chunk = data;
134 struct si_screen *sscreen = chunk->ctx->screen;
135 si_dump_shader(sscreen, chunk->shader, f);
136 }
137
138 static struct u_log_chunk_type si_log_chunk_type_shader = {
139 .destroy = si_log_chunk_shader_destroy,
140 .print = si_log_chunk_shader_print,
141 };
142
si_dump_gfx_shader(struct si_context * ctx,const struct si_shader_ctx_state * state,struct u_log_context * log)143 static void si_dump_gfx_shader(struct si_context *ctx, const struct si_shader_ctx_state *state,
144 struct u_log_context *log)
145 {
146 struct si_shader *current = state->current;
147
148 if (!state->cso || !current)
149 return;
150
151 struct si_log_chunk_shader *chunk = CALLOC_STRUCT(si_log_chunk_shader);
152 chunk->ctx = ctx;
153 chunk->shader = current;
154 si_shader_selector_reference(ctx, &chunk->sel, current->selector);
155 u_log_chunk(log, &si_log_chunk_type_shader, chunk);
156 }
157
si_dump_compute_shader(struct si_context * ctx,struct u_log_context * log)158 static void si_dump_compute_shader(struct si_context *ctx, struct u_log_context *log)
159 {
160 const struct si_cs_shader_state *state = &ctx->cs_shader_state;
161
162 if (!state->program)
163 return;
164
165 struct si_log_chunk_shader *chunk = CALLOC_STRUCT(si_log_chunk_shader);
166 chunk->ctx = ctx;
167 chunk->shader = &state->program->shader;
168 si_compute_reference(&chunk->program, state->program);
169 u_log_chunk(log, &si_log_chunk_type_shader, chunk);
170 }
171
/**
 * Shader compiles can be overridden with arbitrary ELF objects by setting
 * the environment variable RADEON_REPLACE_SHADERS=num1:filename1[;num2:filename2]
 *
 * TODO: key this off some hash
 */
bool si_replace_shader(unsigned num, struct si_shader_binary *binary)
{
   const char *p = debug_get_option_replace_shaders();
   const char *semicolon;
   char *copy = NULL;
   FILE *f;
   long filesize, nread;
   bool replaced = false;

   if (!p)
      return false;

   /* Scan the "num:filename" entries for one whose number matches \p num. */
   while (*p) {
      unsigned long i;
      char *endp;
      i = strtoul(p, &endp, 0);

      p = endp;
      if (*p != ':') {
         fprintf(stderr, "RADEON_REPLACE_SHADERS formatted badly.\n");
         exit(1);
      }
      ++p;

      if (i == num)
         break;

      /* Not this entry: skip to the one after the next ';'. */
      p = strchr(p, ';');
      if (!p)
         return false;
      ++p;
   }
   /* Reached the end of the list without a match. */
   if (!*p)
      return false;

   /* If this isn't the last entry, isolate the filename into a copy. */
   semicolon = strchr(p, ';');
   if (semicolon) {
      p = copy = strndup(p, semicolon - p);
      if (!copy) {
         fprintf(stderr, "out of memory\n");
         return false;
      }
   }

   fprintf(stderr, "radeonsi: replace shader %u by %s\n", num, p);

   f = fopen(p, "r");
   if (!f) {
      perror("radeonsi: failed to open file");
      goto out_free;
   }

   /* Determine the file size via seek-to-end + ftell. */
   if (fseek(f, 0, SEEK_END) != 0)
      goto file_error;

   filesize = ftell(f);
   if (filesize < 0)
      goto file_error;

   if (fseek(f, 0, SEEK_SET) != 0)
      goto file_error;

   binary->code_buffer = MALLOC(filesize);
   if (!binary->code_buffer) {
      fprintf(stderr, "out of memory\n");
      goto out_close;
   }

   /* Read the whole file; a short read is treated as an error. */
   nread = fread((void *)binary->code_buffer, 1, filesize, f);
   if (nread != filesize) {
      FREE((void *)binary->code_buffer);
      binary->code_buffer = NULL;
      goto file_error;
   }

   /* The replacement file is a raw ELF object. */
   binary->type = SI_SHADER_BINARY_ELF;
   binary->code_size = nread;
   replaced = true;

out_close:
   fclose(f);
out_free:
   free(copy);
   return replaced;

file_error:
   perror("radeonsi: reading shader");
   goto out_close;
}
267
268 /* Parsed IBs are difficult to read without colors. Use "less -R file" to
269 * read them, or use "aha -b -f file" to convert them to html.
270 */
271 #define COLOR_RESET "\033[0m"
272 #define COLOR_RED "\033[31m"
273 #define COLOR_GREEN "\033[1;32m"
274 #define COLOR_YELLOW "\033[1;33m"
275 #define COLOR_CYAN "\033[1;36m"
276
si_dump_mmapped_reg(struct si_context * sctx,FILE * f,unsigned offset)277 static void si_dump_mmapped_reg(struct si_context *sctx, FILE *f, unsigned offset)
278 {
279 struct radeon_winsys *ws = sctx->ws;
280 uint32_t value;
281
282 if (ws->read_registers(ws, offset, 1, &value))
283 ac_dump_reg(f, sctx->gfx_level, sctx->family, offset, value, ~0);
284 }
285
si_dump_debug_registers(struct si_context * sctx,FILE * f)286 static void si_dump_debug_registers(struct si_context *sctx, FILE *f)
287 {
288 fprintf(f, "Memory-mapped registers:\n");
289 si_dump_mmapped_reg(sctx, f, R_008010_GRBM_STATUS);
290
291 /* No other registers can be read on radeon. */
292 if (!sctx->screen->info.is_amdgpu) {
293 fprintf(f, "\n");
294 return;
295 }
296
297 si_dump_mmapped_reg(sctx, f, R_008008_GRBM_STATUS2);
298 si_dump_mmapped_reg(sctx, f, R_008014_GRBM_STATUS_SE0);
299 si_dump_mmapped_reg(sctx, f, R_008018_GRBM_STATUS_SE1);
300 si_dump_mmapped_reg(sctx, f, R_008038_GRBM_STATUS_SE2);
301 si_dump_mmapped_reg(sctx, f, R_00803C_GRBM_STATUS_SE3);
302 si_dump_mmapped_reg(sctx, f, R_00D034_SDMA0_STATUS_REG);
303 si_dump_mmapped_reg(sctx, f, R_00D834_SDMA1_STATUS_REG);
304 if (sctx->gfx_level <= GFX8) {
305 si_dump_mmapped_reg(sctx, f, R_000E50_SRBM_STATUS);
306 si_dump_mmapped_reg(sctx, f, R_000E4C_SRBM_STATUS2);
307 si_dump_mmapped_reg(sctx, f, R_000E54_SRBM_STATUS3);
308 }
309 si_dump_mmapped_reg(sctx, f, R_008680_CP_STAT);
310 si_dump_mmapped_reg(sctx, f, R_008674_CP_STALLED_STAT1);
311 si_dump_mmapped_reg(sctx, f, R_008678_CP_STALLED_STAT2);
312 si_dump_mmapped_reg(sctx, f, R_008670_CP_STALLED_STAT3);
313 si_dump_mmapped_reg(sctx, f, R_008210_CP_CPC_STATUS);
314 si_dump_mmapped_reg(sctx, f, R_008214_CP_CPC_BUSY_STAT);
315 si_dump_mmapped_reg(sctx, f, R_008218_CP_CPC_STALLED_STAT1);
316 si_dump_mmapped_reg(sctx, f, R_00821C_CP_CPF_STATUS);
317 si_dump_mmapped_reg(sctx, f, R_008220_CP_CPF_BUSY_STAT);
318 si_dump_mmapped_reg(sctx, f, R_008224_CP_CPF_STALLED_STAT1);
319 fprintf(f, "\n");
320 }
321
/* Log chunk holding a reference to a saved CS plus the dword sub-range of
 * the gfx IB that this particular chunk covers. */
struct si_log_chunk_cs {
   struct si_context *ctx;
   struct si_saved_cs *cs;
   bool dump_bo_list;           /* also print the buffer list (set on flush) */
   unsigned gfx_begin, gfx_end; /* covered dword range within the gfx IB */
};
328
si_log_chunk_type_cs_destroy(void * data)329 static void si_log_chunk_type_cs_destroy(void *data)
330 {
331 struct si_log_chunk_cs *chunk = data;
332 si_saved_cs_reference(&chunk->cs, NULL);
333 free(chunk);
334 }
335
/* Parse and print the dword range [begin, end) of a gfx IB that may be split
 * across multiple chunks (cs->prev[] followed by cs->current).
 *
 * \p last_trace_id and \p trace_id_count are forwarded to the IB parser for
 * annotating trace points; \p name appears in the section headers.
 *
 * Note: when the range ends inside one of the previous chunks, the function
 * returns early without printing the "end" footer.
 */
static void si_parse_current_ib(FILE *f, struct radeon_cmdbuf *cs, unsigned begin, unsigned end,
                                int *last_trace_id, unsigned trace_id_count, const char *name,
                                enum amd_gfx_level gfx_level, enum radeon_family family)
{
   unsigned orig_end = end;

   assert(begin <= end);

   fprintf(f, "------------------ %s begin (dw = %u) ------------------\n", name, begin);

   /* Walk the previous chunks; begin/end are rebased into each chunk
    * as we go. */
   for (unsigned prev_idx = 0; prev_idx < cs->num_prev; ++prev_idx) {
      struct radeon_cmdbuf_chunk *chunk = &cs->prev[prev_idx];

      /* Print the part of the range that falls inside this chunk. */
      if (begin < chunk->cdw) {
         ac_parse_ib_chunk(f, chunk->buf + begin, MIN2(end, chunk->cdw) - begin, last_trace_id,
                           trace_id_count, gfx_level, family, AMD_IP_GFX, NULL, NULL);
      }

      /* The range ends within this chunk: done. */
      if (end <= chunk->cdw)
         return;

      if (begin < chunk->cdw)
         fprintf(f, "\n---------- Next %s Chunk ----------\n\n", name);

      /* Rebase the remaining range past this chunk. */
      begin -= MIN2(begin, chunk->cdw);
      end -= chunk->cdw;
   }

   assert(end <= cs->current.cdw);

   /* The remainder of the range lies in the current chunk. */
   ac_parse_ib_chunk(f, cs->current.buf + begin, end - begin, last_trace_id, trace_id_count,
                     gfx_level, family, AMD_IP_GFX, NULL, NULL);

   fprintf(f, "------------------- %s end (dw = %u) -------------------\n\n", name, orig_end);
}
371
si_print_current_ib(struct si_context * sctx,FILE * f)372 void si_print_current_ib(struct si_context *sctx, FILE *f)
373 {
374 si_parse_current_ib(f, &sctx->gfx_cs, 0, sctx->gfx_cs.prev_dw + sctx->gfx_cs.current.cdw,
375 NULL, 0, "GFX", sctx->gfx_level, sctx->family);
376 }
377
/* u_log print callback: parse the chunk's IB dword range (annotated with the
 * last trace id read back from the trace buffer) and optionally the buffer
 * list. */
static void si_log_chunk_type_cs_print(void *data, FILE *f)
{
   struct si_log_chunk_cs *chunk = data;
   struct si_context *ctx = chunk->ctx;
   struct si_saved_cs *scs = chunk->cs;
   int last_trace_id = -1;

   /* We are expecting that the ddebug pipe has already
    * waited for the context, so this buffer should be idle.
    * If the GPU is hung, there is no point in waiting for it.
    */
   uint32_t *map = ctx->ws->buffer_map(ctx->ws, scs->trace_buf->buf, NULL,
                                       PIPE_MAP_UNSYNCHRONIZED | PIPE_MAP_READ);
   if (map)
      last_trace_id = map[0];

   if (chunk->gfx_end != chunk->gfx_begin) {
      if (scs->flushed) {
         /* Already flushed: parse the linearized copy made by si_save_cs. */
         ac_parse_ib(f, scs->gfx.ib + chunk->gfx_begin, chunk->gfx_end - chunk->gfx_begin,
                     &last_trace_id, map ? 1 : 0, "IB", ctx->gfx_level, ctx->family, AMD_IP_GFX, NULL, NULL);
      } else {
         /* Not flushed yet: parse the live command buffer. */
         si_parse_current_ib(f, &ctx->gfx_cs, chunk->gfx_begin, chunk->gfx_end, &last_trace_id,
                             map ? 1 : 0, "IB", ctx->gfx_level, ctx->family);
      }
   }

   /* On flush chunks, also print the timing and the buffer list. */
   if (chunk->dump_bo_list) {
      fprintf(f, "Flushing. Time: ");
      util_dump_ns(f, scs->time_flush);
      fprintf(f, "\n\n");
      si_dump_bo_list(ctx, &scs->gfx, f);
   }
}
411
/* vtable for CS log chunks */
static const struct u_log_chunk_type si_log_chunk_type_cs = {
   .destroy = si_log_chunk_type_cs_destroy,
   .print = si_log_chunk_type_cs_print,
};
416
si_log_cs(struct si_context * ctx,struct u_log_context * log,bool dump_bo_list)417 static void si_log_cs(struct si_context *ctx, struct u_log_context *log, bool dump_bo_list)
418 {
419 assert(ctx->current_saved_cs);
420
421 struct si_saved_cs *scs = ctx->current_saved_cs;
422 unsigned gfx_cur = ctx->gfx_cs.prev_dw + ctx->gfx_cs.current.cdw;
423
424 if (!dump_bo_list && gfx_cur == scs->gfx_last_dw)
425 return;
426
427 struct si_log_chunk_cs *chunk = calloc(1, sizeof(*chunk));
428
429 chunk->ctx = ctx;
430 si_saved_cs_reference(&chunk->cs, scs);
431 chunk->dump_bo_list = dump_bo_list;
432
433 chunk->gfx_begin = scs->gfx_last_dw;
434 chunk->gfx_end = gfx_cur;
435 scs->gfx_last_dw = gfx_cur;
436
437 u_log_chunk(log, &si_log_chunk_type_cs, chunk);
438 }
439
si_auto_log_cs(void * data,struct u_log_context * log)440 void si_auto_log_cs(void *data, struct u_log_context *log)
441 {
442 struct si_context *ctx = (struct si_context *)data;
443 si_log_cs(ctx, log, false);
444 }
445
si_log_hw_flush(struct si_context * sctx)446 void si_log_hw_flush(struct si_context *sctx)
447 {
448 if (!sctx->log)
449 return;
450
451 si_log_cs(sctx, sctx->log, true);
452
453 if (sctx->context_flags & SI_CONTEXT_FLAG_AUX) {
454 /* The aux context isn't captured by the ddebug wrapper,
455 * so we dump it on a flush-by-flush basis here.
456 */
457 FILE *f = dd_get_debug_file(false);
458 if (!f) {
459 fprintf(stderr, "radeonsi: error opening aux context dump file.\n");
460 } else {
461 dd_write_header(f, &sctx->screen->b, 0);
462
463 fprintf(f, "Aux context dump:\n\n");
464 u_log_new_page_print(sctx->log, f);
465
466 fclose(f);
467 }
468 }
469 }
470
/* Map a RADEON_PRIO_* usage value to a human-readable name.
 * Returns "" when the value matches none of the known priorities. */
static const char *priority_to_string(unsigned priority)
{
#define ITEM(x) if (priority == RADEON_PRIO_##x) return #x
   ITEM(FENCE_TRACE);
   ITEM(SO_FILLED_SIZE);
   ITEM(QUERY);
   ITEM(IB);
   ITEM(DRAW_INDIRECT);
   ITEM(INDEX_BUFFER);
   ITEM(CP_DMA);
   ITEM(BORDER_COLORS);
   ITEM(CONST_BUFFER);
   ITEM(DESCRIPTORS);
   ITEM(SAMPLER_BUFFER);
   ITEM(VERTEX_BUFFER);
   ITEM(SHADER_RW_BUFFER);
   ITEM(SAMPLER_TEXTURE);
   ITEM(SHADER_RW_IMAGE);
   ITEM(SAMPLER_TEXTURE_MSAA);
   ITEM(COLOR_BUFFER);
   ITEM(DEPTH_BUFFER);
   ITEM(COLOR_BUFFER_MSAA);
   ITEM(DEPTH_BUFFER_MSAA);
   ITEM(SEPARATE_META);
   ITEM(SHADER_BINARY);
   ITEM(SHADER_RINGS);
   ITEM(SCRATCH_BUFFER);
#undef ITEM

   return "";
}
502
bo_list_compare_va(const struct radeon_bo_list_item * a,const struct radeon_bo_list_item * b)503 static int bo_list_compare_va(const struct radeon_bo_list_item *a,
504 const struct radeon_bo_list_item *b)
505 {
506 return a->vm_address < b->vm_address ? -1 : a->vm_address > b->vm_address ? 1 : 0;
507 }
508
/* Print the saved buffer list, sorted by VA, with per-buffer size, page
 * range, and priority usage flags. Holes between consecutive buffers are
 * printed as well. */
static void si_dump_bo_list(struct si_context *sctx, const struct radeon_saved_cs *saved, FILE *f)
{
   unsigned i, j;

   if (!saved->bo_list)
      return;

   /* Sort the list according to VM addresses first. */
   /* NOTE(review): calling qsort through a comparator cast via (void *)
    * is non-standard if the comparator's real signature differs from
    * int (*)(const void *, const void *) — verify bo_list_compare_va. */
   qsort(saved->bo_list, saved->bo_count, sizeof(saved->bo_list[0]), (void *)bo_list_compare_va);

   fprintf(f, "Buffer list (in units of pages = 4kB):\n" COLOR_YELLOW
              " Size VM start page "
              "VM end page Usage" COLOR_RESET "\n");

   for (i = 0; i < saved->bo_count; i++) {
      /* Note: Buffer sizes are expected to be aligned to 4k by the winsys. */
      const unsigned page_size = sctx->screen->info.gart_page_size;
      uint64_t va = saved->bo_list[i].vm_address;
      uint64_t size = saved->bo_list[i].bo_size;
      bool hit = false;

      /* If there's unused virtual memory between 2 buffers, print it. */
      if (i) {
         uint64_t previous_va_end =
            saved->bo_list[i - 1].vm_address + saved->bo_list[i - 1].bo_size;

         if (va > previous_va_end) {
            fprintf(f, " %10" PRIu64 " -- hole --\n", (va - previous_va_end) / page_size);
         }
      }

      /* Print the buffer. */
      fprintf(f, " %10" PRIu64 " 0x%013" PRIX64 " 0x%013" PRIX64 " ",
              size / page_size, va / page_size, (va + size) / page_size);

      /* Print the usage: one name per set priority bit. */
      for (j = 0; j < 32; j++) {
         if (!(saved->bo_list[i].priority_usage & (1u << j)))
            continue;

         fprintf(f, "%s%s", !hit ? "" : ", ", priority_to_string(1u << j));
         hit = true;
      }
      fprintf(f, "\n");
   }
   fprintf(f, "\nNote: The holes represent memory not used by the IB.\n"
              " Other buffers can still be allocated there.\n\n");
}
557
si_dump_framebuffer(struct si_context * sctx,struct u_log_context * log)558 static void si_dump_framebuffer(struct si_context *sctx, struct u_log_context *log)
559 {
560 struct pipe_framebuffer_state *state = &sctx->framebuffer.state;
561 struct si_texture *tex;
562 int i;
563
564 for (i = 0; i < state->nr_cbufs; i++) {
565 if (!state->cbufs[i])
566 continue;
567
568 tex = (struct si_texture *)state->cbufs[i]->texture;
569 u_log_printf(log, COLOR_YELLOW "Color buffer %i:" COLOR_RESET "\n", i);
570 si_print_texture_info(sctx->screen, tex, log);
571 u_log_printf(log, "\n");
572 }
573
574 if (state->zsbuf) {
575 tex = (struct si_texture *)state->zsbuf->texture;
576 u_log_printf(log, COLOR_YELLOW "Depth-stencil buffer:" COLOR_RESET "\n");
577 si_print_texture_info(sctx->screen, tex, log);
578 u_log_printf(log, "\n");
579 }
580 }
581
582 typedef unsigned (*slot_remap_func)(unsigned);
583
/* Log chunk holding a CPU snapshot of a descriptor list plus a mapping of
 * the GPU copy, so the two can be compared when the log is printed. */
struct si_log_chunk_desc_list {
   /** Pointer to memory map of buffer where the list is uploaded */
   uint32_t *gpu_list;
   /** Reference of buffer where the list is uploaded, so that gpu_list
    * is kept live. */
   struct si_resource *buf;

   const char *shader_name;    /* e.g. "VS" */
   const char *elem_name;      /* e.g. " - Sampler" */
   slot_remap_func slot_remap; /* maps printed slot index -> descriptor slot */
   enum amd_gfx_level gfx_level;
   enum radeon_family family;
   unsigned element_dw_size;   /* dwords per element (4, 8 or 16) */
   unsigned num_elements;

   /* CPU-side copy of the descriptors, appended to the struct. */
   uint32_t list[0];
};
601
si_log_chunk_desc_list_destroy(void * data)602 static void si_log_chunk_desc_list_destroy(void *data)
603 {
604 struct si_log_chunk_desc_list *chunk = data;
605 si_resource_reference(&chunk->buf, NULL);
606 FREE(chunk);
607 }
608
si_log_chunk_desc_list_print(void * data,FILE * f)609 static void si_log_chunk_desc_list_print(void *data, FILE *f)
610 {
611 struct si_log_chunk_desc_list *chunk = data;
612 unsigned sq_img_rsrc_word0 =
613 chunk->gfx_level >= GFX10 ? R_00A000_SQ_IMG_RSRC_WORD0 : R_008F10_SQ_IMG_RSRC_WORD0;
614
615 for (unsigned i = 0; i < chunk->num_elements; i++) {
616 unsigned cpu_dw_offset = i * chunk->element_dw_size;
617 unsigned gpu_dw_offset = chunk->slot_remap(i) * chunk->element_dw_size;
618 const char *list_note = chunk->gpu_list ? "GPU list" : "CPU list";
619 uint32_t *cpu_list = chunk->list + cpu_dw_offset;
620 uint32_t *gpu_list = chunk->gpu_list ? chunk->gpu_list + gpu_dw_offset : cpu_list;
621
622 fprintf(f, COLOR_GREEN "%s%s slot %u (%s):" COLOR_RESET "\n", chunk->shader_name,
623 chunk->elem_name, i, list_note);
624
625 switch (chunk->element_dw_size) {
626 case 4:
627 for (unsigned j = 0; j < 4; j++)
628 ac_dump_reg(f, chunk->gfx_level, chunk->family,
629 R_008F00_SQ_BUF_RSRC_WORD0 + j * 4, gpu_list[j], 0xffffffff);
630 break;
631 case 8:
632 for (unsigned j = 0; j < 8; j++)
633 ac_dump_reg(f, chunk->gfx_level, chunk->family,
634 sq_img_rsrc_word0 + j * 4, gpu_list[j], 0xffffffff);
635
636 fprintf(f, COLOR_CYAN " Buffer:" COLOR_RESET "\n");
637 for (unsigned j = 0; j < 4; j++)
638 ac_dump_reg(f, chunk->gfx_level, chunk->family,
639 R_008F00_SQ_BUF_RSRC_WORD0 + j * 4, gpu_list[4 + j], 0xffffffff);
640 break;
641 case 16:
642 for (unsigned j = 0; j < 8; j++)
643 ac_dump_reg(f, chunk->gfx_level, chunk->family,
644 sq_img_rsrc_word0 + j * 4, gpu_list[j], 0xffffffff);
645
646 fprintf(f, COLOR_CYAN " Buffer:" COLOR_RESET "\n");
647 for (unsigned j = 0; j < 4; j++)
648 ac_dump_reg(f, chunk->gfx_level, chunk->family,
649 R_008F00_SQ_BUF_RSRC_WORD0 + j * 4, gpu_list[4 + j], 0xffffffff);
650
651 fprintf(f, COLOR_CYAN " FMASK:" COLOR_RESET "\n");
652 for (unsigned j = 0; j < 8; j++)
653 ac_dump_reg(f, chunk->gfx_level, chunk->family,
654 sq_img_rsrc_word0 + j * 4, gpu_list[8 + j], 0xffffffff);
655
656 fprintf(f, COLOR_CYAN " Sampler state:" COLOR_RESET "\n");
657 for (unsigned j = 0; j < 4; j++)
658 ac_dump_reg(f, chunk->gfx_level, chunk->family,
659 R_008F30_SQ_IMG_SAMP_WORD0 + j * 4, gpu_list[12 + j], 0xffffffff);
660 break;
661 }
662
663 if (memcmp(gpu_list, cpu_list, chunk->element_dw_size * 4) != 0) {
664 fprintf(f, COLOR_RED "!!!!! This slot was corrupted in GPU memory !!!!!" COLOR_RESET "\n");
665 }
666
667 fprintf(f, "\n");
668 }
669 }
670
/* vtable for descriptor-list log chunks */
static const struct u_log_chunk_type si_log_chunk_type_descriptor_list = {
   .destroy = si_log_chunk_desc_list_destroy,
   .print = si_log_chunk_desc_list_print,
};
675
/* Record a descriptor list into the log.
 *
 * A CPU-side copy of the (slot-remapped) descriptors is stored in the chunk
 * so that it can later be compared with the GPU copy to detect corruption.
 */
static void si_dump_descriptor_list(struct si_screen *screen, struct si_descriptors *desc,
                                    const char *shader_name, const char *elem_name,
                                    unsigned element_dw_size, unsigned num_elements,
                                    slot_remap_func slot_remap, struct u_log_context *log)
{
   if (!desc->list)
      return;

   /* In some cases, the caller doesn't know how many elements are really
    * uploaded. Reduce num_elements to fit in the range of active slots. */
   unsigned active_range_dw_begin = desc->first_active_slot * desc->element_dw_size;
   unsigned active_range_dw_end =
      active_range_dw_begin + desc->num_active_slots * desc->element_dw_size;

   while (num_elements > 0) {
      int i = slot_remap(num_elements - 1);
      unsigned dw_begin = i * element_dw_size;
      unsigned dw_end = dw_begin + element_dw_size;

      if (dw_begin >= active_range_dw_begin && dw_end <= active_range_dw_end)
         break;

      num_elements--;
   }

   struct si_log_chunk_desc_list *chunk =
      CALLOC_VARIANT_LENGTH_STRUCT(si_log_chunk_desc_list, 4 * element_dw_size * num_elements);
   if (!chunk)
      return; /* OOM: skip logging this descriptor list. */

   chunk->shader_name = shader_name;
   chunk->elem_name = elem_name;
   chunk->element_dw_size = element_dw_size;
   chunk->num_elements = num_elements;
   chunk->slot_remap = slot_remap;
   chunk->gfx_level = screen->info.gfx_level;
   chunk->family = screen->info.family;

   /* Keep the upload buffer alive so gpu_list stays mapped. */
   si_resource_reference(&chunk->buf, desc->buffer);
   chunk->gpu_list = desc->gpu_list;

   /* Snapshot the CPU copy, remapped into printing order. */
   for (unsigned i = 0; i < num_elements; ++i) {
      memcpy(&chunk->list[i * element_dw_size], &desc->list[slot_remap(i) * element_dw_size],
             4 * element_dw_size);
   }

   u_log_chunk(log, &si_log_chunk_type_descriptor_list, chunk);
}
721
/* Trivial slot remap function: descriptor slot == printed slot. */
static unsigned si_identity(unsigned slot)
{
   return slot;
}
726
/* Dump all descriptor lists (constant buffers, shader buffers, samplers,
 * images) of one shader stage.
 *
 * \p info  when non-NULL, the shader's declared resource counts bound how
 *          many slots are dumped; otherwise the context's enabled masks
 *          are used.
 */
static void si_dump_descriptors(struct si_context *sctx, gl_shader_stage stage,
                                const struct si_shader_info *info, struct u_log_context *log)
{
   enum pipe_shader_type processor = pipe_shader_type_from_mesa(stage);
   struct si_descriptors *descs =
      &sctx->descriptors[SI_DESCS_FIRST_SHADER + processor * SI_NUM_SHADER_DESCS];
   static const char *shader_name[] = {"VS", "PS", "GS", "TCS", "TES", "CS"};
   const char *name = shader_name[processor];
   unsigned enabled_constbuf, enabled_shaderbuf, enabled_samplers;
   unsigned enabled_images;

   if (info) {
      enabled_constbuf = u_bit_consecutive(0, info->base.num_ubos);
      enabled_shaderbuf = u_bit_consecutive(0, info->base.num_ssbos);
      enabled_samplers = info->base.textures_used[0];
      enabled_images = u_bit_consecutive(0, info->base.num_images);
   } else {
      /* Constant buffers occupy the bits above SI_NUM_SHADER_BUFFERS in the
       * combined const-and-shader-buffer mask. */
      enabled_constbuf =
         sctx->const_and_shader_buffers[processor].enabled_mask >> SI_NUM_SHADER_BUFFERS;
      /* Shader buffer slots are stored in reversed order in the low bits.
       * NOTE(review): each loop term `(mask & 1 << (N-1-i)) << i` lands on
       * bit N-1, so enabled_shaderbuf is either 0 or has only that top bit
       * set, causing util_last_bit() below to report all N slots whenever
       * any shader buffer is enabled — possibly meant to reverse the mask
       * bit-by-bit; verify intent. */
      enabled_shaderbuf = 0;
      for (int i = 0; i < SI_NUM_SHADER_BUFFERS; i++) {
         enabled_shaderbuf |=
            (sctx->const_and_shader_buffers[processor].enabled_mask &
             1llu << (SI_NUM_SHADER_BUFFERS - i - 1)) << i;
      }
      enabled_samplers = sctx->samplers[processor].enabled_mask;
      enabled_images = sctx->images[processor].enabled_mask;
   }

   si_dump_descriptor_list(sctx->screen, &descs[SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS], name,
                           " - Constant buffer", 4, util_last_bit(enabled_constbuf),
                           si_get_constbuf_slot, log);
   si_dump_descriptor_list(sctx->screen, &descs[SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS], name,
                           " - Shader buffer", 4, util_last_bit(enabled_shaderbuf),
                           si_get_shaderbuf_slot, log);
   si_dump_descriptor_list(sctx->screen, &descs[SI_SHADER_DESCS_SAMPLERS_AND_IMAGES], name,
                           " - Sampler", 16, util_last_bit(enabled_samplers), si_get_sampler_slot,
                           log);
   si_dump_descriptor_list(sctx->screen, &descs[SI_SHADER_DESCS_SAMPLERS_AND_IMAGES], name,
                           " - Image", 8, util_last_bit(enabled_images), si_get_image_slot, log);
}
768
si_dump_gfx_descriptors(struct si_context * sctx,const struct si_shader_ctx_state * state,struct u_log_context * log)769 static void si_dump_gfx_descriptors(struct si_context *sctx,
770 const struct si_shader_ctx_state *state,
771 struct u_log_context *log)
772 {
773 if (!state->cso || !state->current)
774 return;
775
776 si_dump_descriptors(sctx, state->cso->stage, &state->cso->info, log);
777 }
778
si_dump_compute_descriptors(struct si_context * sctx,struct u_log_context * log)779 static void si_dump_compute_descriptors(struct si_context *sctx, struct u_log_context *log)
780 {
781 if (!sctx->cs_shader_state.program)
782 return;
783
784 si_dump_descriptors(sctx, MESA_SHADER_COMPUTE, NULL, log);
785 }
786
/* One disassembled instruction, used when annotating a GPU hang dump. */
struct si_shader_inst {
   const char *text; /* start of disassembly for this instruction */
   unsigned textlen; /* length of the disassembly text in bytes */
   unsigned size;    /* instruction size = 4 or 8 */
   uint64_t addr;    /* instruction address */
};
793
/**
 * Open the given \p binary as \p rtld_binary and split the contained
 * disassembly string into instructions and add them to the array
 * pointed to by \p instructions, which must be sufficiently large.
 *
 * Labels are considered to be part of the following instruction.
 *
 * The caller must keep \p rtld_binary alive as long as \p instructions are
 * used and then close it afterwards.
 */
static void si_add_split_disasm(struct si_screen *screen, struct ac_rtld_binary *rtld_binary,
                                struct si_shader_binary *binary, uint64_t *addr, unsigned *num,
                                struct si_shader_inst *instructions,
                                gl_shader_stage stage, unsigned wave_size)
{
   if (!ac_rtld_open(rtld_binary, (struct ac_rtld_open_info){
                                     .info = &screen->info,
                                     .shader_type = stage,
                                     .wave_size = wave_size,
                                     .num_parts = 1,
                                     .elf_ptrs = &binary->code_buffer,
                                     .elf_sizes = &binary->code_size}))
      return;

   /* The disassembly text is stored in a dedicated ELF section. */
   const char *disasm;
   size_t nbytes;
   if (!ac_rtld_get_section_by_name(rtld_binary, ".AMDGPU.disasm", &disasm, &nbytes))
      return;

   /* Each line has the form "<label:>? <mnemonic ...> ; <encoding>\n".
    * Split on ';'/newline pairs; text before the ';' (including any label
    * on the preceding lines) belongs to the instruction. */
   const char *end = disasm + nbytes;
   while (disasm < end) {
      const char *semicolon = memchr(disasm, ';', end - disasm);
      if (!semicolon)
         break;

      struct si_shader_inst *inst = &instructions[(*num)++];
      const char *inst_end = memchr(semicolon + 1, '\n', end - semicolon - 1);
      if (!inst_end)
         inst_end = end;

      inst->text = disasm;
      inst->textlen = inst_end - disasm;

      inst->addr = *addr;
      /* More than 16 chars after ";" means the instruction is 8 bytes long. */
      inst->size = inst_end - semicolon > 16 ? 8 : 4;
      *addr += inst->size;

      if (inst_end == end)
         break;
      disasm = inst_end + 1;
   }
}
847
848 /* If the shader is being executed, print its asm instructions, and annotate
849 * those that are being executed right now with information about waves that
850 * execute them. This is most useful during a GPU hang.
851 */
si_print_annotated_shader(struct si_shader * shader,struct ac_wave_info * waves,unsigned num_waves,FILE * f)852 static void si_print_annotated_shader(struct si_shader *shader, struct ac_wave_info *waves,
853 unsigned num_waves, FILE *f)
854 {
855 if (!shader)
856 return;
857
858 struct si_screen *screen = shader->selector->screen;
859 gl_shader_stage stage = shader->selector->stage;
860 uint64_t start_addr = shader->bo->gpu_address;
861 uint64_t end_addr = start_addr + shader->bo->b.b.width0;
862 unsigned i;
863
864 /* See if any wave executes the shader. */
865 for (i = 0; i < num_waves; i++) {
866 if (start_addr <= waves[i].pc && waves[i].pc <= end_addr)
867 break;
868 }
869 if (i == num_waves)
870 return; /* the shader is not being executed */
871
872 /* Remember the first found wave. The waves are sorted according to PC. */
873 waves = &waves[i];
874 num_waves -= i;
875
876 /* Get the list of instructions.
877 * Buffer size / 4 is the upper bound of the instruction count.
878 */
879 unsigned num_inst = 0;
880 uint64_t inst_addr = start_addr;
881 struct ac_rtld_binary rtld_binaries[5] = {};
882 struct si_shader_inst *instructions =
883 calloc(shader->bo->b.b.width0 / 4, sizeof(struct si_shader_inst));
884
885 if (shader->prolog) {
886 si_add_split_disasm(screen, &rtld_binaries[0], &shader->prolog->binary, &inst_addr, &num_inst,
887 instructions, stage, shader->wave_size);
888 }
889 if (shader->previous_stage) {
890 si_add_split_disasm(screen, &rtld_binaries[1], &shader->previous_stage->binary, &inst_addr,
891 &num_inst, instructions, stage, shader->wave_size);
892 }
893 si_add_split_disasm(screen, &rtld_binaries[3], &shader->binary, &inst_addr, &num_inst,
894 instructions, stage, shader->wave_size);
895 if (shader->epilog) {
896 si_add_split_disasm(screen, &rtld_binaries[4], &shader->epilog->binary, &inst_addr, &num_inst,
897 instructions, stage, shader->wave_size);
898 }
899
900 fprintf(f, COLOR_YELLOW "%s - annotated disassembly:" COLOR_RESET "\n",
901 si_get_shader_name(shader));
902
903 /* Print instructions with annotations. */
904 for (i = 0; i < num_inst; i++) {
905 struct si_shader_inst *inst = &instructions[i];
906
907 fprintf(f, "%.*s [PC=0x%" PRIx64 ", size=%u]\n", inst->textlen, inst->text, inst->addr,
908 inst->size);
909
910 /* Print which waves execute the instruction right now. */
911 while (num_waves && inst->addr == waves->pc) {
912 fprintf(f,
913 " " COLOR_GREEN "^ SE%u SH%u CU%u "
914 "SIMD%u WAVE%u EXEC=%016" PRIx64 " ",
915 waves->se, waves->sh, waves->cu, waves->simd, waves->wave, waves->exec);
916
917 if (inst->size == 4) {
918 fprintf(f, "INST32=%08X" COLOR_RESET "\n", waves->inst_dw0);
919 } else {
920 fprintf(f, "INST64=%08X %08X" COLOR_RESET "\n", waves->inst_dw0, waves->inst_dw1);
921 }
922
923 waves->matched = true;
924 waves = &waves[1];
925 num_waves--;
926 }
927 }
928
929 fprintf(f, "\n\n");
930 free(instructions);
931 for (unsigned i = 0; i < ARRAY_SIZE(rtld_binaries); ++i)
932 ac_rtld_close(&rtld_binaries[i]);
933 }
934
si_dump_annotated_shaders(struct si_context * sctx,FILE * f)935 static void si_dump_annotated_shaders(struct si_context *sctx, FILE *f)
936 {
937 struct ac_wave_info waves[AC_MAX_WAVES_PER_CHIP];
938 unsigned num_waves = ac_get_wave_info(sctx->gfx_level, &sctx->screen->info, waves);
939
940 fprintf(f, COLOR_CYAN "The number of active waves = %u" COLOR_RESET "\n\n", num_waves);
941
942 si_print_annotated_shader(sctx->shader.vs.current, waves, num_waves, f);
943 si_print_annotated_shader(sctx->shader.tcs.current, waves, num_waves, f);
944 si_print_annotated_shader(sctx->shader.tes.current, waves, num_waves, f);
945 si_print_annotated_shader(sctx->shader.gs.current, waves, num_waves, f);
946 si_print_annotated_shader(sctx->shader.ps.current, waves, num_waves, f);
947
948 /* Print waves executing shaders that are not currently bound. */
949 unsigned i;
950 bool found = false;
951 for (i = 0; i < num_waves; i++) {
952 if (waves[i].matched)
953 continue;
954
955 if (!found) {
956 fprintf(f, COLOR_CYAN "Waves not executing currently-bound shaders:" COLOR_RESET "\n");
957 found = true;
958 }
959 fprintf(f,
960 " SE%u SH%u CU%u SIMD%u WAVE%u EXEC=%016" PRIx64 " INST=%08X %08X PC=%" PRIx64
961 "\n",
962 waves[i].se, waves[i].sh, waves[i].cu, waves[i].simd, waves[i].wave, waves[i].exec,
963 waves[i].inst_dw0, waves[i].inst_dw1, waves[i].pc);
964 }
965 if (found)
966 fprintf(f, "\n\n");
967 }
968
si_dump_command(const char * title,const char * command,FILE * f)969 static void si_dump_command(const char *title, const char *command, FILE *f)
970 {
971 char line[2000];
972
973 FILE *p = popen(command, "r");
974 if (!p)
975 return;
976
977 fprintf(f, COLOR_YELLOW "%s: " COLOR_RESET "\n", title);
978 while (fgets(line, sizeof(line), p))
979 fputs(line, f);
980 fprintf(f, "\n\n");
981 pclose(p);
982 }
983
si_dump_debug_state(struct pipe_context * ctx,FILE * f,unsigned flags)984 static void si_dump_debug_state(struct pipe_context *ctx, FILE *f, unsigned flags)
985 {
986 struct si_context *sctx = (struct si_context *)ctx;
987
988 if (sctx->log)
989 u_log_flush(sctx->log);
990
991 if (flags & PIPE_DUMP_DEVICE_STATUS_REGISTERS) {
992 si_dump_debug_registers(sctx, f);
993
994 si_dump_annotated_shaders(sctx, f);
995 si_dump_command("Active waves (raw data)", "umr -O halt_waves -wa | column -t", f);
996 si_dump_command("Wave information", "umr -O halt_waves,bits -wa", f);
997 }
998 }
999
/* Log the currently-bound graphics state to \p log: framebuffer, every
 * graphics shader stage, the internal RW-buffer descriptor list, and the
 * per-stage descriptor lists.  No-op when \p log is NULL.
 */
void si_log_draw_state(struct si_context *sctx, struct u_log_context *log)
{
   if (!log)
      return;

   si_dump_framebuffer(sctx, log);

   /* One call per graphics stage (VS, TCS, TES, GS, PS). */
   si_dump_gfx_shader(sctx, &sctx->shader.vs, log);
   si_dump_gfx_shader(sctx, &sctx->shader.tcs, log);
   si_dump_gfx_shader(sctx, &sctx->shader.tes, log);
   si_dump_gfx_shader(sctx, &sctx->shader.gs, log);
   si_dump_gfx_shader(sctx, &sctx->shader.ps, log);

   /* Internal descriptors first (element size 4 dwords), then the
    * descriptors owned by each shader stage. */
   si_dump_descriptor_list(sctx->screen, &sctx->descriptors[SI_DESCS_INTERNAL], "", "RW buffers",
                           4, sctx->descriptors[SI_DESCS_INTERNAL].num_active_slots, si_identity,
                           log);
   si_dump_gfx_descriptors(sctx, &sctx->shader.vs, log);
   si_dump_gfx_descriptors(sctx, &sctx->shader.tcs, log);
   si_dump_gfx_descriptors(sctx, &sctx->shader.tes, log);
   si_dump_gfx_descriptors(sctx, &sctx->shader.gs, log);
   si_dump_gfx_descriptors(sctx, &sctx->shader.ps, log);
}
1022
/* Log the currently-bound compute shader and its descriptors to \p log.
 * No-op when \p log is NULL.
 */
void si_log_compute_state(struct si_context *sctx, struct u_log_context *log)
{
   if (!log)
      return;

   si_dump_compute_shader(sctx, log);
   si_dump_compute_descriptors(sctx, log);
}
1031
/* Check dmesg for a new VM fault; if one occurred, write a full report
 * (command line, device info, faulting page, and — for gfx — the logged
 * draw/compute/CS state) to a debug file and terminate the process.
 *
 * NOTE(review): the \p saved parameter is not used in this function body —
 * confirm whether it is still needed by callers.
 */
void si_check_vm_faults(struct si_context *sctx, struct radeon_saved_cs *saved, enum amd_ip_type ring)
{
   struct pipe_screen *screen = sctx->b.screen;
   FILE *f;
   uint64_t addr;
   char cmd_line[4096];

   /* Only faults newer than the stored dmesg timestamp are reported. */
   if (!ac_vm_fault_occurred(sctx->gfx_level, &sctx->dmesg_timestamp, &addr))
      return;

   f = dd_get_debug_file(false);
   if (!f)
      return;

   fprintf(f, "VM fault report.\n\n");
   if (util_get_command_line(cmd_line, sizeof(cmd_line)))
      fprintf(f, "Command: %s\n", cmd_line);
   fprintf(f, "Driver vendor: %s\n", screen->get_vendor(screen));
   fprintf(f, "Device vendor: %s\n", screen->get_device_vendor(screen));
   fprintf(f, "Device name: %s\n\n", screen->get_name(screen));
   fprintf(f, "Failing VM page: 0x%08" PRIx64 "\n\n", addr);

   if (sctx->apitrace_call_number)
      fprintf(f, "Last apitrace call: %u\n\n", sctx->apitrace_call_number);

   /* Detailed state is only dumped for the gfx ring; other IPs get just
    * the header above. */
   switch (ring) {
   case AMD_IP_GFX: {
      struct u_log_context log;
      u_log_context_init(&log);

      si_log_draw_state(sctx, &log);
      si_log_compute_state(sctx, &log);
      si_log_cs(sctx, &log, true);

      u_log_new_page_print(&log, f);
      u_log_context_destroy(&log);
      break;
   }

   default:
      break;
   }

   fclose(f);

   /* A VM fault leaves the GPU context unusable; bail out deliberately. */
   fprintf(stderr, "Detected a VM fault, exiting...\n");
   exit(0);
}
1080
si_gather_context_rolls(struct si_context * sctx)1081 void si_gather_context_rolls(struct si_context *sctx)
1082 {
1083 struct radeon_cmdbuf *cs = &sctx->gfx_cs;
1084 uint32_t **ibs = alloca(sizeof(ibs[0]) * (cs->num_prev + 1));
1085 uint32_t *ib_dw_sizes = alloca(sizeof(ib_dw_sizes[0]) * (cs->num_prev + 1));
1086
1087 for (unsigned i = 0; i < cs->num_prev; i++) {
1088 struct radeon_cmdbuf_chunk *chunk = &cs->prev[i];
1089
1090 ibs[i] = chunk->buf;
1091 ib_dw_sizes[i] = chunk->cdw;
1092 }
1093
1094 ibs[cs->num_prev] = cs->current.buf;
1095 ib_dw_sizes[cs->num_prev] = cs->current.cdw;
1096
1097 FILE *f = fopen(sctx->screen->context_roll_log_filename, "a");
1098 ac_gather_context_rolls(f, ibs, ib_dw_sizes, cs->num_prev + 1, &sctx->screen->info);
1099 fclose(f);
1100 }
1101
/* Install the pipe_context debug hook and, when VM-fault checking is
 * enabled, record the current dmesg timestamp as the baseline.
 */
void si_init_debug_functions(struct si_context *sctx)
{
   sctx->b.dump_debug_state = si_dump_debug_state;

   /* Set the initial dmesg timestamp for this context, so that
    * only new messages will be checked for VM faults.
    */
   if (sctx->screen->debug_flags & DBG(CHECK_VM))
      ac_vm_fault_occurred(sctx->gfx_level, &sctx->dmesg_timestamp, NULL);
}
1112