/*
 * Copyright (C) 2017-2019 Alyssa Rosenzweig
 * Copyright (C) 2017-2019 Connor Abbott
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <midgard_pack.h>
#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include <stdbool.h>
#include <stdarg.h>
#include <ctype.h>
#include "decode.h"
#include "util/macros.h"
#include "util/u_math.h"

#include "midgard/disassemble.h"
#include "bifrost/disassemble.h"

#include "pan_encoder.h"

#define MEMORY_PROP(obj, p) {\
        if (obj->p) { \
                char *a = pointer_as_memory_reference(obj->p); \
                pandecode_prop("%s = %s", #p, a); \
                free(a); \
        } \
}

#define MEMORY_PROP_DIR(obj, p) {\
        if (obj.p) { \
                char *a = pointer_as_memory_reference(obj.p); \
                pandecode_prop("%s = %s", #p, a); \
                free(a); \
        } \
}

#define DUMP_UNPACKED(T, var, ...) { \
        pandecode_log(__VA_ARGS__); \
        pan_print(pandecode_dump_stream, T, var, (pandecode_indent + 1) * 2); \
}

#define DUMP_CL(T, cl, ...) {\
        pan_unpack(cl, T, temp); \
        DUMP_UNPACKED(T, temp, __VA_ARGS__); \
}

#define DUMP_SECTION(A, S, cl, ...) { \
        pan_section_unpack(cl, A, S, temp); \
        pandecode_log(__VA_ARGS__); \
        pan_section_print(pandecode_dump_stream, A, S, temp, (pandecode_indent + 1) * 2); \
}

#define MAP_ADDR(T, addr, cl) \
        const uint8_t *cl = 0; \
        { \
                struct pandecode_mapped_memory *mapped_mem = pandecode_find_mapped_gpu_mem_containing(addr); \
                cl = pandecode_fetch_gpu_mem(mapped_mem, addr, MALI_ ## T ## _LENGTH); \
        }

#define DUMP_ADDR(T, addr, ...) {\
        MAP_ADDR(T, addr, cl) \
        DUMP_CL(T, cl, __VA_ARGS__); \
}
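
/* Usage sketch for the dump helpers (illustrative, using a descriptor that
 * appears later in this file): given a GPU address of a viewport descriptor,
 *
 *    DUMP_ADDR(VIEWPORT, addr, "Viewport:\n");
 *
 * maps the memory, unpacks MALI_VIEWPORT into a temporary, and pretty-prints
 * it one indent level deeper. DUMP_CL does the same for an already-mapped
 * pointer, and DUMP_UNPACKED for an already-unpacked struct. */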

FILE *pandecode_dump_stream;

/* Semantic logging type.
 *
 * Raw: for raw messages to be printed as is.
 * Message: for helpful information to be commented out in replays.
 * Property: for properties of a struct
 *
 * Use one of pandecode_log, pandecode_msg, or pandecode_prop as syntax sugar.
 */

enum pandecode_log_type {
        PANDECODE_RAW,
        PANDECODE_MESSAGE,
        PANDECODE_PROPERTY
};

#define pandecode_log(...) pandecode_log_typed(PANDECODE_RAW, __VA_ARGS__)
#define pandecode_msg(...) pandecode_log_typed(PANDECODE_MESSAGE, __VA_ARGS__)
#define pandecode_prop(...) pandecode_log_typed(PANDECODE_PROPERTY, __VA_ARGS__)
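
/* For illustration, the three types render as follows (assuming
 * pandecode_indent == 0):
 *
 *    pandecode_log("Job Header:\n");   ->  Job Header:
 *    pandecode_msg("check this\n");    ->  // check this
 *    pandecode_prop("count = %d", 4);  ->  .count = 4,
 */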

unsigned pandecode_indent = 0;

static void
pandecode_make_indent(void)
{
        for (unsigned i = 0; i < pandecode_indent; ++i)
                fprintf(pandecode_dump_stream, "  ");
}

static void PRINTFLIKE(2, 3)
pandecode_log_typed(enum pandecode_log_type type, const char *format, ...)
{
        va_list ap;

        pandecode_make_indent();

        if (type == PANDECODE_MESSAGE)
                fprintf(pandecode_dump_stream, "// ");
        else if (type == PANDECODE_PROPERTY)
                fprintf(pandecode_dump_stream, ".");

        va_start(ap, format);
        vfprintf(pandecode_dump_stream, format, ap);
        va_end(ap);

        if (type == PANDECODE_PROPERTY)
                fprintf(pandecode_dump_stream, ",\n");
}

static void
pandecode_log_cont(const char *format, ...)
{
        va_list ap;

        va_start(ap, format);
        vfprintf(pandecode_dump_stream, format, ap);
        va_end(ap);
}

/* To check for memory safety issues, validates that the given pointer in GPU
 * memory is valid, containing at least sz bytes. The goal is to eliminate
 * GPU-side memory bugs (NULL pointer dereferences, buffer overflows, or buffer
 * overruns) by statically validating pointers.
 */
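
/* A sketch of the intended behaviour, assuming a BO mapped at 0x10000 with
 * length 0x100 (addresses illustrative):
 *
 *    pandecode_validate_buffer(0x10000, 0x80);  // in bounds, silent
 *    pandecode_validate_buffer(0x100C0, 0x80);  // logs "XXX: buffer overrun"
 *    pandecode_validate_buffer(0, 16);          // logs a null deref message
 */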

static void
pandecode_validate_buffer(mali_ptr addr, size_t sz)
{
        if (!addr) {
                pandecode_msg("XXX: null pointer deref\n");
                return;
        }

        /* Find a BO */

        struct pandecode_mapped_memory *bo =
                pandecode_find_mapped_gpu_mem_containing(addr);

        if (!bo) {
                pandecode_msg("XXX: invalid memory dereference\n");
                return;
        }

        /* Bounds check */

        unsigned offset = addr - bo->gpu_va;
        unsigned total = offset + sz;

        if (total > bo->length) {
                pandecode_msg("XXX: buffer overrun. "
                              "Chunk of size %zu at offset %u in buffer of size %zu. "
                              "Overrun by %zu bytes.\n",
                              sz, offset, bo->length, total - bo->length);
                return;
        }
}

/* Midgard's tiler descriptor is embedded within the
 * larger FBD */

static void
pandecode_midgard_tiler_descriptor(
        const struct mali_midgard_tiler_packed *tp,
        const struct mali_midgard_tiler_weights_packed *wp,
        unsigned width,
        unsigned height,
        bool is_fragment,
        bool has_hierarchy)
{
        pan_unpack(tp, MIDGARD_TILER, t);
        DUMP_UNPACKED(MIDGARD_TILER, t, "Tiler:\n");

        MEMORY_PROP_DIR(t, polygon_list);

        /* The body is offset from the base of the polygon list */
        //assert(t->polygon_list_body > t->polygon_list);
        unsigned body_offset = t.polygon_list_body - t.polygon_list;

        /* It needs to fit inside the reported size */
        //assert(t->polygon_list_size >= body_offset);

        /* Now that we've sanity checked, we'll try to calculate the sizes
         * ourselves for comparison */

        unsigned ref_header = panfrost_tiler_header_size(width, height, t.hierarchy_mask, has_hierarchy);
        unsigned ref_size = panfrost_tiler_full_size(width, height, t.hierarchy_mask, has_hierarchy);

        if (!((ref_header == body_offset) && (ref_size == t.polygon_list_size))) {
                pandecode_msg("XXX: bad polygon list size (expected %d / 0x%x)\n",
                              ref_header, ref_size);
                pandecode_prop("polygon_list_size = 0x%x", t.polygon_list_size);
                pandecode_msg("body offset %d\n", body_offset);
        }

        /* The tiler heap has a start and end specified -- it should be
         * identical to what we have in the BO. The exception is if tiling is
         * disabled. */

        MEMORY_PROP_DIR(t, heap_start);
        assert(t.heap_end >= t.heap_start);

        unsigned heap_size = t.heap_end - t.heap_start;

        /* Tiling is enabled with a special flag */
        unsigned hierarchy_mask = t.hierarchy_mask & MALI_MIDGARD_TILER_HIERARCHY_MASK;
        unsigned tiler_flags = t.hierarchy_mask ^ hierarchy_mask;

        bool tiling_enabled = hierarchy_mask;
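
        /* In other words: the low bits of hierarchy_mask select which bin
         * size levels the tiler uses (one bit per level), and everything
         * above them is flag space, of which only the disable flag is
         * expected. This split is our reading of the descriptor, not a
         * documented contract. */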

        if (tiling_enabled) {
                /* We should also have no other flags */
                if (tiler_flags)
                        pandecode_msg("XXX: unexpected tiler flags %X\n", tiler_flags);
        } else {
                /* When tiling is disabled, we should have that flag and no others */

                if (tiler_flags != MALI_MIDGARD_TILER_DISABLED) {
                        pandecode_msg("XXX: unexpected tiler flag %X, expected MALI_MIDGARD_TILER_DISABLED\n",
                                      tiler_flags);
                }

                /* We should also have an empty heap */
                if (heap_size) {
                        pandecode_msg("XXX: tiler heap size %d given, expected empty\n",
                                      heap_size);
                }

                /* Disabled tiling is used only for clear-only jobs, which are
                 * purely FRAGMENT, so we should never see this for
                 * non-FRAGMENT descriptors. */

                if (!is_fragment)
                        pandecode_msg("XXX: tiler disabled for non-FRAGMENT job\n");
        }

        /* We've never seen weights used in practice, but we know from the
         * kernel these fields are there */

        pan_unpack(wp, MIDGARD_TILER_WEIGHTS, w);
        bool nonzero_weights = false;

        nonzero_weights |= w.weight0 != 0x0;
        nonzero_weights |= w.weight1 != 0x0;
        nonzero_weights |= w.weight2 != 0x0;
        nonzero_weights |= w.weight3 != 0x0;
        nonzero_weights |= w.weight4 != 0x0;
        nonzero_weights |= w.weight5 != 0x0;
        nonzero_weights |= w.weight6 != 0x0;
        nonzero_weights |= w.weight7 != 0x0;

        if (nonzero_weights)
                DUMP_UNPACKED(MIDGARD_TILER_WEIGHTS, w, "Tiler Weights:\n");
}

/* Information about the framebuffer passed back for
 * additional analysis */

struct pandecode_fbd {
        unsigned width;
        unsigned height;
        unsigned rt_count;
        bool has_extra;
};

static struct pandecode_fbd
pandecode_sfbd(uint64_t gpu_va, int job_no, bool is_fragment, unsigned gpu_id)
{
        struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(gpu_va);
        const void *PANDECODE_PTR_VAR(s, mem, (mali_ptr) gpu_va);

        struct pandecode_fbd info = {
                .has_extra = false,
                .rt_count = 1
        };

        pandecode_log("Single-Target Framebuffer:\n");
        pandecode_indent++;

        DUMP_SECTION(SINGLE_TARGET_FRAMEBUFFER, LOCAL_STORAGE, s, "Local Storage:\n");
        pan_section_unpack(s, SINGLE_TARGET_FRAMEBUFFER, PARAMETERS, p);
        DUMP_UNPACKED(SINGLE_TARGET_FRAMEBUFFER_PARAMETERS, p, "Parameters:\n");

        const void *t = pan_section_ptr(s, SINGLE_TARGET_FRAMEBUFFER, TILER);
        const void *w = pan_section_ptr(s, SINGLE_TARGET_FRAMEBUFFER, TILER_WEIGHTS);

        bool has_hierarchy = !(gpu_id == 0x0720 || gpu_id == 0x0820 || gpu_id == 0x0830);
        pandecode_midgard_tiler_descriptor(t, w, p.bound_max_x + 1, p.bound_max_y + 1, is_fragment, has_hierarchy);

        pandecode_indent--;

        /* Dummy unpack of the padding section to make sure all words are 0.
         * No need to call print here since the section is supposed to be empty.
         */
        pan_section_unpack(s, SINGLE_TARGET_FRAMEBUFFER, PADDING_1, padding1);
        pan_section_unpack(s, SINGLE_TARGET_FRAMEBUFFER, PADDING_2, padding2);
        pandecode_log("\n");

        return info;
}

static void
pandecode_compute_fbd(uint64_t gpu_va, int job_no)
{
        struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(gpu_va);
        const struct mali_local_storage_packed *PANDECODE_PTR_VAR(s, mem, (mali_ptr) gpu_va);
        DUMP_CL(LOCAL_STORAGE, s, "Local Storage:\n");
}

static void
pandecode_render_target(uint64_t gpu_va, unsigned job_no, bool is_bifrost, unsigned gpu_id,
                        const struct MALI_MULTI_TARGET_FRAMEBUFFER_PARAMETERS *fb)
{
        pandecode_log("Color Render Targets:\n");
        pandecode_indent++;

        for (int i = 0; i < (fb->render_target_count); i++) {
                mali_ptr rt_va = gpu_va + i * MALI_RENDER_TARGET_LENGTH;
                struct pandecode_mapped_memory *mem =
                        pandecode_find_mapped_gpu_mem_containing(rt_va);
                const struct mali_render_target_packed *PANDECODE_PTR_VAR(rtp, mem, (mali_ptr) rt_va);
                DUMP_CL(RENDER_TARGET, rtp, "Color Render Target %d:\n", i);
        }

        pandecode_indent--;
        pandecode_log("\n");
}

static void
pandecode_mfbd_bifrost_deps(const void *fb, int job_no)
{
        pan_section_unpack(fb, MULTI_TARGET_FRAMEBUFFER, BIFROST_PARAMETERS, params);

        /* The blob stores all possible sample locations in a single buffer
         * allocated on startup, and just switches the pointer when switching
         * MSAA state. For now, we just put the data into the cmdstream, but we
         * should do something like what the blob does with a real driver.
         *
         * There seem to be 32 slots for sample locations, followed by another
         * 16. The second 16 is just the center location followed by 15 zeros
         * in all the cases I've identified (maybe shader vs. depth/color
         * samples?).
         */
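        /* So a dump should contain 48 (x, y) pairs; with MSAA off one would
         * expect the leading slots to repeat the pixel center. That reading
         * is a guess from traces, not a specification. */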

        struct pandecode_mapped_memory *smem =
                pandecode_find_mapped_gpu_mem_containing(params.sample_locations);

        const u16 *PANDECODE_PTR_VAR(samples, smem, params.sample_locations);

        pandecode_log("uint16_t sample_locations_%d[] = {\n", job_no);
        pandecode_indent++;
        for (int i = 0; i < 32 + 16; i++) {
                pandecode_log("%d, %d,\n", samples[2 * i], samples[2 * i + 1]);
        }

        pandecode_indent--;
        pandecode_log("};\n");
}

static struct pandecode_fbd
pandecode_mfbd_bfr(uint64_t gpu_va, int job_no, bool is_fragment, bool is_compute, bool is_bifrost, unsigned gpu_id)
{
        struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(gpu_va);
        const void *PANDECODE_PTR_VAR(fb, mem, (mali_ptr) gpu_va);
        pan_section_unpack(fb, MULTI_TARGET_FRAMEBUFFER, PARAMETERS, params);

        struct pandecode_fbd info;

        if (is_bifrost)
                pandecode_mfbd_bifrost_deps(fb, job_no);

        pandecode_log("Multi-Target Framebuffer:\n");
        pandecode_indent++;

        if (is_bifrost) {
                DUMP_SECTION(MULTI_TARGET_FRAMEBUFFER, BIFROST_PARAMETERS, fb, "Bifrost Params:\n");
        } else {
                DUMP_SECTION(MULTI_TARGET_FRAMEBUFFER, LOCAL_STORAGE, fb, "Local Storage:\n");
        }

        info.width = params.width;
        info.height = params.height;
        info.rt_count = params.render_target_count;
        DUMP_UNPACKED(MULTI_TARGET_FRAMEBUFFER_PARAMETERS, params, "Parameters:\n");

        if (!is_compute) {
                if (is_bifrost) {
                        DUMP_SECTION(MULTI_TARGET_FRAMEBUFFER, BIFROST_TILER_POINTER, fb, "Tiler Pointer:\n");
                } else {
                        const void *t = pan_section_ptr(fb, MULTI_TARGET_FRAMEBUFFER, TILER);
                        const void *w = pan_section_ptr(fb, MULTI_TARGET_FRAMEBUFFER, TILER_WEIGHTS);
                        pandecode_midgard_tiler_descriptor(t, w, params.width, params.height, is_fragment, true);
                }
        } else {
                pandecode_msg("XXX: skipping compute MFBD, fixme\n");
        }

        if (is_bifrost) {
                pan_section_unpack(fb, MULTI_TARGET_FRAMEBUFFER, BIFROST_PADDING, padding);
        }

        pandecode_indent--;
        pandecode_log("\n");

        gpu_va += MALI_MULTI_TARGET_FRAMEBUFFER_LENGTH;

        info.has_extra = params.has_zs_crc_extension;

        if (info.has_extra) {
                struct pandecode_mapped_memory *mem =
                        pandecode_find_mapped_gpu_mem_containing(gpu_va);
                const struct mali_zs_crc_extension_packed *PANDECODE_PTR_VAR(zs_crc, mem, (mali_ptr)gpu_va);
                DUMP_CL(ZS_CRC_EXTENSION, zs_crc, "ZS CRC Extension:\n");
                pandecode_log("\n");

                gpu_va += MALI_ZS_CRC_EXTENSION_LENGTH;
        }

        if (is_fragment)
                pandecode_render_target(gpu_va, job_no, is_bifrost, gpu_id, &params);

        return info;
}

static void
pandecode_attributes(const struct pandecode_mapped_memory *mem,
                     mali_ptr addr, int job_no, char *suffix,
                     int count, bool varying, enum mali_job_type job_type)
{
        char *prefix = varying ? "Varying" : "Attribute";
        assert(addr);

        if (!count) {
                pandecode_msg("warn: No %s records\n", prefix);
                return;
        }

        MAP_ADDR(ATTRIBUTE_BUFFER, addr, cl);

        for (int i = 0; i < count; ++i) {
                pan_unpack(cl + i * MALI_ATTRIBUTE_BUFFER_LENGTH, ATTRIBUTE_BUFFER, temp);
                DUMP_UNPACKED(ATTRIBUTE_BUFFER, temp, "%s:\n", prefix);

                if (temp.type != MALI_ATTRIBUTE_TYPE_1D_NPOT_DIVISOR)
                        continue;

                pan_unpack(cl + (i + 1) * MALI_ATTRIBUTE_BUFFER_LENGTH,
                           ATTRIBUTE_BUFFER_CONTINUATION_NPOT, temp2);
                pan_print(pandecode_dump_stream, ATTRIBUTE_BUFFER_CONTINUATION_NPOT,
                          temp2, (pandecode_indent + 1) * 2);
        }
        pandecode_log("\n");
}

static mali_ptr
pandecode_shader_address(const char *name, mali_ptr ptr)
{
        /* TODO: Decode flags */
        mali_ptr shader_ptr = ptr & ~15;

        char *a = pointer_as_memory_reference(shader_ptr);
        pandecode_prop("%s = (%s) | %d", name, a, (int) (ptr & 15));
        free(a);

        return shader_ptr;
}
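
/* E.g. a tagged pointer 0x10008005 decodes as address 0x10008000 with flag
 * nibble 5: shader pointers are 16-byte aligned, so the low four bits are
 * free for flags (exact meaning undecoded, per the TODO above). */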

/* Decodes a Bifrost blend constant. See the notes in bifrost_blend_rt */

static mali_ptr
pandecode_bifrost_blend(void *descs, int job_no, int rt_no, mali_ptr frag_shader)
{
        pan_unpack(descs + (rt_no * MALI_BLEND_LENGTH), BLEND, b);
        DUMP_UNPACKED(BLEND, b, "Blend RT %d:\n", rt_no);
        if (b.bifrost.internal.mode != MALI_BIFROST_BLEND_MODE_SHADER)
                return 0;

        return (frag_shader & 0xFFFFFFFF00000000ULL) | b.bifrost.internal.shader.pc;
}

static mali_ptr
pandecode_midgard_blend_mrt(void *descs, int job_no, int rt_no)
{
        pan_unpack(descs + (rt_no * MALI_BLEND_LENGTH), BLEND, b);
        DUMP_UNPACKED(BLEND, b, "Blend RT %d:\n", rt_no);
        return b.midgard.blend_shader ? (b.midgard.shader_pc & ~0xf) : 0;
}

/* Attributes and varyings have descriptor records, which contain information
 * about their format and ordering with the attribute/varying buffers. We'll
 * want to validate that the combinations specified are self-consistent.
 */

static int
pandecode_attribute_meta(int count, mali_ptr attribute, bool varying, char *suffix)
{
        for (int i = 0; i < count; ++i, attribute += MALI_ATTRIBUTE_LENGTH)
                DUMP_ADDR(ATTRIBUTE, attribute, "%s:\n", varying ? "Varying" : "Attribute");

        pandecode_log("\n");
        return count;
}

/* return bits [lo, hi) of word */
static u32
bits(u32 word, u32 lo, u32 hi)
{
        if (hi - lo >= 32)
                return word; // avoid undefined behavior with the shift

        return (word >> lo) & ((1 << (hi - lo)) - 1);
}
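
/* For instance, bits(0xABCD, 4, 12) == 0xBC: shift out the low 4 bits, then
 * mask to the (12 - 4) = 8 bits that remain in range. */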

static void
pandecode_invocation(const void *i, bool graphics)
{
        /* Decode invocation_count. See the comment before the definition of
         * invocation_count for an explanation.
         */
        pan_unpack(i, INVOCATION, invocation);

        unsigned size_x = bits(invocation.invocations, 0, invocation.size_y_shift) + 1;
        unsigned size_y = bits(invocation.invocations, invocation.size_y_shift, invocation.size_z_shift) + 1;
        unsigned size_z = bits(invocation.invocations, invocation.size_z_shift, invocation.workgroups_x_shift) + 1;

        unsigned groups_x = bits(invocation.invocations, invocation.workgroups_x_shift, invocation.workgroups_y_shift) + 1;
        unsigned groups_y = bits(invocation.invocations, invocation.workgroups_y_shift, invocation.workgroups_z_shift) + 1;
        unsigned groups_z = bits(invocation.invocations, invocation.workgroups_z_shift, 32) + 1;
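
        /* Worked example: a size of (4, 1, 1) with (8, 1, 1) groups stores
         * 3 (= 4 - 1) in bits [0, size_y_shift) and 7 (= 8 - 1) in bits
         * [workgroups_x_shift, workgroups_y_shift); every field holds its
         * count minus one, hence the "+ 1" on each decode above. */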

        /* Even though we have this decoded, we want to ensure that the
         * representation is "unique" so we don't lose anything by printing only
         * the final result. More specifically, we need to check that we were
         * passed something in canonical form, since the definition per the
         * hardware is inherently not unique. How? Well, take the resulting
         * decode and pack it ourselves! If it is bit exact with what we
         * decoded, we're good to go. */

        struct mali_invocation_packed ref;
        panfrost_pack_work_groups_compute(&ref, groups_x, groups_y, groups_z, size_x, size_y, size_z, graphics);

        if (memcmp(&ref, i, sizeof(ref))) {
                pandecode_msg("XXX: non-canonical workgroups packing\n");
                DUMP_UNPACKED(INVOCATION, invocation, "Invocation:\n")
        }

        /* Regardless, print the decode */
        pandecode_log("Invocation (%d, %d, %d) x (%d, %d, %d)\n",
                      size_x, size_y, size_z,
                      groups_x, groups_y, groups_z);
}

static void
pandecode_primitive(const void *p)
{
        pan_unpack(p, PRIMITIVE, primitive);
        DUMP_UNPACKED(PRIMITIVE, primitive, "Primitive:\n");

        /* Validate an index buffer is present if we need one. TODO: verify
         * relationship between invocation_count and index_count */

        if (primitive.indices) {
                /* Grab the size */
                unsigned size = (primitive.index_type == MALI_INDEX_TYPE_UINT32) ?
                                sizeof(uint32_t) : primitive.index_type;
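
                /* This leans on the enum encoding we've observed: UINT8 and
                 * UINT16 have enum values 1 and 2, matching their byte sizes,
                 * so only UINT32 (enum 3, size 4) needs special-casing. */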

                /* Ensure we got a size, and if so, validate the index buffer
                 * is large enough to hold a full set of indices of the given
                 * size */

                if (!size)
                        pandecode_msg("XXX: index size missing\n");
                else
                        pandecode_validate_buffer(primitive.indices, primitive.index_count * size);
        } else if (primitive.index_type)
                pandecode_msg("XXX: unexpected index size\n");
}

static void
pandecode_uniform_buffers(mali_ptr pubufs, int ubufs_count, int job_no)
{
        struct pandecode_mapped_memory *umem = pandecode_find_mapped_gpu_mem_containing(pubufs);
        uint64_t *PANDECODE_PTR_VAR(ubufs, umem, pubufs);

        for (int i = 0; i < ubufs_count; i++) {
                unsigned size = (ubufs[i] & ((1 << 10) - 1)) * 16;
                mali_ptr addr = (ubufs[i] >> 10) << 2;
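
                /* Each 64-bit entry packs two fields: the low 10 bits give
                 * the buffer size in 16-byte units, and the remaining bits
                 * give the address in 4-byte units (so buffers must be
                 * 4-byte aligned). That just restates the decode above. */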

                pandecode_validate_buffer(addr, size);

                char *ptr = pointer_as_memory_reference(addr);
                pandecode_log("ubuf_%d[%u] = %s;\n", i, size, ptr);
                free(ptr);
        }

        pandecode_log("\n");
}

static void
pandecode_uniforms(mali_ptr uniforms, unsigned uniform_count)
{
        pandecode_validate_buffer(uniforms, uniform_count * 16);

        char *ptr = pointer_as_memory_reference(uniforms);
        pandecode_log("vec4 uniforms[%u] = %s;\n", uniform_count, ptr);
        free(ptr);
        pandecode_log("\n");
}

static const char *
shader_type_for_job(unsigned type)
{
        switch (type) {
        case MALI_JOB_TYPE_VERTEX: return "VERTEX";
        case MALI_JOB_TYPE_TILER: return "FRAGMENT";
        case MALI_JOB_TYPE_COMPUTE: return "COMPUTE";
        default: return "UNKNOWN";
        }
}

static unsigned shader_id = 0;

static struct midgard_disasm_stats
pandecode_shader_disassemble(mali_ptr shader_ptr, int shader_no, int type,
                             bool is_bifrost, unsigned gpu_id)
{
        struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(shader_ptr);
        uint8_t *PANDECODE_PTR_VAR(code, mem, shader_ptr);

        /* Compute maximum possible size */
        size_t sz = mem->length - (shader_ptr - mem->gpu_va);

        /* Print some boilerplate to clearly denote the assembly (which doesn't
         * obey indentation rules), and actually do the disassembly! */

        pandecode_log_cont("\n\n");

        struct midgard_disasm_stats stats;

        if (is_bifrost) {
                disassemble_bifrost(pandecode_dump_stream, code, sz, true);

                /* TODO: Extend stats to Bifrost */
                stats.texture_count = -128;
                stats.sampler_count = -128;
                stats.attribute_count = -128;
                stats.varying_count = -128;
                stats.uniform_count = -128;
                stats.uniform_buffer_count = -128;
                stats.work_count = -128;

                stats.instruction_count = 0;
                stats.bundle_count = 0;
                stats.quadword_count = 0;
                stats.helper_invocations = false;
        } else {
                stats = disassemble_midgard(pandecode_dump_stream,
                                            code, sz, gpu_id,
                                            type == MALI_JOB_TYPE_TILER ?
                                            MESA_SHADER_FRAGMENT : MESA_SHADER_VERTEX);
        }

        unsigned nr_threads =
                (stats.work_count <= 4) ? 4 :
                (stats.work_count <= 8) ? 2 :
                1;
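
        /* The thread count follows Midgard's register/thread trade-off, as
         * reflected in the ternary above: a core can run 4 threads for a
         * shader using at most 4 work registers, 2 threads at up to 8
         * registers, and otherwise 1. */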

        pandecode_log_cont("shader%d - MESA_SHADER_%s shader: "
                           "%u inst, %u bundles, %u quadwords, "
                           "%u registers, %u threads, 0 loops, 0:0 spills:fills\n\n\n",
                           shader_id++,
                           shader_type_for_job(type),
                           stats.instruction_count, stats.bundle_count, stats.quadword_count,
                           stats.work_count, nr_threads);

        return stats;
}

static void
pandecode_texture_payload(mali_ptr payload,
                          enum mali_texture_dimension dim,
                          enum mali_texture_layout layout,
                          bool manual_stride,
                          uint8_t levels,
                          uint16_t depth,
                          uint16_t array_size,
                          struct pandecode_mapped_memory *tmem)
{
        pandecode_log(".payload = {\n");
        pandecode_indent++;

        /* A bunch of bitmap pointers follow.
         * We work out the correct number,
         * based on the mipmap/cubemap
         * properties, but dump extra
         * possibilities to futureproof */

        int bitmap_count = levels + 1;

        /* Miptree for each face */
        if (dim == MALI_TEXTURE_DIMENSION_CUBE)
                bitmap_count *= 6;

        /* Array of layers */
        bitmap_count *= depth;

        /* Array of textures */
        bitmap_count *= array_size;

        /* Stride for each element */
        if (manual_stride)
                bitmap_count *= 2;
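
        /* Worked example: a cubemap with a levels field of 2 (i.e. 3 mip
         * levels, since the field is biased by one), depth 1, array_size 1
         * and manual strides yields 3 * 6 * 2 = 36 entries, alternating
         * pointer and stride. */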

        mali_ptr *pointers_and_strides = pandecode_fetch_gpu_mem(tmem,
                        payload, sizeof(mali_ptr) * bitmap_count);
        for (int i = 0; i < bitmap_count; ++i) {
                /* How we dump depends if this is a stride or a pointer */

                if (manual_stride && (i & 1)) {
                        /* signed 32-bit snuck in as a 64-bit pointer */
                        uint64_t stride_set = pointers_and_strides[i];
                        uint32_t clamped_stride = stride_set;
                        int32_t stride = clamped_stride;
                        assert(stride_set == clamped_stride);
                        pandecode_log("(mali_ptr) %d /* stride */, \n", stride);
                } else {
                        char *a = pointer_as_memory_reference(pointers_and_strides[i]);
                        pandecode_log("%s, \n", a);
                        free(a);
                }
        }

        pandecode_indent--;
        pandecode_log("},\n");
}

static void
pandecode_texture(mali_ptr u,
                  struct pandecode_mapped_memory *tmem,
                  unsigned job_no, unsigned tex)
{
        struct pandecode_mapped_memory *mapped_mem = pandecode_find_mapped_gpu_mem_containing(u);
        const uint8_t *cl = pandecode_fetch_gpu_mem(mapped_mem, u, MALI_MIDGARD_TEXTURE_LENGTH);

        pan_unpack(cl, MIDGARD_TEXTURE, temp);
        DUMP_UNPACKED(MIDGARD_TEXTURE, temp, "Texture:\n")

        pandecode_indent++;
        pandecode_texture_payload(u + MALI_MIDGARD_TEXTURE_LENGTH,
                        temp.dimension, temp.texel_ordering, temp.manual_stride,
                        temp.levels, temp.depth, temp.array_size, mapped_mem);
        pandecode_indent--;
}

static void
pandecode_bifrost_texture(
                const void *cl,
                unsigned job_no,
                unsigned tex)
{
        pan_unpack(cl, BIFROST_TEXTURE, temp);
        DUMP_UNPACKED(BIFROST_TEXTURE, temp, "Texture:\n")

        struct pandecode_mapped_memory *tmem = pandecode_find_mapped_gpu_mem_containing(temp.surfaces);
        pandecode_indent++;
        pandecode_texture_payload(temp.surfaces, temp.dimension, temp.texel_ordering,
                                  true, temp.levels, 1, 1, tmem);
        pandecode_indent--;
}

/* For shader properties like texture_count, we have a claimed property in the
 * shader_meta, and the actual Truth from static analysis (this may just be an
 * upper limit). We validate accordingly */

static void
pandecode_shader_prop(const char *name, unsigned claim, signed truth, bool fuzzy)
{
        /* Nothing to do */
        if (claim == truth)
                return;

        if (fuzzy && (truth < 0))
                pandecode_msg("XXX: fuzzy %s, claimed %d, expected %d\n", name, claim, truth);

        if ((truth >= 0) && !fuzzy) {
                pandecode_msg("%s: expected %s = %d, claimed %u\n",
                              (truth < claim) ? "warn" : "XXX",
                              name, truth, claim);
        } else if ((claim > -truth) && !fuzzy) {
                pandecode_msg("XXX: expected %s <= %u, claimed %u\n",
                              name, -truth, claim);
        } else if (fuzzy && (claim < truth))
                pandecode_msg("XXX: expected %s >= %u, claimed %u\n",
                              name, truth, claim);

        pandecode_log(".%s = %" PRId16, name, claim);

        if (fuzzy)
                pandecode_log_cont(" /* %u used */", truth);

        pandecode_log_cont(",\n");
}
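
/* For example, pandecode_shader_prop("texture_count", 4, 2, false) emits
 * "warn: expected texture_count = 2, claimed 4": the descriptor claims four
 * textures while the disassembly shows only two are actually read. */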

static void
pandecode_blend_shader_disassemble(mali_ptr shader, int job_no, int job_type,
                                   bool is_bifrost, unsigned gpu_id)
{
        struct midgard_disasm_stats stats =
                pandecode_shader_disassemble(shader, job_no, job_type, is_bifrost, gpu_id);

        bool has_texture = (stats.texture_count > 0);
        bool has_sampler = (stats.sampler_count > 0);
        bool has_attribute = (stats.attribute_count > 0);
        bool has_varying = (stats.varying_count > 0);
        bool has_uniform = (stats.uniform_count > 0);
        bool has_ubo = (stats.uniform_buffer_count > 0);

        if (has_texture || has_sampler)
                pandecode_msg("XXX: blend shader accessing textures\n");

        if (has_attribute || has_varying)
                pandecode_msg("XXX: blend shader accessing interstage\n");

        if (has_uniform || has_ubo)
                pandecode_msg("XXX: blend shader accessing uniforms\n");
}

static void
pandecode_textures(mali_ptr textures, unsigned texture_count, int job_no, bool is_bifrost)
{
        struct pandecode_mapped_memory *mmem = pandecode_find_mapped_gpu_mem_containing(textures);

        if (!mmem)
                return;

        pandecode_log("Textures %"PRIx64"_%d:\n", textures, job_no);
        pandecode_indent++;

        if (is_bifrost) {
                const void *cl = pandecode_fetch_gpu_mem(mmem,
                                textures, MALI_BIFROST_TEXTURE_LENGTH *
                                texture_count);

                for (unsigned tex = 0; tex < texture_count; ++tex) {
                        pandecode_bifrost_texture(cl +
                                        MALI_BIFROST_TEXTURE_LENGTH * tex,
                                        job_no, tex);
                }
        } else {
                mali_ptr *PANDECODE_PTR_VAR(u, mmem, textures);

                for (int tex = 0; tex < texture_count; ++tex) {
                        mali_ptr *PANDECODE_PTR_VAR(u, mmem, textures + tex * sizeof(mali_ptr));
                        char *a = pointer_as_memory_reference(*u);
                        pandecode_log("%s,\n", a);
                        free(a);
                }

                /* Now, finally, descend down into the texture descriptor */
                for (unsigned tex = 0; tex < texture_count; ++tex) {
                        mali_ptr *PANDECODE_PTR_VAR(u, mmem, textures + tex * sizeof(mali_ptr));
                        struct pandecode_mapped_memory *tmem = pandecode_find_mapped_gpu_mem_containing(*u);
                        if (tmem)
                                pandecode_texture(*u, tmem, job_no, tex);
                }
        }
        pandecode_indent--;
        pandecode_log("\n");
}

static void
pandecode_samplers(mali_ptr samplers, unsigned sampler_count, int job_no, bool is_bifrost)
{
        pandecode_log("Samplers %"PRIx64"_%d:\n", samplers, job_no);
        pandecode_indent++;

        for (int i = 0; i < sampler_count; ++i) {
                if (is_bifrost) {
                        DUMP_ADDR(BIFROST_SAMPLER, samplers + (MALI_BIFROST_SAMPLER_LENGTH * i), "Sampler %d:\n", i);
                } else {
                        DUMP_ADDR(MIDGARD_SAMPLER, samplers + (MALI_MIDGARD_SAMPLER_LENGTH * i), "Sampler %d:\n", i);
                }
        }

        pandecode_indent--;
        pandecode_log("\n");
}

static void
pandecode_vertex_tiler_postfix_pre(
                const struct MALI_DRAW *p,
                int job_no, enum mali_job_type job_type,
                char *suffix, bool is_bifrost, unsigned gpu_id)
{
        struct pandecode_mapped_memory *attr_mem;

        struct pandecode_fbd fbd_info = {
                /* Default for Bifrost */
                .rt_count = 1
        };

        if (is_bifrost)
                pandecode_compute_fbd(p->fbd & ~1, job_no);
        else if (p->fbd & MALI_FBD_TAG_IS_MFBD)
                fbd_info = pandecode_mfbd_bfr((u64) ((uintptr_t) p->fbd) & ~MALI_FBD_TAG_MASK,
                                              job_no, false, job_type == MALI_JOB_TYPE_COMPUTE, is_bifrost, gpu_id);
        else if (job_type == MALI_JOB_TYPE_COMPUTE)
                pandecode_compute_fbd((u64) (uintptr_t) p->fbd, job_no);
        else
                fbd_info = pandecode_sfbd((u64) (uintptr_t) p->fbd, job_no, false, gpu_id);

        int varying_count = 0, attribute_count = 0, uniform_count = 0, uniform_buffer_count = 0;
        int texture_count = 0, sampler_count = 0;

        if (p->state) {
                struct pandecode_mapped_memory *smem = pandecode_find_mapped_gpu_mem_containing(p->state);
                uint32_t *cl = pandecode_fetch_gpu_mem(smem, p->state, MALI_RENDERER_STATE_LENGTH);

                /* Disassemble ahead-of-time to get stats. Initialize with
                 * stats for the missing-shader case so we get validation
                 * there, too */

                struct midgard_disasm_stats info = {
                        .texture_count = 0,
                        .sampler_count = 0,
                        .attribute_count = 0,
                        .varying_count = 0,
                        .work_count = 1,

                        .uniform_count = -128,
                        .uniform_buffer_count = 0
                };

                pan_unpack(cl, RENDERER_STATE, state);

                if (state.shader.shader & ~0xF)
                        info = pandecode_shader_disassemble(state.shader.shader & ~0xF, job_no, job_type, is_bifrost, gpu_id);

                DUMP_UNPACKED(RENDERER_STATE, state, "State:\n");
                pandecode_indent++;

                /* Save for dumps */
                attribute_count = state.shader.attribute_count;
                varying_count = state.shader.varying_count;
                texture_count = state.shader.texture_count;
                sampler_count = state.shader.sampler_count;
                uniform_buffer_count = state.properties.uniform_buffer_count;

                if (is_bifrost)
                        uniform_count = state.preload.uniform_count;
                else
                        uniform_count = state.properties.midgard.uniform_count;

                pandecode_shader_prop("texture_count", texture_count, info.texture_count, false);
                pandecode_shader_prop("sampler_count", sampler_count, info.sampler_count, false);
                pandecode_shader_prop("attribute_count", attribute_count, info.attribute_count, false);
                pandecode_shader_prop("varying_count", varying_count, info.varying_count, false);

                if (is_bifrost)
                        DUMP_UNPACKED(PRELOAD, state.preload, "Preload:\n");

                if (!is_bifrost) {
                        /* TODO: Blend shaders routing/disasm */
                        pandecode_log("SFBD Blend:\n");
                        pandecode_indent++;
                        if (state.multisample_misc.sfbd_blend_shader) {
                                pandecode_shader_address("Shader", state.sfbd_blend_shader);
                        } else {
                                DUMP_UNPACKED(BLEND_EQUATION, state.sfbd_blend_equation, "Equation:\n");
                                pandecode_prop("Constant = %f", state.sfbd_blend_constant);
                        }
                        pandecode_indent--;
                        pandecode_log("\n");

                        mali_ptr shader = state.sfbd_blend_shader & ~0xF;
                        if (state.multisample_misc.sfbd_blend_shader && shader)
                                pandecode_blend_shader_disassemble(shader, job_no, job_type, false, gpu_id);
                }
                pandecode_indent--;
                pandecode_log("\n");

                /* MRT blend fields are used whenever MFBD is used, with
                 * per-RT descriptors */

                if (job_type == MALI_JOB_TYPE_TILER &&
                    (is_bifrost || p->fbd & MALI_FBD_TAG_IS_MFBD)) {
                        void *blend_base = ((void *) cl) + MALI_RENDERER_STATE_LENGTH;

                        for (unsigned i = 0; i < fbd_info.rt_count; i++) {
                                mali_ptr shader = 0;

                                if (is_bifrost)
                                        shader = pandecode_bifrost_blend(blend_base, job_no, i,
                                                                         state.shader.shader);
                                else
                                        shader = pandecode_midgard_blend_mrt(blend_base, job_no, i);

                                if (shader & ~0xF)
                                        pandecode_blend_shader_disassemble(shader, job_no, job_type,
                                                                           is_bifrost, gpu_id);
                        }
                }
        } else
                pandecode_msg("XXX: missing shader descriptor\n");

        if (p->viewport) {
                DUMP_ADDR(VIEWPORT, p->viewport, "Viewport:\n");
                pandecode_log("\n");
        }

        unsigned max_attr_index = 0;

        if (p->attributes)
                max_attr_index = pandecode_attribute_meta(attribute_count, p->attributes, false, suffix);

        if (p->attribute_buffers) {
                attr_mem = pandecode_find_mapped_gpu_mem_containing(p->attribute_buffers);
                pandecode_attributes(attr_mem, p->attribute_buffers, job_no, suffix, max_attr_index, false, job_type);
        }

        if (p->varyings) {
                varying_count = pandecode_attribute_meta(varying_count, p->varyings, true, suffix);
        }

        if (p->varying_buffers) {
                attr_mem = pandecode_find_mapped_gpu_mem_containing(p->varying_buffers);
                pandecode_attributes(attr_mem, p->varying_buffers, job_no, suffix, varying_count, true, job_type);
        }

        if (p->uniform_buffers) {
                if (uniform_buffer_count)
                        pandecode_uniform_buffers(p->uniform_buffers, uniform_buffer_count, job_no);
                else
                        pandecode_msg("warn: UBOs specified but not referenced\n");
        } else if (uniform_buffer_count)
                pandecode_msg("XXX: UBOs referenced but not specified\n");

        /* We don't want to actually dump uniforms, but we do need to validate
         * that the counts we were given are sane */

        if (p->push_uniforms) {
                if (uniform_count)
                        pandecode_uniforms(p->push_uniforms, uniform_count);
                else
                        pandecode_msg("warn: Uniforms specified but not referenced\n");
        } else if (uniform_count)
                pandecode_msg("XXX: Uniforms referenced but not specified\n");

        if (p->textures)
                pandecode_textures(p->textures, texture_count, job_no, is_bifrost);

        if (p->samplers)
                pandecode_samplers(p->samplers, sampler_count, job_no, is_bifrost);
}

static void
pandecode_bifrost_tiler_heap(mali_ptr gpu_va, int job_no)
{
        struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(gpu_va);
        pan_unpack(PANDECODE_PTR(mem, gpu_va, void), BIFROST_TILER_HEAP, h);
        DUMP_UNPACKED(BIFROST_TILER_HEAP, h, "Bifrost Tiler Heap:\n");
}

static void
pandecode_bifrost_tiler(mali_ptr gpu_va, int job_no)
{
        struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(gpu_va);
        pan_unpack(PANDECODE_PTR(mem, gpu_va, void), BIFROST_TILER, t);

        pandecode_bifrost_tiler_heap(t.heap, job_no);

        DUMP_UNPACKED(BIFROST_TILER, t, "Bifrost Tiler:\n");
        pandecode_indent++;
        if (t.hierarchy_mask != 0xa &&
            t.hierarchy_mask != 0x14 &&
            t.hierarchy_mask != 0x28 &&
            t.hierarchy_mask != 0x50 &&
            t.hierarchy_mask != 0xa0)
                pandecode_prop("XXX: Unexpected hierarchy_mask (not 0xa, 0x14, 0x28, 0x50 or 0xa0)!");

        pandecode_indent--;
}

static void
pandecode_primitive_size(const void *s, bool constant)
{
        pan_unpack(s, PRIMITIVE_SIZE, ps);
        if (ps.size_array == 0x0)
                return;

        DUMP_UNPACKED(PRIMITIVE_SIZE, ps, "Primitive Size:\n")
}

static void
pandecode_vertex_compute_geometry_job(const struct MALI_JOB_HEADER *h,
                                      const struct pandecode_mapped_memory *mem,
                                      mali_ptr job, int job_no, bool is_bifrost,
                                      unsigned gpu_id)
{
        struct mali_compute_job_packed *PANDECODE_PTR_VAR(p, mem, job);
        pan_section_unpack(p, COMPUTE_JOB, DRAW, draw);
        pandecode_vertex_tiler_postfix_pre(&draw, job_no, h->type, "", is_bifrost, gpu_id);

        pandecode_log("Vertex Job Payload:\n");
        pandecode_indent++;
        pandecode_invocation(pan_section_ptr(p, COMPUTE_JOB, INVOCATION),
                             h->type != MALI_JOB_TYPE_COMPUTE);
        DUMP_SECTION(COMPUTE_JOB, PARAMETERS, p, "Vertex Job Parameters:\n");
        DUMP_UNPACKED(DRAW, draw, "Draw:\n");
        pandecode_indent--;
        pandecode_log("\n");
}

static void
pandecode_tiler_job_bfr(const struct MALI_JOB_HEADER *h,
                        const struct pandecode_mapped_memory *mem,
                        mali_ptr job, int job_no, unsigned gpu_id)
{
        struct mali_bifrost_tiler_job_packed *PANDECODE_PTR_VAR(p, mem, job);
        pan_section_unpack(p, BIFROST_TILER_JOB, DRAW, draw);
        pan_section_unpack(p, BIFROST_TILER_JOB, TILER, tiler_ptr);
        pandecode_vertex_tiler_postfix_pre(&draw, job_no, h->type, "", true, gpu_id);

        pandecode_log("Tiler Job Payload:\n");
        pandecode_indent++;
        pandecode_bifrost_tiler(tiler_ptr.address, job_no);

        pandecode_invocation(pan_section_ptr(p, BIFROST_TILER_JOB, INVOCATION), true);
        pandecode_primitive(pan_section_ptr(p, BIFROST_TILER_JOB, PRIMITIVE));

        /* TODO: gl_PointSize on Bifrost */
        pandecode_primitive_size(pan_section_ptr(p, BIFROST_TILER_JOB, PRIMITIVE_SIZE), true);
        pan_section_unpack(p, BIFROST_TILER_JOB, PADDING, padding);
        DUMP_UNPACKED(DRAW, draw, "Draw:\n");
        pandecode_indent--;
        pandecode_log("\n");
}

static void
pandecode_tiler_job_mdg(const struct MALI_JOB_HEADER *h,
                        const struct pandecode_mapped_memory *mem,
                        mali_ptr job, int job_no, unsigned gpu_id)
{
        struct mali_midgard_tiler_job_packed *PANDECODE_PTR_VAR(p, mem, job);
        pan_section_unpack(p, MIDGARD_TILER_JOB, DRAW, draw);
        pandecode_vertex_tiler_postfix_pre(&draw, job_no, h->type, "", false, gpu_id);

        pandecode_log("Tiler Job Payload:\n");
        pandecode_indent++;
        pandecode_invocation(pan_section_ptr(p, MIDGARD_TILER_JOB, INVOCATION), true);
        pandecode_primitive(pan_section_ptr(p, MIDGARD_TILER_JOB, PRIMITIVE));
        DUMP_UNPACKED(DRAW, draw, "Draw:\n");

        pan_section_unpack(p, MIDGARD_TILER_JOB, PRIMITIVE, primitive);
        pandecode_primitive_size(pan_section_ptr(p, MIDGARD_TILER_JOB, PRIMITIVE_SIZE),
                                 primitive.point_size_array_format == MALI_POINT_SIZE_ARRAY_FORMAT_NONE);
        pandecode_indent--;
        pandecode_log("\n");
}

static void
pandecode_fragment_job(const struct pandecode_mapped_memory *mem,
                       mali_ptr job, int job_no,
                       bool is_bifrost, unsigned gpu_id)
{
        struct mali_fragment_job_packed *PANDECODE_PTR_VAR(p, mem, job);
        pan_section_unpack(p, FRAGMENT_JOB, PAYLOAD, s);

        bool is_mfbd = s.framebuffer & MALI_FBD_TAG_IS_MFBD;

        if (!is_mfbd && is_bifrost)
                pandecode_msg("XXX: Bifrost fragment must use MFBD\n");

        struct pandecode_fbd info;

        if (is_mfbd)
                info = pandecode_mfbd_bfr(s.framebuffer & ~MALI_FBD_TAG_MASK, job_no,
                                          true, false, is_bifrost, gpu_id);
        else
                info = pandecode_sfbd(s.framebuffer & ~MALI_FBD_TAG_MASK, job_no,
                                      true, gpu_id);

        /* Compute the tag for the tagged pointer. This contains the type of
         * FBD (MFBD/SFBD), and in the case of an MFBD, information about which
         * additional structures follow the MFBD header (an extra payload or
         * not, as well as a count of render targets) */

        unsigned expected_tag = is_mfbd ? MALI_FBD_TAG_IS_MFBD : 0;

        if (is_mfbd) {
                if (info.has_extra)
                        expected_tag |= MALI_FBD_TAG_HAS_ZS_RT;

                expected_tag |= (MALI_POSITIVE(info.rt_count) << 2);
        }
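
        /* Sketch of the tag layout as we understand it: with IS_MFBD in bit 0
         * and HAS_ZS_RT in bit 1, an MFBD with a ZS/CRC extension and two
         * render targets would give expected_tag = 0x1 | 0x2 |
         * (MALI_POSITIVE(2) << 2) = 0x7, MALI_POSITIVE being the usual
         * minus-one bias. */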

        /* Extract tile coordinates */

        unsigned min_x = s.bound_min_x << MALI_TILE_SHIFT;
        unsigned min_y = s.bound_min_y << MALI_TILE_SHIFT;
        unsigned max_x = s.bound_max_x << MALI_TILE_SHIFT;
        unsigned max_y = s.bound_max_y << MALI_TILE_SHIFT;

        /* Validate the coordinates are well-ordered */

        if (min_x > max_x)
                pandecode_msg("XXX: misordered X coordinates (%u > %u)\n", min_x, max_x);

        if (min_y > max_y)
                pandecode_msg("XXX: misordered Y coordinates (%u > %u)\n", min_y, max_y);

        /* Validate the coordinates fit inside the framebuffer. We use floor,
         * rather than ceil, for the max coordinates, since the tile
         * coordinates for something like an 800x600 framebuffer will actually
         * resolve to 800x608, which would otherwise trigger a Y-overflow */
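        /* Worked example of that rounding: with 16-pixel tiles
         * (MALI_TILE_SHIFT = 4), 600 px / 16 = 37.5 rounds up to 38 tiles =
         * 608 px, so max_y for an 800x600 framebuffer legitimately decodes
         * to 607. */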

        if (max_x + 1 > info.width)
                pandecode_msg("XXX: tile coordinates overflow in X direction\n");

        if (max_y + 1 > info.height)
                pandecode_msg("XXX: tile coordinates overflow in Y direction\n");

        /* After validation, we print */
        DUMP_UNPACKED(FRAGMENT_JOB_PAYLOAD, s, "Fragment Job Payload:\n");

        /* The FBD is a tagged pointer */

        unsigned tag = (s.framebuffer & MALI_FBD_TAG_MASK);

        if (tag != expected_tag)
                pandecode_msg("XXX: expected FBD tag %X but got %X\n", expected_tag, tag);

        pandecode_log("\n");
}

static void
pandecode_write_value_job(const struct pandecode_mapped_memory *mem,
                          mali_ptr job, int job_no)
{
        struct mali_write_value_job_packed *PANDECODE_PTR_VAR(p, mem, job);
        pan_section_unpack(p, WRITE_VALUE_JOB, PAYLOAD, u);
        DUMP_SECTION(WRITE_VALUE_JOB, PAYLOAD, p, "Write Value Payload:\n");
        pandecode_log("\n");
}

/* Entrypoint to start tracing. jc_gpu_va is the GPU address for the first job
 * in the chain; later jobs are found by walking the chain. The bifrost flag
 * indicates whether the chain was generated for a Bifrost GPU. The GPU ID is
 * the finer-grained model ID (at some point, we might wish to combine this
 * with the bifrost parameter) because some details are model-specific even
 * within a particular architecture. Minimal traces *only* examine the job
 * descriptors, skipping printing entirely if there are no faults, and only
 * descend into the payload if there are faults. This is useful for looking
 * for faults without the overhead of invasive traces. */
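
/* A caller sketch (names illustrative, not from this file): a tracing layer
 * wrapping the kernel submit ioctl might run
 *
 *    pandecode_jc(submit->jc, dev->is_bifrost, dev->gpu_id, minimal);
 *
 * once per submitted job chain, after mapping the relevant BOs. */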

void
pandecode_jc(mali_ptr jc_gpu_va, bool bifrost, unsigned gpu_id, bool minimal)
{
        pandecode_dump_file_open();

        unsigned job_descriptor_number = 0;
        mali_ptr next_job = 0;

        do {
                struct pandecode_mapped_memory *mem =
                        pandecode_find_mapped_gpu_mem_containing(jc_gpu_va);

                pan_unpack(PANDECODE_PTR(mem, jc_gpu_va, struct mali_job_header_packed),
                           JOB_HEADER, h);
                next_job = h.next;

                int job_no = job_descriptor_number++;

                /* If the job is good to go, skip it in minimal mode */
                if (minimal && (h.exception_status == 0x0 || h.exception_status == 0x1))
                        continue;

                DUMP_UNPACKED(JOB_HEADER, h, "Job Header:\n");
                pandecode_log("\n");

                switch (h.type) {
                case MALI_JOB_TYPE_WRITE_VALUE:
                        pandecode_write_value_job(mem, jc_gpu_va, job_no);
                        break;

                case MALI_JOB_TYPE_TILER:
                        if (bifrost)
                                pandecode_tiler_job_bfr(&h, mem, jc_gpu_va, job_no, gpu_id);
                        else
                                pandecode_tiler_job_mdg(&h, mem, jc_gpu_va, job_no, gpu_id);
                        break;

                case MALI_JOB_TYPE_VERTEX:
                case MALI_JOB_TYPE_COMPUTE:
                        pandecode_vertex_compute_geometry_job(&h, mem, jc_gpu_va, job_no,
                                                              bifrost, gpu_id);
                        break;

                case MALI_JOB_TYPE_FRAGMENT:
                        pandecode_fragment_job(mem, jc_gpu_va, job_no, bifrost, gpu_id);
                        break;

                default:
                        break;
                }
        } while ((jc_gpu_va = next_job));

        pandecode_map_read_write();
}