1 /*
2 * Copyright (C) 2017-2019 Alyssa Rosenzweig
3 * Copyright (C) 2017-2019 Connor Abbott
4 * Copyright (C) 2019 Collabora, Ltd.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 * SOFTWARE.
24 */
25
26 #include "decode.h"
27 #include <ctype.h>
28 #include <errno.h>
29 #include <memory.h>
30 #include <stdarg.h>
31 #include <stdbool.h>
32 #include <stdio.h>
33 #include <stdlib.h>
34 #include <genxml/gen_macros.h>
35 #include <sys/mman.h>
36
37 #include "compiler/bifrost/disassemble.h"
38 #include "compiler/valhall/disassemble.h"
39 #include "midgard/disassemble.h"
40 #include "util/set.h"
41 #include "pan_format.h"
42
43 #if PAN_ARCH <= 5
44 /* Midgard's tiler descriptor is embedded within the
45 * larger FBD */
46
47 static void
pandecode_midgard_tiler_descriptor(struct pandecode_context * ctx,const struct mali_tiler_context_packed * tp,const struct mali_tiler_weights_packed * wp)48 pandecode_midgard_tiler_descriptor(struct pandecode_context *ctx,
49 const struct mali_tiler_context_packed *tp,
50 const struct mali_tiler_weights_packed *wp)
51 {
52 pan_unpack(tp, TILER_CONTEXT, t);
53 DUMP_UNPACKED(ctx, TILER_CONTEXT, t, "Tiler:\n");
54
55 /* We've never seen weights used in practice, but they exist */
56 pan_unpack(wp, TILER_WEIGHTS, w);
57 bool nonzero_weights = false;
58
59 nonzero_weights |= w.weight0 != 0x0;
60 nonzero_weights |= w.weight1 != 0x0;
61 nonzero_weights |= w.weight2 != 0x0;
62 nonzero_weights |= w.weight3 != 0x0;
63 nonzero_weights |= w.weight4 != 0x0;
64 nonzero_weights |= w.weight5 != 0x0;
65 nonzero_weights |= w.weight6 != 0x0;
66 nonzero_weights |= w.weight7 != 0x0;
67
68 if (nonzero_weights)
69 DUMP_UNPACKED(ctx, TILER_WEIGHTS, w, "Tiler Weights:\n");
70 }
71 #endif
72
73 #if PAN_ARCH >= 5
74 static void
pandecode_render_target(struct pandecode_context * ctx,uint64_t gpu_va,unsigned gpu_id,const struct MALI_FRAMEBUFFER_PARAMETERS * fb)75 pandecode_render_target(struct pandecode_context *ctx, uint64_t gpu_va,
76 unsigned gpu_id,
77 const struct MALI_FRAMEBUFFER_PARAMETERS *fb)
78 {
79 pandecode_log(ctx, "Color Render Targets @%" PRIx64 ":\n", gpu_va);
80 ctx->indent++;
81
82 for (int i = 0; i < (fb->render_target_count); i++) {
83 uint64_t rt_va = gpu_va + i * pan_size(RENDER_TARGET);
84 const struct mali_render_target_packed *PANDECODE_PTR_VAR(
85 ctx, rtp, (uint64_t)rt_va);
86 DUMP_CL(ctx, RENDER_TARGET, rtp, "Color Render Target %d:\n", i);
87 }
88
89 ctx->indent--;
90 pandecode_log(ctx, "\n");
91 }
92 #endif
93
94 #if PAN_ARCH >= 6
/* Dump the table of programmed sample positions referenced by the
 * framebuffer parameters.
 *
 * Each entry is a pair of 16-bit values; subtracting 128 recovers the
 * signed offset relative to the pixel center.
 * NOTE(review): 33 entries are printed — presumably the full size of the
 * hardware sample-position table; confirm against the descriptor
 * definition in the genxml. */
static void
pandecode_sample_locations(struct pandecode_context *ctx, const void *fb)
{
   pan_section_unpack(fb, FRAMEBUFFER, PARAMETERS, params);

   const uint16_t *PANDECODE_PTR_VAR(ctx, samples, params.sample_locations);

   pandecode_log(ctx, "Sample locations @%" PRIx64 ":\n",
                 params.sample_locations);
   for (int i = 0; i < 33; i++) {
      pandecode_log(ctx, " (%d, %d),\n", samples[2 * i] - 128,
                    samples[2 * i + 1] - 128);
   }
}
109 #endif
110
/* Decode a framebuffer descriptor (FBD) at gpu_va and everything hanging
 * off it: sample locations and pre/post-frame DCDs (v6+), or the embedded
 * local-storage/tiler sections (v5 and earlier), plus the optional ZS/CRC
 * extension and, for fragment jobs, the render-target array (v5+).
 *
 * Returns a summary (render-target count, whether a ZS/CRC extension
 * follows) that callers use to size the descriptor. */
struct pandecode_fbd
GENX(pandecode_fbd)(struct pandecode_context *ctx, uint64_t gpu_va,
                    bool is_fragment, unsigned gpu_id)
{
   const void *PANDECODE_PTR_VAR(ctx, fb, (uint64_t)gpu_va);
   pan_section_unpack(fb, FRAMEBUFFER, PARAMETERS, params);
   DUMP_UNPACKED(ctx, FRAMEBUFFER_PARAMETERS, params, "Parameters:\n");

#if PAN_ARCH >= 6
   pandecode_sample_locations(ctx, fb);

   unsigned dcd_size = pan_size(DRAW);
   unsigned job_type_param = 0;

#if PAN_ARCH <= 9
   job_type_param = MALI_JOB_TYPE_FRAGMENT;
#endif

   /* frame_shader_dcds points at an array of three DRAW descriptors:
    * pre-frame 0, pre-frame 1, and post-frame. Each is only decoded when
    * its mode says it can actually run. */
   if (params.pre_frame_0 != MALI_PRE_POST_FRAME_SHADER_MODE_NEVER) {
      const struct mali_draw_packed *PANDECODE_PTR_VAR(
         ctx, dcd, params.frame_shader_dcds + (0 * dcd_size));
      pan_unpack(dcd, DRAW, draw);
      pandecode_log(ctx, "Pre frame 0 @%" PRIx64 " (mode=%d):\n",
                    params.frame_shader_dcds, params.pre_frame_0);
      GENX(pandecode_dcd)(ctx, &draw, job_type_param, gpu_id);
   }

   if (params.pre_frame_1 != MALI_PRE_POST_FRAME_SHADER_MODE_NEVER) {
      const struct mali_draw_packed *PANDECODE_PTR_VAR(
         ctx, dcd, params.frame_shader_dcds + (1 * dcd_size));
      pan_unpack(dcd, DRAW, draw);
      pandecode_log(ctx, "Pre frame 1 @%" PRIx64 ":\n",
                    params.frame_shader_dcds + (1 * dcd_size));
      GENX(pandecode_dcd)(ctx, &draw, job_type_param, gpu_id);
   }

   if (params.post_frame != MALI_PRE_POST_FRAME_SHADER_MODE_NEVER) {
      const struct mali_draw_packed *PANDECODE_PTR_VAR(
         ctx, dcd, params.frame_shader_dcds + (2 * dcd_size));
      pan_unpack(dcd, DRAW, draw);
      pandecode_log(ctx, "Post frame:\n");
      GENX(pandecode_dcd)(ctx, &draw, job_type_param, gpu_id);
   }
#else
   DUMP_SECTION(ctx, FRAMEBUFFER, LOCAL_STORAGE, fb, "Local Storage:\n");

   /* Midgard embeds the tiler descriptor inside the FBD. */
   const void *t = pan_section_ptr(fb, FRAMEBUFFER, TILER);
   const void *w = pan_section_ptr(fb, FRAMEBUFFER, TILER_WEIGHTS);
   pandecode_midgard_tiler_descriptor(ctx, t, w);
#endif

   pandecode_log(ctx, "Framebuffer @%" PRIx64 ":\n", gpu_va);
   ctx->indent++;

   /* Parameters are dumped a second time, indented under the
    * "Framebuffer" heading. */
   DUMP_UNPACKED(ctx, FRAMEBUFFER_PARAMETERS, params, "Parameters:\n");
#if PAN_ARCH >= 6
   if (params.tiler)
      GENX(pandecode_tiler)(ctx, params.tiler, gpu_id);
#endif

   ctx->indent--;
   pandecode_log(ctx, "\n");

#if PAN_ARCH >= 5
   /* Trailing descriptors: optional ZS/CRC extension, then the
    * render-target array (fragment jobs only). */
   gpu_va += pan_size(FRAMEBUFFER);

   if (params.has_zs_crc_extension) {
      const struct mali_zs_crc_extension_packed *PANDECODE_PTR_VAR(
         ctx, zs_crc, (uint64_t)gpu_va);
      DUMP_CL(ctx, ZS_CRC_EXTENSION, zs_crc, "ZS CRC Extension:\n");
      pandecode_log(ctx, "\n");

      gpu_va += pan_size(ZS_CRC_EXTENSION);
   }

   if (is_fragment)
      pandecode_render_target(ctx, gpu_va, gpu_id, &params);

   return (struct pandecode_fbd){
      .rt_count = params.render_target_count,
      .has_extra = params.has_zs_crc_extension,
   };
#else
   /* Dummy unpack of the padding section to make sure all words are 0.
    * No need to call print here since the section is supposed to be empty.
    */
   pan_section_unpack(fb, FRAMEBUFFER, PADDING_1, padding1);
   pan_section_unpack(fb, FRAMEBUFFER, PADDING_2, padding2);

   return (struct pandecode_fbd){
      .rt_count = 1,
   };
#endif
}
205
206 #if PAN_ARCH >= 5
/* Unpack and dump the blend descriptor for render target rt_no.
 *
 * Returns the GPU address of the blend shader when one is in use, or 0
 * when fixed-function blending is used (or the address cannot be
 * reconstructed). */
uint64_t
GENX(pandecode_blend)(struct pandecode_context *ctx,
                      struct mali_blend_packed *descs, int rt_no,
                      uint64_t frag_shader)
{
   pan_unpack(&descs[rt_no], BLEND, b);
   DUMP_UNPACKED(ctx, BLEND, b, "Blend RT %d:\n", rt_no);
#if PAN_ARCH >= 6
   if (b.internal.mode != MALI_BLEND_MODE_SHADER)
      return 0;
   /* If we don't have a frag shader, we can't extract the LSB of the blend
    * shader so return NULL in that case. It doesn't matter, because the
    * blend shader won't be executed anyway, so disassembling is not
    * super useful. */
   if (!frag_shader)
      return 0;

   /* v6+: the descriptor holds only the low 32 bits of the blend-shader
    * PC; the upper half is taken from the fragment shader's address
    * (assumes both live in the same 4 GiB region). */
   return (frag_shader & 0xFFFFFFFF00000000ULL) | b.internal.shader.pc;
#else
   /* v5: a full pointer is stored; the low 4 bits are masked off
    * (NOTE(review): presumably tag/flag bits — confirm against the
    * BLEND genxml definition). */
   return b.blend_shader ? (b.shader_pc & ~0xf) : 0;
#endif
}
229 #endif
230
231 #if PAN_ARCH <= 7
/* Returns true when the packed format word denotes a YUV format.
 * Only meaningful on v7; other gens assert. */
static bool
panfrost_is_yuv_format(uint32_t packed)
{
#if PAN_ARCH == 7
   /* The mali format enum lives in the upper bits of the packed word;
    * YUV formats form one contiguous range of that enum. */
   enum mali_format fmt = packed >> 12;
   return (fmt >= MALI_YUV8) && (fmt <= MALI_CUSTOM_YUV_5);
#else
   /* Currently only supported by panfrost on v7 */
   assert(0);
   return false;
#endif
}
244
/* Dump the surface-descriptor array ("payload") that follows or is
 * referenced by a texture descriptor on v7 and earlier. The descriptor
 * flavour depends on the architecture (and, on v5-, on the texture's
 * surface_type field). */
static void
pandecode_texture_payload(struct pandecode_context *ctx, uint64_t payload,
                          const struct MALI_TEXTURE *tex)
{
   /* 3D textures use a single sample; others use the descriptor's
    * sample count. */
   unsigned nr_samples =
      tex->dimension == MALI_TEXTURE_DIMENSION_3D ? 1 : tex->sample_count;

   if (!payload)
      return;

   /* A bunch of bitmap pointers follow.
    * We work out the correct number,
    * based on the mipmap/cubemap
    * properties, but dump extra
    * possibilities to futureproof */

   int bitmap_count = tex->levels;

   /* Miptree for each face */
   if (tex->dimension == MALI_TEXTURE_DIMENSION_CUBE)
      bitmap_count *= 6;

   /* Array of layers */
   bitmap_count *= nr_samples;

   /* Array of textures */
   bitmap_count *= tex->array_size;

/* Unpack and dump one descriptor of genxml type T per bitmap, laid out
 * contiguously starting at `payload`. */
#define PANDECODE_EMIT_TEX_PAYLOAD_DESC(T, msg)                             \
   for (int i = 0; i < bitmap_count; ++i) {                                 \
      uint64_t addr = payload + pan_size(T) * i;                            \
      pan_unpack(PANDECODE_PTR(ctx, addr, MALI_##T##_PACKED_T), T, s);      \
      DUMP_UNPACKED(ctx, T, s, msg " @%" PRIx64 ":\n", addr)                \
   }

#if PAN_ARCH <= 5
   /* v5-: the surface flavour is explicit in the texture descriptor. */
   switch (tex->surface_type) {
   case MALI_SURFACE_TYPE_32:
      PANDECODE_EMIT_TEX_PAYLOAD_DESC(SURFACE_32, "Surface 32");
      break;
   case MALI_SURFACE_TYPE_64:
      PANDECODE_EMIT_TEX_PAYLOAD_DESC(SURFACE, "Surface");
      break;
   case MALI_SURFACE_TYPE_32_WITH_ROW_STRIDE:
      PANDECODE_EMIT_TEX_PAYLOAD_DESC(SURFACE_32, "Surface 32 With Row Stride");
      break;
   case MALI_SURFACE_TYPE_64_WITH_STRIDES:
      PANDECODE_EMIT_TEX_PAYLOAD_DESC(SURFACE_WITH_STRIDE,
                                      "Surface With Stride");
      break;
   default:
      fprintf(ctx->dump_stream, "Unknown surface descriptor type %X\n",
              tex->surface_type);
      break;
   }
#elif PAN_ARCH == 6
   /* v6: always strided surfaces. */
   PANDECODE_EMIT_TEX_PAYLOAD_DESC(SURFACE_WITH_STRIDE, "Surface With Stride");
#else
   STATIC_ASSERT(PAN_ARCH == 7);
   /* v7: YUV formats use multiplanar surfaces; everything else is
    * strided. */
   if (panfrost_is_yuv_format(tex->format)) {
      PANDECODE_EMIT_TEX_PAYLOAD_DESC(MULTIPLANAR_SURFACE, "Surface YUV");
   } else {
      PANDECODE_EMIT_TEX_PAYLOAD_DESC(SURFACE_WITH_STRIDE,
                                      "Surface With Stride");
   }
#endif

#undef PANDECODE_EMIT_TEX_PAYLOAD_DESC
}
314 #endif
315
316 #if PAN_ARCH <= 5
317 void
GENX(pandecode_texture)318 GENX(pandecode_texture)(struct pandecode_context *ctx, uint64_t u, unsigned tex)
319 {
320 const struct mali_texture_packed *cl =
321 pandecode_fetch_gpu_mem(ctx, u, pan_size(TEXTURE));
322
323 pan_unpack(cl, TEXTURE, temp);
324 DUMP_UNPACKED(ctx, TEXTURE, temp, "Texture:\n")
325
326 ctx->indent++;
327 pandecode_texture_payload(ctx, u + pan_size(TEXTURE), &temp);
328 ctx->indent--;
329 }
330 #else
/* Decode and dump an already-fetched texture descriptor. On v9+ the
 * surfaces pointer references an array of PLANE descriptors; on v6/v7
 * it references the legacy surface payload. */
void
GENX(pandecode_texture)(struct pandecode_context *ctx,
                        const struct mali_texture_packed *cl, unsigned tex)
{
   pan_unpack(cl, TEXTURE, temp);
   DUMP_UNPACKED(ctx, TEXTURE, temp, "Texture:\n")

   ctx->indent++;

#if PAN_ARCH >= 9
   /* One plane per level per array layer (times 6 faces for cubes). */
   int plane_count = temp.levels * temp.array_size;

   /* Miptree for each face */
   if (temp.dimension == MALI_TEXTURE_DIMENSION_CUBE)
      plane_count *= 6;

   for (unsigned i = 0; i < plane_count; ++i)
      DUMP_ADDR(ctx, PLANE, temp.surfaces + i * pan_size(PLANE), "Plane %u:\n",
                i);
#else
   pandecode_texture_payload(ctx, temp.surfaces, &temp);
#endif
   ctx->indent--;
}
355 #endif
356
357 #if PAN_ARCH >= 6
358 void
GENX(pandecode_tiler)359 GENX(pandecode_tiler)(struct pandecode_context *ctx, uint64_t gpu_va,
360 unsigned gpu_id)
361 {
362 pan_unpack(PANDECODE_PTR(ctx, gpu_va, struct mali_tiler_context_packed),
363 TILER_CONTEXT, t);
364
365 if (t.heap) {
366 pan_unpack(PANDECODE_PTR(ctx, t.heap, struct mali_tiler_heap_packed),
367 TILER_HEAP, h);
368 DUMP_UNPACKED(ctx, TILER_HEAP, h, "Tiler Heap:\n");
369 }
370
371 DUMP_UNPACKED(ctx, TILER_CONTEXT, t, "Tiler Context @%" PRIx64 ":\n",
372 gpu_va);
373 }
374 #endif
375
376 #if PAN_ARCH >= 9
377 void
GENX(pandecode_fau)378 GENX(pandecode_fau)(struct pandecode_context *ctx, uint64_t addr,
379 unsigned count, const char *name)
380 {
381 if (count == 0)
382 return;
383
384 const uint32_t *PANDECODE_PTR_VAR(ctx, raw, addr);
385
386 pandecode_validate_buffer(ctx, addr, count * 8);
387
388 fprintf(ctx->dump_stream, "%s @%" PRIx64 ":\n", name, addr);
389 for (unsigned i = 0; i < count; ++i) {
390 fprintf(ctx->dump_stream, " %08X %08X\n", raw[2 * i], raw[2 * i + 1]);
391 }
392 fprintf(ctx->dump_stream, "\n");
393 }
394
/* Decode the shader-program descriptor at addr, dump it, disassemble the
 * shader binary it points to, and return the binary's GPU address. */
uint64_t
GENX(pandecode_shader)(struct pandecode_context *ctx, uint64_t addr,
                       const char *label, unsigned gpu_id)
{
   MAP_ADDR(ctx, SHADER_PROGRAM, addr, cl);
   pan_unpack(cl, SHADER_PROGRAM, desc);

   /* NOTE(review): type 8 presumably identifies a shader-program
    * descriptor — confirm against the SHADER_PROGRAM genxml definition. */
   assert(desc.type == 8);

   DUMP_UNPACKED(ctx, SHADER_PROGRAM, desc, "%s Shader @%" PRIx64 ":\n", label,
                 addr);
   pandecode_shader_disassemble(ctx, desc.binary, gpu_id);
   return desc.binary;
}
409
410 static void
pandecode_resources(struct pandecode_context * ctx,uint64_t addr,unsigned size)411 pandecode_resources(struct pandecode_context *ctx, uint64_t addr, unsigned size)
412 {
413 const uint8_t *cl = pandecode_fetch_gpu_mem(ctx, addr, size);
414 assert((size % 0x20) == 0);
415
416 for (unsigned i = 0; i < size; i += 0x20) {
417 unsigned type = (cl[i] & 0xF);
418
419 switch (type) {
420 case MALI_DESCRIPTOR_TYPE_SAMPLER:
421 DUMP_CL(ctx, SAMPLER, cl + i, "Sampler @%" PRIx64 ":\n", addr + i);
422 break;
423 case MALI_DESCRIPTOR_TYPE_TEXTURE:
424 pandecode_log(ctx, "Texture @%" PRIx64 "\n", addr + i);
425 GENX(pandecode_texture)(ctx, (struct mali_texture_packed *)&cl[i], i);
426 break;
427 case MALI_DESCRIPTOR_TYPE_ATTRIBUTE:
428 DUMP_CL(ctx, ATTRIBUTE, cl + i, "Attribute @%" PRIx64 ":\n", addr + i);
429 break;
430 case MALI_DESCRIPTOR_TYPE_BUFFER:
431 DUMP_CL(ctx, BUFFER, cl + i, "Buffer @%" PRIx64 ":\n", addr + i);
432 break;
433 default:
434 fprintf(ctx->dump_stream, "Unknown descriptor type %X\n", type);
435 break;
436 }
437 }
438 }
439
/* Decode an array of resource-table entries. The entry count is packed
 * into the low 6 bits of the (64-byte aligned) table address; each entry
 * in turn points at a table of typed descriptors decoded by
 * pandecode_resources(). */
void
GENX(pandecode_resource_tables)(struct pandecode_context *ctx, uint64_t addr,
                                const char *label)
{
   /* Split the tagged pointer into count (low 6 bits) and address. */
   unsigned count = addr & 0x3F;
   addr = addr & ~0x3F;

   const struct mali_resource_packed *cl =
      pandecode_fetch_gpu_mem(ctx, addr, MALI_RESOURCE_LENGTH * count);

   pandecode_log(ctx, "%s resource table @%" PRIx64 "\n", label, addr);
   ctx->indent += 2;
   for (unsigned i = 0; i < count; ++i) {
      pan_unpack(&cl[i], RESOURCE, entry);
      DUMP_UNPACKED(ctx, RESOURCE, entry, "Entry %u @%" PRIx64 ":\n", i,
                    addr + i * MALI_RESOURCE_LENGTH);

      ctx->indent += 2;
      if (entry.address)
         pandecode_resources(ctx, entry.address, entry.size);
      ctx->indent -= 2;
   }
   ctx->indent -= 2;
}
464
465 void
GENX(pandecode_depth_stencil)466 GENX(pandecode_depth_stencil)(struct pandecode_context *ctx, uint64_t addr)
467 {
468 MAP_ADDR(ctx, DEPTH_STENCIL, addr, cl);
469 pan_unpack(cl, DEPTH_STENCIL, desc);
470 DUMP_UNPACKED(ctx, DEPTH_STENCIL, desc, "Depth/stencil");
471 }
472
473 void
GENX(pandecode_shader_environment)474 GENX(pandecode_shader_environment)(struct pandecode_context *ctx,
475 const struct MALI_SHADER_ENVIRONMENT *p,
476 unsigned gpu_id)
477 {
478 if (p->shader)
479 GENX(pandecode_shader)(ctx, p->shader, "Shader", gpu_id);
480
481 if (p->resources)
482 GENX(pandecode_resource_tables)(ctx, p->resources, "Resources");
483
484 if (p->thread_storage)
485 DUMP_ADDR(ctx, LOCAL_STORAGE, p->thread_storage, "Local Storage:\n");
486
487 if (p->fau)
488 GENX(pandecode_fau)(ctx, p->fau, p->fau_count, "FAU");
489 }
490
/* Decode `count` blend descriptors starting at `blend`, disassembling any
 * blend shaders they reference. frag_shader supplies the upper address
 * bits needed to reconstruct blend-shader pointers on v6+. */
void
GENX(pandecode_blend_descs)(struct pandecode_context *ctx, uint64_t blend,
                            unsigned count, uint64_t frag_shader,
                            unsigned gpu_id)
{
   for (unsigned i = 0; i < count; ++i) {
      /* Fetched inside the loop so nothing is mapped when count == 0;
       * pandecode_blend() indexes into the array itself. */
      struct mali_blend_packed *PANDECODE_PTR_VAR(ctx, blend_descs, blend);

      uint64_t blend_shader =
         GENX(pandecode_blend)(ctx, blend_descs, i, frag_shader);
      if (blend_shader) {
         /* NOTE(review): no newline after the heading — presumably the
          * disassembler starts its own line; confirm against
          * pandecode_shader_disassemble. */
         fprintf(ctx->dump_stream, "Blend shader %u @%" PRIx64 "", i,
                 blend_shader);
         pandecode_shader_disassemble(ctx, blend_shader, gpu_id);
      }
   }
}
508
/* Decode a draw-call descriptor (DCD) on v9+: depth/stencil state, blend
 * descriptors, and the shader environment, then the DRAW itself.
 * The second parameter is unused on these gens (kept for signature parity
 * with older-gen pandecode_dcd, where it carries the job type). */
void
GENX(pandecode_dcd)(struct pandecode_context *ctx, const struct MALI_DRAW *p,
                    unsigned unused, unsigned gpu_id)
{
   /* No fragment-shader address is available here, so blend-shader
    * pointers cannot be reconstructed (pandecode_blend returns 0). */
   uint64_t frag_shader = 0;

   GENX(pandecode_depth_stencil)(ctx, p->depth_stencil);
   GENX(pandecode_blend_descs)
   (ctx, p->blend, p->blend_count, frag_shader, gpu_id);
   GENX(pandecode_shader_environment)(ctx, &p->shader, gpu_id);
   DUMP_UNPACKED(ctx, DRAW, *p, "Draw:\n");
}
521 #endif
522