1 /**************************************************************************
2 *
3 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Copyright 2008 VMware, Inc.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * on the rights to use, copy, modify, merge, publish, distribute, sub
11 * license, and/or sell copies of the Software, and to permit persons to whom
12 * the Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include "dd_pipe.h"
29
30 #include "util/u_dump.h"
31 #include "util/format/u_format.h"
32 #include "util/u_framebuffer.h"
33 #include "util/u_helpers.h"
34 #include "util/u_inlines.h"
35 #include "util/u_memory.h"
36 #include "util/u_process.h"
37 #include "tgsi/tgsi_parse.h"
38 #include "tgsi/tgsi_scan.h"
39 #include "util/os_time.h"
40 #include <inttypes.h>
41 #include "pipe/p_config.h"
42
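/* Compose a unique dump filename of the form
 * $HOME/DD_DIR/<process>_<pid>_<counter> and make sure the directory exists.
 */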
43 void
dd_get_debug_filename_and_mkdir(char *buf, size_t buflen, bool verbose)
45 {
46 static unsigned index;
47 char proc_name[128], dir[256];
48
49 if (!os_get_process_name(proc_name, sizeof(proc_name))) {
50 fprintf(stderr, "dd: can't get the process name\n");
51 strcpy(proc_name, "unknown");
52 }
53
54 snprintf(dir, sizeof(dir), "%s/"DD_DIR, debug_get_option("HOME", "."));
55
56 if (mkdir(dir, 0774) && errno != EEXIST)
57 fprintf(stderr, "dd: can't create a directory (%i)\n", errno);
58
59 snprintf(buf, buflen, "%s/%s_%u_%08u", dir, proc_name, getpid(),
60 (unsigned int)p_atomic_inc_return(&index) - 1);
61
62 if (verbose)
63 fprintf(stderr, "dd: dumping to file %s\n", buf);
64 }
65
66 FILE *
dd_get_debug_file(bool verbose)
68 {
69 char name[512];
70 FILE *f;
71
72 dd_get_debug_filename_and_mkdir(name, sizeof(name), verbose);
73 f = fopen(name, "w");
74 if (!f) {
75 fprintf(stderr, "dd: can't open file %s\n", name);
76 return NULL;
77 }
78
79 return f;
80 }
81
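/* Extract the call number from an apitrace marker string (which is not
 * necessarily zero-terminated) so dumps can be correlated with apitrace calls.
 */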
82 void
dd_parse_apitrace_marker(const char *string, int len, unsigned *call_number)
84 {
85 unsigned num;
86 char *s;
87
88 if (len <= 0)
89 return;
90
91 /* Make it zero-terminated. */
92 s = alloca(len + 1);
93 memcpy(s, string, len);
94 s[len] = 0;
95
96 /* Parse the number. */
97 errno = 0;
98 num = strtol(s, NULL, 10);
99 if (errno)
100 return;
101
102 *call_number = num;
103 }
104
105 void
dd_write_header(FILE *f, struct pipe_screen *screen, unsigned apitrace_call_number)
107 {
108 char cmd_line[4096];
109 if (os_get_command_line(cmd_line, sizeof(cmd_line)))
110 fprintf(f, "Command: %s\n", cmd_line);
111 fprintf(f, "Driver vendor: %s\n", screen->get_vendor(screen));
112 fprintf(f, "Device vendor: %s\n", screen->get_device_vendor(screen));
113 fprintf(f, "Device name: %s\n\n", screen->get_name(screen));
114
115 if (apitrace_call_number)
116 fprintf(f, "Last apitrace call: %u\n\n", apitrace_call_number);
117 }
118
119 FILE *
dd_get_file_stream(struct dd_screen *dscreen, unsigned apitrace_call_number)
121 {
122 struct pipe_screen *screen = dscreen->screen;
123
124 FILE *f = dd_get_debug_file(dscreen->verbose);
125 if (!f)
126 return NULL;
127
128 dd_write_header(f, screen, apitrace_call_number);
129 return f;
130 }
131
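/* Append the tail of the kernel log to the dump (Linux only). */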
132 static void
dd_dump_dmesg(FILE *f)
134 {
135 #ifdef PIPE_OS_LINUX
136 char line[2000];
137 FILE *p = popen("dmesg | tail -n60", "r");
138
139 if (!p)
140 return;
141
142 fprintf(f, "\nLast 60 lines of dmesg:\n\n");
143 while (fgets(line, sizeof(line), p))
144 fputs(line, f);
145
146 pclose(p);
147 #endif
148 }
149
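/* Return how many viewports can be active: PIPE_MAX_VIEWPORTS if the last
 * pre-rasterization shader writes the viewport index, 1 otherwise.
 */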
150 static unsigned
dd_num_active_viewports(struct dd_draw_state *dstate)
152 {
153 struct tgsi_shader_info info;
154 const struct tgsi_token *tokens;
155
156 if (dstate->shaders[PIPE_SHADER_GEOMETRY])
157 tokens = dstate->shaders[PIPE_SHADER_GEOMETRY]->state.shader.tokens;
158 else if (dstate->shaders[PIPE_SHADER_TESS_EVAL])
159 tokens = dstate->shaders[PIPE_SHADER_TESS_EVAL]->state.shader.tokens;
160 else if (dstate->shaders[PIPE_SHADER_VERTEX])
161 tokens = dstate->shaders[PIPE_SHADER_VERTEX]->state.shader.tokens;
162 else
163 return 1;
164
165 if (tokens) {
166 tgsi_scan_shader(tokens, &info);
167 if (info.writes_viewport_index)
168 return PIPE_MAX_VIEWPORTS;
169 }
170
171 return 1;
172 }
173
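/* Helper macros that print a piece of state through the matching
 * util_dump_* function, optionally coloring the name with ANSI escapes.
 */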
174 #define COLOR_RESET "\033[0m"
175 #define COLOR_SHADER "\033[1;32m"
176 #define COLOR_STATE "\033[1;33m"
177
178 #define DUMP(name, var) do { \
179 fprintf(f, COLOR_STATE #name ": " COLOR_RESET); \
180 util_dump_##name(f, var); \
181 fprintf(f, "\n"); \
182 } while(0)
183
184 #define DUMP_I(name, var, i) do { \
185 fprintf(f, COLOR_STATE #name " %i: " COLOR_RESET, i); \
186 util_dump_##name(f, var); \
187 fprintf(f, "\n"); \
188 } while(0)
189
190 #define DUMP_M(name, var, member) do { \
191 fprintf(f, " " #member ": "); \
192 util_dump_##name(f, (var)->member); \
193 fprintf(f, "\n"); \
194 } while(0)
195
196 #define DUMP_M_ADDR(name, var, member) do { \
197 fprintf(f, " " #member ": "); \
198 util_dump_##name(f, &(var)->member); \
199 fprintf(f, "\n"); \
200 } while(0)
201
202 #define PRINT_NAMED(type, name, value) \
203 do { \
204 fprintf(f, COLOR_STATE "%s" COLOR_RESET " = ", name); \
205 util_dump_##type(f, value); \
206 fprintf(f, "\n"); \
207 } while (0)
208
209 static void
util_dump_uint(FILE *f, unsigned i)
211 {
212 fprintf(f, "%u", i);
213 }
214
215 static void
util_dump_int(FILE *f, int i)
217 {
218 fprintf(f, "%d", i);
219 }
220
221 static void
util_dump_hex(FILE *f, unsigned i)
223 {
224 fprintf(f, "0x%x", i);
225 }
226
227 static void
util_dump_double(FILE *f, double d)
229 {
230 fprintf(f, "%f", d);
231 }
232
233 static void
util_dump_format(FILE *f, enum pipe_format format)
235 {
236 fprintf(f, "%s", util_format_name(format));
237 }
238
239 static void
util_dump_color_union(FILE *f, const union pipe_color_union *color)
241 {
242 fprintf(f, "{f = {%f, %f, %f, %f}, ui = {%u, %u, %u, %u}",
243 color->f[0], color->f[1], color->f[2], color->f[3],
244 color->ui[0], color->ui[1], color->ui[2], color->ui[3]);
245 }
246
247 static void
dd_dump_render_condition(struct dd_draw_state *dstate, FILE *f)
249 {
250 if (dstate->render_cond.query) {
251 fprintf(f, "render condition:\n");
252 DUMP_M(query_type, &dstate->render_cond, query->type);
253 DUMP_M(uint, &dstate->render_cond, condition);
254 DUMP_M(uint, &dstate->render_cond, mode);
255 fprintf(f, "\n");
256 }
257 }
258
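/* Dump everything bound to one shader stage: the shader itself, constant
 * buffers, sampler states, sampler views, images, and shader buffers. For
 * the fragment stage this also covers rasterizer, viewport, scissor, and
 * polygon stipple state.
 */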
259 static void
dd_dump_shader(struct dd_draw_state *dstate, enum pipe_shader_type sh, FILE *f)
261 {
262 int i;
263 const char *shader_str[PIPE_SHADER_TYPES];
264
265 shader_str[PIPE_SHADER_VERTEX] = "VERTEX";
266 shader_str[PIPE_SHADER_TESS_CTRL] = "TESS_CTRL";
267 shader_str[PIPE_SHADER_TESS_EVAL] = "TESS_EVAL";
268 shader_str[PIPE_SHADER_GEOMETRY] = "GEOMETRY";
269 shader_str[PIPE_SHADER_FRAGMENT] = "FRAGMENT";
270 shader_str[PIPE_SHADER_COMPUTE] = "COMPUTE";
271
272 if (sh == PIPE_SHADER_TESS_CTRL &&
273 !dstate->shaders[PIPE_SHADER_TESS_CTRL] &&
274 dstate->shaders[PIPE_SHADER_TESS_EVAL])
275 fprintf(f, "tess_state: {default_outer_level = {%f, %f, %f, %f}, "
276 "default_inner_level = {%f, %f}}\n",
277 dstate->tess_default_levels[0],
278 dstate->tess_default_levels[1],
279 dstate->tess_default_levels[2],
280 dstate->tess_default_levels[3],
281 dstate->tess_default_levels[4],
282 dstate->tess_default_levels[5]);
283
284 if (sh == PIPE_SHADER_FRAGMENT)
285 if (dstate->rs) {
286 unsigned num_viewports = dd_num_active_viewports(dstate);
287
288 if (dstate->rs->state.rs.clip_plane_enable)
289 DUMP(clip_state, &dstate->clip_state);
290
291 for (i = 0; i < num_viewports; i++)
292 DUMP_I(viewport_state, &dstate->viewports[i], i);
293
294 if (dstate->rs->state.rs.scissor)
295 for (i = 0; i < num_viewports; i++)
296 DUMP_I(scissor_state, &dstate->scissors[i], i);
297
298 DUMP(rasterizer_state, &dstate->rs->state.rs);
299
300 if (dstate->rs->state.rs.poly_stipple_enable)
301 DUMP(poly_stipple, &dstate->polygon_stipple);
302 fprintf(f, "\n");
303 }
304
305 if (!dstate->shaders[sh])
306 return;
307
308 fprintf(f, COLOR_SHADER "begin shader: %s" COLOR_RESET "\n", shader_str[sh]);
309 DUMP(shader_state, &dstate->shaders[sh]->state.shader);
310
311 for (i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++)
312 if (dstate->constant_buffers[sh][i].buffer ||
313 dstate->constant_buffers[sh][i].user_buffer) {
314 DUMP_I(constant_buffer, &dstate->constant_buffers[sh][i], i);
315 if (dstate->constant_buffers[sh][i].buffer)
316 DUMP_M(resource, &dstate->constant_buffers[sh][i], buffer);
317 }
318
319 for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
320 if (dstate->sampler_states[sh][i])
321 DUMP_I(sampler_state, &dstate->sampler_states[sh][i]->state.sampler, i);
322
323 for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
324 if (dstate->sampler_views[sh][i]) {
325 DUMP_I(sampler_view, dstate->sampler_views[sh][i], i);
326 DUMP_M(resource, dstate->sampler_views[sh][i], texture);
327 }
328
329 for (i = 0; i < PIPE_MAX_SHADER_IMAGES; i++)
330 if (dstate->shader_images[sh][i].resource) {
331 DUMP_I(image_view, &dstate->shader_images[sh][i], i);
332 if (dstate->shader_images[sh][i].resource)
333 DUMP_M(resource, &dstate->shader_images[sh][i], resource);
334 }
335
336 for (i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++)
337 if (dstate->shader_buffers[sh][i].buffer) {
338 DUMP_I(shader_buffer, &dstate->shader_buffers[sh][i], i);
339 if (dstate->shader_buffers[sh][i].buffer)
340 DUMP_M(resource, &dstate->shader_buffers[sh][i], buffer);
341 }
342
343 fprintf(f, COLOR_SHADER "end shader: %s" COLOR_RESET "\n\n", shader_str[sh]);
344 }
345
346 static void
dd_dump_flush(struct dd_draw_state *dstate, struct call_flush *info, FILE *f)
348 {
349 fprintf(f, "%s:\n", __func__+8);
350 DUMP_M(hex, info, flags);
351 }
352
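/* Dump the complete draw call: draw parameters, vertex buffers, vertex
 * elements, stream output, all shader stages, and the remaining
 * fixed-function state down to the framebuffer.
 */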
353 static void
dd_dump_draw_vbo(struct dd_draw_state *dstate, struct pipe_draw_info *info, FILE *f)
355 {
356 int sh, i;
357
358 DUMP(draw_info, info);
359 if (info->count_from_stream_output)
360 DUMP_M(stream_output_target, info,
361 count_from_stream_output);
362 if (info->indirect) {
363 DUMP_M(resource, info, indirect->buffer);
364 if (info->indirect->indirect_draw_count)
365 DUMP_M(resource, info, indirect->indirect_draw_count);
366 }
367
368 fprintf(f, "\n");
369
370 /* TODO: dump active queries */
371
372 dd_dump_render_condition(dstate, f);
373
374 for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
375 if (dstate->vertex_buffers[i].buffer.resource) {
376 DUMP_I(vertex_buffer, &dstate->vertex_buffers[i], i);
377 if (!dstate->vertex_buffers[i].is_user_buffer)
378 DUMP_M(resource, &dstate->vertex_buffers[i], buffer.resource);
379 }
380
381 if (dstate->velems) {
382 PRINT_NAMED(uint, "num vertex elements",
383 dstate->velems->state.velems.count);
384 for (i = 0; i < dstate->velems->state.velems.count; i++) {
385 fprintf(f, " ");
386 DUMP_I(vertex_element, &dstate->velems->state.velems.velems[i], i);
387 }
388 }
389
390 PRINT_NAMED(uint, "num stream output targets", dstate->num_so_targets);
391 for (i = 0; i < dstate->num_so_targets; i++)
392 if (dstate->so_targets[i]) {
393 DUMP_I(stream_output_target, dstate->so_targets[i], i);
394 DUMP_M(resource, dstate->so_targets[i], buffer);
395 fprintf(f, " offset = %i\n", dstate->so_offsets[i]);
396 }
397
398 fprintf(f, "\n");
399 for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
400 if (sh == PIPE_SHADER_COMPUTE)
401 continue;
402
403 dd_dump_shader(dstate, sh, f);
404 }
405
406 if (dstate->dsa)
407 DUMP(depth_stencil_alpha_state, &dstate->dsa->state.dsa);
408 DUMP(stencil_ref, &dstate->stencil_ref);
409
410 if (dstate->blend)
411 DUMP(blend_state, &dstate->blend->state.blend);
412 DUMP(blend_color, &dstate->blend_color);
413
414 PRINT_NAMED(uint, "min_samples", dstate->min_samples);
415 PRINT_NAMED(hex, "sample_mask", dstate->sample_mask);
416 fprintf(f, "\n");
417
418 DUMP(framebuffer_state, &dstate->framebuffer_state);
419 for (i = 0; i < dstate->framebuffer_state.nr_cbufs; i++)
420 if (dstate->framebuffer_state.cbufs[i]) {
421 fprintf(f, " " COLOR_STATE "cbufs[%i]:" COLOR_RESET "\n ", i);
422 DUMP(surface, dstate->framebuffer_state.cbufs[i]);
423 fprintf(f, " ");
424 DUMP(resource, dstate->framebuffer_state.cbufs[i]->texture);
425 }
426 if (dstate->framebuffer_state.zsbuf) {
427 fprintf(f, " " COLOR_STATE "zsbuf:" COLOR_RESET "\n ");
428 DUMP(surface, dstate->framebuffer_state.zsbuf);
429 fprintf(f, " ");
430 DUMP(resource, dstate->framebuffer_state.zsbuf->texture);
431 }
432 fprintf(f, "\n");
433 }
434
435 static void
dd_dump_launch_grid(struct dd_draw_state *dstate, struct pipe_grid_info *info, FILE *f)
437 {
438 fprintf(f, "%s:\n", __func__+8);
439 DUMP(grid_info, info);
440 fprintf(f, "\n");
441
442 dd_dump_shader(dstate, PIPE_SHADER_COMPUTE, f);
443 fprintf(f, "\n");
444 }
445
446 static void
dd_dump_resource_copy_region(struct dd_draw_state *dstate,
448 struct call_resource_copy_region *info,
449 FILE *f)
450 {
451 fprintf(f, "%s:\n", __func__+8);
452 DUMP_M(resource, info, dst);
453 DUMP_M(uint, info, dst_level);
454 DUMP_M(uint, info, dstx);
455 DUMP_M(uint, info, dsty);
456 DUMP_M(uint, info, dstz);
457 DUMP_M(resource, info, src);
458 DUMP_M(uint, info, src_level);
459 DUMP_M_ADDR(box, info, src_box);
460 }
461
462 static void
dd_dump_blit(struct dd_draw_state *dstate, struct pipe_blit_info *info, FILE *f)
464 {
465 fprintf(f, "%s:\n", __func__+8);
466 DUMP_M(resource, info, dst.resource);
467 DUMP_M(uint, info, dst.level);
468 DUMP_M_ADDR(box, info, dst.box);
469 DUMP_M(format, info, dst.format);
470
471 DUMP_M(resource, info, src.resource);
472 DUMP_M(uint, info, src.level);
473 DUMP_M_ADDR(box, info, src.box);
474 DUMP_M(format, info, src.format);
475
476 DUMP_M(hex, info, mask);
477 DUMP_M(uint, info, filter);
478 DUMP_M(uint, info, scissor_enable);
479 DUMP_M_ADDR(scissor_state, info, scissor);
480 DUMP_M(uint, info, render_condition_enable);
481
482 if (info->render_condition_enable)
483 dd_dump_render_condition(dstate, f);
484 }
485
486 static void
dd_dump_generate_mipmap(struct dd_draw_state *dstate, FILE *f)
488 {
489 fprintf(f, "%s:\n", __func__+8);
490 /* TODO */
491 }
492
493 static void
dd_dump_get_query_result_resource(struct call_get_query_result_resource *info, FILE *f)
495 {
496 fprintf(f, "%s:\n", __func__ + 8);
497 DUMP_M(query_type, info, query_type);
498 DUMP_M(uint, info, wait);
499 DUMP_M(query_value_type, info, result_type);
500 DUMP_M(int, info, index);
501 DUMP_M(resource, info, resource);
502 DUMP_M(uint, info, offset);
503 }
504
505 static void
dd_dump_flush_resource(struct dd_draw_state *dstate, struct pipe_resource *res,
507 FILE *f)
508 {
509 fprintf(f, "%s:\n", __func__+8);
510 DUMP(resource, res);
511 }
512
513 static void
dd_dump_clear(struct dd_draw_state *dstate, struct call_clear *info, FILE *f)
515 {
516 fprintf(f, "%s:\n", __func__+8);
517 DUMP_M(uint, info, buffers);
518 fprintf(f, " scissor_state: %d,%d %d,%d\n",
519 info->scissor_state.minx, info->scissor_state.miny,
520 info->scissor_state.maxx, info->scissor_state.maxy);
521 DUMP_M_ADDR(color_union, info, color);
522 DUMP_M(double, info, depth);
523 DUMP_M(hex, info, stencil);
524 }
525
526 static void
dd_dump_clear_buffer(struct dd_draw_state *dstate, struct call_clear_buffer *info,
528 FILE *f)
529 {
530 int i;
const unsigned char *value = (const unsigned char*)info->clear_value;
532
533 fprintf(f, "%s:\n", __func__+8);
534 DUMP_M(resource, info, res);
535 DUMP_M(uint, info, offset);
536 DUMP_M(uint, info, size);
537 DUMP_M(uint, info, clear_value_size);
538
539 fprintf(f, " clear_value:");
540 for (i = 0; i < info->clear_value_size; i++)
541 fprintf(f, " %02x", value[i]);
542 fprintf(f, "\n");
543 }
544
545 static void
dd_dump_transfer_map(struct call_transfer_map *info, FILE *f)
547 {
548 fprintf(f, "%s:\n", __func__+8);
549 DUMP_M_ADDR(transfer, info, transfer);
550 DUMP_M(ptr, info, transfer_ptr);
551 DUMP_M(ptr, info, ptr);
552 }
553
554 static void
dd_dump_transfer_flush_region(struct call_transfer_flush_region *info, FILE *f)
556 {
557 fprintf(f, "%s:\n", __func__+8);
558 DUMP_M_ADDR(transfer, info, transfer);
559 DUMP_M(ptr, info, transfer_ptr);
560 DUMP_M_ADDR(box, info, box);
561 }
562
563 static void
dd_dump_transfer_unmap(struct call_transfer_unmap *info, FILE *f)
565 {
566 fprintf(f, "%s:\n", __func__+8);
567 DUMP_M_ADDR(transfer, info, transfer);
568 DUMP_M(ptr, info, transfer_ptr);
569 }
570
571 static void
dd_dump_buffer_subdata(struct call_buffer_subdata *info, FILE *f)
573 {
574 fprintf(f, "%s:\n", __func__+8);
575 DUMP_M(resource, info, resource);
576 DUMP_M(transfer_usage, info, usage);
577 DUMP_M(uint, info, offset);
578 DUMP_M(uint, info, size);
579 DUMP_M(ptr, info, data);
580 }
581
582 static void
dd_dump_texture_subdata(struct call_texture_subdata *info, FILE *f)
584 {
585 fprintf(f, "%s:\n", __func__+8);
586 DUMP_M(resource, info, resource);
587 DUMP_M(uint, info, level);
588 DUMP_M(transfer_usage, info, usage);
589 DUMP_M_ADDR(box, info, box);
590 DUMP_M(ptr, info, data);
591 DUMP_M(uint, info, stride);
592 DUMP_M(uint, info, layer_stride);
593 }
594
595 static void
dd_dump_clear_texture(struct dd_draw_state *dstate, FILE *f)
597 {
598 fprintf(f, "%s:\n", __func__+8);
599 /* TODO */
600 }
601
602 static void
dd_dump_clear_render_target(struct dd_draw_state *dstate, FILE *f)
604 {
605 fprintf(f, "%s:\n", __func__+8);
606 /* TODO */
607 }
608
609 static void
dd_dump_clear_depth_stencil(struct dd_draw_state *dstate, FILE *f)
611 {
612 fprintf(f, "%s:\n", __func__+8);
613 /* TODO */
614 }
615
616 static void
dd_dump_driver_state(struct dd_context *dctx, FILE *f, unsigned flags)
618 {
619 if (dctx->pipe->dump_debug_state) {
620 fprintf(f,"\n\n**************************************************"
621 "***************************\n");
622 fprintf(f, "Driver-specific state:\n\n");
623 dctx->pipe->dump_debug_state(dctx->pipe, f, flags);
624 }
625 }
626
627 static void
dd_dump_call(FILE *f, struct dd_draw_state *state, struct dd_call *call)
629 {
630 switch (call->type) {
631 case CALL_FLUSH:
632 dd_dump_flush(state, &call->info.flush, f);
633 break;
634 case CALL_DRAW_VBO:
635 dd_dump_draw_vbo(state, &call->info.draw_vbo.draw, f);
636 break;
637 case CALL_LAUNCH_GRID:
638 dd_dump_launch_grid(state, &call->info.launch_grid, f);
639 break;
640 case CALL_RESOURCE_COPY_REGION:
641 dd_dump_resource_copy_region(state,
642 &call->info.resource_copy_region, f);
643 break;
644 case CALL_BLIT:
645 dd_dump_blit(state, &call->info.blit, f);
646 break;
647 case CALL_FLUSH_RESOURCE:
648 dd_dump_flush_resource(state, call->info.flush_resource, f);
649 break;
650 case CALL_CLEAR:
651 dd_dump_clear(state, &call->info.clear, f);
652 break;
653 case CALL_CLEAR_BUFFER:
654 dd_dump_clear_buffer(state, &call->info.clear_buffer, f);
655 break;
656 case CALL_CLEAR_TEXTURE:
657 dd_dump_clear_texture(state, f);
658 break;
659 case CALL_CLEAR_RENDER_TARGET:
660 dd_dump_clear_render_target(state, f);
661 break;
662 case CALL_CLEAR_DEPTH_STENCIL:
663 dd_dump_clear_depth_stencil(state, f);
664 break;
665 case CALL_GENERATE_MIPMAP:
666 dd_dump_generate_mipmap(state, f);
667 break;
668 case CALL_GET_QUERY_RESULT_RESOURCE:
669 dd_dump_get_query_result_resource(&call->info.get_query_result_resource, f);
670 break;
671 case CALL_TRANSFER_MAP:
672 dd_dump_transfer_map(&call->info.transfer_map, f);
673 break;
674 case CALL_TRANSFER_FLUSH_REGION:
675 dd_dump_transfer_flush_region(&call->info.transfer_flush_region, f);
676 break;
677 case CALL_TRANSFER_UNMAP:
678 dd_dump_transfer_unmap(&call->info.transfer_unmap, f);
679 break;
680 case CALL_BUFFER_SUBDATA:
681 dd_dump_buffer_subdata(&call->info.buffer_subdata, f);
682 break;
683 case CALL_TEXTURE_SUBDATA:
684 dd_dump_texture_subdata(&call->info.texture_subdata, f);
685 break;
686 }
687 }
688
689 static void
dd_kill_process(void)
691 {
692 #ifdef PIPE_OS_UNIX
693 sync();
694 #endif
695 fprintf(stderr, "dd: Aborting the process...\n");
696 fflush(stdout);
697 fflush(stderr);
698 exit(1);
699 }
700
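/* Drop the references a recorded call holds on pipe objects. */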
701 static void
dd_unreference_copy_of_call(struct dd_call *dst)
703 {
704 switch (dst->type) {
705 case CALL_FLUSH:
706 break;
707 case CALL_DRAW_VBO:
708 pipe_so_target_reference(&dst->info.draw_vbo.draw.count_from_stream_output, NULL);
709 pipe_resource_reference(&dst->info.draw_vbo.indirect.buffer, NULL);
710 pipe_resource_reference(&dst->info.draw_vbo.indirect.indirect_draw_count, NULL);
711 if (dst->info.draw_vbo.draw.index_size &&
712 !dst->info.draw_vbo.draw.has_user_indices)
713 pipe_resource_reference(&dst->info.draw_vbo.draw.index.resource, NULL);
714 else
715 dst->info.draw_vbo.draw.index.user = NULL;
716 break;
717 case CALL_LAUNCH_GRID:
718 pipe_resource_reference(&dst->info.launch_grid.indirect, NULL);
719 break;
720 case CALL_RESOURCE_COPY_REGION:
721 pipe_resource_reference(&dst->info.resource_copy_region.dst, NULL);
722 pipe_resource_reference(&dst->info.resource_copy_region.src, NULL);
723 break;
724 case CALL_BLIT:
725 pipe_resource_reference(&dst->info.blit.dst.resource, NULL);
726 pipe_resource_reference(&dst->info.blit.src.resource, NULL);
727 break;
728 case CALL_FLUSH_RESOURCE:
729 pipe_resource_reference(&dst->info.flush_resource, NULL);
730 break;
731 case CALL_CLEAR:
732 break;
733 case CALL_CLEAR_BUFFER:
734 pipe_resource_reference(&dst->info.clear_buffer.res, NULL);
735 break;
736 case CALL_CLEAR_TEXTURE:
737 break;
738 case CALL_CLEAR_RENDER_TARGET:
739 break;
740 case CALL_CLEAR_DEPTH_STENCIL:
741 break;
742 case CALL_GENERATE_MIPMAP:
743 pipe_resource_reference(&dst->info.generate_mipmap.res, NULL);
744 break;
745 case CALL_GET_QUERY_RESULT_RESOURCE:
746 pipe_resource_reference(&dst->info.get_query_result_resource.resource, NULL);
747 break;
748 case CALL_TRANSFER_MAP:
749 pipe_resource_reference(&dst->info.transfer_map.transfer.resource, NULL);
750 break;
751 case CALL_TRANSFER_FLUSH_REGION:
752 pipe_resource_reference(&dst->info.transfer_flush_region.transfer.resource, NULL);
753 break;
754 case CALL_TRANSFER_UNMAP:
755 pipe_resource_reference(&dst->info.transfer_unmap.transfer.resource, NULL);
756 break;
757 case CALL_BUFFER_SUBDATA:
758 pipe_resource_reference(&dst->info.buffer_subdata.resource, NULL);
759 break;
760 case CALL_TEXTURE_SUBDATA:
761 pipe_resource_reference(&dst->info.texture_subdata.resource, NULL);
762 break;
763 }
764 }
765
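/* Prepare a draw-state snapshot: clear only the pointers to pipe objects
 * and point the pointer members at the copy's own storage.
 */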
766 static void
dd_init_copy_of_draw_state(struct dd_draw_state_copy *state)
768 {
769 unsigned i,j;
770
771 /* Just clear pointers to gallium objects. Don't clear the whole structure,
772 * because it would kill performance with its size of 130 KB.
773 */
774 memset(state->base.vertex_buffers, 0,
775 sizeof(state->base.vertex_buffers));
776 memset(state->base.so_targets, 0,
777 sizeof(state->base.so_targets));
778 memset(state->base.constant_buffers, 0,
779 sizeof(state->base.constant_buffers));
780 memset(state->base.sampler_views, 0,
781 sizeof(state->base.sampler_views));
782 memset(state->base.shader_images, 0,
783 sizeof(state->base.shader_images));
784 memset(state->base.shader_buffers, 0,
785 sizeof(state->base.shader_buffers));
786 memset(&state->base.framebuffer_state, 0,
787 sizeof(state->base.framebuffer_state));
788
789 memset(state->shaders, 0, sizeof(state->shaders));
790
791 state->base.render_cond.query = &state->render_cond;
792
793 for (i = 0; i < PIPE_SHADER_TYPES; i++) {
794 state->base.shaders[i] = &state->shaders[i];
795 for (j = 0; j < PIPE_MAX_SAMPLERS; j++)
796 state->base.sampler_states[i][j] = &state->sampler_states[i][j];
797 }
798
799 state->base.velems = &state->velems;
800 state->base.rs = &state->rs;
801 state->base.dsa = &state->dsa;
802 state->base.blend = &state->blend;
803 }
804
805 static void
dd_unreference_copy_of_draw_state(struct dd_draw_state_copy *state)
807 {
808 struct dd_draw_state *dst = &state->base;
809 unsigned i,j;
810
811 for (i = 0; i < ARRAY_SIZE(dst->vertex_buffers); i++)
812 pipe_vertex_buffer_unreference(&dst->vertex_buffers[i]);
813 for (i = 0; i < ARRAY_SIZE(dst->so_targets); i++)
814 pipe_so_target_reference(&dst->so_targets[i], NULL);
815
816 for (i = 0; i < PIPE_SHADER_TYPES; i++) {
817 if (dst->shaders[i])
818 tgsi_free_tokens(dst->shaders[i]->state.shader.tokens);
819
820 for (j = 0; j < PIPE_MAX_CONSTANT_BUFFERS; j++)
821 pipe_resource_reference(&dst->constant_buffers[i][j].buffer, NULL);
822 for (j = 0; j < PIPE_MAX_SAMPLERS; j++)
823 pipe_sampler_view_reference(&dst->sampler_views[i][j], NULL);
824 for (j = 0; j < PIPE_MAX_SHADER_IMAGES; j++)
825 pipe_resource_reference(&dst->shader_images[i][j].resource, NULL);
826 for (j = 0; j < PIPE_MAX_SHADER_BUFFERS; j++)
827 pipe_resource_reference(&dst->shader_buffers[i][j].buffer, NULL);
828 }
829
830 util_unreference_framebuffer_state(&dst->framebuffer_state);
831 }
832
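/* Snapshot the currently bound state into the record, taking references
 * on pipe resources so they stay valid until the record is dumped.
 */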
833 static void
dd_copy_draw_state(struct dd_draw_state *dst, struct dd_draw_state *src)
835 {
836 unsigned i,j;
837
838 if (src->render_cond.query) {
839 *dst->render_cond.query = *src->render_cond.query;
840 dst->render_cond.condition = src->render_cond.condition;
841 dst->render_cond.mode = src->render_cond.mode;
842 } else {
843 dst->render_cond.query = NULL;
844 }
845
846 for (i = 0; i < ARRAY_SIZE(src->vertex_buffers); i++) {
847 pipe_vertex_buffer_reference(&dst->vertex_buffers[i],
848 &src->vertex_buffers[i]);
849 }
850
851 dst->num_so_targets = src->num_so_targets;
852 for (i = 0; i < src->num_so_targets; i++)
853 pipe_so_target_reference(&dst->so_targets[i], src->so_targets[i]);
854 memcpy(dst->so_offsets, src->so_offsets, sizeof(src->so_offsets));
855
856 for (i = 0; i < PIPE_SHADER_TYPES; i++) {
857 if (!src->shaders[i]) {
858 dst->shaders[i] = NULL;
859 continue;
860 }
861
862 if (src->shaders[i]) {
863 dst->shaders[i]->state.shader = src->shaders[i]->state.shader;
864 if (src->shaders[i]->state.shader.tokens) {
865 dst->shaders[i]->state.shader.tokens =
866 tgsi_dup_tokens(src->shaders[i]->state.shader.tokens);
867 } else {
868 dst->shaders[i]->state.shader.ir.nir = NULL;
869 }
870 } else {
871 dst->shaders[i] = NULL;
872 }
873
874 for (j = 0; j < PIPE_MAX_CONSTANT_BUFFERS; j++) {
875 pipe_resource_reference(&dst->constant_buffers[i][j].buffer,
876 src->constant_buffers[i][j].buffer);
877 memcpy(&dst->constant_buffers[i][j], &src->constant_buffers[i][j],
878 sizeof(src->constant_buffers[i][j]));
879 }
880
881 for (j = 0; j < PIPE_MAX_SAMPLERS; j++) {
882 pipe_sampler_view_reference(&dst->sampler_views[i][j],
883 src->sampler_views[i][j]);
884 if (src->sampler_states[i][j])
885 dst->sampler_states[i][j]->state.sampler =
886 src->sampler_states[i][j]->state.sampler;
887 else
888 dst->sampler_states[i][j] = NULL;
889 }
890
891 for (j = 0; j < PIPE_MAX_SHADER_IMAGES; j++) {
892 pipe_resource_reference(&dst->shader_images[i][j].resource,
893 src->shader_images[i][j].resource);
894 memcpy(&dst->shader_images[i][j], &src->shader_images[i][j],
895 sizeof(src->shader_images[i][j]));
896 }
897
898 for (j = 0; j < PIPE_MAX_SHADER_BUFFERS; j++) {
899 pipe_resource_reference(&dst->shader_buffers[i][j].buffer,
900 src->shader_buffers[i][j].buffer);
901 memcpy(&dst->shader_buffers[i][j], &src->shader_buffers[i][j],
902 sizeof(src->shader_buffers[i][j]));
903 }
904 }
905
906 if (src->velems)
907 dst->velems->state.velems = src->velems->state.velems;
908 else
909 dst->velems = NULL;
910
911 if (src->rs)
912 dst->rs->state.rs = src->rs->state.rs;
913 else
914 dst->rs = NULL;
915
916 if (src->dsa)
917 dst->dsa->state.dsa = src->dsa->state.dsa;
918 else
919 dst->dsa = NULL;
920
921 if (src->blend)
922 dst->blend->state.blend = src->blend->state.blend;
923 else
924 dst->blend = NULL;
925
926 dst->blend_color = src->blend_color;
927 dst->stencil_ref = src->stencil_ref;
928 dst->sample_mask = src->sample_mask;
929 dst->min_samples = src->min_samples;
930 dst->clip_state = src->clip_state;
931 util_copy_framebuffer_state(&dst->framebuffer_state, &src->framebuffer_state);
932 memcpy(dst->scissors, src->scissors, sizeof(src->scissors));
933 memcpy(dst->viewports, src->viewports, sizeof(src->viewports));
934 memcpy(dst->tess_default_levels, src->tess_default_levels,
935 sizeof(src->tess_default_levels));
936 dst->apitrace_call_number = src->apitrace_call_number;
937 }
938
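/* Release everything owned by a record: the log page, the call and state
 * copies, the fences, and the record itself.
 */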
939 static void
dd_free_record(struct pipe_screen *screen, struct dd_draw_record *record)
941 {
942 u_log_page_destroy(record->log_page);
943 dd_unreference_copy_of_call(&record->call);
944 dd_unreference_copy_of_draw_state(&record->draw_state);
945 screen->fence_reference(screen, &record->prev_bottom_of_pipe, NULL);
946 screen->fence_reference(screen, &record->top_of_pipe, NULL);
947 screen->fence_reference(screen, &record->bottom_of_pipe, NULL);
948 util_queue_fence_destroy(&record->driver_finished);
949 FREE(record);
950 }
951
952 static void
dd_write_record(FILE *f, struct dd_draw_record *record)
954 {
955 PRINT_NAMED(ptr, "pipe", record->dctx->pipe);
956 PRINT_NAMED(ns, "time before (API call)", record->time_before);
957 PRINT_NAMED(ns, "time after (driver done)", record->time_after);
958 fprintf(f, "\n");
959
960 dd_dump_call(f, &record->draw_state.base, &record->call);
961
962 if (record->log_page) {
963 fprintf(f,"\n\n**************************************************"
964 "***************************\n");
965 fprintf(f, "Context Log:\n\n");
966 u_log_page_print(record->log_page, f);
967 }
968 }
969
970 static void
dd_maybe_dump_record(struct dd_screen *dscreen, struct dd_draw_record *record)
972 {
973 if (dscreen->dump_mode == DD_DUMP_ONLY_HANGS ||
974 (dscreen->dump_mode == DD_DUMP_APITRACE_CALL &&
975 dscreen->apitrace_dump_call != record->draw_state.base.apitrace_call_number))
976 return;
977
978 char name[512];
979 dd_get_debug_filename_and_mkdir(name, sizeof(name), dscreen->verbose);
980 FILE *f = fopen(name, "w");
981 if (!f) {
982 fprintf(stderr, "dd: failed to open %s\n", name);
983 return;
984 }
985
986 dd_write_header(f, dscreen->screen, record->draw_state.base.apitrace_call_number);
987 dd_write_record(f, record);
988
989 fclose(f);
990 }
991
992 static const char *
dd_fence_state(struct pipe_screen *screen, struct pipe_fence_handle *fence,
994 bool *not_reached)
995 {
996 if (!fence)
997 return "---";
998
999 bool ok = screen->fence_finish(screen, NULL, fence, 0);
1000
1001 if (not_reached && !ok)
1002 *not_reached = true;
1003
1004 return ok ? "YES" : "NO ";
1005 }
1006
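/* Called when a draw times out: print a table of fence states for all
 * pending draws, dump each record and the driver state to files, then
 * abort the process.
 */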
1007 static void
dd_report_hang(struct dd_context *dctx)
1009 {
1010 struct dd_screen *dscreen = dd_screen(dctx->base.screen);
1011 struct pipe_screen *screen = dscreen->screen;
1012 bool encountered_hang = false;
1013 bool stop_output = false;
1014 unsigned num_later = 0;
1015
1016 fprintf(stderr, "GPU hang detected, collecting information...\n\n");
1017
1018 fprintf(stderr, "Draw # driver prev BOP TOP BOP dump file\n"
1019 "-------------------------------------------------------------\n");
1020
1021 list_for_each_entry(struct dd_draw_record, record, &dctx->records, list) {
1022 if (!encountered_hang &&
1023 screen->fence_finish(screen, NULL, record->bottom_of_pipe, 0)) {
1024 dd_maybe_dump_record(dscreen, record);
1025 continue;
1026 }
1027
1028 if (stop_output) {
1029 dd_maybe_dump_record(dscreen, record);
1030 num_later++;
1031 continue;
1032 }
1033
1034 bool driver = util_queue_fence_is_signalled(&record->driver_finished);
1035 bool top_not_reached = false;
1036 const char *prev_bop = dd_fence_state(screen, record->prev_bottom_of_pipe, NULL);
1037 const char *top = dd_fence_state(screen, record->top_of_pipe, &top_not_reached);
1038 const char *bop = dd_fence_state(screen, record->bottom_of_pipe, NULL);
1039
1040 fprintf(stderr, "%-9u %s %s %s %s ",
1041 record->draw_call, driver ? "YES" : "NO ", prev_bop, top, bop);
1042
1043 char name[512];
1044 dd_get_debug_filename_and_mkdir(name, sizeof(name), false);
1045
1046 FILE *f = fopen(name, "w");
1047 if (!f) {
1048 fprintf(stderr, "fopen failed\n");
1049 } else {
1050 fprintf(stderr, "%s\n", name);
1051
1052 dd_write_header(f, dscreen->screen, record->draw_state.base.apitrace_call_number);
1053 dd_write_record(f, record);
1054
1055 fclose(f);
1056 }
1057
1058 if (top_not_reached)
1059 stop_output = true;
1060 encountered_hang = true;
1061 }
1062
1063 if (num_later)
1064 fprintf(stderr, "... and %u additional draws.\n", num_later);
1065
1066 char name[512];
1067 dd_get_debug_filename_and_mkdir(name, sizeof(name), false);
1068 FILE *f = fopen(name, "w");
1069 if (!f) {
1070 fprintf(stderr, "fopen failed\n");
1071 } else {
1072 dd_write_header(f, dscreen->screen, 0);
1073 dd_dump_driver_state(dctx, f, PIPE_DUMP_DEVICE_STATUS_REGISTERS);
1074 dd_dump_dmesg(f);
1075 fclose(f);
1076 }
1077
1078 fprintf(stderr, "\nDone.\n");
1079 dd_kill_process();
1080 }
1081
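/* Worker thread: waits for submitted draws to finish, dumps and frees
 * completed records, and triggers the hang report if the newest draw does
 * not complete within the configured timeout.
 */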
1082 int
dd_thread_main(void *input)
1084 {
1085 struct dd_context *dctx = (struct dd_context *)input;
1086 struct dd_screen *dscreen = dd_screen(dctx->base.screen);
1087 struct pipe_screen *screen = dscreen->screen;
1088
1089 const char *process_name = util_get_process_name();
1090 if (process_name) {
1091 char threadname[16];
1092 snprintf(threadname, sizeof(threadname), "%.*s:ddbg",
1093 (int)MIN2(strlen(process_name), sizeof(threadname) - 6),
1094 process_name);
1095 u_thread_setname(threadname);
1096 }
1097
1098 mtx_lock(&dctx->mutex);
1099
1100 for (;;) {
1101 struct list_head records;
1102 list_replace(&dctx->records, &records);
1103 list_inithead(&dctx->records);
1104 dctx->num_records = 0;
1105
1106 if (dctx->api_stalled)
1107 cnd_signal(&dctx->cond);
1108
1109 if (list_is_empty(&records)) {
1110 if (dctx->kill_thread)
1111 break;
1112
1113 cnd_wait(&dctx->cond, &dctx->mutex);
1114 continue;
1115 }
1116
1117 mtx_unlock(&dctx->mutex);
1118
1119 /* Wait for the youngest draw. This means hangs can take a bit longer
1120 * to detect, but it's more efficient this way. */
1121 struct dd_draw_record *youngest =
1122 list_last_entry(&records, struct dd_draw_record, list);
1123
1124 if (dscreen->timeout_ms > 0) {
1125 uint64_t abs_timeout = os_time_get_absolute_timeout(
1126 (uint64_t)dscreen->timeout_ms * 1000*1000);
1127
1128 if (!util_queue_fence_wait_timeout(&youngest->driver_finished, abs_timeout) ||
1129 !screen->fence_finish(screen, NULL, youngest->bottom_of_pipe,
1130 (uint64_t)dscreen->timeout_ms * 1000*1000)) {
1131 mtx_lock(&dctx->mutex);
1132 list_splice(&records, &dctx->records);
1133 dd_report_hang(dctx);
1134 /* we won't actually get here */
1135 mtx_unlock(&dctx->mutex);
1136 }
1137 } else {
1138 util_queue_fence_wait(&youngest->driver_finished);
1139 }
1140
1141 list_for_each_entry_safe(struct dd_draw_record, record, &records, list) {
1142 dd_maybe_dump_record(dscreen, record);
1143 list_del(&record->list);
1144 dd_free_record(screen, record);
1145 }
1146
1147 mtx_lock(&dctx->mutex);
1148 }
1149 mtx_unlock(&dctx->mutex);
1150 return 0;
1151 }
1152
1153 static struct dd_draw_record *
dd_create_record(struct dd_context *dctx)
1155 {
1156 struct dd_draw_record *record;
1157
1158 record = MALLOC_STRUCT(dd_draw_record);
1159 if (!record)
1160 return NULL;
1161
1162 record->dctx = dctx;
1163 record->draw_call = dctx->num_draw_calls;
1164
1165 record->prev_bottom_of_pipe = NULL;
1166 record->top_of_pipe = NULL;
1167 record->bottom_of_pipe = NULL;
1168 record->log_page = NULL;
1169 util_queue_fence_init(&record->driver_finished);
1170 util_queue_fence_reset(&record->driver_finished);
1171
1172 dd_init_copy_of_draw_state(&record->draw_state);
1173 dd_copy_draw_state(&record->draw_state.base, &dctx->draw_state);
1174
1175 return record;
1176 }
1177
1178 static void
dd_add_record(struct dd_context *dctx, struct dd_draw_record *record)
1180 {
1181 mtx_lock(&dctx->mutex);
1182 if (unlikely(dctx->num_records > 10000)) {
1183 dctx->api_stalled = true;
1184 /* Since this is only a heuristic to prevent the API thread from getting
1185 * too far ahead, we don't need a loop here. */
1186 cnd_wait(&dctx->cond, &dctx->mutex);
1187 dctx->api_stalled = false;
1188 }
1189
1190 if (list_is_empty(&dctx->records))
1191 cnd_signal(&dctx->cond);
1192
1193 list_addtail(&record->list, &dctx->records);
1194 dctx->num_records++;
1195 mtx_unlock(&dctx->mutex);
1196 }
1197
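/* Emit the fences that bracket a draw call (previous bottom-of-pipe and
 * top-of-pipe) and queue the record for the worker thread.
 */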
1198 static void
dd_before_draw(struct dd_context *dctx, struct dd_draw_record *record)
1200 {
1201 struct dd_screen *dscreen = dd_screen(dctx->base.screen);
1202 struct pipe_context *pipe = dctx->pipe;
1203 struct pipe_screen *screen = dscreen->screen;
1204
1205 record->time_before = os_time_get_nano();
1206
1207 if (dscreen->timeout_ms > 0) {
1208 if (dscreen->flush_always && dctx->num_draw_calls >= dscreen->skip_count) {
1209 pipe->flush(pipe, &record->prev_bottom_of_pipe, 0);
1210 screen->fence_reference(screen, &record->top_of_pipe, record->prev_bottom_of_pipe);
1211 } else {
1212 pipe->flush(pipe, &record->prev_bottom_of_pipe,
1213 PIPE_FLUSH_DEFERRED | PIPE_FLUSH_BOTTOM_OF_PIPE);
1214 pipe->flush(pipe, &record->top_of_pipe,
1215 PIPE_FLUSH_DEFERRED | PIPE_FLUSH_TOP_OF_PIPE);
1216 }
1217 } else if (dscreen->flush_always && dctx->num_draw_calls >= dscreen->skip_count) {
1218 pipe->flush(pipe, NULL, 0);
1219 }
1220
1221 dd_add_record(dctx, record);
1222 }
1223
1224 static void
dd_after_draw_async(void *data)
1226 {
1227 struct dd_draw_record *record = (struct dd_draw_record *)data;
1228 struct dd_context *dctx = record->dctx;
1229 struct dd_screen *dscreen = dd_screen(dctx->base.screen);
1230
1231 record->log_page = u_log_new_page(&dctx->log);
1232 record->time_after = os_time_get_nano();
1233
1234 util_queue_fence_signal(&record->driver_finished);
1235
1236 if (dscreen->dump_mode == DD_DUMP_APITRACE_CALL &&
1237 dscreen->apitrace_dump_call > dctx->draw_state.apitrace_call_number) {
1238 dd_thread_join(dctx);
1239 /* No need to continue. */
1240 exit(0);
1241 }
1242 }
1243
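/* Emit the bottom-of-pipe fence for the draw and record the driver log and
 * completion time, either via the driver callback or synchronously.
 */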
1244 static void
dd_after_draw(struct dd_context *dctx, struct dd_draw_record *record)
1246 {
1247 struct dd_screen *dscreen = dd_screen(dctx->base.screen);
1248 struct pipe_context *pipe = dctx->pipe;
1249
1250 if (dscreen->timeout_ms > 0) {
1251 unsigned flush_flags;
1252 if (dscreen->flush_always && dctx->num_draw_calls >= dscreen->skip_count)
1253 flush_flags = 0;
1254 else
1255 flush_flags = PIPE_FLUSH_DEFERRED | PIPE_FLUSH_BOTTOM_OF_PIPE;
1256 pipe->flush(pipe, &record->bottom_of_pipe, flush_flags);
1257 }
1258
1259 if (pipe->callback) {
1260 pipe->callback(pipe, dd_after_draw_async, record, true);
1261 } else {
1262 dd_after_draw_async(record);
1263 }
1264
1265 ++dctx->num_draw_calls;
1266 if (dscreen->skip_count && dctx->num_draw_calls % 10000 == 0)
1267 fprintf(stderr, "Gallium debugger reached %u draw calls.\n",
1268 dctx->num_draw_calls);
1269 }
1270
1271 static void
dd_context_flush(struct pipe_context *_pipe,
1273 struct pipe_fence_handle **fence, unsigned flags)
1274 {
1275 struct dd_context *dctx = dd_context(_pipe);
1276 struct pipe_context *pipe = dctx->pipe;
1277 struct pipe_screen *screen = pipe->screen;
1278 struct dd_draw_record *record = dd_create_record(dctx);
1279
1280 record->call.type = CALL_FLUSH;
1281 record->call.info.flush.flags = flags;
1282
1283 record->time_before = os_time_get_nano();
1284
1285 dd_add_record(dctx, record);
1286
1287 pipe->flush(pipe, &record->bottom_of_pipe, flags);
1288 if (fence)
1289 screen->fence_reference(screen, fence, record->bottom_of_pipe);
1290
1291 if (pipe->callback) {
1292 pipe->callback(pipe, dd_after_draw_async, record, true);
1293 } else {
1294 dd_after_draw_async(record);
1295 }
1296 }
1297
1298 static void
dd_context_draw_vbo(struct pipe_context *_pipe,
1300 const struct pipe_draw_info *info)
1301 {
1302 struct dd_context *dctx = dd_context(_pipe);
1303 struct pipe_context *pipe = dctx->pipe;
1304 struct dd_draw_record *record = dd_create_record(dctx);
1305
1306 record->call.type = CALL_DRAW_VBO;
1307 record->call.info.draw_vbo.draw = *info;
1308 record->call.info.draw_vbo.draw.count_from_stream_output = NULL;
1309 pipe_so_target_reference(&record->call.info.draw_vbo.draw.count_from_stream_output,
1310 info->count_from_stream_output);
1311 if (info->index_size && !info->has_user_indices) {
1312 record->call.info.draw_vbo.draw.index.resource = NULL;
1313 pipe_resource_reference(&record->call.info.draw_vbo.draw.index.resource,
1314 info->index.resource);
1315 }
1316
1317 if (info->indirect) {
1318 record->call.info.draw_vbo.indirect = *info->indirect;
1319 record->call.info.draw_vbo.draw.indirect = &record->call.info.draw_vbo.indirect;
1320
1321 record->call.info.draw_vbo.indirect.buffer = NULL;
1322 pipe_resource_reference(&record->call.info.draw_vbo.indirect.buffer,
1323 info->indirect->buffer);
1324 record->call.info.draw_vbo.indirect.indirect_draw_count = NULL;
1325 pipe_resource_reference(&record->call.info.draw_vbo.indirect.indirect_draw_count,
1326 info->indirect->indirect_draw_count);
1327 } else {
1328 memset(&record->call.info.draw_vbo.indirect, 0, sizeof(*info->indirect));
1329 }
1330
1331 dd_before_draw(dctx, record);
1332 pipe->draw_vbo(pipe, info);
1333 dd_after_draw(dctx, record);
1334 }
1335
1336 static void
dd_context_launch_grid(struct pipe_context *_pipe,
1338 const struct pipe_grid_info *info)
1339 {
1340 struct dd_context *dctx = dd_context(_pipe);
1341 struct pipe_context *pipe = dctx->pipe;
1342 struct dd_draw_record *record = dd_create_record(dctx);
1343
1344 record->call.type = CALL_LAUNCH_GRID;
1345 record->call.info.launch_grid = *info;
1346 record->call.info.launch_grid.indirect = NULL;
1347 pipe_resource_reference(&record->call.info.launch_grid.indirect, info->indirect);
1348
1349 dd_before_draw(dctx, record);
1350 pipe->launch_grid(pipe, info);
1351 dd_after_draw(dctx, record);
1352 }
1353
1354 static void
dd_context_resource_copy_region(struct pipe_context *_pipe,
1356 struct pipe_resource *dst, unsigned dst_level,
1357 unsigned dstx, unsigned dsty, unsigned dstz,
1358 struct pipe_resource *src, unsigned src_level,
1359 const struct pipe_box *src_box)
1360 {
1361 struct dd_context *dctx = dd_context(_pipe);
1362 struct pipe_context *pipe = dctx->pipe;
1363 struct dd_draw_record *record = dd_create_record(dctx);
1364
1365 record->call.type = CALL_RESOURCE_COPY_REGION;
1366 record->call.info.resource_copy_region.dst = NULL;
1367 pipe_resource_reference(&record->call.info.resource_copy_region.dst, dst);
1368 record->call.info.resource_copy_region.dst_level = dst_level;
1369 record->call.info.resource_copy_region.dstx = dstx;
1370 record->call.info.resource_copy_region.dsty = dsty;
1371 record->call.info.resource_copy_region.dstz = dstz;
1372 record->call.info.resource_copy_region.src = NULL;
1373 pipe_resource_reference(&record->call.info.resource_copy_region.src, src);
1374 record->call.info.resource_copy_region.src_level = src_level;
1375 record->call.info.resource_copy_region.src_box = *src_box;
1376
1377 dd_before_draw(dctx, record);
1378 pipe->resource_copy_region(pipe,
1379 dst, dst_level, dstx, dsty, dstz,
1380 src, src_level, src_box);
1381 dd_after_draw(dctx, record);
1382 }
1383
1384 static void
dd_context_blit(struct pipe_context *_pipe, const struct pipe_blit_info *info)
1386 {
1387 struct dd_context *dctx = dd_context(_pipe);
1388 struct pipe_context *pipe = dctx->pipe;
1389 struct dd_draw_record *record = dd_create_record(dctx);
1390
1391 record->call.type = CALL_BLIT;
1392 record->call.info.blit = *info;
1393 record->call.info.blit.dst.resource = NULL;
1394 pipe_resource_reference(&record->call.info.blit.dst.resource, info->dst.resource);
1395 record->call.info.blit.src.resource = NULL;
1396 pipe_resource_reference(&record->call.info.blit.src.resource, info->src.resource);
1397
1398 dd_before_draw(dctx, record);
1399 pipe->blit(pipe, info);
1400 dd_after_draw(dctx, record);
1401 }
1402
1403 static bool
dd_context_generate_mipmap(struct pipe_context *_pipe,
1405 struct pipe_resource *res,
1406 enum pipe_format format,
1407 unsigned base_level,
1408 unsigned last_level,
1409 unsigned first_layer,
1410 unsigned last_layer)
1411 {
1412 struct dd_context *dctx = dd_context(_pipe);
1413 struct pipe_context *pipe = dctx->pipe;
1414 struct dd_draw_record *record = dd_create_record(dctx);
1415 bool result;
1416
1417 record->call.type = CALL_GENERATE_MIPMAP;
1418 record->call.info.generate_mipmap.res = NULL;
1419 pipe_resource_reference(&record->call.info.generate_mipmap.res, res);
1420 record->call.info.generate_mipmap.format = format;
1421 record->call.info.generate_mipmap.base_level = base_level;
1422 record->call.info.generate_mipmap.last_level = last_level;
1423 record->call.info.generate_mipmap.first_layer = first_layer;
1424 record->call.info.generate_mipmap.last_layer = last_layer;
1425
1426 dd_before_draw(dctx, record);
1427 result = pipe->generate_mipmap(pipe, res, format, base_level, last_level,
1428 first_layer, last_layer);
1429 dd_after_draw(dctx, record);
1430 return result;
1431 }
1432
1433 static void
dd_context_get_query_result_resource(struct pipe_context *_pipe,
1435 struct pipe_query *query,
1436 bool wait,
1437 enum pipe_query_value_type result_type,
1438 int index,
1439 struct pipe_resource *resource,
1440 unsigned offset)
1441 {
1442 struct dd_context *dctx = dd_context(_pipe);
1443 struct dd_query *dquery = dd_query(query);
1444 struct pipe_context *pipe = dctx->pipe;
1445 struct dd_draw_record *record = dd_create_record(dctx);
1446
1447 record->call.type = CALL_GET_QUERY_RESULT_RESOURCE;
1448 record->call.info.get_query_result_resource.query = query;
1449 record->call.info.get_query_result_resource.wait = wait;
1450 record->call.info.get_query_result_resource.result_type = result_type;
1451 record->call.info.get_query_result_resource.index = index;
1452 record->call.info.get_query_result_resource.resource = NULL;
1453 pipe_resource_reference(&record->call.info.get_query_result_resource.resource,
1454 resource);
1455 record->call.info.get_query_result_resource.offset = offset;
1456
1457 /* The query may be deleted by the time we need to print it. */
1458 record->call.info.get_query_result_resource.query_type = dquery->type;
1459
1460 dd_before_draw(dctx, record);
1461 pipe->get_query_result_resource(pipe, dquery->query, wait,
1462 result_type, index, resource, offset);
1463 dd_after_draw(dctx, record);
1464 }
1465
1466 static void
dd_context_flush_resource(struct pipe_context *_pipe,
1468 struct pipe_resource *resource)
1469 {
1470 struct dd_context *dctx = dd_context(_pipe);
1471 struct pipe_context *pipe = dctx->pipe;
1472 struct dd_draw_record *record = dd_create_record(dctx);
1473
1474 record->call.type = CALL_FLUSH_RESOURCE;
1475 record->call.info.flush_resource = NULL;
1476 pipe_resource_reference(&record->call.info.flush_resource, resource);
1477
1478 dd_before_draw(dctx, record);
1479 pipe->flush_resource(pipe, resource);
1480 dd_after_draw(dctx, record);
1481 }
1482
1483 static void
dd_context_clear(struct pipe_context *_pipe, unsigned buffers, const struct pipe_scissor_state *scissor_state,
1485 const union pipe_color_union *color, double depth,
1486 unsigned stencil)
1487 {
1488 struct dd_context *dctx = dd_context(_pipe);
1489 struct pipe_context *pipe = dctx->pipe;
1490 struct dd_draw_record *record = dd_create_record(dctx);
1491
1492 record->call.type = CALL_CLEAR;
1493 record->call.info.clear.buffers = buffers;
1494 if (scissor_state)
1495 record->call.info.clear.scissor_state = *scissor_state;
1496 record->call.info.clear.color = *color;
1497 record->call.info.clear.depth = depth;
1498 record->call.info.clear.stencil = stencil;
1499
1500 dd_before_draw(dctx, record);
1501 pipe->clear(pipe, buffers, scissor_state, color, depth, stencil);
1502 dd_after_draw(dctx, record);
1503 }
1504
1505 static void
dd_context_clear_render_target(struct pipe_context *_pipe,
1507 struct pipe_surface *dst,
1508 const union pipe_color_union *color,
1509 unsigned dstx, unsigned dsty,
1510 unsigned width, unsigned height,
1511 bool render_condition_enabled)
1512 {
1513 struct dd_context *dctx = dd_context(_pipe);
1514 struct pipe_context *pipe = dctx->pipe;
1515 struct dd_draw_record *record = dd_create_record(dctx);
1516
1517 record->call.type = CALL_CLEAR_RENDER_TARGET;
1518
1519 dd_before_draw(dctx, record);
1520 pipe->clear_render_target(pipe, dst, color, dstx, dsty, width, height,
1521 render_condition_enabled);
1522 dd_after_draw(dctx, record);
1523 }
1524
1525 static void
dd_context_clear_depth_stencil(struct pipe_context *_pipe,
1527 struct pipe_surface *dst, unsigned clear_flags,
1528 double depth, unsigned stencil, unsigned dstx,
1529 unsigned dsty, unsigned width, unsigned height,
1530 bool render_condition_enabled)
1531 {
1532 struct dd_context *dctx = dd_context(_pipe);
1533 struct pipe_context *pipe = dctx->pipe;
1534 struct dd_draw_record *record = dd_create_record(dctx);
1535
1536 record->call.type = CALL_CLEAR_DEPTH_STENCIL;
1537
1538 dd_before_draw(dctx, record);
1539 pipe->clear_depth_stencil(pipe, dst, clear_flags, depth, stencil,
1540 dstx, dsty, width, height,
1541 render_condition_enabled);
1542 dd_after_draw(dctx, record);
1543 }
1544
1545 static void
dd_context_clear_buffer(struct pipe_context *_pipe, struct pipe_resource *res,
1547 unsigned offset, unsigned size,
1548 const void *clear_value, int clear_value_size)
1549 {
1550 struct dd_context *dctx = dd_context(_pipe);
1551 struct pipe_context *pipe = dctx->pipe;
1552 struct dd_draw_record *record = dd_create_record(dctx);
1553
1554 record->call.type = CALL_CLEAR_BUFFER;
1555 record->call.info.clear_buffer.res = NULL;
1556 pipe_resource_reference(&record->call.info.clear_buffer.res, res);
1557 record->call.info.clear_buffer.offset = offset;
1558 record->call.info.clear_buffer.size = size;
1559 record->call.info.clear_buffer.clear_value = clear_value;
1560 record->call.info.clear_buffer.clear_value_size = clear_value_size;
1561
1562 dd_before_draw(dctx, record);
1563 pipe->clear_buffer(pipe, res, offset, size, clear_value, clear_value_size);
1564 dd_after_draw(dctx, record);
1565 }
1566
1567 static void
dd_context_clear_texture(struct pipe_context *_pipe,
1569 struct pipe_resource *res,
1570 unsigned level,
1571 const struct pipe_box *box,
1572 const void *data)
1573 {
1574 struct dd_context *dctx = dd_context(_pipe);
1575 struct pipe_context *pipe = dctx->pipe;
1576 struct dd_draw_record *record = dd_create_record(dctx);
1577
1578 record->call.type = CALL_CLEAR_TEXTURE;
1579
1580 dd_before_draw(dctx, record);
1581 pipe->clear_texture(pipe, res, level, box, data);
1582 dd_after_draw(dctx, record);
1583 }
1584
1585 /********************************************************************
1586 * transfer
1587 */
1588
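/* Transfer and subdata calls are recorded only when the "transfers" option
 * is enabled on the screen (dd_screen::transfers).
 */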
1589 static void *
dd_context_transfer_map(struct pipe_context *_pipe,
1591 struct pipe_resource *resource, unsigned level,
1592 unsigned usage, const struct pipe_box *box,
1593 struct pipe_transfer **transfer)
1594 {
1595 struct dd_context *dctx = dd_context(_pipe);
1596 struct pipe_context *pipe = dctx->pipe;
1597 struct dd_draw_record *record =
1598 dd_screen(dctx->base.screen)->transfers ? dd_create_record(dctx) : NULL;
1599
1600 if (record) {
1601 record->call.type = CALL_TRANSFER_MAP;
1602
1603 dd_before_draw(dctx, record);
1604 }
1605 void *ptr = pipe->transfer_map(pipe, resource, level, usage, box, transfer);
1606 if (record) {
1607 record->call.info.transfer_map.transfer_ptr = *transfer;
1608 record->call.info.transfer_map.ptr = ptr;
1609 if (*transfer) {
1610 record->call.info.transfer_map.transfer = **transfer;
1611 record->call.info.transfer_map.transfer.resource = NULL;
1612 pipe_resource_reference(&record->call.info.transfer_map.transfer.resource,
1613 (*transfer)->resource);
1614 } else {
1615 memset(&record->call.info.transfer_map.transfer, 0, sizeof(struct pipe_transfer));
1616 }
1617
1618 dd_after_draw(dctx, record);
1619 }
1620 return ptr;
1621 }
1622
1623 static void
dd_context_transfer_flush_region(struct pipe_context *_pipe,
1625 struct pipe_transfer *transfer,
1626 const struct pipe_box *box)
1627 {
1628 struct dd_context *dctx = dd_context(_pipe);
1629 struct pipe_context *pipe = dctx->pipe;
1630 struct dd_draw_record *record =
1631 dd_screen(dctx->base.screen)->transfers ? dd_create_record(dctx) : NULL;
1632
1633 if (record) {
1634 record->call.type = CALL_TRANSFER_FLUSH_REGION;
1635 record->call.info.transfer_flush_region.transfer_ptr = transfer;
1636 record->call.info.transfer_flush_region.box = *box;
1637 record->call.info.transfer_flush_region.transfer = *transfer;
1638 record->call.info.transfer_flush_region.transfer.resource = NULL;
1639 pipe_resource_reference(
1640 &record->call.info.transfer_flush_region.transfer.resource,
1641 transfer->resource);
1642
1643 dd_before_draw(dctx, record);
1644 }
1645 pipe->transfer_flush_region(pipe, transfer, box);
1646 if (record)
1647 dd_after_draw(dctx, record);
1648 }
1649
1650 static void
dd_context_transfer_unmap(struct pipe_context *_pipe,
1652 struct pipe_transfer *transfer)
1653 {
1654 struct dd_context *dctx = dd_context(_pipe);
1655 struct pipe_context *pipe = dctx->pipe;
1656 struct dd_draw_record *record =
1657 dd_screen(dctx->base.screen)->transfers ? dd_create_record(dctx) : NULL;
1658
1659 if (record) {
1660 record->call.type = CALL_TRANSFER_UNMAP;
1661 record->call.info.transfer_unmap.transfer_ptr = transfer;
1662 record->call.info.transfer_unmap.transfer = *transfer;
1663 record->call.info.transfer_unmap.transfer.resource = NULL;
1664 pipe_resource_reference(
1665 &record->call.info.transfer_unmap.transfer.resource,
1666 transfer->resource);
1667
1668 dd_before_draw(dctx, record);
1669 }
1670 pipe->transfer_unmap(pipe, transfer);
1671 if (record)
1672 dd_after_draw(dctx, record);
1673 }
1674
1675 static void
dd_context_buffer_subdata(struct pipe_context *_pipe,
1677 struct pipe_resource *resource,
1678 unsigned usage, unsigned offset,
1679 unsigned size, const void *data)
1680 {
1681 struct dd_context *dctx = dd_context(_pipe);
1682 struct pipe_context *pipe = dctx->pipe;
1683 struct dd_draw_record *record =
1684 dd_screen(dctx->base.screen)->transfers ? dd_create_record(dctx) : NULL;
1685
1686 if (record) {
1687 record->call.type = CALL_BUFFER_SUBDATA;
1688 record->call.info.buffer_subdata.resource = NULL;
1689 pipe_resource_reference(&record->call.info.buffer_subdata.resource, resource);
1690 record->call.info.buffer_subdata.usage = usage;
1691 record->call.info.buffer_subdata.offset = offset;
1692 record->call.info.buffer_subdata.size = size;
1693 record->call.info.buffer_subdata.data = data;
1694
1695 dd_before_draw(dctx, record);
1696 }
1697 pipe->buffer_subdata(pipe, resource, usage, offset, size, data);
1698 if (record)
1699 dd_after_draw(dctx, record);
1700 }
1701
1702 static void
dd_context_texture_subdata(struct pipe_context *_pipe,
1704 struct pipe_resource *resource,
1705 unsigned level, unsigned usage,
1706 const struct pipe_box *box,
1707 const void *data, unsigned stride,
1708 unsigned layer_stride)
1709 {
1710 struct dd_context *dctx = dd_context(_pipe);
1711 struct pipe_context *pipe = dctx->pipe;
1712 struct dd_draw_record *record =
1713 dd_screen(dctx->base.screen)->transfers ? dd_create_record(dctx) : NULL;
1714
1715 if (record) {
1716 record->call.type = CALL_TEXTURE_SUBDATA;
1717 record->call.info.texture_subdata.resource = NULL;
1718 pipe_resource_reference(&record->call.info.texture_subdata.resource, resource);
1719 record->call.info.texture_subdata.level = level;
1720 record->call.info.texture_subdata.usage = usage;
1721 record->call.info.texture_subdata.box = *box;
1722 record->call.info.texture_subdata.data = data;
1723 record->call.info.texture_subdata.stride = stride;
1724 record->call.info.texture_subdata.layer_stride = layer_stride;
1725
1726 dd_before_draw(dctx, record);
1727 }
1728 pipe->texture_subdata(pipe, resource, level, usage, box, data,
1729 stride, layer_stride);
1730 if (record)
1731 dd_after_draw(dctx, record);
1732 }
1733
1734 void
dd_init_draw_functions(struct dd_context *dctx)
1736 {
1737 CTX_INIT(flush);
1738 CTX_INIT(draw_vbo);
1739 CTX_INIT(launch_grid);
1740 CTX_INIT(resource_copy_region);
1741 CTX_INIT(blit);
1742 CTX_INIT(clear);
1743 CTX_INIT(clear_render_target);
1744 CTX_INIT(clear_depth_stencil);
1745 CTX_INIT(clear_buffer);
1746 CTX_INIT(clear_texture);
1747 CTX_INIT(flush_resource);
1748 CTX_INIT(generate_mipmap);
1749 CTX_INIT(get_query_result_resource);
1750 CTX_INIT(transfer_map);
1751 CTX_INIT(transfer_flush_region);
1752 CTX_INIT(transfer_unmap);
1753 CTX_INIT(buffer_subdata);
1754 CTX_INIT(texture_subdata);
1755 }
1756