/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"

#include "util/list.h"
#include "util/ralloc.h"

/* This file contains utility functions to help with debugging. They can be
 * called from GDB or similar to help inspect images and buffers.
 *
 * To dump the framebuffers of an application after each render pass, all you
 * have to do is the following:
 *
 *  1) Start the application in GDB
 *  2) Run until you get to the point where the rendering errors occur
 *  3) Pause in GDB and set a breakpoint in anv_QueuePresentKHR
 *  4) Continue until it reaches anv_QueuePresentKHR
 *  5) Call anv_dump_start(queue->device, ANV_DUMP_FRAMEBUFFERS_BIT)
 *  6) Continue until the next anv_QueuePresentKHR call
 *  7) Call anv_dump_finish() to complete the dump and write files
 *
 * While it's a bit manual, the process does allow you to do some very
 * valuable debugging by dumping every render target at the end of every
 * render pass. It's worth noting that this assumes that the application
 * creates all of the command buffers more-or-less in order and between the
 * two anv_QueuePresentKHR calls.
 */
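
/* Purely as an illustration of the steps above, a GDB session might look
 * roughly like the following. The exact expression for the queue (here
 * assumed to be a local called "queue" in anv_QueuePresentKHR) depends on
 * the build and on where execution is stopped:
 *
 *   (gdb) break anv_QueuePresentKHR
 *   (gdb) continue
 *   (gdb) call anv_dump_start(queue->device, ANV_DUMP_FRAMEBUFFERS_BIT)
 *   (gdb) continue
 *   (gdb) call anv_dump_finish()
 */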

struct dump_image {
   struct list_head link;

   const char *filename;

   VkExtent2D extent;
   VkImage image;
   VkDeviceMemory memory;
};

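/* Creates a linear VK_FORMAT_R8G8B8A8_UNORM image of the requested size,
 * allocates memory for it, and binds the two together. The result is used as
 * a CPU-readable destination for blits of the image being dumped.
 */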
static void
dump_image_init(struct anv_device *device, struct dump_image *image,
                uint32_t width, uint32_t height, const char *filename)
{
   VkDevice vk_device = anv_device_to_handle(device);
   ASSERTED VkResult result;

   image->filename = filename;
   image->extent = (VkExtent2D) { width, height };

   result = anv_CreateImage(vk_device,
      &(VkImageCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
         .imageType = VK_IMAGE_TYPE_2D,
         .format = VK_FORMAT_R8G8B8A8_UNORM,
         .extent = (VkExtent3D) { width, height, 1 },
         .mipLevels = 1,
         .arrayLayers = 1,
         .samples = 1,
         .tiling = VK_IMAGE_TILING_LINEAR,
         .usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT,
         .flags = 0,
      }, NULL, &image->image);
   assert(result == VK_SUCCESS);

   VkMemoryRequirements reqs;
   anv_GetImageMemoryRequirements(vk_device, image->image, &reqs);

   result = anv_AllocateMemory(vk_device,
      &(VkMemoryAllocateInfo) {
         .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
         .allocationSize = reqs.size,
         .memoryTypeIndex = 0,
      }, NULL, &image->memory);
   assert(result == VK_SUCCESS);

   result = anv_BindImageMemory(vk_device, image->image, image->memory, 0);
   assert(result == VK_SUCCESS);
}

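/* Destroys the temporary dump image and frees its backing memory. */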
static void
dump_image_finish(struct anv_device *device, struct dump_image *image)
{
   VkDevice vk_device = anv_device_to_handle(device);

   anv_DestroyImage(vk_device, image->image, NULL);
   anv_FreeMemory(vk_device, image->memory, NULL);
}

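/* Records, into the given command buffer, a blit of one miplevel/layer of the
 * source image into the linear dump image: a barrier to make the source
 * readable by the transfer stage, the blit itself, and a final barrier to make
 * the result visible to host reads.
 */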
static void
dump_image_do_blit(struct anv_device *device, struct dump_image *image,
                   struct anv_cmd_buffer *cmd_buffer, struct anv_image *src,
                   VkImageAspectFlagBits aspect,
                   unsigned miplevel, unsigned array_layer)
{
   PFN_vkCmdPipelineBarrier CmdPipelineBarrier =
      (void *)anv_GetDeviceProcAddr(anv_device_to_handle(device),
                                    "vkCmdPipelineBarrier");

   CmdPipelineBarrier(anv_cmd_buffer_to_handle(cmd_buffer),
      VK_PIPELINE_STAGE_TRANSFER_BIT,
      VK_PIPELINE_STAGE_TRANSFER_BIT,
      0, 0, NULL, 0, NULL, 1,
      &(VkImageMemoryBarrier) {
         .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
         .srcAccessMask = ~0,
         .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
         .oldLayout = VK_IMAGE_LAYOUT_GENERAL,
         .newLayout = VK_IMAGE_LAYOUT_GENERAL,
         .srcQueueFamilyIndex = 0,
         .dstQueueFamilyIndex = 0,
         .image = anv_image_to_handle(src),
         .subresourceRange = (VkImageSubresourceRange) {
            .aspectMask = aspect,
            .baseMipLevel = miplevel,
            .levelCount = 1,
            .baseArrayLayer = array_layer,
            .layerCount = 1,
         },
      });

   /* We need to do a blit, so the image needs to be declared as sampled. The
    * only thing the usage flags are used for here is making sure we create
    * the correct views, so it should be fine to just stomp them and set them
    * back afterwards.
    */
   VkImageUsageFlags old_usage = src->usage;
   src->usage |= VK_IMAGE_USAGE_SAMPLED_BIT;

   anv_CmdBlitImage(anv_cmd_buffer_to_handle(cmd_buffer),
      anv_image_to_handle(src), VK_IMAGE_LAYOUT_GENERAL,
      image->image, VK_IMAGE_LAYOUT_GENERAL, 1,
      &(VkImageBlit) {
         .srcSubresource = {
            .aspectMask = aspect,
            .mipLevel = miplevel,
            .baseArrayLayer = array_layer,
            .layerCount = 1,
         },
         .srcOffsets = {
            { 0, 0, 0 },
            { image->extent.width, image->extent.height, 1 },
         },
         .dstSubresource = {
            .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
            .mipLevel = 0,
            .baseArrayLayer = 0,
            .layerCount = 1,
         },
         .dstOffsets = {
            { 0, 0, 0 },
            { image->extent.width, image->extent.height, 1 },
         },
      }, VK_FILTER_NEAREST);

   src->usage = old_usage;

   CmdPipelineBarrier(anv_cmd_buffer_to_handle(cmd_buffer),
      VK_PIPELINE_STAGE_TRANSFER_BIT,
      VK_PIPELINE_STAGE_TRANSFER_BIT,
      0, 0, NULL, 0, NULL, 1,
      &(VkImageMemoryBarrier) {
         .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
         .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
         .dstAccessMask = VK_ACCESS_HOST_READ_BIT,
         .oldLayout = VK_IMAGE_LAYOUT_GENERAL,
         .newLayout = VK_IMAGE_LAYOUT_GENERAL,
         .srcQueueFamilyIndex = 0,
         .dstQueueFamilyIndex = 0,
         .image = image->image,
         .subresourceRange = (VkImageSubresourceRange) {
            .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
            .baseMipLevel = 0,
            .levelCount = 1,
            .baseArrayLayer = 0,
            .layerCount = 1,
         },
      });
}

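/* Maps the dump image's memory and writes its contents out as a binary PPM
 * (P6) file, dropping the alpha channel and honoring the row pitch reported
 * by the driver.
 */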
static void
dump_image_write_to_ppm(struct anv_device *device, struct dump_image *image)
{
   VkDevice vk_device = anv_device_to_handle(device);
   ASSERTED VkResult result;

   VkMemoryRequirements reqs;
   anv_GetImageMemoryRequirements(vk_device, image->image, &reqs);

   uint8_t *map;
   result = anv_MapMemory(vk_device, image->memory, 0, reqs.size, 0, (void **)&map);
   assert(result == VK_SUCCESS);

   VkSubresourceLayout layout;
   anv_GetImageSubresourceLayout(vk_device, image->image,
      &(VkImageSubresource) {
         .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
         .mipLevel = 0,
         .arrayLayer = 0,
      }, &layout);

   map += layout.offset;

   FILE *file = fopen(image->filename, "wb");
   assert(file);

   uint8_t *row = malloc(image->extent.width * 3);
   assert(row);

   fprintf(file, "P6\n%d %d\n255\n", image->extent.width, image->extent.height);
   for (unsigned y = 0; y < image->extent.height; y++) {
      for (unsigned x = 0; x < image->extent.width; x++) {
         row[x * 3 + 0] = map[x * 4 + 0];
         row[x * 3 + 1] = map[x * 4 + 1];
         row[x * 3 + 2] = map[x * 4 + 2];
      }
      fwrite(row, 3, image->extent.width, file);

      map += layout.rowPitch;
   }
   free(row);
   fclose(file);

   anv_UnmapMemory(vk_device, image->memory);
}

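/* Synchronously dumps a single miplevel/layer of an image to a PPM file:
 * builds a one-off command buffer that blits the subresource into a linear
 * staging image, submits it, waits on a fence, and then writes the result to
 * disk.
 */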
void
anv_dump_image_to_ppm(struct anv_device *device,
                      struct anv_image *image, unsigned miplevel,
                      unsigned array_layer, VkImageAspectFlagBits aspect,
                      const char *filename)
{
   VkDevice vk_device = anv_device_to_handle(device);
   ASSERTED VkResult result;

   PFN_vkBeginCommandBuffer BeginCommandBuffer =
      (void *)anv_GetDeviceProcAddr(anv_device_to_handle(device),
                                    "vkBeginCommandBuffer");
   PFN_vkEndCommandBuffer EndCommandBuffer =
      (void *)anv_GetDeviceProcAddr(anv_device_to_handle(device),
                                    "vkEndCommandBuffer");

   const uint32_t width = anv_minify(image->extent.width, miplevel);
   const uint32_t height = anv_minify(image->extent.height, miplevel);

   struct dump_image dump;
   dump_image_init(device, &dump, width, height, filename);

   VkCommandPool commandPool;
   result = anv_CreateCommandPool(vk_device,
      &(VkCommandPoolCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
         .queueFamilyIndex = 0,
         .flags = 0,
      }, NULL, &commandPool);
   assert(result == VK_SUCCESS);

   VkCommandBuffer cmd;
   result = anv_AllocateCommandBuffers(vk_device,
      &(VkCommandBufferAllocateInfo) {
         .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
         .commandPool = commandPool,
         .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
         .commandBufferCount = 1,
      }, &cmd);
   assert(result == VK_SUCCESS);

   result = BeginCommandBuffer(cmd,
      &(VkCommandBufferBeginInfo) {
         .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
         .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
      });
   assert(result == VK_SUCCESS);

   dump_image_do_blit(device, &dump, anv_cmd_buffer_from_handle(cmd), image,
                      aspect, miplevel, array_layer);

   result = EndCommandBuffer(cmd);
   assert(result == VK_SUCCESS);

   VkFence fence;
   result = anv_CreateFence(vk_device,
      &(VkFenceCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
         .flags = 0,
      }, NULL, &fence);
   assert(result == VK_SUCCESS);

   result = anv_QueueSubmit(anv_queue_to_handle(&device->queue), 1,
      &(VkSubmitInfo) {
         .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
         .commandBufferCount = 1,
         .pCommandBuffers = &cmd,
      }, fence);
   assert(result == VK_SUCCESS);

   result = anv_WaitForFences(vk_device, 1, &fence, true, UINT64_MAX);
   assert(result == VK_SUCCESS);

   anv_DestroyFence(vk_device, fence, NULL);
   anv_DestroyCommandPool(vk_device, commandPool, NULL);

   dump_image_write_to_ppm(device, &dump);
   dump_image_finish(device, &dump);
}

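/* Global state for deferred framebuffer dumping: anv_dump_start() arms it,
 * anv_dump_add_attachments() queues images onto dump_list as render passes
 * end, and anv_dump_finish() writes everything out. Access is guarded by
 * dump_mutex.
 */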
static pthread_mutex_t dump_mutex = PTHREAD_MUTEX_INITIALIZER;

static enum anv_dump_action dump_actions = 0;

/* Used to prevent recursive dumping */
static enum anv_dump_action dump_old_actions;

struct list_head dump_list;
static void *dump_ctx;
static struct anv_device *dump_device;
static unsigned dump_count;

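/* Arms dumping for the given device: the requested actions are performed on
 * subsequently recorded command buffers until anv_dump_finish() is called.
 */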
void
anv_dump_start(struct anv_device *device, enum anv_dump_action actions)
{
   pthread_mutex_lock(&dump_mutex);

   dump_device = device;
   dump_actions = actions;
   list_inithead(&dump_list);
   dump_ctx = ralloc_context(NULL);
   dump_count = 0;

   pthread_mutex_unlock(&dump_mutex);
}

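/* Waits for the device to go idle, writes every queued image out as a PPM
 * file, and resets the global dump state.
 */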
void
anv_dump_finish()
{
   anv_DeviceWaitIdle(anv_device_to_handle(dump_device));

   pthread_mutex_lock(&dump_mutex);

   list_for_each_entry(struct dump_image, dump, &dump_list, link) {
      dump_image_write_to_ppm(dump_device, dump);
      dump_image_finish(dump_device, dump);
   }

   dump_actions = 0;
   dump_device = NULL;
   list_inithead(&dump_list);

   ralloc_free(dump_ctx);
   dump_ctx = NULL;

   pthread_mutex_unlock(&dump_mutex);
}

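/* Returns false if the requested dump action is not enabled. Otherwise takes
 * dump_mutex and temporarily clears dump_actions so the work recorded while
 * dumping does not itself trigger further dumps.
 */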
static bool
dump_lock(enum anv_dump_action action)
{
   if (likely((dump_actions & action) == 0))
      return false;

   pthread_mutex_lock(&dump_mutex);

   /* Prevent recursive dumping */
   dump_old_actions = dump_actions;
   dump_actions = 0;

   return true;
}

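/* Restores dump_actions and drops the mutex taken by dump_lock(). */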
static void
dump_unlock()
{
   dump_actions = dump_old_actions;
   pthread_mutex_unlock(&dump_mutex);
}

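/* Creates a staging image for one subresource of the given image, records the
 * blit into the command buffer currently being recorded, and queues the
 * staging image on dump_list so anv_dump_finish() can write it out later.
 */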
static void
dump_add_image(struct anv_cmd_buffer *cmd_buffer, struct anv_image *image,
               VkImageAspectFlagBits aspect,
               unsigned miplevel, unsigned array_layer, const char *filename)
{
   const uint32_t width = anv_minify(image->extent.width, miplevel);
   const uint32_t height = anv_minify(image->extent.height, miplevel);

   struct dump_image *dump = ralloc(dump_ctx, struct dump_image);

   dump_image_init(cmd_buffer->device, dump, width, height, filename);
   dump_image_do_blit(cmd_buffer->device, dump, cmd_buffer, image,
                      aspect, miplevel, array_layer);

   list_addtail(&dump->link, &dump_list);
}

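/* Queues a dump of every attachment of the render pass currently bound to the
 * command buffer, one file per image aspect, named
 * "attachment<dump index>-<attachment index><aspect suffix>.ppm". Only does
 * anything when ANV_DUMP_FRAMEBUFFERS_BIT was enabled via anv_dump_start().
 */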
void
anv_dump_add_attachments(struct anv_cmd_buffer *cmd_buffer)
{
   if (!dump_lock(ANV_DUMP_FRAMEBUFFERS_BIT))
      return;

   unsigned dump_idx = dump_count++;

   for (unsigned i = 0; i < cmd_buffer->state.pass->attachment_count; i++) {
      struct anv_image_view *iview = cmd_buffer->state.attachments[i].image_view;

      uint32_t b;
      for_each_bit(b, iview->image->aspects) {
         VkImageAspectFlagBits aspect = (1 << b);
         const char *suffix;
         switch (aspect) {
         case VK_IMAGE_ASPECT_COLOR_BIT:    suffix = "c";  break;
         case VK_IMAGE_ASPECT_DEPTH_BIT:    suffix = "d";  break;
         case VK_IMAGE_ASPECT_STENCIL_BIT:  suffix = "s";  break;
         case VK_IMAGE_ASPECT_PLANE_0_BIT:  suffix = "c0"; break;
         case VK_IMAGE_ASPECT_PLANE_1_BIT:  suffix = "c1"; break;
         case VK_IMAGE_ASPECT_PLANE_2_BIT:  suffix = "c2"; break;
         default:
            unreachable("Invalid aspect");
         }

         char *filename = ralloc_asprintf(dump_ctx, "attachment%04d-%d%s.ppm",
                                          dump_idx, i, suffix);

         unsigned plane = anv_image_aspect_to_plane(iview->image->aspects, aspect);
         dump_add_image(cmd_buffer, (struct anv_image *)iview->image, aspect,
                        iview->planes[plane].isl.base_level,
                        iview->planes[plane].isl.base_array_layer,
                        filename);
      }
   }

   dump_unlock();
}