1 /*
2 * Copyright © 2019 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <string.h>
25 #include <stdlib.h>
26 #include <assert.h>
27
28 #include <vulkan/vulkan_core.h>
29 #include <vulkan/vk_layer.h>
30
31 #include "git_sha1.h"
32
33 #include "imgui.h"
34
35 #include "overlay_params.h"
36
37 #include "util/u_debug.h"
38 #include "util/hash_table.h"
39 #include "util/list.h"
40 #include "util/ralloc.h"
41 #include "util/os_time.h"
42 #include "util/os_socket.h"
43 #include "util/simple_mtx.h"
44 #include "util/u_math.h"
45
46 #include "vk_enum_to_str.h"
47 #include "vk_dispatch_table.h"
48 #include "vk_util.h"
49
50 /* Mapped from VkInstance/VkPhysicalDevice */
51 struct instance_data {
52 struct vk_instance_dispatch_table vtable;
53 struct vk_physical_device_dispatch_table pd_vtable;
54 VkInstance instance;
55
56 struct overlay_params params;
57 bool pipeline_statistics_enabled;
58
59 int control_client;
60
61 /* Dumping of frame stats to a file has been enabled. */
62 bool capture_enabled;
63
64 /* Dumping of frame stats to a file has been enabled and started. */
65 bool capture_started;
66
67 int socket;
68
69 FILE *output_file_fd;
70 };
71
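/* Per-frame counters, one slot per enum overlay_param_enabled statistic. */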
72 struct frame_stat {
73 uint64_t stats[OVERLAY_PARAM_ENABLED_MAX];
74 };
75
76 /* Mapped from VkDevice */
77 struct queue_data;
78 struct device_data {
79 struct instance_data *instance;
80
81 PFN_vkSetDeviceLoaderData set_device_loader_data;
82
83 struct vk_device_dispatch_table vtable;
84 VkPhysicalDevice physical_device;
85 VkDevice device;
86
87 VkPhysicalDeviceProperties properties;
88
89 struct queue_data *graphic_queue;
90
91 struct queue_data **queues;
92 uint32_t n_queues;
93
94 bool pipeline_statistics_enabled;
95
96 /* For a single frame */
97 struct frame_stat frame_stats;
98 };
99
100 /* Mapped from VkCommandBuffer */
101 struct command_buffer_data {
102 struct device_data *device;
103
104 VkCommandBufferLevel level;
105
106 VkCommandBuffer cmd_buffer;
107 VkQueryPool pipeline_query_pool;
108 VkQueryPool timestamp_query_pool;
109 uint32_t query_index;
110
111 struct frame_stat stats;
112
113 struct list_head link; /* link into queue_data::running_command_buffer */
114 };
115
116 /* Mapped from VkQueue */
117 struct queue_data {
118 struct device_data *device;
119
120 VkQueue queue;
121 VkQueueFlags flags;
122 uint32_t family_index;
123 uint64_t timestamp_mask;
124
125 VkFence queries_fence;
126
127 struct list_head running_command_buffer;
128 };
129
130 struct overlay_draw {
131 struct list_head link;
132
133 VkCommandBuffer command_buffer;
134
135 VkSemaphore cross_engine_semaphore;
136
137 VkSemaphore semaphore;
138 VkFence fence;
139
140 VkBuffer vertex_buffer;
141 VkDeviceMemory vertex_buffer_mem;
142 VkDeviceSize vertex_buffer_size;
143
144 VkBuffer index_buffer;
145 VkDeviceMemory index_buffer_mem;
146 VkDeviceSize index_buffer_size;
147 };
148
149 /* Mapped from VkSwapchainKHR */
150 struct swapchain_data {
151 struct device_data *device;
152
153 VkSwapchainKHR swapchain;
154 unsigned width, height;
155 VkFormat format;
156
157 uint32_t n_images;
158 VkImage *images;
159 VkImageView *image_views;
160 VkFramebuffer *framebuffers;
161
162 VkRenderPass render_pass;
163
164 VkDescriptorPool descriptor_pool;
165 VkDescriptorSetLayout descriptor_layout;
166 VkDescriptorSet descriptor_set;
167
168 VkSampler font_sampler;
169
170 VkPipelineLayout pipeline_layout;
171 VkPipeline pipeline;
172
173 VkCommandPool command_pool;
174
175 struct list_head draws; /* List of struct overlay_draw */
176
177 bool font_uploaded;
178 VkImage font_image;
179 VkImageView font_image_view;
180 VkDeviceMemory font_mem;
181 VkBuffer upload_font_buffer;
182 VkDeviceMemory upload_font_buffer_mem;
183
184 /**/
185 ImGuiContext* imgui_context;
186 ImVec2 window_size;
187
188 /**/
189 uint64_t n_frames;
190 uint64_t last_present_time;
191
192 unsigned n_frames_since_update;
193 uint64_t last_fps_update;
194 double fps;
195
196 enum overlay_param_enabled stat_selector;
197 double time_dividor;
198 struct frame_stat stats_min, stats_max;
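/* Ring buffer of recent per-frame stats, indexed by n_frames modulo the
 * array size (see snapshot_swapchain_frame()). */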
199 struct frame_stat frames_stats[200];
200
201 /* Over a single frame */
202 struct frame_stat frame_stats;
203
204 /* Over fps_sampling_period */
205 struct frame_stat accumulated_stats;
206 };
207
208 static const VkQueryPipelineStatisticFlags overlay_query_flags =
209 VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT |
210 VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT |
211 VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT |
212 VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT |
213 VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT |
214 VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT |
215 VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT |
216 VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT |
217 VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT |
218 VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT |
219 VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT;
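/* Number of statistics selected in overlay_query_flags above. */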
220 #define OVERLAY_QUERY_COUNT (11)
221
222 static struct hash_table_u64 *vk_object_to_data = NULL;
223 static simple_mtx_t vk_object_to_data_mutex = SIMPLE_MTX_INITIALIZER;
224
225 thread_local ImGuiContext* __MesaImGui;
226
227 static inline void ensure_vk_object_map(void)
228 {
229 if (!vk_object_to_data)
230 vk_object_to_data = _mesa_hash_table_u64_create(NULL);
231 }
232
233 #define HKEY(obj) ((uint64_t)(obj))
234 #define FIND(type, obj) ((type *)find_object_data(HKEY(obj)))
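/* e.g. FIND(struct device_data, device) returns the layer data previously
 * attached to a VkDevice handle with map_object(). */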
235
236 static void *find_object_data(uint64_t obj)
237 {
238 simple_mtx_lock(&vk_object_to_data_mutex);
239 ensure_vk_object_map();
240 void *data = _mesa_hash_table_u64_search(vk_object_to_data, obj);
241 simple_mtx_unlock(&vk_object_to_data_mutex);
242 return data;
243 }
244
245 static void map_object(uint64_t obj, void *data)
246 {
247 simple_mtx_lock(&vk_object_to_data_mutex);
248 ensure_vk_object_map();
249 _mesa_hash_table_u64_insert(vk_object_to_data, obj, data);
250 simple_mtx_unlock(&vk_object_to_data_mutex);
251 }
252
253 static void unmap_object(uint64_t obj)
254 {
255 simple_mtx_lock(&vk_object_to_data_mutex);
256 _mesa_hash_table_u64_remove(vk_object_to_data, obj);
257 simple_mtx_unlock(&vk_object_to_data_mutex);
258 }
259
260 /**/
261
262 #define VK_CHECK(expr) \
263 do { \
264 VkResult __result = (expr); \
265 if (__result != VK_SUCCESS) { \
266 fprintf(stderr, "'%s' line %i failed with %s\n", \
267 #expr, __LINE__, vk_Result_to_str(__result)); \
268 } \
269 } while (0)
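/* e.g. VK_CHECK(vtable.CreateFence(...)) logs a non-VK_SUCCESS result with
 * its source location but does not abort. */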
270
271 /**/
272
273 static VkLayerInstanceCreateInfo *get_instance_chain_info(const VkInstanceCreateInfo *pCreateInfo,
274 VkLayerFunction func)
275 {
276 vk_foreach_struct_const(item, pCreateInfo->pNext) {
277 if (item->sType == VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO &&
278 ((VkLayerInstanceCreateInfo *) item)->function == func)
279 return (VkLayerInstanceCreateInfo *) item;
280 }
281 unreachable("instance chain info not found");
282 return NULL;
283 }
284
285 static VkLayerDeviceCreateInfo *get_device_chain_info(const VkDeviceCreateInfo *pCreateInfo,
286 VkLayerFunction func)
287 {
288 vk_foreach_struct_const(item, pCreateInfo->pNext) {
289 if (item->sType == VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO &&
290 ((VkLayerDeviceCreateInfo *) item)->function == func)
291 return (VkLayerDeviceCreateInfo *)item;
292 }
293 unreachable("device chain info not found");
294 return NULL;
295 }
296
297 static void
298 free_chain(struct VkBaseOutStructure *chain)
299 {
300 while (chain) {
301 void *node = chain;
302 chain = chain->pNext;
303 free(node);
304 }
305 }
306
307 static struct VkBaseOutStructure *
308 clone_chain(const struct VkBaseInStructure *chain)
309 {
310 struct VkBaseOutStructure *head = NULL, *tail = NULL;
311
312 vk_foreach_struct_const(item, chain) {
313 size_t item_size = vk_structure_type_size(item);
314 if (item_size == 0) {
315 free_chain(head);
316 return NULL;
317 }
318
319 struct VkBaseOutStructure *new_item =
320 (struct VkBaseOutStructure *)malloc(item_size);
321
322 memcpy(new_item, item, item_size);
323
324 if (!head)
325 head = new_item;
326 if (tail)
327 tail->pNext = new_item;
328 tail = new_item;
329 }
330
331 return head;
332 }
333
334 /**/
335
336 static struct instance_data *new_instance_data(VkInstance instance)
337 {
338 struct instance_data *data = rzalloc(NULL, struct instance_data);
339 data->instance = instance;
340 data->control_client = -1;
341 data->socket = -1;
342 map_object(HKEY(data->instance), data);
343 return data;
344 }
345
346 static void destroy_instance_data(struct instance_data *data)
347 {
348 if (data->socket >= 0)
349 os_socket_close(data->socket);
350 if (data->params.output_file) {
351 free((void*)data->params.output_file);
352 data->params.output_file = NULL;
353 }
354 if (data->params.control) {
355 free((void*)data->params.control);
356 data->params.control = NULL;
357 }
358 unmap_object(HKEY(data->instance));
359 ralloc_free(data);
360 }
361
362 static void instance_data_map_physical_devices(struct instance_data *instance_data,
363 bool map)
364 {
365 uint32_t physicalDeviceCount = 0;
366 instance_data->vtable.EnumeratePhysicalDevices(instance_data->instance,
367 &physicalDeviceCount,
368 NULL);
369
370 VkPhysicalDevice *physicalDevices = (VkPhysicalDevice *) malloc(sizeof(VkPhysicalDevice) * physicalDeviceCount);
371 instance_data->vtable.EnumeratePhysicalDevices(instance_data->instance,
372 &physicalDeviceCount,
373 physicalDevices);
374
375 for (uint32_t i = 0; i < physicalDeviceCount; i++) {
376 if (map)
377 map_object(HKEY(physicalDevices[i]), instance_data);
378 else
379 unmap_object(HKEY(physicalDevices[i]));
380 }
381
382 free(physicalDevices);
383 }
384
385 /**/
386 static struct device_data *new_device_data(VkDevice device, struct instance_data *instance)
387 {
388 struct device_data *data = rzalloc(NULL, struct device_data);
389 data->instance = instance;
390 data->device = device;
391 map_object(HKEY(data->device), data);
392 return data;
393 }
394
395 static struct queue_data *new_queue_data(VkQueue queue,
396 const VkQueueFamilyProperties *family_props,
397 uint32_t family_index,
398 struct device_data *device_data)
399 {
400 struct queue_data *data = rzalloc(device_data, struct queue_data);
401 data->device = device_data;
402 data->queue = queue;
403 data->flags = family_props->queueFlags;
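/* timestampValidBits is the number of meaningful low-order bits in
 * timestamps written on this queue family; keep a mask so raw timestamp
 * query results can be truncated to the valid range. */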
404 data->timestamp_mask = (1ull << family_props->timestampValidBits) - 1;
405 data->family_index = family_index;
406 list_inithead(&data->running_command_buffer);
407 map_object(HKEY(data->queue), data);
408
409 /* Fence synchronizing access to queries on that queue. */
410 VkFenceCreateInfo fence_info = {};
411 fence_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
412 fence_info.flags = VK_FENCE_CREATE_SIGNALED_BIT;
413 VK_CHECK(device_data->vtable.CreateFence(device_data->device,
414 &fence_info,
415 NULL,
416 &data->queries_fence));
417
418 if (data->flags & VK_QUEUE_GRAPHICS_BIT)
419 device_data->graphic_queue = data;
420
421 return data;
422 }
423
424 static void destroy_queue(struct queue_data *data)
425 {
426 struct device_data *device_data = data->device;
427 device_data->vtable.DestroyFence(device_data->device, data->queries_fence, NULL);
428 unmap_object(HKEY(data->queue));
429 ralloc_free(data);
430 }
431
432 static void device_map_queues(struct device_data *data,
433 const VkDeviceCreateInfo *pCreateInfo)
434 {
435 for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++)
436 data->n_queues += pCreateInfo->pQueueCreateInfos[i].queueCount;
437 data->queues = ralloc_array(data, struct queue_data *, data->n_queues);
438
439 struct instance_data *instance_data = data->instance;
440 uint32_t n_family_props;
441 instance_data->pd_vtable.GetPhysicalDeviceQueueFamilyProperties(data->physical_device,
442 &n_family_props,
443 NULL);
444 VkQueueFamilyProperties *family_props =
445 (VkQueueFamilyProperties *)malloc(sizeof(VkQueueFamilyProperties) * n_family_props);
446 instance_data->pd_vtable.GetPhysicalDeviceQueueFamilyProperties(data->physical_device,
447 &n_family_props,
448 family_props);
449
450 uint32_t queue_index = 0;
451 for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
452 for (uint32_t j = 0; j < pCreateInfo->pQueueCreateInfos[i].queueCount; j++) {
453 VkQueue queue;
454 data->vtable.GetDeviceQueue(data->device,
455 pCreateInfo->pQueueCreateInfos[i].queueFamilyIndex,
456 j, &queue);
457
458 VK_CHECK(data->set_device_loader_data(data->device, queue));
459
460 data->queues[queue_index++] =
461 new_queue_data(queue, &family_props[pCreateInfo->pQueueCreateInfos[i].queueFamilyIndex],
462 pCreateInfo->pQueueCreateInfos[i].queueFamilyIndex, data);
463 }
464 }
465
466 free(family_props);
467 }
468
469 static void device_unmap_queues(struct device_data *data)
470 {
471 for (uint32_t i = 0; i < data->n_queues; i++)
472 destroy_queue(data->queues[i]);
473 }
474
475 static void destroy_device_data(struct device_data *data)
476 {
477 unmap_object(HKEY(data->device));
478 ralloc_free(data);
479 }
480
481 static const char *param_unit(enum overlay_param_enabled param)
482 {
483 switch (param) {
484 case OVERLAY_PARAM_ENABLED_frame_timing:
485 case OVERLAY_PARAM_ENABLED_acquire_timing:
486 case OVERLAY_PARAM_ENABLED_present_timing:
487 return "(us)";
488 case OVERLAY_PARAM_ENABLED_gpu_timing:
489 return "(ns)";
490 default:
491 return "";
492 }
493 }
494
495 /**/
496 static struct command_buffer_data *new_command_buffer_data(VkCommandBuffer cmd_buffer,
497 VkCommandBufferLevel level,
498 VkQueryPool pipeline_query_pool,
499 VkQueryPool timestamp_query_pool,
500 uint32_t query_index,
501 struct device_data *device_data)
502 {
503 struct command_buffer_data *data = rzalloc(NULL, struct command_buffer_data);
504 data->device = device_data;
505 data->cmd_buffer = cmd_buffer;
506 data->level = level;
507 data->pipeline_query_pool = pipeline_query_pool;
508 data->timestamp_query_pool = timestamp_query_pool;
509 data->query_index = query_index;
510 list_inithead(&data->link);
511 map_object(HKEY(data->cmd_buffer), data);
512 return data;
513 }
514
515 static void destroy_command_buffer_data(struct command_buffer_data *data)
516 {
517 unmap_object(HKEY(data->cmd_buffer));
518 list_delinit(&data->link);
519 ralloc_free(data);
520 }
521
522 /**/
523 static struct swapchain_data *new_swapchain_data(VkSwapchainKHR swapchain,
524 struct device_data *device_data)
525 {
526 struct instance_data *instance_data = device_data->instance;
527 struct swapchain_data *data = rzalloc(NULL, struct swapchain_data);
528 data->device = device_data;
529 data->swapchain = swapchain;
530 data->window_size = ImVec2(instance_data->params.width, instance_data->params.height);
531 list_inithead(&data->draws);
532 map_object(HKEY(data->swapchain), data);
533
534 /* Open output file on swapchain creation */
535 assert(instance_data->output_file_fd == NULL);
536 instance_data->output_file_fd =
537 fopen(instance_data->params.output_file, "w+");
538
539 if (instance_data->output_file_fd) {
540 bool first_column = true;
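/* Expand the OVERLAY_PARAMS X-macro (defined in overlay_params.h) to emit one
 * CSV column header per enabled statistic. */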
541 #define OVERLAY_PARAM_BOOL(name) \
542 if (instance_data->params.enabled[OVERLAY_PARAM_ENABLED_##name]) { \
543 fprintf(instance_data->output_file_fd, \
544 "%s%s%s", first_column ? "" : ", ", #name, \
545 param_unit(OVERLAY_PARAM_ENABLED_##name)); \
546 first_column = false; \
547 }
548 #define OVERLAY_PARAM_CUSTOM(name)
549 OVERLAY_PARAMS
550 #undef OVERLAY_PARAM_BOOL
551 #undef OVERLAY_PARAM_CUSTOM
552 fprintf(instance_data->output_file_fd, "\n");
553 } else
554 fprintf(stderr, "ERROR opening output file: %s\n", strerror(errno));
555
556 return data;
557 }
558
559 static void destroy_swapchain_data(struct swapchain_data *data)
560 {
561 unmap_object(HKEY(data->swapchain));
562 ralloc_free(data);
563 }
564
565 struct overlay_draw *get_overlay_draw(struct swapchain_data *data)
566 {
567 struct device_data *device_data = data->device;
568 struct overlay_draw *draw = list_is_empty(&data->draws) ?
569 NULL : list_first_entry(&data->draws, struct overlay_draw, link);
570
571 VkSemaphoreCreateInfo sem_info = {};
572 sem_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
573
574 if (draw && device_data->vtable.GetFenceStatus(device_data->device, draw->fence) == VK_SUCCESS) {
575 list_del(&draw->link);
576 VK_CHECK(device_data->vtable.ResetFences(device_data->device,
577 1, &draw->fence));
578 list_addtail(&draw->link, &data->draws);
579 return draw;
580 }
581
582 draw = rzalloc(data, struct overlay_draw);
583
584 VkCommandBufferAllocateInfo cmd_buffer_info = {};
585 cmd_buffer_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
586 cmd_buffer_info.commandPool = data->command_pool;
587 cmd_buffer_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
588 cmd_buffer_info.commandBufferCount = 1;
589 VK_CHECK(device_data->vtable.AllocateCommandBuffers(device_data->device,
590 &cmd_buffer_info,
591 &draw->command_buffer));
592 VK_CHECK(device_data->set_device_loader_data(device_data->device,
593 draw->command_buffer));
594
595
596 VkFenceCreateInfo fence_info = {};
597 fence_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
598 VK_CHECK(device_data->vtable.CreateFence(device_data->device,
599 &fence_info,
600 NULL,
601 &draw->fence));
602
603 VK_CHECK(device_data->vtable.CreateSemaphore(device_data->device, &sem_info,
604 NULL, &draw->semaphore));
605 VK_CHECK(device_data->vtable.CreateSemaphore(device_data->device, &sem_info,
606 NULL, &draw->cross_engine_semaphore));
607
608 list_addtail(&draw->link, &data->draws);
609
610 return draw;
611 }
612
613 static void parse_command(struct instance_data *instance_data,
614 const char *cmd, unsigned cmdlen,
615 const char *param, unsigned paramlen)
616 {
617 if (!strncmp(cmd, "capture", cmdlen)) {
618 int value = atoi(param);
619 bool enabled = value > 0;
620
621 if (enabled) {
622 instance_data->capture_enabled = true;
623 } else {
624 instance_data->capture_enabled = false;
625 instance_data->capture_started = false;
626 }
627 }
628 }
629
630 #define BUFSIZE 4096
631
632 /**
633 * This function will process commands through the control file.
634 *
635 * A command starts with a colon, followed by the command name, optionally
636 * followed by '=' and a parameter. It has to end with a semi-colon. A full command
637 * + parameter looks like:
638 *
639 * :cmd=param;
640 */
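/* Example: sending ":capture=1;" over the control socket enables frame stats
 * capture (see parse_command()). */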
641 static void process_char(struct instance_data *instance_data, char c)
642 {
643 static char cmd[BUFSIZE];
644 static char param[BUFSIZE];
645
646 static unsigned cmdpos = 0;
647 static unsigned parampos = 0;
648 static bool reading_cmd = false;
649 static bool reading_param = false;
650
651 switch (c) {
652 case ':':
653 cmdpos = 0;
654 parampos = 0;
655 reading_cmd = true;
656 reading_param = false;
657 break;
658 case ';':
659 if (!reading_cmd)
660 break;
661 cmd[cmdpos++] = '\0';
662 param[parampos++] = '\0';
663 parse_command(instance_data, cmd, cmdpos, param, parampos);
664 reading_cmd = false;
665 reading_param = false;
666 break;
667 case '=':
668 if (!reading_cmd)
669 break;
670 reading_param = true;
671 break;
672 default:
673 if (!reading_cmd)
674 break;
675
676 if (reading_param) {
677 /* overflow means an invalid parameter */
678 if (parampos >= BUFSIZE - 1) {
679 reading_cmd = false;
680 reading_param = false;
681 break;
682 }
683
684 param[parampos++] = c;
685 } else {
686 /* overflow means an invalid command */
687 if (cmdpos >= BUFSIZE - 1) {
688 reading_cmd = false;
689 break;
690 }
691
692 cmd[cmdpos++] = c;
693 }
694 }
695 }
696
697 static void control_send(struct instance_data *instance_data,
698 const char *cmd, unsigned cmdlen,
699 const char *param, unsigned paramlen)
700 {
701 unsigned msglen = 0;
702 char buffer[BUFSIZE];
703
704 assert(cmdlen + paramlen + 3 < BUFSIZE);
705
706 buffer[msglen++] = ':';
707
708 memcpy(&buffer[msglen], cmd, cmdlen);
709 msglen += cmdlen;
710
711 if (paramlen > 0) {
712 buffer[msglen++] = '=';
713 memcpy(&buffer[msglen], param, paramlen);
714 msglen += paramlen;
715 buffer[msglen++] = ';';
716 }
717
718 os_socket_send(instance_data->control_client, buffer, msglen, 0);
719 }
720
721 static void control_send_connection_string(struct device_data *device_data)
722 {
723 struct instance_data *instance_data = device_data->instance;
724
725 const char *controlVersionCmd = "MesaOverlayControlVersion";
726 const char *controlVersionString = "1";
727
728 control_send(instance_data, controlVersionCmd, strlen(controlVersionCmd),
729 controlVersionString, strlen(controlVersionString));
730
731 const char *deviceCmd = "DeviceName";
732 const char *deviceName = device_data->properties.deviceName;
733
734 control_send(instance_data, deviceCmd, strlen(deviceCmd),
735 deviceName, strlen(deviceName));
736
737 const char *mesaVersionCmd = "MesaVersion";
738 const char *mesaVersionString = "Mesa " PACKAGE_VERSION MESA_GIT_SHA1;
739
740 control_send(instance_data, mesaVersionCmd, strlen(mesaVersionCmd),
741 mesaVersionString, strlen(mesaVersionString));
742 }
743
744 static void control_client_check(struct device_data *device_data)
745 {
746 struct instance_data *instance_data = device_data->instance;
747
748 /* Already connected, just return. */
749 if (instance_data->control_client >= 0)
750 return;
751
752 int socket = os_socket_accept(instance_data->socket);
753 if (socket == -1) {
754 if (errno != EAGAIN && errno != EWOULDBLOCK && errno != ECONNABORTED)
755 fprintf(stderr, "ERROR on socket: %s\n", strerror(errno));
756 return;
757 }
758
759 if (socket >= 0) {
760 os_socket_block(socket, false);
761 instance_data->control_client = socket;
762 control_send_connection_string(device_data);
763 }
764 }
765
766 static void control_client_disconnected(struct instance_data *instance_data)
767 {
768 os_socket_close(instance_data->control_client);
769 instance_data->control_client = -1;
770 }
771
772 static void process_control_socket(struct instance_data *instance_data)
773 {
774 const int client = instance_data->control_client;
775 if (client >= 0) {
776 char buf[BUFSIZE];
777
778 while (true) {
779 ssize_t n = os_socket_recv(client, buf, BUFSIZE, 0);
780
781 if (n == -1) {
782 if (errno == EAGAIN || errno == EWOULDBLOCK) {
783 /* nothing to read, try again later */
784 break;
785 }
786
787 if (errno != ECONNRESET)
788 fprintf(stderr, "ERROR on connection: %s\n", strerror(errno));
789
790 control_client_disconnected(instance_data);
791 } else if (n == 0) {
792 /* recv() returns 0 when the client disconnects */
793 control_client_disconnected(instance_data);
794 }
795
796 for (ssize_t i = 0; i < n; i++) {
797 process_char(instance_data, buf[i]);
798 }
799
800 /* If we try to read BUFSIZE and receive BUFSIZE bytes from the
801 * socket, there's a good chance that there's still more data to be
802 * read, so we will try again. Otherwise, simply be done for this
803 * iteration and try again on the next frame.
804 */
805 if (n < BUFSIZE)
806 break;
807 }
808 }
809 }
810
811 static void snapshot_swapchain_frame(struct swapchain_data *data)
812 {
813 struct device_data *device_data = data->device;
814 struct instance_data *instance_data = device_data->instance;
815 uint32_t f_idx = data->n_frames % ARRAY_SIZE(data->frames_stats);
816 uint64_t now = os_time_get(); /* us */
817
818 if (instance_data->params.control && instance_data->socket < 0) {
819 int ret = os_socket_listen_abstract(instance_data->params.control, 1);
820 if (ret >= 0) {
821 os_socket_block(ret, false);
822 instance_data->socket = ret;
823 } else {
824 fprintf(stderr, "ERROR: Couldn't create socket pipe at '%s'\n", instance_data->params.control);
825 fprintf(stderr, "ERROR: '%s'\n", strerror(errno));
826 }
827 }
828
829 if (instance_data->socket >= 0) {
830 control_client_check(device_data);
831 process_control_socket(instance_data);
832 }
833
834 if (data->last_present_time) {
835 data->frame_stats.stats[OVERLAY_PARAM_ENABLED_frame_timing] =
836 now - data->last_present_time;
837 }
838
839 memset(&data->frames_stats[f_idx], 0, sizeof(data->frames_stats[f_idx]));
840 for (int s = 0; s < OVERLAY_PARAM_ENABLED_MAX; s++) {
841 data->frames_stats[f_idx].stats[s] += device_data->frame_stats.stats[s] + data->frame_stats.stats[s];
842 data->accumulated_stats.stats[s] += device_data->frame_stats.stats[s] + data->frame_stats.stats[s];
843 }
844
845 /* If capture has been enabled but it hasn't started yet, it means we are on
846 * the first snapshot after it has been enabled. At this point we want to
847 * use the stats captured so far to update the display, but we don't want
848 * that data to add noise to the stats that we want to capture from now
849 * on.
850 *
851 * capture_begin == true will trigger an update of the fps on display, and a
852 * flush of the data, but no stats will be written to the output file. This
853 * way, we will have only stats from after the capture has been enabled
854 * written to the output_file.
855 */
856 const bool capture_begin =
857 instance_data->capture_enabled && !instance_data->capture_started;
858
859 if (data->last_fps_update) {
860 double elapsed = (double)(now - data->last_fps_update); /* us */
861 if (capture_begin ||
862 elapsed >= instance_data->params.fps_sampling_period) {
863 data->fps = 1000000.0f * data->n_frames_since_update / elapsed;
864 if (instance_data->capture_started) {
865 for (int s = 0; s < OVERLAY_PARAM_ENABLED_MAX; s++) {
866 if (!instance_data->params.enabled[s])
867 continue;
868 if (s == OVERLAY_PARAM_ENABLED_fps) {
869 fprintf(instance_data->output_file_fd,
870 "%s%.2f", s == 0 ? "" : ", ", data->fps);
871 } else {
872 fprintf(instance_data->output_file_fd,
873 "%s%" PRIu64, s == 0 ? "" : ", ",
874 data->accumulated_stats.stats[s]);
875 }
876 }
877 fprintf(instance_data->output_file_fd, "\n");
878 fflush(instance_data->output_file_fd);
879 }
880
881 memset(&data->accumulated_stats, 0, sizeof(data->accumulated_stats));
882 data->n_frames_since_update = 0;
883 data->last_fps_update = now;
884
885 if (capture_begin)
886 instance_data->capture_started = true;
887 }
888 } else {
889 data->last_fps_update = now;
890 }
891
892 memset(&device_data->frame_stats, 0, sizeof(device_data->frame_stats));
893 memset(&data->frame_stats, 0, sizeof(data->frame_stats));
894
895 data->last_present_time = now;
896 data->n_frames++;
897 data->n_frames_since_update++;
898 }
899
900 static float get_time_stat(void *_data, int _idx)
901 {
902 struct swapchain_data *data = (struct swapchain_data *) _data;
903 if ((ARRAY_SIZE(data->frames_stats) - _idx) > data->n_frames)
904 return 0.0f;
905 int idx = ARRAY_SIZE(data->frames_stats) +
906 data->n_frames < ARRAY_SIZE(data->frames_stats) ?
907 _idx - data->n_frames :
908 _idx + data->n_frames;
909 idx %= ARRAY_SIZE(data->frames_stats);
910 /* Time stats are in us. */
911 return data->frames_stats[idx].stats[data->stat_selector] / data->time_dividor;
912 }
913
914 static float get_stat(void *_data, int _idx)
915 {
916 struct swapchain_data *data = (struct swapchain_data *) _data;
917 if ((ARRAY_SIZE(data->frames_stats) - _idx) > data->n_frames)
918 return 0.0f;
919 int idx = ARRAY_SIZE(data->frames_stats) +
920 data->n_frames < ARRAY_SIZE(data->frames_stats) ?
921 _idx - data->n_frames :
922 _idx + data->n_frames;
923 idx %= ARRAY_SIZE(data->frames_stats);
924 return data->frames_stats[idx].stats[data->stat_selector];
925 }
926
927 static void position_layer(struct swapchain_data *data)
928
929 {
930 struct device_data *device_data = data->device;
931 struct instance_data *instance_data = device_data->instance;
932 const float margin = 10.0f;
933
934 ImGui::SetNextWindowBgAlpha(0.5);
935 ImGui::SetNextWindowSize(data->window_size, ImGuiCond_Always);
936 switch (instance_data->params.position) {
937 case LAYER_POSITION_TOP_LEFT:
938 ImGui::SetNextWindowPos(ImVec2(margin, margin), ImGuiCond_Always);
939 break;
940 case LAYER_POSITION_TOP_RIGHT:
941 ImGui::SetNextWindowPos(ImVec2(data->width - data->window_size.x - margin, margin),
942 ImGuiCond_Always);
943 break;
944 case LAYER_POSITION_BOTTOM_LEFT:
945 ImGui::SetNextWindowPos(ImVec2(margin, data->height - data->window_size.y - margin),
946 ImGuiCond_Always);
947 break;
948 case LAYER_POSITION_BOTTOM_RIGHT:
949 ImGui::SetNextWindowPos(ImVec2(data->width - data->window_size.x - margin,
950 data->height - data->window_size.y - margin),
951 ImGuiCond_Always);
952 break;
953 }
954 }
955
956 static void compute_swapchain_display(struct swapchain_data *data)
957 {
958 struct device_data *device_data = data->device;
959 struct instance_data *instance_data = device_data->instance;
960
961 ImGui::SetCurrentContext(data->imgui_context);
962 ImGui::NewFrame();
963 position_layer(data);
964 ImGui::Begin("Mesa overlay");
965 if (instance_data->params.enabled[OVERLAY_PARAM_ENABLED_device])
966 ImGui::Text("Device: %s", device_data->properties.deviceName);
967
968 if (instance_data->params.enabled[OVERLAY_PARAM_ENABLED_format]) {
969 const char *format_name = vk_Format_to_str(data->format);
970 format_name = format_name ? (format_name + strlen("VK_FORMAT_")) : "unknown";
971 ImGui::Text("Swapchain format: %s", format_name);
972 }
973 if (instance_data->params.enabled[OVERLAY_PARAM_ENABLED_frame])
974 ImGui::Text("Frames: %" PRIu64, data->n_frames);
975 if (instance_data->params.enabled[OVERLAY_PARAM_ENABLED_fps])
976 ImGui::Text("FPS: %.2f" , data->fps);
977
978 /* Recompute min/max */
979 for (uint32_t s = 0; s < OVERLAY_PARAM_ENABLED_MAX; s++) {
980 data->stats_min.stats[s] = UINT64_MAX;
981 data->stats_max.stats[s] = 0;
982 }
983 for (uint32_t f = 0; f < MIN2(data->n_frames, ARRAY_SIZE(data->frames_stats)); f++) {
984 for (uint32_t s = 0; s < OVERLAY_PARAM_ENABLED_MAX; s++) {
985 data->stats_min.stats[s] = MIN2(data->frames_stats[f].stats[s],
986 data->stats_min.stats[s]);
987 data->stats_max.stats[s] = MAX2(data->frames_stats[f].stats[s],
988 data->stats_max.stats[s]);
989 }
990 }
991 for (uint32_t s = 0; s < OVERLAY_PARAM_ENABLED_MAX; s++) {
992 assert(data->stats_min.stats[s] != UINT64_MAX);
993 }
994
995 for (uint32_t s = 0; s < OVERLAY_PARAM_ENABLED_MAX; s++) {
996 if (!instance_data->params.enabled[s] ||
997 s == OVERLAY_PARAM_ENABLED_device ||
998 s == OVERLAY_PARAM_ENABLED_format ||
999 s == OVERLAY_PARAM_ENABLED_fps ||
1000 s == OVERLAY_PARAM_ENABLED_frame)
1001 continue;
1002
1003 char hash[40];
1004 snprintf(hash, sizeof(hash), "##%s", overlay_param_names[s]);
1005 data->stat_selector = (enum overlay_param_enabled) s;
1006 data->time_dividor = 1000.0f;
1007 if (s == OVERLAY_PARAM_ENABLED_gpu_timing)
1008 data->time_dividor = 1000000.0f;
1009
1010 if (s == OVERLAY_PARAM_ENABLED_frame_timing ||
1011 s == OVERLAY_PARAM_ENABLED_acquire_timing ||
1012 s == OVERLAY_PARAM_ENABLED_present_timing ||
1013 s == OVERLAY_PARAM_ENABLED_gpu_timing) {
1014 double min_time = data->stats_min.stats[s] / data->time_dividor;
1015 double max_time = data->stats_max.stats[s] / data->time_dividor;
1016 ImGui::PlotHistogram(hash, get_time_stat, data,
1017 ARRAY_SIZE(data->frames_stats), 0,
1018 NULL, min_time, max_time,
1019 ImVec2(ImGui::GetContentRegionAvailWidth(), 30));
1020 ImGui::Text("%s: %.3fms [%.3f, %.3f]", overlay_param_names[s],
1021 get_time_stat(data, ARRAY_SIZE(data->frames_stats) - 1),
1022 min_time, max_time);
1023 } else {
1024 ImGui::PlotHistogram(hash, get_stat, data,
1025 ARRAY_SIZE(data->frames_stats), 0,
1026 NULL,
1027 data->stats_min.stats[s],
1028 data->stats_max.stats[s],
1029 ImVec2(ImGui::GetContentRegionAvailWidth(), 30));
1030 ImGui::Text("%s: %.0f [%" PRIu64 ", %" PRIu64 "]", overlay_param_names[s],
1031 get_stat(data, ARRAY_SIZE(data->frames_stats) - 1),
1032 data->stats_min.stats[s], data->stats_max.stats[s]);
1033 }
1034 }
1035 data->window_size = ImVec2(data->window_size.x, ImGui::GetCursorPosY() + 10.0f);
1036 ImGui::End();
1037 ImGui::EndFrame();
1038 ImGui::Render();
1039 }
1040
1041 static uint32_t vk_memory_type(struct device_data *data,
1042 VkMemoryPropertyFlags properties,
1043 uint32_t type_bits)
1044 {
1045 VkPhysicalDeviceMemoryProperties prop;
1046 data->instance->pd_vtable.GetPhysicalDeviceMemoryProperties(data->physical_device, &prop);
1047 for (uint32_t i = 0; i < prop.memoryTypeCount; i++)
1048 if ((prop.memoryTypes[i].propertyFlags & properties) == properties && type_bits & (1<<i))
1049 return i;
1050 return 0xFFFFFFFF; // Unable to find memoryType
1051 }
1052
1053 static void ensure_swapchain_fonts(struct swapchain_data *data,
1054 VkCommandBuffer command_buffer)
1055 {
1056 if (data->font_uploaded)
1057 return;
1058
1059 data->font_uploaded = true;
1060
1061 struct device_data *device_data = data->device;
1062 ImGuiIO& io = ImGui::GetIO();
1063 unsigned char* pixels;
1064 int width, height;
1065 io.Fonts->GetTexDataAsRGBA32(&pixels, &width, &height);
1066 size_t upload_size = width * height * 4 * sizeof(char);
1067
1068 /* Upload buffer */
1069 VkBufferCreateInfo buffer_info = {};
1070 buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
1071 buffer_info.size = upload_size;
1072 buffer_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
1073 buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
1074 VK_CHECK(device_data->vtable.CreateBuffer(device_data->device, &buffer_info,
1075 NULL, &data->upload_font_buffer));
1076 VkMemoryRequirements upload_buffer_req;
1077 device_data->vtable.GetBufferMemoryRequirements(device_data->device,
1078 data->upload_font_buffer,
1079 &upload_buffer_req);
1080 VkMemoryAllocateInfo upload_alloc_info = {};
1081 upload_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
1082 upload_alloc_info.allocationSize = upload_buffer_req.size;
1083 upload_alloc_info.memoryTypeIndex = vk_memory_type(device_data,
1084 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
1085 upload_buffer_req.memoryTypeBits);
1086 VK_CHECK(device_data->vtable.AllocateMemory(device_data->device,
1087 &upload_alloc_info,
1088 NULL,
1089 &data->upload_font_buffer_mem));
1090 VK_CHECK(device_data->vtable.BindBufferMemory(device_data->device,
1091 data->upload_font_buffer,
1092 data->upload_font_buffer_mem, 0));
1093
1094 /* Upload to Buffer */
1095 char* map = NULL;
1096 VK_CHECK(device_data->vtable.MapMemory(device_data->device,
1097 data->upload_font_buffer_mem,
1098 0, upload_size, 0, (void**)(&map)));
1099 memcpy(map, pixels, upload_size);
1100 VkMappedMemoryRange range[1] = {};
1101 range[0].sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
1102 range[0].memory = data->upload_font_buffer_mem;
1103 range[0].size = upload_size;
1104 VK_CHECK(device_data->vtable.FlushMappedMemoryRanges(device_data->device, 1, range));
1105 device_data->vtable.UnmapMemory(device_data->device,
1106 data->upload_font_buffer_mem);
1107
1108 /* Copy buffer to image */
1109 VkImageMemoryBarrier copy_barrier[1] = {};
1110 copy_barrier[0].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
1111 copy_barrier[0].dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
1112 copy_barrier[0].oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
1113 copy_barrier[0].newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
1114 copy_barrier[0].srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
1115 copy_barrier[0].dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
1116 copy_barrier[0].image = data->font_image;
1117 copy_barrier[0].subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1118 copy_barrier[0].subresourceRange.levelCount = 1;
1119 copy_barrier[0].subresourceRange.layerCount = 1;
1120 device_data->vtable.CmdPipelineBarrier(command_buffer,
1121 VK_PIPELINE_STAGE_HOST_BIT,
1122 VK_PIPELINE_STAGE_TRANSFER_BIT,
1123 0, 0, NULL, 0, NULL,
1124 1, copy_barrier);
1125
1126 VkBufferImageCopy region = {};
1127 region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1128 region.imageSubresource.layerCount = 1;
1129 region.imageExtent.width = width;
1130 region.imageExtent.height = height;
1131 region.imageExtent.depth = 1;
1132 device_data->vtable.CmdCopyBufferToImage(command_buffer,
1133 data->upload_font_buffer,
1134 data->font_image,
1135 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1136 1, &region);
1137
1138 VkImageMemoryBarrier use_barrier[1] = {};
1139 use_barrier[0].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
1140 use_barrier[0].srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
1141 use_barrier[0].dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
1142 use_barrier[0].oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
1143 use_barrier[0].newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
1144 use_barrier[0].srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
1145 use_barrier[0].dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
1146 use_barrier[0].image = data->font_image;
1147 use_barrier[0].subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1148 use_barrier[0].subresourceRange.levelCount = 1;
1149 use_barrier[0].subresourceRange.layerCount = 1;
1150 device_data->vtable.CmdPipelineBarrier(command_buffer,
1151 VK_PIPELINE_STAGE_TRANSFER_BIT,
1152 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
1153 0,
1154 0, NULL,
1155 0, NULL,
1156 1, use_barrier);
1157
1158 /* Store our identifier */
1159 io.Fonts->TexID = (ImTextureID)(intptr_t)data->font_image;
1160 }
1161
1162 static void CreateOrResizeBuffer(struct device_data *data,
1163 VkBuffer *buffer,
1164 VkDeviceMemory *buffer_memory,
1165 VkDeviceSize *buffer_size,
1166 size_t new_size, VkBufferUsageFlagBits usage)
1167 {
1168 if (*buffer != VK_NULL_HANDLE)
1169 data->vtable.DestroyBuffer(data->device, *buffer, NULL);
1170 if (*buffer_memory)
1171 data->vtable.FreeMemory(data->device, *buffer_memory, NULL);
1172
1173 VkBufferCreateInfo buffer_info = {};
1174 buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
1175 buffer_info.size = new_size;
1176 buffer_info.usage = usage;
1177 buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
1178 VK_CHECK(data->vtable.CreateBuffer(data->device, &buffer_info, NULL, buffer));
1179
1180 VkMemoryRequirements req;
1181 data->vtable.GetBufferMemoryRequirements(data->device, *buffer, &req);
1182 VkMemoryAllocateInfo alloc_info = {};
1183 alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
1184 alloc_info.allocationSize = req.size;
1185 alloc_info.memoryTypeIndex =
1186 vk_memory_type(data, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, req.memoryTypeBits);
1187 VK_CHECK(data->vtable.AllocateMemory(data->device, &alloc_info, NULL, buffer_memory));
1188
1189 VK_CHECK(data->vtable.BindBufferMemory(data->device, *buffer, *buffer_memory, 0));
1190 *buffer_size = new_size;
1191 }
1192
1193 static struct overlay_draw *render_swapchain_display(struct swapchain_data *data,
1194 struct queue_data *present_queue,
1195 const VkSemaphore *wait_semaphores,
1196 unsigned n_wait_semaphores,
1197 unsigned image_index)
1198 {
1199 ImDrawData* draw_data = ImGui::GetDrawData();
1200 if (draw_data->TotalVtxCount == 0)
1201 return NULL;
1202
1203 struct device_data *device_data = data->device;
1204 struct overlay_draw *draw = get_overlay_draw(data);
1205
1206 device_data->vtable.ResetCommandBuffer(draw->command_buffer, 0);
1207
1208 VkRenderPassBeginInfo render_pass_info = {};
1209 render_pass_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
1210 render_pass_info.renderPass = data->render_pass;
1211 render_pass_info.framebuffer = data->framebuffers[image_index];
1212 render_pass_info.renderArea.extent.width = data->width;
1213 render_pass_info.renderArea.extent.height = data->height;
1214
1215 VkCommandBufferBeginInfo buffer_begin_info = {};
1216 buffer_begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
1217
1218 device_data->vtable.BeginCommandBuffer(draw->command_buffer, &buffer_begin_info);
1219
1220 ensure_swapchain_fonts(data, draw->command_buffer);
1221
1222 /* Transition the image being presented back to the color attachment
1223 * layout so we can render the overlay on top of it.
1224 */
1225 VkImageMemoryBarrier imb;
1226 imb.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
1227 imb.pNext = nullptr;
1228 imb.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
1229 imb.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
1230 imb.oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
1231 imb.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
1232 imb.image = data->images[image_index];
1233 imb.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1234 imb.subresourceRange.baseMipLevel = 0;
1235 imb.subresourceRange.levelCount = 1;
1236 imb.subresourceRange.baseArrayLayer = 0;
1237 imb.subresourceRange.layerCount = 1;
1238 imb.srcQueueFamilyIndex = present_queue->family_index;
1239 imb.dstQueueFamilyIndex = device_data->graphic_queue->family_index;
1240 device_data->vtable.CmdPipelineBarrier(draw->command_buffer,
1241 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
1242 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
1243 0, /* dependency flags */
1244 0, nullptr, /* memory barriers */
1245 0, nullptr, /* buffer memory barriers */
1246 1, &imb); /* image memory barriers */
1247
1248 device_data->vtable.CmdBeginRenderPass(draw->command_buffer, &render_pass_info,
1249 VK_SUBPASS_CONTENTS_INLINE);
1250
1251 /* Create/Resize vertex & index buffers */
1252 size_t vertex_size = align_uintptr(draw_data->TotalVtxCount * sizeof(ImDrawVert), device_data->properties.limits.nonCoherentAtomSize);
1253 size_t index_size = align_uintptr(draw_data->TotalIdxCount * sizeof(ImDrawIdx), device_data->properties.limits.nonCoherentAtomSize);
1254 if (draw->vertex_buffer_size < vertex_size) {
1255 CreateOrResizeBuffer(device_data,
1256 &draw->vertex_buffer,
1257 &draw->vertex_buffer_mem,
1258 &draw->vertex_buffer_size,
1259 vertex_size, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
1260 }
1261 if (draw->index_buffer_size < index_size) {
1262 CreateOrResizeBuffer(device_data,
1263 &draw->index_buffer,
1264 &draw->index_buffer_mem,
1265 &draw->index_buffer_size,
1266 index_size, VK_BUFFER_USAGE_INDEX_BUFFER_BIT);
1267 }
1268
1269 /* Upload vertex & index data */
1270 ImDrawVert* vtx_dst = NULL;
1271 ImDrawIdx* idx_dst = NULL;
1272 VK_CHECK(device_data->vtable.MapMemory(device_data->device, draw->vertex_buffer_mem,
1273 0, vertex_size, 0, (void**)(&vtx_dst)));
1274 VK_CHECK(device_data->vtable.MapMemory(device_data->device, draw->index_buffer_mem,
1275 0, index_size, 0, (void**)(&idx_dst)));
1276 for (int n = 0; n < draw_data->CmdListsCount; n++)
1277 {
1278 const ImDrawList* cmd_list = draw_data->CmdLists[n];
1279 memcpy(vtx_dst, cmd_list->VtxBuffer.Data, cmd_list->VtxBuffer.Size * sizeof(ImDrawVert));
1280 memcpy(idx_dst, cmd_list->IdxBuffer.Data, cmd_list->IdxBuffer.Size * sizeof(ImDrawIdx));
1281 vtx_dst += cmd_list->VtxBuffer.Size;
1282 idx_dst += cmd_list->IdxBuffer.Size;
1283 }
1284 VkMappedMemoryRange range[2] = {};
1285 range[0].sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
1286 range[0].memory = draw->vertex_buffer_mem;
1287 range[0].size = VK_WHOLE_SIZE;
1288 range[1].sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
1289 range[1].memory = draw->index_buffer_mem;
1290 range[1].size = VK_WHOLE_SIZE;
1291 VK_CHECK(device_data->vtable.FlushMappedMemoryRanges(device_data->device, 2, range));
1292 device_data->vtable.UnmapMemory(device_data->device, draw->vertex_buffer_mem);
1293 device_data->vtable.UnmapMemory(device_data->device, draw->index_buffer_mem);
1294
1295 /* Bind pipeline and descriptor sets */
1296 device_data->vtable.CmdBindPipeline(draw->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, data->pipeline);
1297 VkDescriptorSet desc_set[1] = { data->descriptor_set };
1298 device_data->vtable.CmdBindDescriptorSets(draw->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
1299 data->pipeline_layout, 0, 1, desc_set, 0, NULL);
1300
1301 /* Bind vertex & index buffers */
1302 VkBuffer vertex_buffers[1] = { draw->vertex_buffer };
1303 VkDeviceSize vertex_offset[1] = { 0 };
1304 device_data->vtable.CmdBindVertexBuffers(draw->command_buffer, 0, 1, vertex_buffers, vertex_offset);
1305 device_data->vtable.CmdBindIndexBuffer(draw->command_buffer, draw->index_buffer, 0, VK_INDEX_TYPE_UINT16);
1306
1307 /* Setup viewport */
1308 VkViewport viewport;
1309 viewport.x = 0;
1310 viewport.y = 0;
1311 viewport.width = draw_data->DisplaySize.x;
1312 viewport.height = draw_data->DisplaySize.y;
1313 viewport.minDepth = 0.0f;
1314 viewport.maxDepth = 1.0f;
1315 device_data->vtable.CmdSetViewport(draw->command_buffer, 0, 1, &viewport);
1316
1317
1318 /* Setup scale and translation through push constants:
1319 *
1320 * Our visible imgui space lies from draw_data->DisplayPos (top left) to
1321 * draw_data->DisplayPos + draw_data->DisplaySize (bottom right). DisplayPos
1322 * is typically (0,0) for single viewport apps.
1323 */
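/* For example, with DisplayPos = (0,0) and DisplaySize = (W,H), scale becomes
 * (2/W, 2/H) and translate (-1,-1), mapping imgui pixel coordinates onto the
 * [-1,1] Vulkan clip space. */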
1324 float scale[2];
1325 scale[0] = 2.0f / draw_data->DisplaySize.x;
1326 scale[1] = 2.0f / draw_data->DisplaySize.y;
1327 float translate[2];
1328 translate[0] = -1.0f - draw_data->DisplayPos.x * scale[0];
1329 translate[1] = -1.0f - draw_data->DisplayPos.y * scale[1];
1330 device_data->vtable.CmdPushConstants(draw->command_buffer, data->pipeline_layout,
1331 VK_SHADER_STAGE_VERTEX_BIT,
1332 sizeof(float) * 0, sizeof(float) * 2, scale);
1333 device_data->vtable.CmdPushConstants(draw->command_buffer, data->pipeline_layout,
1334 VK_SHADER_STAGE_VERTEX_BIT,
1335 sizeof(float) * 2, sizeof(float) * 2, translate);
1336
1337 // Render the command lists:
1338 int vtx_offset = 0;
1339 int idx_offset = 0;
1340 ImVec2 display_pos = draw_data->DisplayPos;
1341 for (int n = 0; n < draw_data->CmdListsCount; n++)
1342 {
1343 const ImDrawList* cmd_list = draw_data->CmdLists[n];
1344 for (int cmd_i = 0; cmd_i < cmd_list->CmdBuffer.Size; cmd_i++)
1345 {
1346 const ImDrawCmd* pcmd = &cmd_list->CmdBuffer[cmd_i];
1347 // Apply scissor/clipping rectangle
1348 // FIXME: We could clamp width/height based on clamped min/max values.
1349 VkRect2D scissor;
1350 scissor.offset.x = (int32_t)(pcmd->ClipRect.x - display_pos.x) > 0 ? (int32_t)(pcmd->ClipRect.x - display_pos.x) : 0;
1351 scissor.offset.y = (int32_t)(pcmd->ClipRect.y - display_pos.y) > 0 ? (int32_t)(pcmd->ClipRect.y - display_pos.y) : 0;
1352 scissor.extent.width = (uint32_t)(pcmd->ClipRect.z - pcmd->ClipRect.x);
1353 scissor.extent.height = (uint32_t)(pcmd->ClipRect.w - pcmd->ClipRect.y + 1); // FIXME: Why +1 here?
1354 device_data->vtable.CmdSetScissor(draw->command_buffer, 0, 1, &scissor);
1355
1356 // Draw
1357 device_data->vtable.CmdDrawIndexed(draw->command_buffer, pcmd->ElemCount, 1, idx_offset, vtx_offset, 0);
1358
1359 idx_offset += pcmd->ElemCount;
1360 }
1361 vtx_offset += cmd_list->VtxBuffer.Size;
1362 }
1363
1364 device_data->vtable.CmdEndRenderPass(draw->command_buffer);
1365
1366 if (device_data->graphic_queue->family_index != present_queue->family_index)
1367 {
1368 /* Transfer the image back to the present queue family; the image
1369 * layout was already changed to PRESENT_SRC_KHR by the render pass.
1370 */
1371 imb.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
1372 imb.pNext = nullptr;
1373 imb.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
1374 imb.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
1375 imb.oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
1376 imb.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
1377 imb.image = data->images[image_index];
1378 imb.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1379 imb.subresourceRange.baseMipLevel = 0;
1380 imb.subresourceRange.levelCount = 1;
1381 imb.subresourceRange.baseArrayLayer = 0;
1382 imb.subresourceRange.layerCount = 1;
1383 imb.srcQueueFamilyIndex = device_data->graphic_queue->family_index;
1384 imb.dstQueueFamilyIndex = present_queue->family_index;
1385 device_data->vtable.CmdPipelineBarrier(draw->command_buffer,
1386 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
1387 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
1388 0, /* dependency flags */
1389 0, nullptr, /* memory barriers */
1390 0, nullptr, /* buffer memory barriers */
1391 1, &imb); /* image memory barriers */
1392 }
1393
1394 device_data->vtable.EndCommandBuffer(draw->command_buffer);
1395
1396 /* When presenting on a different queue than where we're drawing the
1397 * overlay *AND* when the application does not provide a semaphore to
1398 * vkQueuePresent, insert our own cross engine synchronization
1399 * semaphore.
1400 */
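/* This is done with two submissions: an empty batch on the present queue
 * signaling cross_engine_semaphore, then the overlay draw on the graphics
 * queue waiting on that semaphore. */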
1401 if (n_wait_semaphores == 0 && device_data->graphic_queue->queue != present_queue->queue) {
1402 VkPipelineStageFlags stages_wait = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
1403 VkSubmitInfo submit_info = {};
1404 submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
1405 submit_info.commandBufferCount = 0;
1406 submit_info.pWaitDstStageMask = &stages_wait;
1407 submit_info.waitSemaphoreCount = 0;
1408 submit_info.signalSemaphoreCount = 1;
1409 submit_info.pSignalSemaphores = &draw->cross_engine_semaphore;
1410
1411 device_data->vtable.QueueSubmit(present_queue->queue, 1, &submit_info, VK_NULL_HANDLE);
1412
1413 submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
1414 submit_info.commandBufferCount = 1;
1415 submit_info.pWaitDstStageMask = &stages_wait;
1416 submit_info.pCommandBuffers = &draw->command_buffer;
1417 submit_info.waitSemaphoreCount = 1;
1418 submit_info.pWaitSemaphores = &draw->cross_engine_semaphore;
1419 submit_info.signalSemaphoreCount = 1;
1420 submit_info.pSignalSemaphores = &draw->semaphore;
1421
1422 device_data->vtable.QueueSubmit(device_data->graphic_queue->queue, 1, &submit_info, draw->fence);
1423 } else {
1424 VkPipelineStageFlags *stages_wait = (VkPipelineStageFlags*) malloc(sizeof(VkPipelineStageFlags) * n_wait_semaphores);
1425 for (unsigned i = 0; i < n_wait_semaphores; i++)
1426 {
1427 // wait in the fragment stage until the swapchain image is ready
1428 stages_wait[i] = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
1429 }
1430
1431 VkSubmitInfo submit_info = {};
1432 submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
1433 submit_info.commandBufferCount = 1;
1434 submit_info.pCommandBuffers = &draw->command_buffer;
1435 submit_info.pWaitDstStageMask = stages_wait;
1436 submit_info.waitSemaphoreCount = n_wait_semaphores;
1437 submit_info.pWaitSemaphores = wait_semaphores;
1438 submit_info.signalSemaphoreCount = 1;
1439 submit_info.pSignalSemaphores = &draw->semaphore;
1440
1441 device_data->vtable.QueueSubmit(device_data->graphic_queue->queue, 1, &submit_info, draw->fence);
1442
1443 free(stages_wait);
1444 }
1445
1446 return draw;
1447 }
1448
1449 static const uint32_t overlay_vert_spv[] = {
1450 #include "overlay.vert.spv.h"
1451 };
1452 static const uint32_t overlay_frag_spv[] = {
1453 #include "overlay.frag.spv.h"
1454 };
1455
1456 static void setup_swapchain_data_pipeline(struct swapchain_data *data)
1457 {
1458 struct device_data *device_data = data->device;
1459 VkShaderModule vert_module, frag_module;
1460
1461 /* Create shader modules */
1462 VkShaderModuleCreateInfo vert_info = {};
1463 vert_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
1464 vert_info.codeSize = sizeof(overlay_vert_spv);
1465 vert_info.pCode = overlay_vert_spv;
1466 VK_CHECK(device_data->vtable.CreateShaderModule(device_data->device,
1467 &vert_info, NULL, &vert_module));
1468 VkShaderModuleCreateInfo frag_info = {};
1469 frag_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
1470 frag_info.codeSize = sizeof(overlay_frag_spv);
1471 frag_info.pCode = (uint32_t*)overlay_frag_spv;
1472 VK_CHECK(device_data->vtable.CreateShaderModule(device_data->device,
1473 &frag_info, NULL, &frag_module));
1474
1475 /* Font sampler */
1476 VkSamplerCreateInfo sampler_info = {};
1477 sampler_info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
1478 sampler_info.magFilter = VK_FILTER_LINEAR;
1479 sampler_info.minFilter = VK_FILTER_LINEAR;
1480 sampler_info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
1481 sampler_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
1482 sampler_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
1483 sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
1484 sampler_info.minLod = -1000;
1485 sampler_info.maxLod = 1000;
1486 sampler_info.maxAnisotropy = 1.0f;
1487 VK_CHECK(device_data->vtable.CreateSampler(device_data->device, &sampler_info,
1488 NULL, &data->font_sampler));
1489
1490 /* Descriptor pool */
1491 VkDescriptorPoolSize sampler_pool_size = {};
1492 sampler_pool_size.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
1493 sampler_pool_size.descriptorCount = 1;
1494 VkDescriptorPoolCreateInfo desc_pool_info = {};
1495 desc_pool_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
1496 desc_pool_info.maxSets = 1;
1497 desc_pool_info.poolSizeCount = 1;
1498 desc_pool_info.pPoolSizes = &sampler_pool_size;
1499 VK_CHECK(device_data->vtable.CreateDescriptorPool(device_data->device,
1500 &desc_pool_info,
1501 NULL, &data->descriptor_pool));
1502
1503 /* Descriptor layout */
1504 VkSampler sampler[1] = { data->font_sampler };
1505 VkDescriptorSetLayoutBinding binding[1] = {};
1506 binding[0].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
1507 binding[0].descriptorCount = 1;
1508 binding[0].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
1509 binding[0].pImmutableSamplers = sampler;
1510 VkDescriptorSetLayoutCreateInfo set_layout_info = {};
1511 set_layout_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
1512 set_layout_info.bindingCount = 1;
1513 set_layout_info.pBindings = binding;
1514 VK_CHECK(device_data->vtable.CreateDescriptorSetLayout(device_data->device,
1515 &set_layout_info,
1516 NULL, &data->descriptor_layout));
1517
1518 /* Descriptor set */
1519 VkDescriptorSetAllocateInfo alloc_info = {};
1520 alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
1521 alloc_info.descriptorPool = data->descriptor_pool;
1522 alloc_info.descriptorSetCount = 1;
1523 alloc_info.pSetLayouts = &data->descriptor_layout;
1524 VK_CHECK(device_data->vtable.AllocateDescriptorSets(device_data->device,
1525 &alloc_info,
1526 &data->descriptor_set));
1527
1528 /* Constants: we are using 'vec2 offset' and 'vec2 scale' instead of a full
1529 * 3d projection matrix
1530 */
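/* Rough sketch of how overlay.vert is assumed to consume these four floats
 * (one vec2 scale and one vec2 translate, matching ImGui's usual Vulkan
 * backend), mapping ImGui's pixel-space positions into clip space:
 *
 *    gl_Position = vec4(in_pos * u_scale + u_translate, 0.0, 1.0);
 *
 * The exact member order is defined by the shader source, not here.
 */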
1531 VkPushConstantRange push_constants[1] = {};
1532 push_constants[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
1533 push_constants[0].offset = sizeof(float) * 0;
1534 push_constants[0].size = sizeof(float) * 4;
1535 VkPipelineLayoutCreateInfo layout_info = {};
1536 layout_info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
1537 layout_info.setLayoutCount = 1;
1538 layout_info.pSetLayouts = &data->descriptor_layout;
1539 layout_info.pushConstantRangeCount = 1;
1540 layout_info.pPushConstantRanges = push_constants;
1541 VK_CHECK(device_data->vtable.CreatePipelineLayout(device_data->device,
1542 &layout_info,
1543 NULL, &data->pipeline_layout));
1544
1545 VkPipelineShaderStageCreateInfo stage[2] = {};
1546 stage[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
1547 stage[0].stage = VK_SHADER_STAGE_VERTEX_BIT;
1548 stage[0].module = vert_module;
1549 stage[0].pName = "main";
1550 stage[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
1551 stage[1].stage = VK_SHADER_STAGE_FRAGMENT_BIT;
1552 stage[1].module = frag_module;
1553 stage[1].pName = "main";
1554
1555 VkVertexInputBindingDescription binding_desc[1] = {};
1556 binding_desc[0].stride = sizeof(ImDrawVert);
1557 binding_desc[0].inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
1558
1559 VkVertexInputAttributeDescription attribute_desc[3] = {};
1560 attribute_desc[0].location = 0;
1561 attribute_desc[0].binding = binding_desc[0].binding;
1562 attribute_desc[0].format = VK_FORMAT_R32G32_SFLOAT;
1563 attribute_desc[0].offset = IM_OFFSETOF(ImDrawVert, pos);
1564 attribute_desc[1].location = 1;
1565 attribute_desc[1].binding = binding_desc[0].binding;
1566 attribute_desc[1].format = VK_FORMAT_R32G32_SFLOAT;
1567 attribute_desc[1].offset = IM_OFFSETOF(ImDrawVert, uv);
1568 attribute_desc[2].location = 2;
1569 attribute_desc[2].binding = binding_desc[0].binding;
1570 attribute_desc[2].format = VK_FORMAT_R8G8B8A8_UNORM;
1571 attribute_desc[2].offset = IM_OFFSETOF(ImDrawVert, col);
1572
1573 VkPipelineVertexInputStateCreateInfo vertex_info = {};
1574 vertex_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
1575 vertex_info.vertexBindingDescriptionCount = 1;
1576 vertex_info.pVertexBindingDescriptions = binding_desc;
1577 vertex_info.vertexAttributeDescriptionCount = 3;
1578 vertex_info.pVertexAttributeDescriptions = attribute_desc;
1579
1580 VkPipelineInputAssemblyStateCreateInfo ia_info = {};
1581 ia_info.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
1582 ia_info.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
1583
1584 VkPipelineViewportStateCreateInfo viewport_info = {};
1585 viewport_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
1586 viewport_info.viewportCount = 1;
1587 viewport_info.scissorCount = 1;
1588
1589 VkPipelineRasterizationStateCreateInfo raster_info = {};
1590 raster_info.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
1591 raster_info.polygonMode = VK_POLYGON_MODE_FILL;
1592 raster_info.cullMode = VK_CULL_MODE_NONE;
1593 raster_info.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
1594 raster_info.lineWidth = 1.0f;
1595
1596 VkPipelineMultisampleStateCreateInfo ms_info = {};
1597 ms_info.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
1598 ms_info.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
1599
1600 VkPipelineColorBlendAttachmentState color_attachment[1] = {};
1601 color_attachment[0].blendEnable = VK_TRUE;
1602 color_attachment[0].srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA;
1603 color_attachment[0].dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
1604 color_attachment[0].colorBlendOp = VK_BLEND_OP_ADD;
1605 color_attachment[0].srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
1606 color_attachment[0].dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
1607 color_attachment[0].alphaBlendOp = VK_BLEND_OP_ADD;
1608 color_attachment[0].colorWriteMask = VK_COLOR_COMPONENT_R_BIT |
1609 VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
1610
1611 VkPipelineDepthStencilStateCreateInfo depth_info = {};
1612 depth_info.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
1613
1614 VkPipelineColorBlendStateCreateInfo blend_info = {};
1615 blend_info.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
1616 blend_info.attachmentCount = 1;
1617 blend_info.pAttachments = color_attachment;
1618
1619 VkDynamicState dynamic_states[2] = { VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR };
1620 VkPipelineDynamicStateCreateInfo dynamic_state = {};
1621 dynamic_state.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
1622 dynamic_state.dynamicStateCount = (uint32_t)IM_ARRAYSIZE(dynamic_states);
1623 dynamic_state.pDynamicStates = dynamic_states;
1624
1625 VkGraphicsPipelineCreateInfo info = {};
1626 info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
1627 info.flags = 0;
1628 info.stageCount = 2;
1629 info.pStages = stage;
1630 info.pVertexInputState = &vertex_info;
1631 info.pInputAssemblyState = &ia_info;
1632 info.pViewportState = &viewport_info;
1633 info.pRasterizationState = &raster_info;
1634 info.pMultisampleState = &ms_info;
1635 info.pDepthStencilState = &depth_info;
1636 info.pColorBlendState = &blend_info;
1637 info.pDynamicState = &dynamic_state;
1638 info.layout = data->pipeline_layout;
1639 info.renderPass = data->render_pass;
1640 VK_CHECK(
1641 device_data->vtable.CreateGraphicsPipelines(device_data->device, VK_NULL_HANDLE,
1642 1, &info,
1643 NULL, &data->pipeline));
1644
1645 device_data->vtable.DestroyShaderModule(device_data->device, vert_module, NULL);
1646 device_data->vtable.DestroyShaderModule(device_data->device, frag_module, NULL);
1647
1648 ImGuiIO& io = ImGui::GetIO();
1649 unsigned char* pixels;
1650 int width, height;
1651 io.Fonts->GetTexDataAsRGBA32(&pixels, &width, &height);
1652
1653 /* Font image */
1654 VkImageCreateInfo image_info = {};
1655 image_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
1656 image_info.imageType = VK_IMAGE_TYPE_2D;
1657 image_info.format = VK_FORMAT_R8G8B8A8_UNORM;
1658 image_info.extent.width = width;
1659 image_info.extent.height = height;
1660 image_info.extent.depth = 1;
1661 image_info.mipLevels = 1;
1662 image_info.arrayLayers = 1;
1663 image_info.samples = VK_SAMPLE_COUNT_1_BIT;
1664 image_info.tiling = VK_IMAGE_TILING_OPTIMAL;
1665 image_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
1666 image_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
1667 image_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
1668 VK_CHECK(device_data->vtable.CreateImage(device_data->device, &image_info,
1669 NULL, &data->font_image));
1670 VkMemoryRequirements font_image_req;
1671 device_data->vtable.GetImageMemoryRequirements(device_data->device,
1672 data->font_image, &font_image_req);
1673 VkMemoryAllocateInfo image_alloc_info = {};
1674 image_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
1675 image_alloc_info.allocationSize = font_image_req.size;
1676 image_alloc_info.memoryTypeIndex = vk_memory_type(device_data,
1677 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
1678 font_image_req.memoryTypeBits);
1679 VK_CHECK(device_data->vtable.AllocateMemory(device_data->device, &image_alloc_info,
1680 NULL, &data->font_mem));
1681 VK_CHECK(device_data->vtable.BindImageMemory(device_data->device,
1682 data->font_image,
1683 data->font_mem, 0));
1684
1685 /* Font image view */
1686 VkImageViewCreateInfo view_info = {};
1687 view_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
1688 view_info.image = data->font_image;
1689 view_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
1690 view_info.format = VK_FORMAT_R8G8B8A8_UNORM;
1691 view_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1692 view_info.subresourceRange.levelCount = 1;
1693 view_info.subresourceRange.layerCount = 1;
1694 VK_CHECK(device_data->vtable.CreateImageView(device_data->device, &view_info,
1695 NULL, &data->font_image_view));
1696
1697 /* Descriptor set */
1698 VkDescriptorImageInfo desc_image[1] = {};
1699 desc_image[0].sampler = data->font_sampler;
1700 desc_image[0].imageView = data->font_image_view;
1701 desc_image[0].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
1702 VkWriteDescriptorSet write_desc[1] = {};
1703 write_desc[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
1704 write_desc[0].dstSet = data->descriptor_set;
1705 write_desc[0].descriptorCount = 1;
1706 write_desc[0].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
1707 write_desc[0].pImageInfo = desc_image;
1708 device_data->vtable.UpdateDescriptorSets(device_data->device, 1, write_desc, 0, NULL);
1709 }
1710
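/* Per-swapchain setup: create an ImGui context sized to the swapchain, a
 * render pass that loads the application's rendering and finishes in
 * PRESENT_SRC_KHR, one image view and framebuffer per swapchain image, and
 * the command pool used to record the overlay draws.
 */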
1711 static void setup_swapchain_data(struct swapchain_data *data,
1712 const VkSwapchainCreateInfoKHR *pCreateInfo)
1713 {
1714 data->width = pCreateInfo->imageExtent.width;
1715 data->height = pCreateInfo->imageExtent.height;
1716 data->format = pCreateInfo->imageFormat;
1717
1718 data->imgui_context = ImGui::CreateContext();
1719 ImGui::SetCurrentContext(data->imgui_context);
1720
1721 ImGui::GetIO().IniFilename = NULL;
1722 ImGui::GetIO().DisplaySize = ImVec2((float)data->width, (float)data->height);
1723
1724 struct device_data *device_data = data->device;
1725
1726 /* Render pass */
1727 VkAttachmentDescription attachment_desc = {};
1728 attachment_desc.format = pCreateInfo->imageFormat;
1729 attachment_desc.samples = VK_SAMPLE_COUNT_1_BIT;
1730 attachment_desc.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
1731 attachment_desc.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
1732 attachment_desc.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
1733 attachment_desc.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
1734 attachment_desc.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
1735 attachment_desc.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
1736 VkAttachmentReference color_attachment = {};
1737 color_attachment.attachment = 0;
1738 color_attachment.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
1739 VkSubpassDescription subpass = {};
1740 subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
1741 subpass.colorAttachmentCount = 1;
1742 subpass.pColorAttachments = &color_attachment;
1743 VkSubpassDependency dependency = {};
1744 dependency.srcSubpass = VK_SUBPASS_EXTERNAL;
1745 dependency.dstSubpass = 0;
1746 dependency.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
1747 dependency.dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
1748 dependency.srcAccessMask = 0;
1749 dependency.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
1750 VkRenderPassCreateInfo render_pass_info = {};
1751 render_pass_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
1752 render_pass_info.attachmentCount = 1;
1753 render_pass_info.pAttachments = &attachment_desc;
1754 render_pass_info.subpassCount = 1;
1755 render_pass_info.pSubpasses = &subpass;
1756 render_pass_info.dependencyCount = 1;
1757 render_pass_info.pDependencies = &dependency;
1758 VK_CHECK(device_data->vtable.CreateRenderPass(device_data->device,
1759 &render_pass_info,
1760 NULL, &data->render_pass));
1761
1762 setup_swapchain_data_pipeline(data);
1763
1764 VK_CHECK(device_data->vtable.GetSwapchainImagesKHR(device_data->device,
1765 data->swapchain,
1766 &data->n_images,
1767 NULL));
1768
1769 data->images = ralloc_array(data, VkImage, data->n_images);
1770 data->image_views = ralloc_array(data, VkImageView, data->n_images);
1771 data->framebuffers = ralloc_array(data, VkFramebuffer, data->n_images);
1772
1773 VK_CHECK(device_data->vtable.GetSwapchainImagesKHR(device_data->device,
1774 data->swapchain,
1775 &data->n_images,
1776 data->images));
1777
1778 /* Image views */
1779 VkImageViewCreateInfo view_info = {};
1780 view_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
1781 view_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
1782 view_info.format = pCreateInfo->imageFormat;
1783 view_info.components.r = VK_COMPONENT_SWIZZLE_R;
1784 view_info.components.g = VK_COMPONENT_SWIZZLE_G;
1785 view_info.components.b = VK_COMPONENT_SWIZZLE_B;
1786 view_info.components.a = VK_COMPONENT_SWIZZLE_A;
1787 view_info.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
1788 for (uint32_t i = 0; i < data->n_images; i++) {
1789 view_info.image = data->images[i];
1790 VK_CHECK(device_data->vtable.CreateImageView(device_data->device,
1791 &view_info, NULL,
1792 &data->image_views[i]));
1793 }
1794
1795 /* Framebuffers */
1796 VkImageView attachment[1];
1797 VkFramebufferCreateInfo fb_info = {};
1798 fb_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
1799 fb_info.renderPass = data->render_pass;
1800 fb_info.attachmentCount = 1;
1801 fb_info.pAttachments = attachment;
1802 fb_info.width = data->width;
1803 fb_info.height = data->height;
1804 fb_info.layers = 1;
1805 for (uint32_t i = 0; i < data->n_images; i++) {
1806 attachment[0] = data->image_views[i];
1807 VK_CHECK(device_data->vtable.CreateFramebuffer(device_data->device, &fb_info,
1808 NULL, &data->framebuffers[i]));
1809 }
1810
1811 /* Command buffer pool */
1812 VkCommandPoolCreateInfo cmd_buffer_pool_info = {};
1813 cmd_buffer_pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
1814 cmd_buffer_pool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
1815 cmd_buffer_pool_info.queueFamilyIndex = device_data->graphic_queue->family_index;
1816 VK_CHECK(device_data->vtable.CreateCommandPool(device_data->device,
1817 &cmd_buffer_pool_info,
1818 NULL, &data->command_pool));
1819 }
1820
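/* Tears down everything the setup functions above created: per-draw
 * buffers/semaphores/fences, per-image views and framebuffers, the render
 * pass, command pool, pipeline objects, descriptors, font resources and the
 * ImGui context.
 */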
1821 static void shutdown_swapchain_data(struct swapchain_data *data)
1822 {
1823 struct device_data *device_data = data->device;
1824
1825 list_for_each_entry_safe(struct overlay_draw, draw, &data->draws, link) {
1826 device_data->vtable.DestroySemaphore(device_data->device, draw->cross_engine_semaphore, NULL);
1827 device_data->vtable.DestroySemaphore(device_data->device, draw->semaphore, NULL);
1828 device_data->vtable.DestroyFence(device_data->device, draw->fence, NULL);
1829 device_data->vtable.DestroyBuffer(device_data->device, draw->vertex_buffer, NULL);
1830 device_data->vtable.DestroyBuffer(device_data->device, draw->index_buffer, NULL);
1831 device_data->vtable.FreeMemory(device_data->device, draw->vertex_buffer_mem, NULL);
1832 device_data->vtable.FreeMemory(device_data->device, draw->index_buffer_mem, NULL);
1833 }
1834
1835 for (uint32_t i = 0; i < data->n_images; i++) {
1836 device_data->vtable.DestroyImageView(device_data->device, data->image_views[i], NULL);
1837 device_data->vtable.DestroyFramebuffer(device_data->device, data->framebuffers[i], NULL);
1838 }
1839
1840 device_data->vtable.DestroyRenderPass(device_data->device, data->render_pass, NULL);
1841
1842 device_data->vtable.DestroyCommandPool(device_data->device, data->command_pool, NULL);
1843
1844 device_data->vtable.DestroyPipeline(device_data->device, data->pipeline, NULL);
1845 device_data->vtable.DestroyPipelineLayout(device_data->device, data->pipeline_layout, NULL);
1846
1847 device_data->vtable.DestroyDescriptorPool(device_data->device,
1848 data->descriptor_pool, NULL);
1849 device_data->vtable.DestroyDescriptorSetLayout(device_data->device,
1850 data->descriptor_layout, NULL);
1851
1852 device_data->vtable.DestroySampler(device_data->device, data->font_sampler, NULL);
1853 device_data->vtable.DestroyImageView(device_data->device, data->font_image_view, NULL);
1854 device_data->vtable.DestroyImage(device_data->device, data->font_image, NULL);
1855 device_data->vtable.FreeMemory(device_data->device, data->font_mem, NULL);
1856
1857 device_data->vtable.DestroyBuffer(device_data->device, data->upload_font_buffer, NULL);
1858 device_data->vtable.FreeMemory(device_data->device, data->upload_font_buffer_mem, NULL);
1859
1860 ImGui::DestroyContext(data->imgui_context);
1861 }
1862
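/* Called on the present path: snapshot the frame statistics and, unless the
 * overlay is disabled, render it into the image about to be presented. The
 * returned draw (if any) carries the semaphore the present must wait on.
 */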
1863 static struct overlay_draw *before_present(struct swapchain_data *swapchain_data,
1864 struct queue_data *present_queue,
1865 const VkSemaphore *wait_semaphores,
1866 unsigned n_wait_semaphores,
1867 unsigned imageIndex)
1868 {
1869 struct instance_data *instance_data = swapchain_data->device->instance;
1870 struct overlay_draw *draw = NULL;
1871
1872 snapshot_swapchain_frame(swapchain_data);
1873
1874 if (!instance_data->params.no_display && swapchain_data->n_frames > 0) {
1875 compute_swapchain_display(swapchain_data);
1876 draw = render_swapchain_display(swapchain_data, present_queue,
1877 wait_semaphores, n_wait_semaphores,
1878 imageIndex);
1879 }
1880
1881 return draw;
1882 }
1883
1884 static VkResult overlay_CreateSwapchainKHR(
1885 VkDevice device,
1886 const VkSwapchainCreateInfoKHR* pCreateInfo,
1887 const VkAllocationCallbacks* pAllocator,
1888 VkSwapchainKHR* pSwapchain)
1889 {
1890 struct device_data *device_data = FIND(struct device_data, device);
1891 VkResult result = device_data->vtable.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
1892 if (result != VK_SUCCESS) return result;
1893
1894 struct swapchain_data *swapchain_data = new_swapchain_data(*pSwapchain, device_data);
1895 setup_swapchain_data(swapchain_data, pCreateInfo);
1896 return result;
1897 }
1898
1899 static void overlay_DestroySwapchainKHR(
1900 VkDevice device,
1901 VkSwapchainKHR swapchain,
1902 const VkAllocationCallbacks* pAllocator)
1903 {
1904 struct device_data *device_data = FIND(struct device_data, device);
1905 struct instance_data *instance_data = device_data->instance;
1906 if (swapchain == VK_NULL_HANDLE) {
1907 device_data->vtable.DestroySwapchainKHR(device, swapchain, pAllocator);
1908 return;
1909 }
1910
1911 if (instance_data->output_file_fd) {
1912 fclose(instance_data->output_file_fd);
1913 instance_data->output_file_fd = NULL;
1914 }
1915
1916 struct swapchain_data *swapchain_data =
1917 FIND(struct swapchain_data, swapchain);
1918
1919 shutdown_swapchain_data(swapchain_data);
1920 swapchain_data->device->vtable.DestroySwapchainKHR(device, swapchain, pAllocator);
1921 destroy_swapchain_data(swapchain_data);
1922 }
1923
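/* Reads back the pipeline statistics / timestamp queries of every command
 * buffer submitted since the last present, then presents each swapchain
 * individually. When the overlay is displayed, each present waits on the
 * overlay draw's semaphore rather than the application's semaphores.
 */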
1924 static VkResult overlay_QueuePresentKHR(
1925 VkQueue queue,
1926 const VkPresentInfoKHR* pPresentInfo)
1927 {
1928 struct queue_data *queue_data = FIND(struct queue_data, queue);
1929 struct device_data *device_data = queue_data->device;
1930 struct instance_data *instance_data = device_data->instance;
1931 uint32_t query_results[OVERLAY_QUERY_COUNT];
1932
1933 device_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_frame]++;
1934
1935 if (list_length(&queue_data->running_command_buffer) > 0) {
1936 /* Before getting the query results, make sure the operations have
1937 * completed.
1938 */
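      /* This relies on an empty vkQueueSubmit that only carries a fence: the
       * fence signals once all work previously submitted to this queue has
       * completed, without queuing any new command buffers.
       */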
1939 VK_CHECK(device_data->vtable.ResetFences(device_data->device,
1940 1, &queue_data->queries_fence));
1941 VK_CHECK(device_data->vtable.QueueSubmit(queue, 0, NULL, queue_data->queries_fence));
1942 VK_CHECK(device_data->vtable.WaitForFences(device_data->device,
1943 1, &queue_data->queries_fence,
1944 VK_FALSE, UINT64_MAX));
1945
1946 /* Now get the results. */
1947 list_for_each_entry_safe(struct command_buffer_data, cmd_buffer_data,
1948 &queue_data->running_command_buffer, link) {
1949 list_delinit(&cmd_buffer_data->link);
1950
1951 if (cmd_buffer_data->pipeline_query_pool) {
1952 memset(query_results, 0, sizeof(query_results));
1953 VK_CHECK(device_data->vtable.GetQueryPoolResults(device_data->device,
1954 cmd_buffer_data->pipeline_query_pool,
1955 cmd_buffer_data->query_index, 1,
1956 sizeof(uint32_t) * OVERLAY_QUERY_COUNT,
1957 query_results, 0, VK_QUERY_RESULT_WAIT_BIT));
1958
1959 for (uint32_t i = OVERLAY_PARAM_ENABLED_vertices;
1960 i <= OVERLAY_PARAM_ENABLED_compute_invocations; i++) {
1961 device_data->frame_stats.stats[i] += query_results[i - OVERLAY_PARAM_ENABLED_vertices];
1962 }
1963 }
1964 if (cmd_buffer_data->timestamp_query_pool) {
1965 uint64_t gpu_timestamps[2] = { 0 };
1966 VK_CHECK(device_data->vtable.GetQueryPoolResults(device_data->device,
1967 cmd_buffer_data->timestamp_query_pool,
1968 cmd_buffer_data->query_index * 2, 2,
1969 2 * sizeof(uint64_t), gpu_timestamps, sizeof(uint64_t),
1970 VK_QUERY_RESULT_WAIT_BIT | VK_QUERY_RESULT_64_BIT));
1971
1972 gpu_timestamps[0] &= queue_data->timestamp_mask;
1973 gpu_timestamps[1] &= queue_data->timestamp_mask;
1974 device_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_gpu_timing] +=
1975 (gpu_timestamps[1] - gpu_timestamps[0]) *
1976 device_data->properties.limits.timestampPeriod;
1977 }
1978 }
1979 }
1980
1981 /* When the overlay is not displayed (no_display) we just split the present
1982  * per swapchain. Otherwise we also need our overlay drawing semaphore in the
1983  * list of semaphores to wait on; if we don't do that, the presented picture
1984  * might have incomplete overlay drawings. */
1985 VkResult result = VK_SUCCESS;
1986 if (instance_data->params.no_display) {
1987 for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) {
1988 VkSwapchainKHR swapchain = pPresentInfo->pSwapchains[i];
1989 struct swapchain_data *swapchain_data =
1990 FIND(struct swapchain_data, swapchain);
1991
1992 uint32_t image_index = pPresentInfo->pImageIndices[i];
1993
1994 before_present(swapchain_data,
1995 queue_data,
1996 pPresentInfo->pWaitSemaphores,
1997 pPresentInfo->waitSemaphoreCount,
1998 image_index);
1999
2000 VkPresentInfoKHR present_info = *pPresentInfo;
2001 present_info.swapchainCount = 1;
2002 present_info.pSwapchains = &swapchain;
2003 present_info.pImageIndices = &image_index;
2004
2005 uint64_t ts0 = os_time_get();
2006 result = queue_data->device->vtable.QueuePresentKHR(queue, &present_info);
2007 uint64_t ts1 = os_time_get();
2008 swapchain_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_present_timing] += ts1 - ts0;
2009 }
2010 } else {
2011 for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) {
2012 VkSwapchainKHR swapchain = pPresentInfo->pSwapchains[i];
2013 struct swapchain_data *swapchain_data =
2014 FIND(struct swapchain_data, swapchain);
2015
2016 uint32_t image_index = pPresentInfo->pImageIndices[i];
2017
2018 VkPresentInfoKHR present_info = *pPresentInfo;
2019 present_info.swapchainCount = 1;
2020 present_info.pSwapchains = &swapchain;
2021 present_info.pImageIndices = &image_index;
2022
2023 struct overlay_draw *draw = before_present(swapchain_data,
2024 queue_data,
2025 pPresentInfo->pWaitSemaphores,
2026 pPresentInfo->waitSemaphoreCount,
2027 image_index);
2028
2029 /* Because the submission of the overlay draw already waits on the
2030  * semaphores handed to us for present, this present operation doesn't
2031  * need to wait on them as well; it can just wait on the overlay
2032  * submission semaphore.
2033  */
2034 present_info.pWaitSemaphores = &draw->semaphore;
2035 present_info.waitSemaphoreCount = 1;
2036
2037 uint64_t ts0 = os_time_get();
2038 VkResult chain_result = queue_data->device->vtable.QueuePresentKHR(queue, &present_info);
2039 uint64_t ts1 = os_time_get();
2040 swapchain_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_present_timing] += ts1 - ts0;
2041 if (pPresentInfo->pResults)
2042 pPresentInfo->pResults[i] = chain_result;
2043 if (chain_result != VK_SUCCESS && result == VK_SUCCESS)
2044 result = chain_result;
2045 }
2046 }
2047 return result;
2048 }
2049
2050 static VkResult overlay_AcquireNextImageKHR(
2051 VkDevice device,
2052 VkSwapchainKHR swapchain,
2053 uint64_t timeout,
2054 VkSemaphore semaphore,
2055 VkFence fence,
2056 uint32_t* pImageIndex)
2057 {
2058 struct swapchain_data *swapchain_data =
2059 FIND(struct swapchain_data, swapchain);
2060 struct device_data *device_data = swapchain_data->device;
2061
2062 uint64_t ts0 = os_time_get();
2063 VkResult result = device_data->vtable.AcquireNextImageKHR(device, swapchain, timeout,
2064 semaphore, fence, pImageIndex);
2065 uint64_t ts1 = os_time_get();
2066
2067 swapchain_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_acquire_timing] += ts1 - ts0;
2068 swapchain_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_acquire]++;
2069
2070 return result;
2071 }
2072
2073 static VkResult overlay_AcquireNextImage2KHR(
2074 VkDevice device,
2075 const VkAcquireNextImageInfoKHR* pAcquireInfo,
2076 uint32_t* pImageIndex)
2077 {
2078 struct swapchain_data *swapchain_data =
2079 FIND(struct swapchain_data, pAcquireInfo->swapchain);
2080 struct device_data *device_data = swapchain_data->device;
2081
2082 uint64_t ts0 = os_time_get();
2083 VkResult result = device_data->vtable.AcquireNextImage2KHR(device, pAcquireInfo, pImageIndex);
2084 uint64_t ts1 = os_time_get();
2085
2086 swapchain_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_acquire_timing] += ts1 - ts0;
2087 swapchain_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_acquire]++;
2088
2089 return result;
2090 }
2091
2092 static void overlay_CmdDraw(
2093 VkCommandBuffer commandBuffer,
2094 uint32_t vertexCount,
2095 uint32_t instanceCount,
2096 uint32_t firstVertex,
2097 uint32_t firstInstance)
2098 {
2099 struct command_buffer_data *cmd_buffer_data =
2100 FIND(struct command_buffer_data, commandBuffer);
2101 cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_draw]++;
2102 struct device_data *device_data = cmd_buffer_data->device;
2103 device_data->vtable.CmdDraw(commandBuffer, vertexCount, instanceCount,
2104 firstVertex, firstInstance);
2105 }
2106
2107 static void overlay_CmdDrawIndexed(
2108 VkCommandBuffer commandBuffer,
2109 uint32_t indexCount,
2110 uint32_t instanceCount,
2111 uint32_t firstIndex,
2112 int32_t vertexOffset,
2113 uint32_t firstInstance)
2114 {
2115 struct command_buffer_data *cmd_buffer_data =
2116 FIND(struct command_buffer_data, commandBuffer);
2117 cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_draw_indexed]++;
2118 struct device_data *device_data = cmd_buffer_data->device;
2119 device_data->vtable.CmdDrawIndexed(commandBuffer, indexCount, instanceCount,
2120 firstIndex, vertexOffset, firstInstance);
2121 }
2122
2123 static void overlay_CmdDrawIndirect(
2124 VkCommandBuffer commandBuffer,
2125 VkBuffer buffer,
2126 VkDeviceSize offset,
2127 uint32_t drawCount,
2128 uint32_t stride)
2129 {
2130 struct command_buffer_data *cmd_buffer_data =
2131 FIND(struct command_buffer_data, commandBuffer);
2132 cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_draw_indirect]++;
2133 struct device_data *device_data = cmd_buffer_data->device;
2134 device_data->vtable.CmdDrawIndirect(commandBuffer, buffer, offset, drawCount, stride);
2135 }
2136
2137 static void overlay_CmdDrawIndexedIndirect(
2138 VkCommandBuffer commandBuffer,
2139 VkBuffer buffer,
2140 VkDeviceSize offset,
2141 uint32_t drawCount,
2142 uint32_t stride)
2143 {
2144 struct command_buffer_data *cmd_buffer_data =
2145 FIND(struct command_buffer_data, commandBuffer);
2146 cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_draw_indexed_indirect]++;
2147 struct device_data *device_data = cmd_buffer_data->device;
2148 device_data->vtable.CmdDrawIndexedIndirect(commandBuffer, buffer, offset, drawCount, stride);
2149 }
2150
2151 static void overlay_CmdDrawIndirectCount(
2152 VkCommandBuffer commandBuffer,
2153 VkBuffer buffer,
2154 VkDeviceSize offset,
2155 VkBuffer countBuffer,
2156 VkDeviceSize countBufferOffset,
2157 uint32_t maxDrawCount,
2158 uint32_t stride)
2159 {
2160 struct command_buffer_data *cmd_buffer_data =
2161 FIND(struct command_buffer_data, commandBuffer);
2162 cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_draw_indirect_count]++;
2163 struct device_data *device_data = cmd_buffer_data->device;
2164 device_data->vtable.CmdDrawIndirectCount(commandBuffer, buffer, offset,
2165 countBuffer, countBufferOffset,
2166 maxDrawCount, stride);
2167 }
2168
2169 static void overlay_CmdDrawIndexedIndirectCount(
2170 VkCommandBuffer commandBuffer,
2171 VkBuffer buffer,
2172 VkDeviceSize offset,
2173 VkBuffer countBuffer,
2174 VkDeviceSize countBufferOffset,
2175 uint32_t maxDrawCount,
2176 uint32_t stride)
2177 {
2178 struct command_buffer_data *cmd_buffer_data =
2179 FIND(struct command_buffer_data, commandBuffer);
2180 cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_draw_indexed_indirect_count]++;
2181 struct device_data *device_data = cmd_buffer_data->device;
2182 device_data->vtable.CmdDrawIndexedIndirectCount(commandBuffer, buffer, offset,
2183 countBuffer, countBufferOffset,
2184 maxDrawCount, stride);
2185 }
2186
2187 static void overlay_CmdDispatch(
2188 VkCommandBuffer commandBuffer,
2189 uint32_t groupCountX,
2190 uint32_t groupCountY,
2191 uint32_t groupCountZ)
2192 {
2193 struct command_buffer_data *cmd_buffer_data =
2194 FIND(struct command_buffer_data, commandBuffer);
2195 cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_dispatch]++;
2196 struct device_data *device_data = cmd_buffer_data->device;
2197 device_data->vtable.CmdDispatch(commandBuffer, groupCountX, groupCountY, groupCountZ);
2198 }
2199
2200 static void overlay_CmdDispatchIndirect(
2201 VkCommandBuffer commandBuffer,
2202 VkBuffer buffer,
2203 VkDeviceSize offset)
2204 {
2205 struct command_buffer_data *cmd_buffer_data =
2206 FIND(struct command_buffer_data, commandBuffer);
2207 cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_dispatch_indirect]++;
2208 struct device_data *device_data = cmd_buffer_data->device;
2209 device_data->vtable.CmdDispatchIndirect(commandBuffer, buffer, offset);
2210 }
2211
2212 static void overlay_CmdBindPipeline(
2213 VkCommandBuffer commandBuffer,
2214 VkPipelineBindPoint pipelineBindPoint,
2215 VkPipeline pipeline)
2216 {
2217 struct command_buffer_data *cmd_buffer_data =
2218 FIND(struct command_buffer_data, commandBuffer);
2219 switch (pipelineBindPoint) {
2220 case VK_PIPELINE_BIND_POINT_GRAPHICS: cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_pipeline_graphics]++; break;
2221 case VK_PIPELINE_BIND_POINT_COMPUTE: cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_pipeline_compute]++; break;
2222 case VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR: cmd_buffer_data->stats.stats[OVERLAY_PARAM_ENABLED_pipeline_raytracing]++; break;
2223 default: break;
2224 }
2225 struct device_data *device_data = cmd_buffer_data->device;
2226 device_data->vtable.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
2227 }
2228
2229 static VkResult overlay_BeginCommandBuffer(
2230 VkCommandBuffer commandBuffer,
2231 const VkCommandBufferBeginInfo* pBeginInfo)
2232 {
2233 struct command_buffer_data *cmd_buffer_data =
2234 FIND(struct command_buffer_data, commandBuffer);
2235 struct device_data *device_data = cmd_buffer_data->device;
2236
2237 memset(&cmd_buffer_data->stats, 0, sizeof(cmd_buffer_data->stats));
2238
2239 /* We don't record any query in secondary command buffers; we just make
2240  * sure they have the right inheritance info.
2241  */
2242 if (cmd_buffer_data->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
2243 VkCommandBufferBeginInfo begin_info = *pBeginInfo;
2244
2245 struct VkBaseOutStructure *new_pnext =
2246 clone_chain((const struct VkBaseInStructure *)pBeginInfo->pNext);
2247 VkCommandBufferInheritanceInfo inhe_info;
2248
2249 /* If there was no pNext chain given or we managed to copy it, we can
2250 * add our stuff in there.
2251 *
2252 * Otherwise, keep the old pointer. We failed to copy the pNext chain,
2253 * meaning there is an unknown extension somewhere in there.
2254 */
2255 if (new_pnext || pBeginInfo->pNext == NULL) {
2256 begin_info.pNext = new_pnext;
2257
2258 VkCommandBufferInheritanceInfo *parent_inhe_info = (VkCommandBufferInheritanceInfo *)
2259 vk_find_struct(new_pnext, COMMAND_BUFFER_INHERITANCE_INFO);
2260 inhe_info = (VkCommandBufferInheritanceInfo) {
2261 VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
2262 NULL,
2263 VK_NULL_HANDLE,
2264 0,
2265 VK_NULL_HANDLE,
2266 VK_FALSE,
2267 0,
2268 overlay_query_flags,
2269 };
2270
2271 if (parent_inhe_info)
2272 parent_inhe_info->pipelineStatistics = overlay_query_flags;
2273 else
2274 __vk_append_struct(&begin_info, &inhe_info);
2275 }
2276
2277 VkResult result = device_data->vtable.BeginCommandBuffer(
2278 commandBuffer, &begin_info);
2279
2280 free_chain(new_pnext);
2281
2282 return result;
2283 }
2284
2285 /* Otherwise record a begin query as first command. */
2286 VkResult result = device_data->vtable.BeginCommandBuffer(commandBuffer, pBeginInfo);
2287
2288 if (result == VK_SUCCESS) {
2289 if (cmd_buffer_data->pipeline_query_pool) {
2290 device_data->vtable.CmdResetQueryPool(commandBuffer,
2291 cmd_buffer_data->pipeline_query_pool,
2292 cmd_buffer_data->query_index, 1);
2293 }
2294 if (cmd_buffer_data->timestamp_query_pool) {
2295 device_data->vtable.CmdResetQueryPool(commandBuffer,
2296 cmd_buffer_data->timestamp_query_pool,
2297 cmd_buffer_data->query_index * 2, 2);
2298 }
2299 if (cmd_buffer_data->pipeline_query_pool) {
2300 device_data->vtable.CmdBeginQuery(commandBuffer,
2301 cmd_buffer_data->pipeline_query_pool,
2302 cmd_buffer_data->query_index, 0);
2303 }
2304 if (cmd_buffer_data->timestamp_query_pool) {
2305 device_data->vtable.CmdWriteTimestamp(commandBuffer,
2306 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
2307 cmd_buffer_data->timestamp_query_pool,
2308 cmd_buffer_data->query_index * 2);
2309 }
2310 }
2311
2312 return result;
2313 }
2314
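/* Writes the closing timestamp and ends the pipeline statistics query opened
 * in overlay_BeginCommandBuffer() before chaining down to the driver.
 */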
2315 static VkResult overlay_EndCommandBuffer(
2316 VkCommandBuffer commandBuffer)
2317 {
2318 struct command_buffer_data *cmd_buffer_data =
2319 FIND(struct command_buffer_data, commandBuffer);
2320 struct device_data *device_data = cmd_buffer_data->device;
2321
2322 if (cmd_buffer_data->timestamp_query_pool) {
2323 device_data->vtable.CmdWriteTimestamp(commandBuffer,
2324 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
2325 cmd_buffer_data->timestamp_query_pool,
2326 cmd_buffer_data->query_index * 2 + 1);
2327 }
2328 if (cmd_buffer_data->pipeline_query_pool) {
2329 device_data->vtable.CmdEndQuery(commandBuffer,
2330 cmd_buffer_data->pipeline_query_pool,
2331 cmd_buffer_data->query_index);
2332 }
2333
2334 return device_data->vtable.EndCommandBuffer(commandBuffer);
2335 }
2336
2337 static VkResult overlay_ResetCommandBuffer(
2338 VkCommandBuffer commandBuffer,
2339 VkCommandBufferResetFlags flags)
2340 {
2341 struct command_buffer_data *cmd_buffer_data =
2342 FIND(struct command_buffer_data, commandBuffer);
2343 struct device_data *device_data = cmd_buffer_data->device;
2344
2345 memset(&cmd_buffer_data->stats, 0, sizeof(cmd_buffer_data->stats));
2346
2347 return device_data->vtable.ResetCommandBuffer(commandBuffer, flags);
2348 }
2349
2350 static void overlay_CmdExecuteCommands(
2351 VkCommandBuffer commandBuffer,
2352 uint32_t commandBufferCount,
2353 const VkCommandBuffer* pCommandBuffers)
2354 {
2355 struct command_buffer_data *cmd_buffer_data =
2356 FIND(struct command_buffer_data, commandBuffer);
2357 struct device_data *device_data = cmd_buffer_data->device;
2358
2359 /* Add the stats of the executed command buffers to the primary one. */
2360 for (uint32_t c = 0; c < commandBufferCount; c++) {
2361 struct command_buffer_data *sec_cmd_buffer_data =
2362 FIND(struct command_buffer_data, pCommandBuffers[c]);
2363
2364 for (uint32_t s = 0; s < OVERLAY_PARAM_ENABLED_MAX; s++)
2365 cmd_buffer_data->stats.stats[s] += sec_cmd_buffer_data->stats.stats[s];
2366 }
2367
2368 device_data->vtable.CmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers);
2369 }
2370
2371 static VkResult overlay_AllocateCommandBuffers(
2372 VkDevice device,
2373 const VkCommandBufferAllocateInfo* pAllocateInfo,
2374 VkCommandBuffer* pCommandBuffers)
2375 {
2376 struct device_data *device_data = FIND(struct device_data, device);
2377 VkResult result =
2378 device_data->vtable.AllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers);
2379 if (result != VK_SUCCESS)
2380 return result;
2381
2382 VkQueryPool pipeline_query_pool = VK_NULL_HANDLE;
2383 VkQueryPool timestamp_query_pool = VK_NULL_HANDLE;
2384 if (device_data->pipeline_statistics_enabled &&
2385 pAllocateInfo->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
2386 VkQueryPoolCreateInfo pool_info = {
2387 VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
2388 NULL,
2389 0,
2390 VK_QUERY_TYPE_PIPELINE_STATISTICS,
2391 pAllocateInfo->commandBufferCount,
2392 overlay_query_flags,
2393 };
2394 VK_CHECK(device_data->vtable.CreateQueryPool(device_data->device, &pool_info,
2395 NULL, &pipeline_query_pool));
2396 }
2397 if (device_data->instance->params.enabled[OVERLAY_PARAM_ENABLED_gpu_timing]) {
2398 VkQueryPoolCreateInfo pool_info = {
2399 VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
2400 NULL,
2401 0,
2402 VK_QUERY_TYPE_TIMESTAMP,
2403 pAllocateInfo->commandBufferCount * 2,
2404 0,
2405 };
2406 VK_CHECK(device_data->vtable.CreateQueryPool(device_data->device, &pool_info,
2407 NULL, &timestamp_query_pool));
2408 }
2409
2410 for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; i++) {
2411 new_command_buffer_data(pCommandBuffers[i], pAllocateInfo->level,
2412 pipeline_query_pool, timestamp_query_pool,
2413 i, device_data);
2414 }
2415
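   /* The two query pools are shared by every command buffer allocated in this
    * call. Remember the allocation count so overlay_FreeCommandBuffers() can
    * treat it as a reference count and destroy each pool together with the
    * last command buffer using it.
    */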
2416 if (pipeline_query_pool)
2417 map_object(HKEY(pipeline_query_pool), (void *)(uintptr_t) pAllocateInfo->commandBufferCount);
2418 if (timestamp_query_pool)
2419 map_object(HKEY(timestamp_query_pool), (void *)(uintptr_t) pAllocateInfo->commandBufferCount);
2420
2421 return result;
2422 }
2423
2424 static void overlay_FreeCommandBuffers(
2425 VkDevice device,
2426 VkCommandPool commandPool,
2427 uint32_t commandBufferCount,
2428 const VkCommandBuffer* pCommandBuffers)
2429 {
2430 struct device_data *device_data = FIND(struct device_data, device);
2431 for (uint32_t i = 0; i < commandBufferCount; i++) {
2432 struct command_buffer_data *cmd_buffer_data =
2433 FIND(struct command_buffer_data, pCommandBuffers[i]);
2434
2435 /* It is legal to free a NULL command buffer. */
2436 if (!cmd_buffer_data)
2437 continue;
2438
2439 uint64_t count = (uintptr_t)find_object_data(HKEY(cmd_buffer_data->pipeline_query_pool));
2440 if (count == 1) {
2441 unmap_object(HKEY(cmd_buffer_data->pipeline_query_pool));
2442 device_data->vtable.DestroyQueryPool(device_data->device,
2443 cmd_buffer_data->pipeline_query_pool, NULL);
2444 } else if (count != 0) {
2445 map_object(HKEY(cmd_buffer_data->pipeline_query_pool), (void *)(uintptr_t)(count - 1));
2446 }
2447 count = (uintptr_t)find_object_data(HKEY(cmd_buffer_data->timestamp_query_pool));
2448 if (count == 1) {
2449 unmap_object(HKEY(cmd_buffer_data->timestamp_query_pool));
2450 device_data->vtable.DestroyQueryPool(device_data->device,
2451 cmd_buffer_data->timestamp_query_pool, NULL);
2452 } else if (count != 0) {
2453 map_object(HKEY(cmd_buffer_data->timestamp_query_pool), (void *)(uintptr_t)(count - 1));
2454 }
2455 destroy_command_buffer_data(cmd_buffer_data);
2456 }
2457
2458 device_data->vtable.FreeCommandBuffers(device, commandPool,
2459 commandBufferCount, pCommandBuffers);
2460 }
2461
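/* Counts the submission, merges the per-command-buffer statistics into the
 * device's frame stats, and puts command buffers carrying query pools on the
 * queue's running list so their results get read back at present time.
 * overlay_QueueSubmit2() below does the same for VkSubmitInfo2.
 */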
2462 static VkResult overlay_QueueSubmit(
2463 VkQueue queue,
2464 uint32_t submitCount,
2465 const VkSubmitInfo* pSubmits,
2466 VkFence fence)
2467 {
2468 struct queue_data *queue_data = FIND(struct queue_data, queue);
2469 struct device_data *device_data = queue_data->device;
2470
2471 device_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_submit]++;
2472
2473 for (uint32_t s = 0; s < submitCount; s++) {
2474 for (uint32_t c = 0; c < pSubmits[s].commandBufferCount; c++) {
2475 struct command_buffer_data *cmd_buffer_data =
2476 FIND(struct command_buffer_data, pSubmits[s].pCommandBuffers[c]);
2477
2478 /* Merge the submitted command buffer stats into the device. */
2479 for (uint32_t st = 0; st < OVERLAY_PARAM_ENABLED_MAX; st++)
2480 device_data->frame_stats.stats[st] += cmd_buffer_data->stats.stats[st];
2481
2482 /* Attach the command buffer to the queue so we remember to read its
2483 * pipeline statistics & timestamps at QueuePresent().
2484 */
2485 if (!cmd_buffer_data->pipeline_query_pool &&
2486 !cmd_buffer_data->timestamp_query_pool)
2487 continue;
2488
2489 if (list_is_empty(&cmd_buffer_data->link)) {
2490 list_addtail(&cmd_buffer_data->link,
2491 &queue_data->running_command_buffer);
2492 } else {
2493 fprintf(stderr, "Command buffer submitted multiple times before present.\n"
2494 "This could lead to invalid data.\n");
2495 }
2496 }
2497 }
2498
2499 return device_data->vtable.QueueSubmit(queue, submitCount, pSubmits, fence);
2500 }
2501
2502 static VkResult overlay_QueueSubmit2(
2503 VkQueue queue,
2504 uint32_t submitCount,
2505 const VkSubmitInfo2* pSubmits,
2506 VkFence fence)
2507 {
2508 struct queue_data *queue_data = FIND(struct queue_data, queue);
2509 struct device_data *device_data = queue_data->device;
2510
2511 device_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_submit]++;
2512
2513 for (uint32_t s = 0; s < submitCount; s++) {
2514 for (uint32_t c = 0; c < pSubmits[s].commandBufferInfoCount; c++) {
2515 struct command_buffer_data *cmd_buffer_data =
2516 FIND(struct command_buffer_data, pSubmits[s].pCommandBufferInfos[c].commandBuffer);
2517
2518 /* Merge the submitted command buffer stats into the device. */
2519 for (uint32_t st = 0; st < OVERLAY_PARAM_ENABLED_MAX; st++)
2520 device_data->frame_stats.stats[st] += cmd_buffer_data->stats.stats[st];
2521
2522 /* Attach the command buffer to the queue so we remember to read its
2523 * pipeline statistics & timestamps at QueuePresent().
2524 */
2525 if (!cmd_buffer_data->pipeline_query_pool &&
2526 !cmd_buffer_data->timestamp_query_pool)
2527 continue;
2528
2529 if (list_is_empty(&cmd_buffer_data->link)) {
2530 list_addtail(&cmd_buffer_data->link,
2531 &queue_data->running_command_buffer);
2532 } else {
2533 fprintf(stderr, "Command buffer submitted multiple times before present.\n"
2534 "This could lead to invalid data.\n");
2535 }
2536 }
2537 }
2538
2539 return device_data->vtable.QueueSubmit2(queue, submitCount, pSubmits, fence);
2540 }
2541
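/* Standard layer chaining: fetch the next vkCreateDevice from the layer link
 * info, optionally force on the features needed for pipeline statistics,
 * then build our per-device tracking data and dispatch table.
 */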
2542 static VkResult overlay_CreateDevice(
2543 VkPhysicalDevice physicalDevice,
2544 const VkDeviceCreateInfo* pCreateInfo,
2545 const VkAllocationCallbacks* pAllocator,
2546 VkDevice* pDevice)
2547 {
2548 struct instance_data *instance_data =
2549 FIND(struct instance_data, physicalDevice);
2550 VkLayerDeviceCreateInfo *chain_info =
2551 get_device_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
2552
2553 assert(chain_info->u.pLayerInfo);
2554 PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
2555 PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
2556 PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
2557 if (fpCreateDevice == NULL) {
2558 return VK_ERROR_INITIALIZATION_FAILED;
2559 }
2560
2561 // Advance the link info for the next element on the chain
2562 chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
2563
2564 VkPhysicalDeviceFeatures device_features = {};
2565 VkPhysicalDeviceFeatures *device_features_ptr = NULL;
2566
2567 VkDeviceCreateInfo create_info = *pCreateInfo;
2568
2569 struct VkBaseOutStructure *new_pnext =
2570 clone_chain((const struct VkBaseInStructure *) pCreateInfo->pNext);
2571 if (new_pnext != NULL) {
2572 create_info.pNext = new_pnext;
2573
2574 VkPhysicalDeviceFeatures2 *device_features2 = (VkPhysicalDeviceFeatures2 *)
2575 vk_find_struct(new_pnext, PHYSICAL_DEVICE_FEATURES_2);
2576 if (device_features2) {
2577 /* Can't use device_info->pEnabledFeatures when VkPhysicalDeviceFeatures2 is present */
2578 device_features_ptr = &device_features2->features;
2579 } else {
2580 if (create_info.pEnabledFeatures)
2581 device_features = *(create_info.pEnabledFeatures);
2582 device_features_ptr = &device_features;
2583 create_info.pEnabledFeatures = &device_features;
2584 }
2585
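   /* Pipeline statistics queries need the pipelineStatisticsQuery feature,
    * and inheritedQueries so secondary command buffers can execute while the
    * per-primary-command-buffer query is active.
    */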
2586 if (instance_data->pipeline_statistics_enabled) {
2587 device_features_ptr->inheritedQueries = true;
2588 device_features_ptr->pipelineStatisticsQuery = true;
2589 }
2590 }
2591
2592 VkResult result = fpCreateDevice(physicalDevice, &create_info, pAllocator, pDevice);
2593 free_chain(new_pnext);
2594 if (result != VK_SUCCESS) return result;
2595
2596 struct device_data *device_data = new_device_data(*pDevice, instance_data);
2597 device_data->physical_device = physicalDevice;
2598 vk_device_dispatch_table_load(&device_data->vtable,
2599 fpGetDeviceProcAddr, *pDevice);
2600
2601 instance_data->pd_vtable.GetPhysicalDeviceProperties(device_data->physical_device,
2602 &device_data->properties);
2603
2604 VkLayerDeviceCreateInfo *load_data_info =
2605 get_device_chain_info(pCreateInfo, VK_LOADER_DATA_CALLBACK);
2606 device_data->set_device_loader_data = load_data_info->u.pfnSetDeviceLoaderData;
2607
2608 device_map_queues(device_data, pCreateInfo);
2609
2610 device_data->pipeline_statistics_enabled =
2611 new_pnext != NULL &&
2612 instance_data->pipeline_statistics_enabled;
2613
2614 return result;
2615 }
2616
2617 static void overlay_DestroyDevice(
2618 VkDevice device,
2619 const VkAllocationCallbacks* pAllocator)
2620 {
2621 struct device_data *device_data = FIND(struct device_data, device);
2622 device_unmap_queues(device_data);
2623 device_data->vtable.DestroyDevice(device, pAllocator);
2624 destroy_device_data(device_data);
2625 }
2626
2627 static VkResult overlay_CreateInstance(
2628 const VkInstanceCreateInfo* pCreateInfo,
2629 const VkAllocationCallbacks* pAllocator,
2630 VkInstance* pInstance)
2631 {
2632 VkLayerInstanceCreateInfo *chain_info =
2633 get_instance_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
2634
2635 assert(chain_info->u.pLayerInfo);
2636 PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr =
2637 chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
2638 PFN_vkCreateInstance fpCreateInstance =
2639 (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
2640 if (fpCreateInstance == NULL) {
2641 return VK_ERROR_INITIALIZATION_FAILED;
2642 }
2643
2644 // Advance the link info for the next element on the chain
2645 chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
2646
2647 VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
2648 if (result != VK_SUCCESS) return result;
2649
2650 struct instance_data *instance_data = new_instance_data(*pInstance);
2651 vk_instance_dispatch_table_load(&instance_data->vtable,
2652 fpGetInstanceProcAddr,
2653 instance_data->instance);
2654 vk_physical_device_dispatch_table_load(&instance_data->pd_vtable,
2655 fpGetInstanceProcAddr,
2656 instance_data->instance);
2657 instance_data_map_physical_devices(instance_data, true);
2658
2659 parse_overlay_env(&instance_data->params, getenv("VK_LAYER_MESA_OVERLAY_CONFIG"));
2660
2661 /* If there's no control file, and an output_file was specified, start
2662 * capturing fps data right away.
2663 */
2664 instance_data->capture_enabled =
2665 instance_data->output_file_fd && instance_data->params.control == NULL;
2666 instance_data->capture_started = instance_data->capture_enabled;
2667
2668 for (int i = OVERLAY_PARAM_ENABLED_vertices;
2669 i <= OVERLAY_PARAM_ENABLED_compute_invocations; i++) {
2670 if (instance_data->params.enabled[i]) {
2671 instance_data->pipeline_statistics_enabled = true;
2672 break;
2673 }
2674 }
2675
2676 return result;
2677 }
2678
2679 static void overlay_DestroyInstance(
2680 VkInstance instance,
2681 const VkAllocationCallbacks* pAllocator)
2682 {
2683 struct instance_data *instance_data = FIND(struct instance_data, instance);
2684 instance_data_map_physical_devices(instance_data, false);
2685 instance_data->vtable.DestroyInstance(instance, pAllocator);
2686 destroy_instance_data(instance_data);
2687 }
2688
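/* Entry points intercepted by this layer. Anything not listed here falls
 * through to the next layer or the driver via the dispatch tables used in
 * vkGetDeviceProcAddr() / vkGetInstanceProcAddr() below.
 */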
2689 static const struct {
2690 const char *name;
2691 void *ptr;
2692 } name_to_funcptr_map[] = {
2693 { "vkGetInstanceProcAddr", (void *) vkGetInstanceProcAddr },
2694 { "vkGetDeviceProcAddr", (void *) vkGetDeviceProcAddr },
2695 #define ADD_HOOK(fn) { "vk" # fn, (void *) overlay_ ## fn }
2696 #define ADD_ALIAS_HOOK(alias, fn) { "vk" # alias, (void *) overlay_ ## fn }
2697 ADD_HOOK(AllocateCommandBuffers),
2698 ADD_HOOK(FreeCommandBuffers),
2699 ADD_HOOK(ResetCommandBuffer),
2700 ADD_HOOK(BeginCommandBuffer),
2701 ADD_HOOK(EndCommandBuffer),
2702 ADD_HOOK(CmdExecuteCommands),
2703
2704 ADD_HOOK(CmdDraw),
2705 ADD_HOOK(CmdDrawIndexed),
2706 ADD_HOOK(CmdDrawIndirect),
2707 ADD_HOOK(CmdDrawIndexedIndirect),
2708 ADD_HOOK(CmdDispatch),
2709 ADD_HOOK(CmdDispatchIndirect),
2710 ADD_HOOK(CmdDrawIndirectCount),
2711 ADD_ALIAS_HOOK(CmdDrawIndirectCountKHR, CmdDrawIndirectCount),
2712 ADD_HOOK(CmdDrawIndexedIndirectCount),
2713 ADD_ALIAS_HOOK(CmdDrawIndexedIndirectCountKHR, CmdDrawIndexedIndirectCount),
2714
2715 ADD_HOOK(CmdBindPipeline),
2716
2717 ADD_HOOK(CreateSwapchainKHR),
2718 ADD_HOOK(QueuePresentKHR),
2719 ADD_HOOK(DestroySwapchainKHR),
2720 ADD_HOOK(AcquireNextImageKHR),
2721 ADD_HOOK(AcquireNextImage2KHR),
2722
2723 ADD_HOOK(QueueSubmit),
2724 ADD_HOOK(QueueSubmit2),
2725
2726 ADD_HOOK(CreateDevice),
2727 ADD_HOOK(DestroyDevice),
2728
2729 ADD_HOOK(CreateInstance),
2730 ADD_HOOK(DestroyInstance),
2731 #undef ADD_HOOK
2732 #undef ADD_ALIAS_HOOK
2733 };
2734
2735 static void *find_ptr(const char *name)
2736 {
2737 for (uint32_t i = 0; i < ARRAY_SIZE(name_to_funcptr_map); i++) {
2738 if (strcmp(name, name_to_funcptr_map[i].name) == 0)
2739 return name_to_funcptr_map[i].ptr;
2740 }
2741
2742 return NULL;
2743 }
2744
2745 PUBLIC VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev,
2746 const char *funcName)
2747 {
2748 void *ptr = find_ptr(funcName);
2749 if (ptr) return reinterpret_cast<PFN_vkVoidFunction>(ptr);
2750
2751 if (dev == NULL) return NULL;
2752
2753 struct device_data *device_data = FIND(struct device_data, dev);
2754 if (device_data->vtable.GetDeviceProcAddr == NULL) return NULL;
2755 return device_data->vtable.GetDeviceProcAddr(dev, funcName);
2756 }
2757
2758 PUBLIC VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance,
2759 const char *funcName)
2760 {
2761 void *ptr = find_ptr(funcName);
2762 if (ptr) return reinterpret_cast<PFN_vkVoidFunction>(ptr);
2763
2764 if (instance == NULL) return NULL;
2765
2766 struct instance_data *instance_data = FIND(struct instance_data, instance);
2767 if (instance_data->vtable.GetInstanceProcAddr == NULL) return NULL;
2768 return instance_data->vtable.GetInstanceProcAddr(instance, funcName);
2769 }
2770