/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#include "vn_query_pool.h"

#include "venus-protocol/vn_protocol_driver_query_pool.h"

#include "vn_device.h"

/* query pool commands */

VkResult
vn_CreateQueryPool(VkDevice device,
                   const VkQueryPoolCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkQueryPool *pQueryPool)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_query_pool *pool =
      vk_zalloc(alloc, sizeof(*pool), VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!pool)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&pool->base, VK_OBJECT_TYPE_QUERY_POOL, &dev->base);

   pool->allocator = *alloc;

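   /* result_array_size is the number of values each query writes back, as
    * defined by the Vulkan spec per query type: one value for occlusion and
    * timestamp queries, one per enabled statistic for pipeline statistics
    * queries, and two (primitives written and primitives needed) for
    * transform feedback queries.
    */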
   switch (pCreateInfo->queryType) {
   case VK_QUERY_TYPE_OCCLUSION:
      pool->result_array_size = 1;
      break;
   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
      pool->result_array_size =
         util_bitcount(pCreateInfo->pipelineStatistics);
      break;
   case VK_QUERY_TYPE_TIMESTAMP:
      pool->result_array_size = 1;
      break;
   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
      pool->result_array_size = 2;
      break;
   default:
      unreachable("bad query type");
      break;
   }

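   /* vn_async_* commands are encoded to the renderer without waiting for a
    * reply; the handle is usable locally as soon as the command is encoded,
    * with ordering against later commands guaranteed by the command stream.
    */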
   VkQueryPool pool_handle = vn_query_pool_to_handle(pool);
   vn_async_vkCreateQueryPool(dev->instance, device, pCreateInfo, NULL,
                              &pool_handle);

   *pQueryPool = pool_handle;

   return VK_SUCCESS;
}

void
vn_DestroyQueryPool(VkDevice device,
                    VkQueryPool queryPool,
                    const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_query_pool *pool = vn_query_pool_from_handle(queryPool);
   const VkAllocationCallbacks *alloc;

   if (!pool)
      return;

   alloc = pAllocator ? pAllocator : &pool->allocator;

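   /* destruction is asynchronous as well; since no reply is expected, the
    * local object can be finalized and freed right after encoding
    */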
   vn_async_vkDestroyQueryPool(dev->instance, device, queryPool, NULL);

   vn_object_base_fini(&pool->base);
   vk_free(alloc, pool);
}

void
vn_ResetQueryPool(VkDevice device,
                  VkQueryPool queryPool,
                  uint32_t firstQuery,
                  uint32_t queryCount)
{
   struct vn_device *dev = vn_device_from_handle(device);

   vn_async_vkResetQueryPool(dev->instance, device, queryPool, firstQuery,
                             queryCount);
}

VkResult
vn_GetQueryPoolResults(VkDevice device,
                       VkQueryPool queryPool,
                       uint32_t firstQuery,
                       uint32_t queryCount,
                       size_t dataSize,
                       void *pData,
                       VkDeviceSize stride,
                       VkQueryResultFlags flags)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_query_pool *pool = vn_query_pool_from_handle(queryPool);
   const VkAllocationCallbacks *alloc = &pool->allocator;

   const size_t result_width = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
   const size_t result_size = pool->result_array_size * result_width;
   const bool result_always_written =
      flags & (VK_QUERY_RESULT_WAIT_BIT | VK_QUERY_RESULT_PARTIAL_BIT);

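   /* Results are fetched from the renderer tightly packed, with one
    * result_width-sized availability value appended per query when
    * requested.  For example, a pipeline statistics query with two enabled
    * statistics, 64-bit results, and availability would be laid out as
    *
    *    | stat0 (8B) | stat1 (8B) | avail (8B) |   packed_stride == 24
    *
    * Without WAIT or PARTIAL, the renderer may leave unavailable queries
    * unwritten, so availability is always requested internally to tell
    * which slots are valid before unpacking into the caller's layout.
    */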
   VkQueryResultFlags packed_flags = flags;
   size_t packed_stride = result_size;
   if (!result_always_written)
      packed_flags |= VK_QUERY_RESULT_WITH_AVAILABILITY_BIT;
   if (packed_flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
      packed_stride += result_width;

   const size_t packed_size = packed_stride * queryCount;
   void *packed_data;
   if (result_always_written && packed_stride == stride) {
      packed_data = pData;
   } else {
      packed_data = vk_alloc(alloc, packed_size, VN_DEFAULT_ALIGN,
                             VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
      if (!packed_data)
         return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   /* TODO the renderer should transparently vkCmdCopyQueryPoolResults to a
    * coherent memory such that we can memcpy from the coherent memory to
    * avoid this serialized round trip.
    */
   VkResult result = vn_call_vkGetQueryPoolResults(
      dev->instance, device, queryPool, firstQuery, queryCount, packed_size,
      packed_data, packed_stride, packed_flags);

   if (packed_data == pData)
      return vn_result(dev->instance, result);

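   /* copy_size excludes the internally-requested availability value unless
    * the application itself asked for VK_QUERY_RESULT_WITH_AVAILABILITY_BIT
    */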
   const size_t copy_size =
      result_size +
      (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT ? result_width : 0);
   const void *src = packed_data;
   void *dst = pData;
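   /* On VK_SUCCESS every query was written, so all results are copied as-is.
    * On VK_NOT_READY, only available queries are copied; for unavailable
    * ones, the availability value (zero) is written when the application
    * requested it, and the rest of the slot is left undefined per the spec.
    */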
   if (result == VK_SUCCESS) {
      for (uint32_t i = 0; i < queryCount; i++) {
         memcpy(dst, src, copy_size);
         src += packed_stride;
         dst += stride;
      }
   } else if (result == VK_NOT_READY) {
      assert(!result_always_written &&
             (packed_flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT));
      if (flags & VK_QUERY_RESULT_64_BIT) {
         for (uint32_t i = 0; i < queryCount; i++) {
            const bool avail = *(const uint64_t *)(src + result_size);
            if (avail)
               memcpy(dst, src, copy_size);
            else if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
               *(uint64_t *)(dst + result_size) = 0;

            src += packed_stride;
            dst += stride;
         }
      } else {
         for (uint32_t i = 0; i < queryCount; i++) {
            const bool avail = *(const uint32_t *)(src + result_size);
            if (avail)
               memcpy(dst, src, copy_size);
            else if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
               *(uint32_t *)(dst + result_size) = 0;

            src += packed_stride;
            dst += stride;
         }
      }
   }

   vk_free(alloc, packed_data);
   return vn_result(dev->instance, result);
}