/*
 * Copyright © 2019 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "lvp_private.h"
#include "pipe/p_context.h"
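
/* A query pool is a thin wrapper around gallium pipe queries: the Vulkan
 * query type is translated to a pipe_query_type, and the pool carries one
 * struct pipe_query pointer per query slot, allocated inline after the
 * pool struct.  The pointers start out NULL; the underlying pipe queries
 * are created elsewhere in the driver, once the queries are actually used.
 */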
VkResult lvp_CreateQueryPool(
   VkDevice                                    _device,
   const VkQueryPoolCreateInfo*                pCreateInfo,
   const VkAllocationCallbacks*                pAllocator,
   VkQueryPool*                                pQueryPool)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);

   enum pipe_query_type pipeq;
   switch (pCreateInfo->queryType) {
   case VK_QUERY_TYPE_OCCLUSION:
      pipeq = PIPE_QUERY_OCCLUSION_COUNTER;
      break;
   case VK_QUERY_TYPE_TIMESTAMP:
      pipeq = PIPE_QUERY_TIMESTAMP;
      break;
   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
      pipeq = PIPE_QUERY_PIPELINE_STATISTICS;
      break;
   default:
      return VK_ERROR_FEATURE_NOT_PRESENT;
   }
   struct lvp_query_pool *pool;
   uint32_t pool_size = sizeof(*pool) + pCreateInfo->queryCount * sizeof(struct pipe_query *);

   pool = vk_zalloc2(&device->vk.alloc, pAllocator,
                     pool_size, 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!pool)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &pool->base,
                       VK_OBJECT_TYPE_QUERY_POOL);
   pool->type = pCreateInfo->queryType;
   pool->count = pCreateInfo->queryCount;
   pool->base_type = pipeq;
   pool->pipeline_stats = pCreateInfo->pipelineStatistics;

   *pQueryPool = lvp_query_pool_to_handle(pool);
   return VK_SUCCESS;
}
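
/* Destroying a pool also destroys any pipe queries that were created for
 * its slots; slots that were never used remain NULL and are skipped.
 */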
void lvp_DestroyQueryPool(
   VkDevice                                    _device,
   VkQueryPool                                 _pool,
   const VkAllocationCallbacks*                pAllocator)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_query_pool, pool, _pool);

   if (!pool)
      return;

   for (unsigned i = 0; i < pool->count; i++)
      if (pool->queries[i])
         device->queue.ctx->destroy_query(device->queue.ctx, pool->queries[i]);
   vk_object_base_finish(&pool->base);
   vk_free2(&device->vk.alloc, pAllocator, pool);
}
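
/* Results are read back on the CPU directly from the gallium context.  The
 * device is idled first so that submitted queries have finished; each result
 * is then packed as a 32- or 64-bit value according to the flags, with
 * pipeline statistics expanded into one counter per bit set in
 * pipeline_stats, 64-bit counters clamped to UINT32_MAX in 32-bit mode, and
 * an availability word appended when
 * VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is requested.
 */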
VkResult lvp_GetQueryPoolResults(
   VkDevice                                    _device,
   VkQueryPool                                 queryPool,
   uint32_t                                    firstQuery,
   uint32_t                                    queryCount,
   size_t                                      dataSize,
   void*                                       pData,
   VkDeviceSize                                stride,
   VkQueryResultFlags                          flags)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_query_pool, pool, queryPool);
   VkResult vk_result = VK_SUCCESS;

   lvp_DeviceWaitIdle(_device);

   for (unsigned i = firstQuery; i < firstQuery + queryCount; i++) {
      uint8_t *dptr = (uint8_t *)((char *)pData + (stride * (i - firstQuery)));
      union pipe_query_result result;
      bool ready = false;
      if (pool->queries[i]) {
         ready = device->queue.ctx->get_query_result(device->queue.ctx,
                                                     pool->queries[i],
                                                     (flags & VK_QUERY_RESULT_WAIT_BIT),
                                                     &result);
      } else {
         result.u64 = 0;
      }

      if (!ready && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
         vk_result = VK_NOT_READY;
      if (flags & VK_QUERY_RESULT_64_BIT) {
         if (ready || (flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
            if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
               uint32_t mask = pool->pipeline_stats;
               uint64_t *pstats = (uint64_t *)&result.pipeline_statistics;
               while (mask) {
                  uint32_t i = u_bit_scan(&mask);

                  *(uint64_t *)dptr = pstats[i];
                  dptr += 8;
               }
            } else {
               *(uint64_t *)dptr = result.u64;
               dptr += 8;
            }
         } else
            dptr += stride;

      } else {
         if (ready || (flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
            if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
               uint32_t mask = pool->pipeline_stats;
               uint64_t *pstats = (uint64_t *)&result.pipeline_statistics;
               while (mask) {
                  uint32_t i = u_bit_scan(&mask);

                  if (pstats[i] > UINT32_MAX)
                     *(uint32_t *)dptr = UINT32_MAX;
                  else
                     *(uint32_t *)dptr = pstats[i];
                  dptr += 4;
               }
            } else {
               if (result.u64 > UINT32_MAX)
                  *(uint32_t *)dptr = UINT32_MAX;
               else
                  *(uint32_t *)dptr = result.u32;
               dptr += 4;
            }
         } else
            dptr += stride;
      }

      if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
         if (flags & VK_QUERY_RESULT_64_BIT)
            *(uint64_t *)dptr = ready;
         else
            *(uint32_t *)dptr = ready;
      }
   }
   return vk_result;
}