/*
 * Copyright © 2024 Collabora Ltd.
 * SPDX-License-Identifier: MIT
 */

#include "vk_log.h"

#include "pan_props.h"
#include "panvk_device.h"
#include "panvk_entrypoints.h"
#include "panvk_query_pool.h"

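/* How long GetQueryPoolResults() will wait on an unavailable query before
 * declaring the device lost: two seconds, expressed in nanoseconds.
 */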
#define PANVK_QUERY_TIMEOUT 2000000000ull

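/* Create the pool-wide GPU allocations backing the queries: a report
 * buffer written by the GPU and a per-query availability object polled by
 * the CPU. Only occlusion queries are supported.
 */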
VKAPI_ATTR VkResult VKAPI_CALL
panvk_per_arch(CreateQueryPool)(VkDevice _device,
                                const VkQueryPoolCreateInfo *pCreateInfo,
                                const VkAllocationCallbacks *pAllocator,
                                VkQueryPool *pQueryPool)
{
   VK_FROM_HANDLE(panvk_device, device, _device);

   struct panvk_query_pool *pool;

   pool =
      vk_query_pool_create(&device->vk, pCreateInfo, pAllocator, sizeof(*pool));
   if (!pool)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   uint32_t reports_per_query;
   switch (pCreateInfo->queryType) {
   case VK_QUERY_TYPE_OCCLUSION: {
      /* The counter is per core on Bifrost */
#if PAN_ARCH < 9
      const struct panvk_physical_device *phys_dev =
         to_panvk_physical_device(device->vk.physical);

      panfrost_query_core_count(&phys_dev->kmod.props, &reports_per_query);
#else
      reports_per_query = 1;
#endif
      break;
   }
   default:
      unreachable("Unsupported query type");
   }

   pool->reports_per_query = reports_per_query;
   pool->query_stride = reports_per_query * sizeof(struct panvk_query_report);

   assert(pool->vk.query_count > 0);

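   /* Report storage: reports_per_query slots per query. The GPU writes one
    * report per slot and the CPU folds them into a single value at readback
    * time.
    */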
   struct panvk_pool_alloc_info alloc_info = {
      .size = pool->reports_per_query * sizeof(struct panvk_query_report) *
              pool->vk.query_count,
      .alignment = sizeof(struct panvk_query_report),
   };
   pool->mem = panvk_pool_alloc_mem(&device->mempools.rw, alloc_info);
   if (!pool->mem.bo) {
      vk_query_pool_destroy(&device->vk, pAllocator, &pool->vk);
      return vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
   }

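   /* The availability objects live in a separate allocation from the rw_nc
    * pool, presumably non-cached so that CPU polling observes GPU writes
    * without explicit cache maintenance.
    */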
   struct panvk_pool_alloc_info syncobjs_alloc_info = {
      .size = sizeof(struct panvk_query_available_obj) * pool->vk.query_count,
      .alignment = 64,
   };
   pool->available_mem =
      panvk_pool_alloc_mem(&device->mempools.rw_nc, syncobjs_alloc_info);
   if (!pool->available_mem.bo) {
      panvk_pool_free_mem(&pool->mem);
      vk_query_pool_destroy(&device->vk, pAllocator, &pool->vk);
      return vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
   }

   *pQueryPool = panvk_query_pool_to_handle(pool);

   return VK_SUCCESS;
}

VKAPI_ATTR void VKAPI_CALL
panvk_per_arch(DestroyQueryPool)(VkDevice _device, VkQueryPool queryPool,
                                 const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(panvk_device, device, _device);
   VK_FROM_HANDLE(panvk_query_pool, pool, queryPool);

   if (!pool)
      return;

   panvk_pool_free_mem(&pool->mem);
   panvk_pool_free_mem(&pool->available_mem);
   vk_query_pool_destroy(&device->vk, pAllocator, &pool->vk);
}

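/* Resetting a query only needs to clear its availability object; the
 * report memory is rewritten the next time the query is used.
 */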
VKAPI_ATTR void VKAPI_CALL
panvk_per_arch(ResetQueryPool)(VkDevice device, VkQueryPool queryPool,
                               uint32_t firstQuery, uint32_t queryCount)
{
   VK_FROM_HANDLE(panvk_query_pool, pool, queryPool);

   struct panvk_query_available_obj *available =
      panvk_query_available_host_addr(pool, firstQuery);
   memset(available, 0, queryCount * sizeof(*available));
}

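/* A query is available once the GPU has signalled its availability
 * object: a sync-object seqno on v10+ (CSF) hardware, a plain flag word
 * on older job-manager GPUs.
 */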
static bool
panvk_query_is_available(struct panvk_query_pool *pool, uint32_t query)
{
   struct panvk_query_available_obj *available =
      panvk_query_available_host_addr(pool, query);

#if PAN_ARCH >= 10
   return p_atomic_read(&available->sync_obj.seqno) != 0;
#else
   return p_atomic_read(&available->value) != 0;
#endif
}

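/* Poll for availability until PANVK_QUERY_TIMEOUT expires, checking for
 * device loss between polls so a wedged GPU doesn't stall the caller
 * forever.
 */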
static VkResult
panvk_query_wait_for_available(struct panvk_device *dev,
                               struct panvk_query_pool *pool, uint32_t query)
{
   int64_t abs_timeout_ns = os_time_get_absolute_timeout(PANVK_QUERY_TIMEOUT);

   while (os_time_get_nano() < abs_timeout_ns) {
      if (panvk_query_is_available(pool, query))
         return VK_SUCCESS;

      VkResult status = vk_device_check_status(&dev->vk);
      if (status != VK_SUCCESS)
         return status;
   }

   return vk_device_set_lost(&dev->vk, "query timeout");
}

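/* Write one result element as 32-bit or 64-bit, depending on whether the
 * caller passed VK_QUERY_RESULT_64_BIT.
 */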
static void
cpu_write_query_result(void *dst, uint32_t idx, VkQueryResultFlags flags,
                       uint64_t result)
{
   if (flags & VK_QUERY_RESULT_64_BIT) {
      uint64_t *dst64 = dst;
      dst64[idx] = result;
   } else {
      uint32_t *dst32 = dst;
      dst32[idx] = result;
   }
}

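/* On Bifrost the occlusion counter is accumulated per shader core, so sum
 * the per-core reports into the single value the API expects.
 */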
static void
cpu_write_occlusion_query_result(void *dst, uint32_t idx,
                                 VkQueryResultFlags flags,
                                 const struct panvk_query_report *src,
                                 unsigned core_count)
{
   uint64_t result = 0;

   for (uint32_t core_idx = 0; core_idx < core_count; core_idx++)
      result += src[core_idx].value;

   cpu_write_query_result(dst, idx, flags, result);
}

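/* CPU readback path. Per the Vulkan spec: results are written for
 * available queries (and for unavailable ones when PARTIAL_BIT is set),
 * the availability word follows the result element when
 * WITH_AVAILABILITY_BIT is set, and VK_NOT_READY is returned if any
 * requested query had no results written.
 */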
VKAPI_ATTR VkResult VKAPI_CALL
panvk_per_arch(GetQueryPoolResults)(VkDevice _device, VkQueryPool queryPool,
                                    uint32_t firstQuery, uint32_t queryCount,
                                    size_t dataSize, void *pData,
                                    VkDeviceSize stride,
                                    VkQueryResultFlags flags)
{
   VK_FROM_HANDLE(panvk_device, device, _device);
   VK_FROM_HANDLE(panvk_query_pool, pool, queryPool);

   if (vk_device_is_lost(&device->vk))
      return VK_ERROR_DEVICE_LOST;

   VkResult status = VK_SUCCESS;
   for (uint32_t i = 0; i < queryCount; i++) {
      const uint32_t query = firstQuery + i;

      bool available = panvk_query_is_available(pool, query);

      if (!available && (flags & VK_QUERY_RESULT_WAIT_BIT)) {
         status = panvk_query_wait_for_available(device, pool, query);
         if (status != VK_SUCCESS)
            return status;

         available = true;
      }

      bool write_results = available || (flags & VK_QUERY_RESULT_PARTIAL_BIT);

      const struct panvk_query_report *src =
         panvk_query_report_host_addr(pool, query);
      assert(i * stride < dataSize);
      void *dst = (char *)pData + i * stride;

      switch (pool->vk.query_type) {
      case VK_QUERY_TYPE_OCCLUSION: {
         if (write_results)
            cpu_write_occlusion_query_result(dst, 0, flags, src,
                                             pool->reports_per_query);
         break;
      }
      default:
         unreachable("Unsupported query type");
      }

      if (!write_results)
         status = VK_NOT_READY;

      if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
         cpu_write_query_result(dst, 1, flags, available);
   }

   return status;
}