Lines Matching full:pool

35 hk_has_available(const struct hk_query_pool *pool)  in hk_has_available()  argument
37 return pool->vk.query_type != VK_QUERY_TYPE_TIMESTAMP; in hk_has_available()
41 hk_pool_oq_index_ptr(const struct hk_query_pool *pool) in hk_pool_oq_index_ptr() argument
43 return agx_bo_map(pool->bo) + pool->query_start; in hk_pool_oq_index_ptr()
47 hk_reports_per_query(struct hk_query_pool *pool) in hk_reports_per_query() argument
49 switch (pool->vk.query_type) { in hk_reports_per_query()
55 return util_bitcount(pool->vk.pipeline_statistics); in hk_reports_per_query()
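The hk_reports_per_query() matches show that the per-query report count depends on the query type: a pipeline-statistics query needs one struct hk_query_report per enabled counter, via util_bitcount(pool->vk.pipeline_statistics). Below is a minimal standalone sketch of that rule; the helper name and the single-report default for the other types are assumptions, since only the pipeline-statistics case is visible above.

#include <vulkan/vulkan.h>

/* Hypothetical mirror of hk_reports_per_query(): one report per enabled
 * pipeline-statistics counter, a single report otherwise (assumed). */
static inline uint32_t
reports_per_query(VkQueryType type, VkQueryPipelineStatisticFlags stats)
{
   if (type == VK_QUERY_TYPE_PIPELINE_STATISTICS)
      return __builtin_popcount(stats); /* stand-in for util_bitcount() */

   return 1;
}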
65 hk_flush_if_timestamp(struct hk_cmd_buffer *cmd, struct hk_query_pool *pool) in hk_flush_if_timestamp() argument
73 if (pool->vk.query_type == VK_QUERY_TYPE_TIMESTAMP) { in hk_flush_if_timestamp()
86 struct hk_query_pool *pool; in hk_CreateQueryPool() local
96 pool = in hk_CreateQueryPool()
97 vk_query_pool_create(&dev->vk, pCreateInfo, pAllocator, sizeof(*pool)); in hk_CreateQueryPool()
98 if (!pool) in hk_CreateQueryPool()
102 pool->query_start = 0; in hk_CreateQueryPool()
103 if (hk_has_available(pool)) { in hk_CreateQueryPool()
104 pool->query_start = align(pool->vk.query_count * sizeof(uint32_t), in hk_CreateQueryPool()
108 uint32_t reports_per_query = hk_reports_per_query(pool); in hk_CreateQueryPool()
109 pool->query_stride = reports_per_query * sizeof(struct hk_query_report); in hk_CreateQueryPool()
111 if (pool->vk.query_count > 0) { in hk_CreateQueryPool()
112 uint32_t bo_size = pool->query_start; in hk_CreateQueryPool()
116 bo_size += sizeof(uint16_t) * pool->vk.query_count; in hk_CreateQueryPool()
118 bo_size += pool->query_stride * pool->vk.query_count; in hk_CreateQueryPool()
125 pool->bo = agx_bo_create(&dev->dev, bo_size, 0, flags, "Query pool"); in hk_CreateQueryPool()
126 if (!pool->bo) { in hk_CreateQueryPool()
127 hk_DestroyQueryPool(device, hk_query_pool_to_handle(pool), pAllocator); in hk_CreateQueryPool()
136 &dev->dev, pool->bo, &pool->handle, pool->bo->size, 0, in hk_CreateQueryPool()
140 hk_DestroyQueryPool(device, hk_query_pool_to_handle(pool), in hk_CreateQueryPool()
145 assert(pool->handle && "handles are nonzero"); in hk_CreateQueryPool()
149 uint16_t *oq_index = hk_pool_oq_index_ptr(pool); in hk_CreateQueryPool()
159 hk_DestroyQueryPool(device, hk_query_pool_to_handle(pool), pAllocator); in hk_CreateQueryPool()
164 assert(pool->oq_queries < occlusion_queries); in hk_CreateQueryPool()
165 oq_index[pool->oq_queries++] = index; in hk_CreateQueryPool()
168 *pQueryPool = hk_query_pool_to_handle(pool); in hk_CreateQueryPool()
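Taken together, the hk_CreateQueryPool() matches describe a single BO per pool: one uint32_t availability word per query at the start (only when hk_has_available()), then, from query_start onward, either a uint16_t occlusion-heap index per query or query_stride bytes of reports per query. The sketch below sizes such a BO under those assumptions; the align() argument is truncated in the match above, so an 8-byte alignment is assumed, and the occlusion/non-occlusion split is inferred from the lookup helpers further down.

#include <stdbool.h>
#include <stdint.h>

#define ALIGN_POT(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* Hypothetical mirror of the pool sizing: availability words at offset 0,
 * per-query payload (occlusion index table or report storage) at query_start. */
static uint32_t
pool_bo_size(uint32_t query_count, uint32_t query_stride,
             bool has_available, bool is_occlusion)
{
   uint32_t query_start =
      has_available ? ALIGN_POT(query_count * (uint32_t)sizeof(uint32_t), 8) : 0;

   uint32_t payload = is_occlusion ? query_count * (uint32_t)sizeof(uint16_t)
                                   : query_count * query_stride;

   return query_start + payload;
}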
178 VK_FROM_HANDLE(hk_query_pool, pool, queryPool); in hk_DestroyQueryPool()
180 if (!pool) in hk_DestroyQueryPool()
183 uint16_t *oq_index = hk_pool_oq_index_ptr(pool); in hk_DestroyQueryPool()
185 for (unsigned i = 0; i < pool->oq_queries; ++i) { in hk_DestroyQueryPool()
189 if (pool->handle) in hk_DestroyQueryPool()
190 dev->dev.ops.bo_unbind_object(&dev->dev, pool->handle, 0); in hk_DestroyQueryPool()
192 agx_bo_unreference(&dev->dev, pool->bo); in hk_DestroyQueryPool()
193 vk_query_pool_destroy(&dev->vk, pAllocator, &pool->vk); in hk_DestroyQueryPool()
197 hk_query_available_addr(struct hk_query_pool *pool, uint32_t query) in hk_query_available_addr() argument
199 assert(hk_has_available(pool)); in hk_query_available_addr()
200 assert(query < pool->vk.query_count); in hk_query_available_addr()
201 return pool->bo->va->addr + query * sizeof(uint32_t); in hk_query_available_addr()
205 hk_query_available_map(struct hk_query_pool *pool, uint32_t query) in hk_query_available_map() argument
207 assert(hk_has_available(pool)); in hk_query_available_map()
208 assert(query < pool->vk.query_count); in hk_query_available_map()
209 return (uint32_t *)agx_bo_map(pool->bo) + query; in hk_query_available_map()
213 hk_query_offset(struct hk_query_pool *pool, uint32_t query) in hk_query_offset() argument
215 assert(query < pool->vk.query_count); in hk_query_offset()
216 return pool->query_start + query * pool->query_stride; in hk_query_offset()
220 hk_query_report_addr(struct hk_device *dev, struct hk_query_pool *pool, in hk_query_report_addr() argument
223 if (pool->oq_queries) { in hk_query_report_addr()
224 uint16_t *oq_index = hk_pool_oq_index_ptr(pool); in hk_query_report_addr()
228 return pool->bo->va->addr + hk_query_offset(pool, query); in hk_query_report_addr()
233 hk_query_report_map(struct hk_device *dev, struct hk_query_pool *pool, in hk_query_report_map() argument
236 if (pool->oq_queries) { in hk_query_report_map()
238 uint16_t *oq_index = hk_pool_oq_index_ptr(pool); in hk_query_report_map()
242 return (void *)((char *)agx_bo_map(pool->bo) + in hk_query_report_map()
243 hk_query_offset(pool, query)); in hk_query_report_map()
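The two lookup helpers above encode where a query's data actually lives: occlusion pools keep only a uint16_t remapping table at query_start (hk_pool_oq_index_ptr()), which selects a slot in the device-wide occlusion-query heap, while every other pool stores its reports in its own BO at hk_query_offset(). The following is a hedged sketch of that address resolution; the names and the occlusion slot size are assumptions, since the heap addressing itself is not visible in the matched lines.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical mirror of hk_query_report_addr(): occlusion queries resolve
 * through the per-pool index table into a shared heap, everything else is
 * addressed directly inside the pool BO. */
static uint64_t
query_report_addr(uint64_t occlusion_heap_va, uint64_t pool_bo_va,
                  uint32_t query_start, uint32_t query_stride,
                  const uint16_t *oq_index, bool is_occlusion, uint32_t query)
{
   if (is_occlusion)
      return occlusion_heap_va + oq_index[query] * sizeof(uint64_t); /* slot size assumed */

   return pool_bo_va + query_start + (uint64_t)query * query_stride;
}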
312 * Goes through a series of consecutive query indices in the given pool,
316 emit_zero_queries(struct hk_cmd_buffer *cmd, struct hk_query_pool *pool, in emit_zero_queries() argument
323 uint64_t report = hk_query_report_addr(dev, pool, first_index + i); in emit_zero_queries()
326 if (hk_has_available(pool)) { in emit_zero_queries()
327 uint64_t available = hk_query_available_addr(pool, first_index + i); in emit_zero_queries()
334 for (unsigned j = 0; j < hk_reports_per_query(pool); ++j) { in emit_zero_queries()
344 host_zero_queries(struct hk_device *dev, struct hk_query_pool *pool, in host_zero_queries() argument
350 hk_query_report_map(dev, pool, first_index + i); in host_zero_queries()
353 if (hk_has_available(pool)) { in host_zero_queries()
354 uint32_t *available = hk_query_available_map(pool, first_index + i); in host_zero_queries()
360 for (unsigned j = 0; j < hk_reports_per_query(pool); ++j) { in host_zero_queries()
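host_zero_queries() is the CPU-side counterpart of emit_zero_queries(): for each query it zeroes every report and, when the pool tracks availability, stores an availability value. Judging by the callers, the trailing bool selects between a plain reset (hk_ResetQueryPool() passes false) and marking the query available with zeroed results (the timestamp and end-query paths pass true). A small sketch of that loop with assumed types follows.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct query_report { uint64_t value; }; /* stand-in for struct hk_query_report */

/* Hypothetical mirror of host_zero_queries(): clear the reports and write the
 * availability word for queries [first, first + count). */
static void
zero_queries(uint32_t *avail_words, struct query_report *reports,
             uint32_t reports_per_query, uint32_t first, uint32_t count,
             bool set_available)
{
   for (uint32_t i = 0; i < count; i++) {
      if (avail_words != NULL)
         avail_words[first + i] = set_available ? 1 : 0;

      memset(&reports[(size_t)(first + i) * reports_per_query], 0,
             (size_t)reports_per_query * sizeof(struct query_report));
   }
}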
370 VK_FROM_HANDLE(hk_query_pool, pool, queryPool); in hk_ResetQueryPool()
373 host_zero_queries(dev, pool, firstQuery, queryCount, false); in hk_ResetQueryPool()
381 VK_FROM_HANDLE(hk_query_pool, pool, queryPool); in hk_CmdResetQueryPool()
384 hk_flush_if_timestamp(cmd, pool); in hk_CmdResetQueryPool()
386 perf_debug(dev, "Reset query pool"); in hk_CmdResetQueryPool()
387 emit_zero_queries(cmd, pool, firstQuery, queryCount, false); in hk_CmdResetQueryPool()
396 VK_FROM_HANDLE(hk_query_pool, pool, queryPool); in hk_CmdWriteTimestamp2()
403 uint64_t report_addr = hk_query_report_addr(dev, pool, query); in hk_CmdWriteTimestamp2()
441 .handle = pool->handle, in hk_CmdWriteTimestamp2()
442 .offset_B = hk_query_offset(pool, query), in hk_CmdWriteTimestamp2()
450 * query indices in the query pool (starting at query) where N is the in hk_CmdWriteTimestamp2()
466 emit_zero_queries(cmd, pool, query + 1, num_queries - 1, true); in hk_CmdWriteTimestamp2()
471 hk_cmd_begin_end_query(struct hk_cmd_buffer *cmd, struct hk_query_pool *pool, in hk_cmd_begin_end_query() argument
478 switch (pool->vk.query_type) { in hk_cmd_begin_end_query()
480 assert(query < pool->oq_queries); in hk_cmd_begin_end_query()
490 uint16_t *oq_index = hk_pool_oq_index_ptr(pool); in hk_cmd_begin_end_query()
498 uint64_t addr = hk_query_report_addr(dev, pool, query); in hk_cmd_begin_end_query()
507 root->draw.pipeline_stats = hk_query_report_addr(dev, pool, query); in hk_cmd_begin_end_query()
508 root->draw.pipeline_stats_flags = pool->vk.pipeline_statistics; in hk_cmd_begin_end_query()
513 graphics = pool->vk.pipeline_statistics & in hk_cmd_begin_end_query()
524 perf_debug(dev, "Query ending, type %u", pool->vk.query_type); in hk_cmd_begin_end_query()
525 hk_queue_write(cmd, hk_query_available_addr(pool, query), 1, graphics); in hk_cmd_begin_end_query()
535 VK_FROM_HANDLE(hk_query_pool, pool, queryPool); in hk_CmdBeginQueryIndexedEXT()
537 hk_cmd_begin_end_query(cmd, pool, query, index, flags, false); in hk_CmdBeginQueryIndexedEXT()
545 VK_FROM_HANDLE(hk_query_pool, pool, queryPool); in hk_CmdEndQueryIndexedEXT()
548 hk_cmd_begin_end_query(cmd, pool, query, index, 0, true); in hk_CmdEndQueryIndexedEXT()
554 * the query pool (starting at query) where N is the number of bits set in hk_CmdEndQueryIndexedEXT()
567 emit_zero_queries(cmd, pool, query + 1, num_queries - 1, true); in hk_CmdEndQueryIndexedEXT()
573 hk_query_is_available(struct hk_device *dev, struct hk_query_pool *pool, in hk_query_is_available() argument
576 if (hk_has_available(pool)) { in hk_query_is_available()
577 uint32_t *available = hk_query_available_map(pool, query); in hk_query_is_available()
581 hk_query_report_map(dev, pool, query); in hk_query_is_available()
590 hk_query_wait_for_available(struct hk_device *dev, struct hk_query_pool *pool, in hk_query_wait_for_available() argument
596 if (hk_query_is_available(dev, pool, query)) in hk_query_wait_for_available()
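hk_query_wait_for_available() repeatedly tests hk_query_is_available() until the GPU has written the availability word (or, for timestamps, the report itself). Below is a sketch of such a polling loop; the retry limit and sleep interval are assumptions, not values taken from the matched lines.

#define _POSIX_C_SOURCE 199309L
#include <stdbool.h>
#include <time.h>
#include <vulkan/vulkan.h>

/* Hypothetical polling loop in the spirit of hk_query_wait_for_available():
 * re-check availability with a short back-off until it flips or an arbitrary
 * retry budget runs out. */
static VkResult
wait_for_available(bool (*is_available)(void *ctx), void *ctx)
{
   for (unsigned i = 0; i < 1000000; i++) {
      if (is_available(ctx))
         return VK_SUCCESS;

      struct timespec ts = { .tv_sec = 0, .tv_nsec = 10000 }; /* ~10 us, assumed */
      nanosleep(&ts, NULL);
   }

   return VK_TIMEOUT;
}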
627 VK_FROM_HANDLE(hk_query_pool, pool, queryPool); in hk_GetQueryPoolResults()
636 bool available = hk_query_is_available(dev, pool, query); in hk_GetQueryPoolResults()
639 status = hk_query_wait_for_available(dev, pool, query); in hk_GetQueryPoolResults()
648 const struct hk_query_report *src = hk_query_report_map(dev, pool, query); in hk_GetQueryPoolResults()
652 uint32_t reports = hk_reports_per_query(pool); in hk_GetQueryPoolResults()
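The hk_GetQueryPoolResults() matches show the usual Vulkan copy-out loop: check or wait for availability, map the query's reports, then write hk_reports_per_query() values to the destination. The writer below is a generic sketch of how such a loop honours VK_QUERY_RESULT_64_BIT; the actual hk write helper is not visible in the matched lines. With VK_QUERY_RESULT_WITH_AVAILABILITY_BIT the driver additionally appends the availability value after the reports, as the Vulkan specification requires.

#include <stdint.h>
#include <string.h>
#include <vulkan/vulkan.h>

/* Hypothetical per-value writer: append one result in 32- or 64-bit form and
 * advance the element cursor. */
static void
write_query_result(void *dst, uint32_t *cursor, VkQueryResultFlags flags,
                   uint64_t value)
{
   if (flags & VK_QUERY_RESULT_64_BIT) {
      memcpy((uint64_t *)dst + *cursor, &value, sizeof(uint64_t));
   } else {
      uint32_t v32 = (uint32_t)value;
      memcpy((uint32_t *)dst + *cursor, &v32, sizeof(uint32_t));
   }

   (*cursor)++;
}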
676 VK_FROM_HANDLE(hk_query_pool, pool, queryPool); in hk_CmdCopyQueryPoolResults()
680 hk_flush_if_timestamp(cmd, pool); in hk_CmdCopyQueryPoolResults()
686 perf_debug(dev, "Query pool copy"); in hk_CmdCopyQueryPoolResults()
690 .availability = hk_has_available(pool) ? pool->bo->va->addr : 0, in hk_CmdCopyQueryPoolResults()
691 .results = pool->oq_queries ? dev->occlusion_queries.bo->va->addr in hk_CmdCopyQueryPoolResults()
692 : pool->bo->va->addr + pool->query_start, in hk_CmdCopyQueryPoolResults()
693 .oq_index = pool->oq_queries ? pool->bo->va->addr + pool->query_start : 0, in hk_CmdCopyQueryPoolResults()
698 .reports_per_query = hk_reports_per_query(pool), in hk_CmdCopyQueryPoolResults()