Lines Matching full:pool
100 * pool. */
101 #define query_iova(type, pool, query, field) \ argument
102 pool->bo.iova + pool->stride * (query) + offsetof(type, field)
104 #define occlusion_query_iova(pool, query, field) \ argument
105 query_iova(struct occlusion_query_slot, pool, query, field)
107 #define pipeline_stat_query_iova(pool, query, field) \ argument
108 pool->bo.iova + pool->stride * (query) + \
111 #define primitive_query_iova(pool, query, field, i) \ argument
112 query_iova(struct primitive_query_slot, pool, query, field) + \
115 #define query_available_iova(pool, query) \ argument
116 query_iova(struct query_slot, pool, query, available)
118 #define query_result_iova(pool, query, i) \ argument
119 pool->bo.iova + pool->stride * (query) + \
122 #define query_result_addr(pool, query, i) \ argument
123 pool->bo.map + pool->stride * (query) + \
129 * Returns a pointer to a given slot in a query pool.
131 static void* slot_address(struct tu_query_pool *pool, uint32_t query) in slot_address() argument
133 return (char*)pool->bo.map + query * pool->stride; in slot_address()
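Taken together, these macros and slot_address() implement one addressing scheme: query slot N starts at pool->stride * N bytes from the beginning of the pool's BO, and a field inside the slot is reached by adding its offsetof(). GPU-visible addresses start from bo.iova, CPU-visible ones from bo.map. A minimal standalone sketch of that arithmetic, with a hypothetical slot layout and demo_* names that are not the driver's:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical slot layout: availability word followed by counters. */
struct demo_slot {
   uint64_t available;
   uint64_t begin;
   uint64_t end;
   uint64_t result;
};

/* GPU-visible address of one field in one query slot. */
uint64_t
demo_field_iova(uint64_t bo_iova, uint32_t stride, uint32_t query,
                size_t field_offset)
{
   return bo_iova + (uint64_t)stride * query + field_offset;
}

/* CPU-visible address of a whole slot, as in slot_address(). */
void *
demo_slot_address(void *bo_map, uint32_t stride, uint32_t query)
{
   return (char *)bo_map + (size_t)stride * query;
}

/* demo_field_iova(iova, sizeof(struct demo_slot), q,
 *                 offsetof(struct demo_slot, begin))
 * mirrors what occlusion_query_iova(pool, q, begin) expands to. */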
164 struct tu_query_pool *pool = in tu_CreateQueryPool() local
165 vk_object_alloc(&device->vk, pAllocator, sizeof(*pool), in tu_CreateQueryPool()
167 if (!pool) in tu_CreateQueryPool()
170 VkResult result = tu_bo_init_new(device, &pool->bo, in tu_CreateQueryPool()
173 vk_object_free(&device->vk, pAllocator, pool); in tu_CreateQueryPool()
177 result = tu_bo_map(device, &pool->bo); in tu_CreateQueryPool()
179 tu_bo_finish(device, &pool->bo); in tu_CreateQueryPool()
180 vk_object_free(&device->vk, pAllocator, pool); in tu_CreateQueryPool()
185 memset(pool->bo.map, 0, pool->bo.size); in tu_CreateQueryPool()
187 pool->type = pCreateInfo->queryType; in tu_CreateQueryPool()
188 pool->stride = slot_size; in tu_CreateQueryPool()
189 pool->size = pCreateInfo->queryCount; in tu_CreateQueryPool()
190 pool->pipeline_statistics = pCreateInfo->pipelineStatistics; in tu_CreateQueryPool()
191 *pQueryPool = tu_query_pool_to_handle(pool); in tu_CreateQueryPool()
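The creation path above allocates the pool object, backs it with a BO large enough for queryCount slots of slot_size bytes (slot_size is computed from the query type on lines not matched here), maps it, and zeroes it so every query starts out unavailable. A rough standalone model of that sizing and initialization, with plain calloc standing in for the BO and illustrative names only:

#include <stdint.h>
#include <stdlib.h>

struct demo_pool {
   void    *map;      /* stands in for pool->bo.map */
   uint32_t stride;   /* bytes per query slot       */
   uint32_t size;     /* number of queries          */
};

/* Model of the allocation in tu_CreateQueryPool(): one slot per query,
 * zero-filled so each 'available' word reads as 0 (not available). */
int
demo_pool_create(struct demo_pool *pool, uint32_t query_count,
                 uint32_t slot_size)
{
   pool->map = calloc(query_count, slot_size);
   if (!pool->map)
      return -1;
   pool->stride = slot_size;
   pool->size = query_count;
   return 0;
}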
202 TU_FROM_HANDLE(tu_query_pool, pool, _pool); in tu_DestroyQueryPool()
204 if (!pool) in tu_DestroyQueryPool()
207 tu_bo_finish(device, &pool->bo); in tu_DestroyQueryPool()
208 vk_object_free(&device->vk, pAllocator, pool); in tu_DestroyQueryPool()
212 get_result_count(struct tu_query_pool *pool) in get_result_count() argument
214 switch (pool->type) { in get_result_count()
223 return util_bitcount(pool->pipeline_statistics); in get_result_count()
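get_result_count() returns how many 64-bit values one query produces: a single value for occlusion and timestamp queries, two for transform-feedback stream queries (primitives written and primitives needed), and, as line 223 shows, one per bit set in pool->pipeline_statistics for pipeline-statistics queries. A hedged standalone equivalent, using the GCC/Clang popcount builtin in place of util_bitcount() and demo enum names that stand in for the VkQueryType values:

#include <stdint.h>

enum demo_query_type {
   DEMO_QUERY_OCCLUSION,
   DEMO_QUERY_TIMESTAMP,
   DEMO_QUERY_XFB_STREAM,
   DEMO_QUERY_PIPELINE_STATISTICS,
};

uint32_t
demo_get_result_count(enum demo_query_type type, uint32_t pipeline_statistics)
{
   switch (type) {
   case DEMO_QUERY_OCCLUSION:
   case DEMO_QUERY_TIMESTAMP:
      return 1;                               /* one counter              */
   case DEMO_QUERY_XFB_STREAM:
      return 2;                               /* written + needed         */
   case DEMO_QUERY_PIPELINE_STATISTICS:
      /* one result per statistic enabled at pool creation */
      return (uint32_t)__builtin_popcount(pipeline_statistics);
   default:
      return 0;
   }
}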
265 wait_for_available(struct tu_device *device, struct tu_query_pool *pool, in wait_for_available() argument
271 struct query_slot *slot = slot_address(pool, query); in wait_for_available()
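wait_for_available() is called on the vkGetQueryPoolResults() path (line 317), presumably when VK_QUERY_RESULT_WAIT_BIT requires blocking until the GPU marks the slot finished: it looks up the slot with slot_address() (line 271) and waits for the availability word to become nonzero. A simplified polling loop under that assumption; the timeout value and structure names are made up:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

struct demo_query_slot {
   volatile uint64_t available;   /* written by the GPU when the query ends */
};

/* Spin until the slot is marked available or a deadline passes. */
bool
demo_wait_for_available(struct demo_query_slot *slot, double timeout_sec)
{
   struct timespec start, now;
   clock_gettime(CLOCK_MONOTONIC, &start);
   while (!slot->available) {
      clock_gettime(CLOCK_MONOTONIC, &now);
      double elapsed = (now.tv_sec - start.tv_sec) +
                       (now.tv_nsec - start.tv_nsec) * 1e-9;
      if (elapsed > timeout_sec)
         return false;   /* caller would map this to an error/timeout path */
   }
   return true;
}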
297 struct tu_query_pool *pool, in get_query_pool_results() argument
311 struct query_slot *slot = slot_address(pool, query); in get_query_pool_results()
313 uint32_t result_count = get_result_count(pool); in get_query_pool_results()
314 uint32_t statistics = pool->pipeline_statistics; in get_query_pool_results()
317 VkResult wait_result = wait_for_available(device, pool, query); in get_query_pool_results()
342 if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) { in get_query_pool_results()
344 result = query_result_addr(pool, query, stat_idx); in get_query_pool_results()
346 result = query_result_addr(pool, query, k); in get_query_pool_results()
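get_query_pool_results() implements the host side of vkGetQueryPoolResults(): for each query it optionally waits for availability, then copies result_count values out of the slot (for pipeline statistics, in the bit order of pool->pipeline_statistics, per lines 342-346) and appends the availability word when VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set. A condensed, 64-bit-only model of that inner copy; handling of 32-bit results and the other result flags is left out, and all names are illustrative:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Copy 'result_count' 64-bit results per query into 'dst', stepping by
 * 'dst_stride' bytes between queries. 'slot_results' and 'slot_available'
 * stand in for the mapped query slots. */
void
demo_copy_results_u64(uint64_t *dst, size_t dst_stride,
                      const uint64_t *slot_results,
                      const uint64_t *slot_available,
                      size_t slot_stride_u64,
                      uint32_t first_query, uint32_t query_count,
                      uint32_t result_count, bool with_availability)
{
   for (uint32_t i = 0; i < query_count; i++) {
      uint32_t q = first_query + i;
      const uint64_t *src = slot_results + (size_t)q * slot_stride_u64;
      uint64_t *out = (uint64_t *)((char *)dst + (size_t)i * dst_stride);
      for (uint32_t k = 0; k < result_count; k++)
         out[k] = src[k];
      if (with_availability)
         out[result_count] = slot_available[q] != 0;
   }
}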
388 TU_FROM_HANDLE(tu_query_pool, pool, queryPool); in tu_GetQueryPoolResults()
389 assert(firstQuery + queryCount <= pool->size); in tu_GetQueryPoolResults()
394 switch (pool->type) { in tu_GetQueryPoolResults()
399 return get_query_pool_results(device, pool, firstQuery, queryCount, in tu_GetQueryPoolResults()
430 struct tu_query_pool *pool, in emit_copy_query_pool_results() argument
451 uint64_t available_iova = query_available_iova(pool, query); in emit_copy_query_pool_results()
453 uint32_t result_count = get_result_count(pool); in emit_copy_query_pool_results()
454 uint32_t statistics = pool->pipeline_statistics; in emit_copy_query_pool_results()
471 if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) { in emit_copy_query_pool_results()
473 result_iova = query_result_iova(pool, query, stat_idx); in emit_copy_query_pool_results()
475 result_iova = query_result_iova(pool, query, k); in emit_copy_query_pool_results()
526 TU_FROM_HANDLE(tu_query_pool, pool, queryPool); in tu_CmdCopyQueryPoolResults()
529 assert(firstQuery + queryCount <= pool->size); in tu_CmdCopyQueryPoolResults()
531 switch (pool->type) { in tu_CmdCopyQueryPoolResults()
536 return emit_copy_query_pool_results(cmdbuf, cs, pool, firstQuery, in tu_CmdCopyQueryPoolResults()
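emit_copy_query_pool_results() performs the same copy on the GPU timeline; it reads the availability word at query_available_iova() (line 451) so the copy can honor the wait/availability flags. The destination addressing follows the vkCmdCopyQueryPoolResults() layout rule: element k of the i-th copied query lands at dstOffset + i * stride + k * result_size in the destination buffer. A small sketch of just that address computation, with illustrative names:

#include <stdint.h>

/* GPU address in the destination buffer for result element 'k' of the
 * i-th copied query, per the vkCmdCopyQueryPoolResults() layout rules. */
uint64_t
demo_copy_dst_iova(uint64_t buffer_iova, uint64_t dst_offset, uint64_t stride,
                   uint32_t i, uint32_t k, uint32_t result_size /* 4 or 8 */)
{
   return buffer_iova + dst_offset + (uint64_t)i * stride +
          (uint64_t)k * result_size;
}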
545 struct tu_query_pool *pool, in emit_reset_query_pool() argument
553 uint32_t statistics = pool->pipeline_statistics; in emit_reset_query_pool()
556 tu_cs_emit_qw(cs, query_available_iova(pool, query)); in emit_reset_query_pool()
559 for (uint32_t k = 0; k < get_result_count(pool); k++) { in emit_reset_query_pool()
562 if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) { in emit_reset_query_pool()
564 result_iova = query_result_iova(pool, query, stat_idx); in emit_reset_query_pool()
566 result_iova = query_result_iova(pool, query, k); in emit_reset_query_pool()
584 TU_FROM_HANDLE(tu_query_pool, pool, queryPool); in tu_CmdResetQueryPool()
586 switch (pool->type) { in tu_CmdResetQueryPool()
591 emit_reset_query_pool(cmdbuf, pool, firstQuery, queryCount); in tu_CmdResetQueryPool()
604 TU_FROM_HANDLE(tu_query_pool, pool, queryPool); in tu_ResetQueryPool()
607 struct query_slot *slot = slot_address(pool, i + firstQuery); in tu_ResetQueryPool()
610 for (uint32_t k = 0; k < get_result_count(pool); k++) { in tu_ResetQueryPool()
611 uint64_t *res = query_result_addr(pool, i + firstQuery, k); in tu_ResetQueryPool()
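Both reset paths return a query to its freshly created state: tu_CmdResetQueryPool() emits GPU writes of zero to the availability and result addresses (lines 556-566), while tu_ResetQueryPool() (host query reset) does the same through the CPU mapping, as on lines 607-611. A standalone model of the host-side variant with a hypothetical slot layout:

#include <stdint.h>

#define DEMO_MAX_RESULTS 16   /* arbitrary bound for the sketch */

struct demo_reset_slot {
   uint64_t available;
   uint64_t results[DEMO_MAX_RESULTS];
};

/* Host query reset: clear availability and every result value. */
void
demo_host_reset(struct demo_reset_slot *slots, uint32_t first_query,
                uint32_t query_count, uint32_t result_count)
{
   for (uint32_t i = 0; i < query_count; i++) {
      struct demo_reset_slot *slot = &slots[first_query + i];
      slot->available = 0;
      for (uint32_t k = 0; k < result_count; k++)
         slot->results[k] = 0;
   }
}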
619 struct tu_query_pool *pool, in emit_begin_occlusion_query() argument
637 uint64_t begin_iova = occlusion_query_iova(pool, query, begin); in emit_begin_occlusion_query()
651 struct tu_query_pool *pool, in emit_begin_stat_query() argument
655 uint64_t begin_iova = pipeline_stat_query_iova(pool, query, begin); in emit_begin_stat_query()
672 struct tu_query_pool *pool, in emit_begin_xfb_query() argument
677 uint64_t begin_iova = primitive_query_iova(pool, query, begin[0], 0); in emit_begin_xfb_query()
690 TU_FROM_HANDLE(tu_query_pool, pool, queryPool); in tu_CmdBeginQuery()
691 assert(query < pool->size); in tu_CmdBeginQuery()
693 switch (pool->type) { in tu_CmdBeginQuery()
699 emit_begin_occlusion_query(cmdbuf, pool, query); in tu_CmdBeginQuery()
702 emit_begin_xfb_query(cmdbuf, pool, query, 0); in tu_CmdBeginQuery()
705 emit_begin_stat_query(cmdbuf, pool, query); in tu_CmdBeginQuery()
722 TU_FROM_HANDLE(tu_query_pool, pool, queryPool); in tu_CmdBeginQueryIndexedEXT()
723 assert(query < pool->size); in tu_CmdBeginQueryIndexedEXT()
725 switch (pool->type) { in tu_CmdBeginQueryIndexedEXT()
727 emit_begin_xfb_query(cmdbuf, pool, query, index); in tu_CmdBeginQueryIndexedEXT()
736 struct tu_query_pool *pool, in emit_end_occlusion_query() argument
756 uint64_t available_iova = query_available_iova(pool, query); in emit_end_occlusion_query()
757 uint64_t begin_iova = occlusion_query_iova(pool, query, begin); in emit_end_occlusion_query()
758 uint64_t end_iova = occlusion_query_iova(pool, query, end); in emit_end_occlusion_query()
759 uint64_t result_iova = query_result_iova(pool, query, 0); in emit_end_occlusion_query()
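For occlusion queries, the begin and end emission capture sample counts into the slot's begin and end fields, and the end path resolves the result and publishes availability, which is what the four addresses on lines 756-759 are for. The arithmetic the command stream ultimately performs, modeled here on the CPU with a hypothetical slot layout:

#include <stdint.h>

struct demo_occlusion_slot {
   uint64_t available;
   uint64_t result;
   uint64_t begin;   /* sample count captured at vkCmdBeginQuery */
   uint64_t end;     /* sample count captured at vkCmdEndQuery   */
};

/* Once the end sample count lands: samples passed = end - begin,
 * then the query becomes available. */
void
demo_resolve_occlusion(struct demo_occlusion_slot *slot)
{
   slot->result = slot->end - slot->begin;
   slot->available = 1;
}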
809 struct tu_query_pool *pool, in emit_end_stat_query() argument
813 uint64_t end_iova = pipeline_stat_query_iova(pool, query, end); in emit_end_stat_query()
814 uint64_t available_iova = query_available_iova(pool, query); in emit_end_stat_query()
832 result_iova = query_result_iova(pool, query, i); in emit_end_stat_query()
833 stat_start_iova = pipeline_stat_query_iova(pool, query, begin[i]); in emit_end_stat_query()
834 stat_stop_iova = pipeline_stat_query_iova(pool, query, end[i]); in emit_end_stat_query()
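The pipeline-statistics end path does the analogous subtraction once per counter: for each statistic i it takes the begin and end snapshots (lines 833-834) and stores the delta into result i (line 832), then flags availability (line 814). A CPU-side sketch of that resolve, with illustrative names:

#include <stdint.h>

/* Resolve one pipeline-statistics query: per-counter delta of the
 * begin/end snapshots, written in counter order. */
void
demo_resolve_stats(const uint64_t *begin, const uint64_t *end,
                   uint64_t *results, uint64_t *available, uint32_t stat_count)
{
   for (uint32_t i = 0; i < stat_count; i++)
      results[i] = end[i] - begin[i];
   *available = 1;
}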
860 struct tu_query_pool *pool, in emit_end_xfb_query() argument
866 uint64_t end_iova = primitive_query_iova(pool, query, end[0], 0); in emit_end_xfb_query()
867 uint64_t result_written_iova = query_result_iova(pool, query, 0); in emit_end_xfb_query()
868 uint64_t result_generated_iova = query_result_iova(pool, query, 1); in emit_end_xfb_query()
869 uint64_t begin_written_iova = primitive_query_iova(pool, query, begin[stream_id], 0); in emit_end_xfb_query()
870 uint64_t begin_generated_iova = primitive_query_iova(pool, query, begin[stream_id], 1); in emit_end_xfb_query()
871 uint64_t end_written_iova = primitive_query_iova(pool, query, end[stream_id], 0); in emit_end_xfb_query()
872 uint64_t end_generated_iova = primitive_query_iova(pool, query, end[stream_id], 1); in emit_end_xfb_query()
873 uint64_t available_iova = query_available_iova(pool, query); in emit_end_xfb_query()
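Transform-feedback queries keep two counters per stream, so emit_end_xfb_query() resolves two results from four snapshots (lines 869-872): primitives written (result 0, line 867) and primitives generated (result 1, line 868), each as end minus begin for the query's stream, before setting availability. A sketch of that resolve with illustrative field names:

#include <stdint.h>

struct demo_xfb_counters {
   uint64_t written;     /* primitives written to the XFB buffer */
   uint64_t generated;   /* primitives generated for the stream  */
};

/* result[0] = primitives written, result[1] = primitives generated,
 * both as end-of-query minus begin-of-query for the bound stream. */
void
demo_resolve_xfb(const struct demo_xfb_counters *begin,
                 const struct demo_xfb_counters *end,
                 uint64_t result[2], uint64_t *available)
{
   result[0] = end->written - begin->written;
   result[1] = end->generated - begin->generated;
   *available = 1;
}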
911 * query pool (starting at query) where N is the number of bits set in the
929 struct tu_query_pool *pool, in handle_multiview_queries() argument
940 tu_cs_emit_qw(cs, query_available_iova(pool, query + i)); in handle_multiview_queries()
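handle_multiview_queries() covers the rule that a query used with N views occupies N consecutive pool entries: it emits writes to query_available_iova(pool, query + i) (line 940) so the follow-on entries read back as finished. A simplified host-side picture of that, assuming the extra slots keep the zeroed results from the last reset:

#include <stdint.h>

/* Mark the follow-on queries of a multiview group available; their
 * results stay at the zero written by the last reset. */
void
demo_handle_multiview(uint64_t *available, uint32_t query, uint32_t view_count)
{
   for (uint32_t i = 1; i < view_count; i++)
      available[query + i] = 1;
}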
951 TU_FROM_HANDLE(tu_query_pool, pool, queryPool); in tu_CmdEndQuery()
952 assert(query < pool->size); in tu_CmdEndQuery()
954 switch (pool->type) { in tu_CmdEndQuery()
956 emit_end_occlusion_query(cmdbuf, pool, query); in tu_CmdEndQuery()
959 emit_end_xfb_query(cmdbuf, pool, query, 0); in tu_CmdEndQuery()
962 emit_end_stat_query(cmdbuf, pool, query); in tu_CmdEndQuery()
970 handle_multiview_queries(cmdbuf, pool, query); in tu_CmdEndQuery()
980 TU_FROM_HANDLE(tu_query_pool, pool, queryPool); in tu_CmdEndQueryIndexedEXT()
981 assert(query < pool->size); in tu_CmdEndQueryIndexedEXT()
983 switch (pool->type) { in tu_CmdEndQueryIndexedEXT()
986 emit_end_xfb_query(cmdbuf, pool, query, index); in tu_CmdEndQueryIndexedEXT()
1000 TU_FROM_HANDLE(tu_query_pool, pool, queryPool); in tu_CmdWriteTimestamp()
1032 tu_cs_emit_qw(cs, query_result_iova(pool, query, 0)); in tu_CmdWriteTimestamp()
1040 tu_cs_emit_qw(cs, query_available_iova(pool, query)); in tu_CmdWriteTimestamp()
1047 * query indices in the query pool (starting at query) where N is the in tu_CmdWriteTimestamp()
1068 handle_multiview_queries(cmd, pool, query); in tu_CmdWriteTimestamp()
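vkCmdWriteTimestamp() follows the same slot protocol with no begin half: the command stream writes the GPU timestamp to the address of result 0 (line 1032), then writes the availability word (line 1040), and finally applies the multiview handling above (line 1068). Modeled on the CPU with a hypothetical slot layout:

#include <stdint.h>

struct demo_timestamp_slot {
   uint64_t available;
   uint64_t result;   /* GPU ticks; applications scale by timestampPeriod */
};

/* Record a timestamp: store the counter value, then publish availability. */
void
demo_write_timestamp(struct demo_timestamp_slot *slot, uint64_t gpu_ticks)
{
   slot->result = gpu_ticks;
   slot->available = 1;
}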