1 /*
2  * Copyright 2016 Red Hat Inc.
3  * Based on anv:
4  * Copyright © 2015 Intel Corporation
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the next
14  * paragraph) shall be included in all copies or substantial portions of the
15  * Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23  * DEALINGS IN THE SOFTWARE.
24  */
25 
26 #include "tu_private.h"
27 
28 #include <assert.h>
29 #include <fcntl.h>
30 #include <stdbool.h>
31 #include <string.h>
32 #include <unistd.h>
33 
34 #include "adreno_pm4.xml.h"
35 #include "adreno_common.xml.h"
36 #include "a6xx.xml.h"
37 
38 #include "nir/nir_builder.h"
39 #include "util/os_time.h"
40 
41 #include "tu_cs.h"
42 
43 #define NSEC_PER_SEC 1000000000ull
44 #define WAIT_TIMEOUT 5
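/* Number of 64-bit RBBM_PRIMCTR counters captured for pipeline statistics
 * queries: registers 0..10 are exposed as LO/HI pairs, so the register
 * distance divided by 2, plus 1, gives 11 counters.
 */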
45 #define STAT_COUNT ((REG_A6XX_RBBM_PRIMCTR_10_LO - REG_A6XX_RBBM_PRIMCTR_0_LO) / 2 + 1)
46 
47 struct PACKED query_slot {
48    uint64_t available;
49 };
50 
51 struct PACKED occlusion_slot_value {
52    /* Sample counters seem to be required to be 16-byte aligned,
53     * even though this query only needs an 8-byte slot. */
54    uint64_t value;
55    uint64_t _padding;
56 };
57 
58 struct PACKED occlusion_query_slot {
59    struct query_slot common;
60    uint64_t result;
61 
62    struct occlusion_slot_value begin;
63    struct occlusion_slot_value end;
64 };
65 
66 struct PACKED timestamp_query_slot {
67    struct query_slot common;
68    uint64_t result;
69 };
70 
71 struct PACKED primitive_slot_value {
72    uint64_t values[2];
73 };
74 
75 struct PACKED pipeline_stat_query_slot {
76    struct query_slot common;
77    uint64_t results[STAT_COUNT];
78 
79    uint64_t begin[STAT_COUNT];
80    uint64_t end[STAT_COUNT];
81 };
82 
83 struct PACKED primitive_query_slot {
84    struct query_slot common;
85    /* The result of transform feedback queries is two integer values:
86     *   results[0] is the count of primitives written,
87     *   results[1] is the count of primitives generated.
88     * Per-stream counters are also stored in the 4-entry begin/end arrays below.
89     */
90    uint64_t results[2];
91 
92    /* Primitive counters also need to be 16-byte aligned. */
93    uint64_t _padding;
94 
95    struct primitive_slot_value begin[4];
96    struct primitive_slot_value end[4];
97 };
98 
99 /* Returns the IOVA of a given uint64_t field in a given slot of a query
100  * pool. */
101 #define query_iova(type, pool, query, field)                         \
102    pool->bo.iova + pool->stride * (query) + offsetof(type, field)
103 
104 #define occlusion_query_iova(pool, query, field)                     \
105    query_iova(struct occlusion_query_slot, pool, query, field)
106 
107 #define pipeline_stat_query_iova(pool, query, field)                 \
108    pool->bo.iova + pool->stride * (query) +                          \
109    offsetof(struct pipeline_stat_query_slot, field)
110 
111 #define primitive_query_iova(pool, query, field, i)                  \
112    query_iova(struct primitive_query_slot, pool, query, field) +     \
113    offsetof(struct primitive_slot_value, values[i])
114 
115 #define query_available_iova(pool, query)                            \
116    query_iova(struct query_slot, pool, query, available)
117 
118 #define query_result_iova(pool, query, i)                            \
119    pool->bo.iova + pool->stride * (query) +                          \
120    sizeof(struct query_slot) + sizeof(uint64_t) * (i)
121 
122 #define query_result_addr(pool, query, i)                            \
123    pool->bo.map + pool->stride * (query) +                           \
124    sizeof(struct query_slot) + sizeof(uint64_t) * (i)
125 
126 #define query_is_available(slot) slot->available
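/* Note: the query_result_* helpers assume that every slot type lays out its
 * common query_slot header first, immediately followed by the 64-bit result
 * array, so result i lives at slot_base + sizeof(struct query_slot) + 8 * i.
 */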
127 
128 /*
129  * Returns a pointer to a given slot in a query pool.
130  */
131 static void* slot_address(struct tu_query_pool *pool, uint32_t query)
132 {
133    return (char*)pool->bo.map + query * pool->stride;
134 }
135 
136 VkResult
137 tu_CreateQueryPool(VkDevice _device,
138                    const VkQueryPoolCreateInfo *pCreateInfo,
139                    const VkAllocationCallbacks *pAllocator,
140                    VkQueryPool *pQueryPool)
141 {
142    TU_FROM_HANDLE(tu_device, device, _device);
143    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO);
144    assert(pCreateInfo->queryCount > 0);
145 
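   /* Each query occupies one fixed-size slot in the pool's BO; pick the slot
    * size for this query type (every slot starts with the common availability
    * word). */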
146    uint32_t slot_size;
147    switch (pCreateInfo->queryType) {
148    case VK_QUERY_TYPE_OCCLUSION:
149       slot_size = sizeof(struct occlusion_query_slot);
150       break;
151    case VK_QUERY_TYPE_TIMESTAMP:
152       slot_size = sizeof(struct timestamp_query_slot);
153       break;
154    case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
155       slot_size = sizeof(struct primitive_query_slot);
156       break;
157    case VK_QUERY_TYPE_PIPELINE_STATISTICS:
158       slot_size = sizeof(struct pipeline_stat_query_slot);
159       break;
160    default:
161       unreachable("Invalid query type");
162    }
163 
164    struct tu_query_pool *pool =
165          vk_object_alloc(&device->vk, pAllocator, sizeof(*pool),
166                          VK_OBJECT_TYPE_QUERY_POOL);
167    if (!pool)
168       return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
169 
170    VkResult result = tu_bo_init_new(device, &pool->bo,
171          pCreateInfo->queryCount * slot_size, false);
172    if (result != VK_SUCCESS) {
173       vk_object_free(&device->vk, pAllocator, pool);
174       return result;
175    }
176 
177    result = tu_bo_map(device, &pool->bo);
178    if (result != VK_SUCCESS) {
179       tu_bo_finish(device, &pool->bo);
180       vk_object_free(&device->vk, pAllocator, pool);
181       return result;
182    }
183 
184    /* Initialize all query statuses to unavailable */
185    memset(pool->bo.map, 0, pool->bo.size);
186 
187    pool->type = pCreateInfo->queryType;
188    pool->stride = slot_size;
189    pool->size = pCreateInfo->queryCount;
190    pool->pipeline_statistics = pCreateInfo->pipelineStatistics;
191    *pQueryPool = tu_query_pool_to_handle(pool);
192 
193    return VK_SUCCESS;
194 }
195 
196 void
197 tu_DestroyQueryPool(VkDevice _device,
198                     VkQueryPool _pool,
199                     const VkAllocationCallbacks *pAllocator)
200 {
201    TU_FROM_HANDLE(tu_device, device, _device);
202    TU_FROM_HANDLE(tu_query_pool, pool, _pool);
203 
204    if (!pool)
205       return;
206 
207    tu_bo_finish(device, &pool->bo);
208    vk_object_free(&device->vk, pAllocator, pool);
209 }
210 
211 static uint32_t
212 get_result_count(struct tu_query_pool *pool)
213 {
214    switch (pool->type) {
215    /* Occlusion and timestamp queries write one integer value */
216    case VK_QUERY_TYPE_OCCLUSION:
217    case VK_QUERY_TYPE_TIMESTAMP:
218       return 1;
219    /* Transform feedback queries write two integer values */
220    case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
221       return 2;
222    case VK_QUERY_TYPE_PIPELINE_STATISTICS:
223       return util_bitcount(pool->pipeline_statistics);
224    default:
225       assert(!"Invalid query type");
226       return 0;
227    }
228 }
229 
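/* Maps one enabled VK_QUERY_PIPELINE_STATISTIC_* bit (consumed from
 * *statistics via u_bit_scan) to the index of the corresponding RBBM_PRIMCTR
 * counter in the slot's result array. IA vertices and VS invocations share
 * counter 0, and counter 3 has no corresponding Vulkan statistic here.
 */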
230 static uint32_t
231 statistics_index(uint32_t *statistics)
232 {
233    uint32_t stat;
234    stat = u_bit_scan(statistics);
235 
236    switch (1 << stat) {
237    case VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT:
238    case VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT:
239       return 0;
240    case VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT:
241       return 1;
242    case VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT:
243       return 2;
244    case VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT:
245       return 4;
246    case VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT:
247       return 5;
248    case VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT:
249       return 6;
250    case VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT:
251       return 7;
252    case VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT:
253       return 8;
254    case VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT:
255       return 9;
256    case VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT:
257       return 10;
258    default:
259       return 0;
260    }
261 }
262 
263 /* Wait on the availability status of a query, up to a timeout. */
264 static VkResult
265 wait_for_available(struct tu_device *device, struct tu_query_pool *pool,
266                    uint32_t query)
267 {
268    /* TODO: Use the MSM_IOVA_WAIT ioctl to wait on the available bit in a
269     * scheduler friendly way instead of busy polling once the patch has landed
270     * upstream. */
271    struct query_slot *slot = slot_address(pool, query);
272    uint64_t abs_timeout = os_time_get_absolute_timeout(
273          WAIT_TIMEOUT * NSEC_PER_SEC);
274    while(os_time_get_nano() < abs_timeout) {
275       if (query_is_available(slot))
276          return VK_SUCCESS;
277    }
278    return vk_error(device->instance, VK_TIMEOUT);
279 }
280 
281 /* Writes a query value to a buffer from the CPU. */
282 static void
283 write_query_value_cpu(char* base,
284                       uint32_t offset,
285                       uint64_t value,
286                       VkQueryResultFlags flags)
287 {
288    if (flags & VK_QUERY_RESULT_64_BIT) {
289       *(uint64_t*)(base + (offset * sizeof(uint64_t))) = value;
290    } else {
291       *(uint32_t*)(base + (offset * sizeof(uint32_t))) = value;
292    }
293 }
294 
295 static VkResult
296 get_query_pool_results(struct tu_device *device,
297                        struct tu_query_pool *pool,
298                        uint32_t firstQuery,
299                        uint32_t queryCount,
300                        size_t dataSize,
301                        void *pData,
302                        VkDeviceSize stride,
303                        VkQueryResultFlags flags)
304 {
305    assert(dataSize >= stride * queryCount);
306 
307    char *result_base = pData;
308    VkResult result = VK_SUCCESS;
309    for (uint32_t i = 0; i < queryCount; i++) {
310       uint32_t query = firstQuery + i;
311       struct query_slot *slot = slot_address(pool, query);
312       bool available = query_is_available(slot);
313       uint32_t result_count = get_result_count(pool);
314       uint32_t statistics = pool->pipeline_statistics;
315 
316       if ((flags & VK_QUERY_RESULT_WAIT_BIT) && !available) {
317          VkResult wait_result = wait_for_available(device, pool, query);
318          if (wait_result != VK_SUCCESS)
319             return wait_result;
320          available = true;
321       } else if (!(flags & VK_QUERY_RESULT_PARTIAL_BIT) && !available) {
322          /* From the Vulkan 1.1.130 spec:
323           *
324           *    If VK_QUERY_RESULT_WAIT_BIT and VK_QUERY_RESULT_PARTIAL_BIT are
325           *    both not set then no result values are written to pData for
326           *    queries that are in the unavailable state at the time of the
327           *    call, and vkGetQueryPoolResults returns VK_NOT_READY. However,
328           *    availability state is still written to pData for those queries
329           *    if VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set.
330           */
331          result = VK_NOT_READY;
332          if (!(flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)) {
333             result_base += stride;
334             continue;
335          }
336       }
337 
338       for (uint32_t k = 0; k < result_count; k++) {
339          if (available) {
340             uint64_t *result;
341 
342             if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
343                uint32_t stat_idx = statistics_index(&statistics);
344                result = query_result_addr(pool, query, stat_idx);
345             } else {
346                result = query_result_addr(pool, query, k);
347             }
348 
349             write_query_value_cpu(result_base, k, *result, flags);
350          } else if (flags & VK_QUERY_RESULT_PARTIAL_BIT)
351              /* From the Vulkan 1.1.130 spec:
352               *
353               *   If VK_QUERY_RESULT_PARTIAL_BIT is set, VK_QUERY_RESULT_WAIT_BIT
354               *   is not set, and the query’s status is unavailable, an
355               *   intermediate result value between zero and the final result
356               *   value is written to pData for that query.
357               *
358               * Just return 0 here for simplicity since it's a valid result.
359               */
360             write_query_value_cpu(result_base, k, 0, flags);
361       }
362 
363       if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
364          /* From the Vulkan 1.1.130 spec:
365           *
366           *    If VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set, the final
367           *    integer value written for each query is non-zero if the query’s
368           *    status was available or zero if the status was unavailable.
369           */
370          write_query_value_cpu(result_base, result_count, available, flags);
371 
372       result_base += stride;
373    }
374    return result;
375 }
376 
377 VkResult
378 tu_GetQueryPoolResults(VkDevice _device,
379                        VkQueryPool queryPool,
380                        uint32_t firstQuery,
381                        uint32_t queryCount,
382                        size_t dataSize,
383                        void *pData,
384                        VkDeviceSize stride,
385                        VkQueryResultFlags flags)
386 {
387    TU_FROM_HANDLE(tu_device, device, _device);
388    TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
389    assert(firstQuery + queryCount <= pool->size);
390 
391    if (tu_device_is_lost(device))
392       return VK_ERROR_DEVICE_LOST;
393 
394    switch (pool->type) {
395    case VK_QUERY_TYPE_OCCLUSION:
396    case VK_QUERY_TYPE_TIMESTAMP:
397    case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
398    case VK_QUERY_TYPE_PIPELINE_STATISTICS:
399       return get_query_pool_results(device, pool, firstQuery, queryCount,
400                                     dataSize, pData, stride, flags);
401    default:
402       assert(!"Invalid query type");
403    }
404    return VK_SUCCESS;
405 }
406 
407 /* Copies a query value from one buffer to another on the GPU. */
408 static void
409 copy_query_value_gpu(struct tu_cmd_buffer *cmdbuf,
410                      struct tu_cs *cs,
411                      uint64_t src_iova,
412                      uint64_t base_write_iova,
413                      uint32_t offset,
414                      VkQueryResultFlags flags) {
415    uint32_t element_size = flags & VK_QUERY_RESULT_64_BIT ?
416          sizeof(uint64_t) : sizeof(uint32_t);
417    uint64_t write_iova = base_write_iova + (offset * element_size);
418 
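   /* Emit a CP_MEM_TO_MEM that copies a single 32-bit or 64-bit value from
    * src_iova into the destination buffer; the DOUBLE flag selects a 64-bit
    * copy. */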
419    tu_cs_emit_pkt7(cs, CP_MEM_TO_MEM, 5);
420    uint32_t mem_to_mem_flags = flags & VK_QUERY_RESULT_64_BIT ?
421          CP_MEM_TO_MEM_0_DOUBLE : 0;
422    tu_cs_emit(cs, mem_to_mem_flags);
423    tu_cs_emit_qw(cs, write_iova);
424    tu_cs_emit_qw(cs, src_iova);
425 }
426 
427 static void
428 emit_copy_query_pool_results(struct tu_cmd_buffer *cmdbuf,
429                              struct tu_cs *cs,
430                              struct tu_query_pool *pool,
431                              uint32_t firstQuery,
432                              uint32_t queryCount,
433                              struct tu_buffer *buffer,
434                              VkDeviceSize dstOffset,
435                              VkDeviceSize stride,
436                              VkQueryResultFlags flags)
437 {
438    /* From the Vulkan 1.1.130 spec:
439     *
440     *    vkCmdCopyQueryPoolResults is guaranteed to see the effect of previous
441     *    uses of vkCmdResetQueryPool in the same queue, without any additional
442     *    synchronization.
443     *
444     * To ensure that previous writes to the available bit are coherent, first
445     * wait for all writes to complete.
446     */
447    tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
448 
449    for (uint32_t i = 0; i < queryCount; i++) {
450       uint32_t query = firstQuery + i;
451       uint64_t available_iova = query_available_iova(pool, query);
452       uint64_t buffer_iova = tu_buffer_iova(buffer) + dstOffset + i * stride;
453       uint32_t result_count = get_result_count(pool);
454       uint32_t statistics = pool->pipeline_statistics;
455 
456       /* Wait for the available bit to be set if executed with the
457        * VK_QUERY_RESULT_WAIT_BIT flag. */
458       if (flags & VK_QUERY_RESULT_WAIT_BIT) {
459          tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
460          tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
461                         CP_WAIT_REG_MEM_0_POLL_MEMORY);
462          tu_cs_emit_qw(cs, available_iova);
463          tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(0x1));
464          tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0));
465          tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));
466       }
467 
468       for (uint32_t k = 0; k < result_count; k++) {
469          uint64_t result_iova;
470 
471          if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
472             uint32_t stat_idx = statistics_index(&statistics);
473             result_iova = query_result_iova(pool, query, stat_idx);
474          } else {
475             result_iova = query_result_iova(pool, query, k);
476          }
477 
478          if (flags & VK_QUERY_RESULT_PARTIAL_BIT) {
479             /* Unconditionally copying the bo->result into the buffer here is
480              * valid because we only set bo->result on vkCmdEndQuery. Thus, even
481              * if the query is unavailable, this will copy the correct partial
482              * value of 0.
483              */
484             copy_query_value_gpu(cmdbuf, cs, result_iova, buffer_iova,
485                                  k /* offset */, flags);
486          } else {
487             /* Conditionally copy bo->result into the buffer based on whether the
488              * query is available.
489              *
490              * NOTE: For the conditional packets to be executed, CP_COND_EXEC
491              * tests that ADDR0 != 0 and ADDR1 < REF. The packet here simply tests
492              * that 0 < available < 2, aka available == 1.
493              */
494             tu_cs_reserve(cs, 7 + 6);
495             tu_cs_emit_pkt7(cs, CP_COND_EXEC, 6);
496             tu_cs_emit_qw(cs, available_iova);
497             tu_cs_emit_qw(cs, available_iova);
498             tu_cs_emit(cs, CP_COND_EXEC_4_REF(0x2));
499             tu_cs_emit(cs, 6); /* Cond execute the next 6 DWORDS */
500 
501             /* Start of conditional execution */
502             copy_query_value_gpu(cmdbuf, cs, result_iova, buffer_iova,
503                               k /* offset */, flags);
504             /* End of conditional execution */
505          }
506       }
507 
508       if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
509          copy_query_value_gpu(cmdbuf, cs, available_iova, buffer_iova,
510                               result_count /* offset */, flags);
511       }
512    }
513 }
514 
515 void
516 tu_CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer,
517                            VkQueryPool queryPool,
518                            uint32_t firstQuery,
519                            uint32_t queryCount,
520                            VkBuffer dstBuffer,
521                            VkDeviceSize dstOffset,
522                            VkDeviceSize stride,
523                            VkQueryResultFlags flags)
524 {
525    TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
526    TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
527    TU_FROM_HANDLE(tu_buffer, buffer, dstBuffer);
528    struct tu_cs *cs = &cmdbuf->cs;
529    assert(firstQuery + queryCount <= pool->size);
530 
531    switch (pool->type) {
532    case VK_QUERY_TYPE_OCCLUSION:
533    case VK_QUERY_TYPE_TIMESTAMP:
534    case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
535    case VK_QUERY_TYPE_PIPELINE_STATISTICS:
536       return emit_copy_query_pool_results(cmdbuf, cs, pool, firstQuery,
537                queryCount, buffer, dstOffset, stride, flags);
538    default:
539       assert(!"Invalid query type");
540    }
541 }
542 
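/* Reset queries on the GPU: clear the availability word and every result
 * value of each query in the range with CP_MEM_WRITE packets.
 */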
543 static void
544 emit_reset_query_pool(struct tu_cmd_buffer *cmdbuf,
545                       struct tu_query_pool *pool,
546                       uint32_t firstQuery,
547                       uint32_t queryCount)
548 {
549    struct tu_cs *cs = &cmdbuf->cs;
550 
551    for (uint32_t i = 0; i < queryCount; i++) {
552       uint32_t query = firstQuery + i;
553       uint32_t statistics = pool->pipeline_statistics;
554 
555       tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
556       tu_cs_emit_qw(cs, query_available_iova(pool, query));
557       tu_cs_emit_qw(cs, 0x0);
558 
559       for (uint32_t k = 0; k < get_result_count(pool); k++) {
560          uint64_t result_iova;
561 
562          if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
563             uint32_t stat_idx = statistics_index(&statistics);
564             result_iova = query_result_iova(pool, query, stat_idx);
565          } else {
566             result_iova = query_result_iova(pool, query, k);
567          }
568 
569          tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
570          tu_cs_emit_qw(cs, result_iova);
571          tu_cs_emit_qw(cs, 0x0);
572       }
573    }
574 
575 }
576 
577 void
578 tu_CmdResetQueryPool(VkCommandBuffer commandBuffer,
579                      VkQueryPool queryPool,
580                      uint32_t firstQuery,
581                      uint32_t queryCount)
582 {
583    TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
584    TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
585 
586    switch (pool->type) {
587    case VK_QUERY_TYPE_TIMESTAMP:
588    case VK_QUERY_TYPE_OCCLUSION:
589    case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
590    case VK_QUERY_TYPE_PIPELINE_STATISTICS:
591       emit_reset_query_pool(cmdbuf, pool, firstQuery, queryCount);
592       break;
593    default:
594       assert(!"Invalid query type");
595    }
596 }
597 
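/* Host-side reset (vkResetQueryPool, from VK_EXT_host_query_reset / Vulkan
 * 1.2): clear availability and results directly through the mapped BO, with
 * no command stream involved.
 */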
598 void
599 tu_ResetQueryPool(VkDevice device,
600                   VkQueryPool queryPool,
601                   uint32_t firstQuery,
602                   uint32_t queryCount)
603 {
604    TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
605 
606    for (uint32_t i = 0; i < queryCount; i++) {
607       struct query_slot *slot = slot_address(pool, i + firstQuery);
608       slot->available = 0;
609 
610       for (uint32_t k = 0; k < get_result_count(pool); k++) {
611          uint64_t *res = query_result_addr(pool, i + firstQuery, k);
612          *res = 0;
613       }
614    }
615 }
616 
617 static void
618 emit_begin_occlusion_query(struct tu_cmd_buffer *cmdbuf,
619                            struct tu_query_pool *pool,
620                            uint32_t query)
621 {
622    /* From the Vulkan 1.1.130 spec:
623     *
624     *    A query must begin and end inside the same subpass of a render pass
625     *    instance, or must both begin and end outside of a render pass
626     *    instance.
627     *
628     * Unlike on an immediate-mode renderer, Turnip renders all tiles on
629     * vkCmdEndRenderPass, not individually on each vkCmdDraw*. As such, if a
630     * query begins/ends inside the same subpass of a render pass, we need to
631     * record the packets on the secondary draw command stream. cmdbuf->draw_cs
632     * is then run on every tile during render, so we just need to accumulate
633     * sample counts in slot->result to compute the query result.
634     */
635    struct tu_cs *cs = cmdbuf->state.pass ? &cmdbuf->draw_cs : &cmdbuf->cs;
636 
637    uint64_t begin_iova = occlusion_query_iova(pool, query, begin);
638 
639    tu_cs_emit_regs(cs,
640                    A6XX_RB_SAMPLE_COUNT_CONTROL(.copy = true));
641 
642    tu_cs_emit_regs(cs,
643                    A6XX_RB_SAMPLE_COUNT_ADDR(.qword = begin_iova));
644 
645    tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
646    tu_cs_emit(cs, ZPASS_DONE);
647 }
648 
649 static void
650 emit_begin_stat_query(struct tu_cmd_buffer *cmdbuf,
651                       struct tu_query_pool *pool,
652                       uint32_t query)
653 {
654    struct tu_cs *cs = cmdbuf->state.pass ? &cmdbuf->draw_cs : &cmdbuf->cs;
655    uint64_t begin_iova = pipeline_stat_query_iova(pool, query, begin);
656 
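   /* Start the HW primitive counters, wait for idle, then snapshot all
    * STAT_COUNT 64-bit RBBM_PRIMCTR registers into the slot's begin[] array
    * with a single CP_REG_TO_MEM. */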
657    tu6_emit_event_write(cmdbuf, cs, START_PRIMITIVE_CTRS);
658    tu6_emit_event_write(cmdbuf, cs, RST_PIX_CNT);
659    tu6_emit_event_write(cmdbuf, cs, TILE_FLUSH);
660 
661    tu_cs_emit_wfi(cs);
662 
663    tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
664    tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(REG_A6XX_RBBM_PRIMCTR_0_LO) |
665                   CP_REG_TO_MEM_0_CNT(STAT_COUNT * 2) |
666                   CP_REG_TO_MEM_0_64B);
667    tu_cs_emit_qw(cs, begin_iova);
668 }
669 
670 static void
671 emit_begin_xfb_query(struct tu_cmd_buffer *cmdbuf,
672                      struct tu_query_pool *pool,
673                      uint32_t query,
674                      uint32_t stream_id)
675 {
676    struct tu_cs *cs = cmdbuf->state.pass ? &cmdbuf->draw_cs : &cmdbuf->cs;
677    uint64_t begin_iova = primitive_query_iova(pool, query, begin[0], 0);
678 
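   /* Point VPC_SO_STREAM_COUNTS at the slot's begin[] area and have the HW
    * dump the current written/generated primitive counts there (the begin[]
    * array appears to hold one counter pair per stream). */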
679    tu_cs_emit_regs(cs, A6XX_VPC_SO_STREAM_COUNTS(.qword = begin_iova));
680    tu6_emit_event_write(cmdbuf, cs, WRITE_PRIMITIVE_COUNTS);
681 }
682 
683 void
684 tu_CmdBeginQuery(VkCommandBuffer commandBuffer,
685                  VkQueryPool queryPool,
686                  uint32_t query,
687                  VkQueryControlFlags flags)
688 {
689    TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
690    TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
691    assert(query < pool->size);
692 
693    switch (pool->type) {
694    case VK_QUERY_TYPE_OCCLUSION:
695       /* In freedreno, there is no implementation difference between
696        * GL_SAMPLES_PASSED and GL_ANY_SAMPLES_PASSED, so we can similarly
697        * ignore the VK_QUERY_CONTROL_PRECISE_BIT flag here.
698        */
699       emit_begin_occlusion_query(cmdbuf, pool, query);
700       break;
701    case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
702       emit_begin_xfb_query(cmdbuf, pool, query, 0);
703       break;
704    case VK_QUERY_TYPE_PIPELINE_STATISTICS:
705       emit_begin_stat_query(cmdbuf, pool, query);
706       break;
707    case VK_QUERY_TYPE_TIMESTAMP:
708       unreachable("Unimplemented query type");
709    default:
710       assert(!"Invalid query type");
711    }
712 }
713 
714 void
715 tu_CmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer,
716                            VkQueryPool queryPool,
717                            uint32_t query,
718                            VkQueryControlFlags flags,
719                            uint32_t index)
720 {
721    TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
722    TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
723    assert(query < pool->size);
724 
725    switch (pool->type) {
726    case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
727       emit_begin_xfb_query(cmdbuf, pool, query, index);
728       break;
729    default:
730       assert(!"Invalid query type");
731    }
732 }
733 
734 static void
735 emit_end_occlusion_query(struct tu_cmd_buffer *cmdbuf,
736                          struct tu_query_pool *pool,
737                          uint32_t query)
738 {
739    /* Ending an occlusion query happens in a few steps:
740     *    1) Set slot->end to UINT64_MAX.
741     *    2) Set up the SAMPLE_COUNT registers and trigger a CP_EVENT_WRITE to
742     *       write the current sample count value into slot->end.
743     *    3) Since (2) is asynchronous, wait until slot->end is not equal to
744     *       UINT64_MAX before continuing via CP_WAIT_REG_MEM.
745     *    4) Accumulate the results of the query (slot->end - slot->begin) into
746     *       slot->result.
747     *    5) If vkCmdEndQuery is *not* called from within the scope of a render
748     *       pass, set the slot's available bit since the query is now done.
749     *    6) If vkCmdEndQuery *is* called from within the scope of a render
750     *       pass, we cannot mark as available yet since the commands in
751     *       draw_cs are not run until vkCmdEndRenderPass.
752     */
753    const struct tu_render_pass *pass = cmdbuf->state.pass;
754    struct tu_cs *cs = pass ? &cmdbuf->draw_cs : &cmdbuf->cs;
755 
756    uint64_t available_iova = query_available_iova(pool, query);
757    uint64_t begin_iova = occlusion_query_iova(pool, query, begin);
758    uint64_t end_iova = occlusion_query_iova(pool, query, end);
759    uint64_t result_iova = query_result_iova(pool, query, 0);
760    tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
761    tu_cs_emit_qw(cs, end_iova);
762    tu_cs_emit_qw(cs, 0xffffffffffffffffull);
763 
764    tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
765 
766    tu_cs_emit_regs(cs,
767                    A6XX_RB_SAMPLE_COUNT_CONTROL(.copy = true));
768 
769    tu_cs_emit_regs(cs,
770                    A6XX_RB_SAMPLE_COUNT_ADDR(.qword = end_iova));
771 
772    tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
773    tu_cs_emit(cs, ZPASS_DONE);
774 
775    tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
776    tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_NE) |
777                   CP_WAIT_REG_MEM_0_POLL_MEMORY);
778    tu_cs_emit_qw(cs, end_iova);
779    tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(0xffffffff));
780    tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0));
781    tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));
782 
783    /* result (dst) = result (srcA) + end (srcB) - begin (srcC) */
784    tu_cs_emit_pkt7(cs, CP_MEM_TO_MEM, 9);
785    tu_cs_emit(cs, CP_MEM_TO_MEM_0_DOUBLE | CP_MEM_TO_MEM_0_NEG_C);
786    tu_cs_emit_qw(cs, result_iova);
787    tu_cs_emit_qw(cs, result_iova);
788    tu_cs_emit_qw(cs, end_iova);
789    tu_cs_emit_qw(cs, begin_iova);
790 
791    tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
792 
793    if (pass)
794       /* Technically, queries should be tracked per-subpass, but here we track
795        * at the render pass level to simplify the code a bit. This is safe
796        * because the only commands that use the available bit are
797        * vkCmdCopyQueryPoolResults and vkCmdResetQueryPool, both of which
798        * cannot be invoked from inside a render pass scope.
799        */
800       cs = &cmdbuf->draw_epilogue_cs;
801 
802    tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
803    tu_cs_emit_qw(cs, available_iova);
804    tu_cs_emit_qw(cs, 0x1);
805 }
806 
807 static void
808 emit_end_stat_query(struct tu_cmd_buffer *cmdbuf,
809                     struct tu_query_pool *pool,
810                     uint32_t query)
811 {
812    struct tu_cs *cs = cmdbuf->state.pass ? &cmdbuf->draw_cs : &cmdbuf->cs;
813    uint64_t end_iova = pipeline_stat_query_iova(pool, query, end);
814    uint64_t available_iova = query_available_iova(pool, query);
815    uint64_t result_iova;
816    uint64_t stat_start_iova;
817    uint64_t stat_stop_iova;
818 
819    tu6_emit_event_write(cmdbuf, cs, STOP_PRIMITIVE_CTRS);
820    tu6_emit_event_write(cmdbuf, cs, RST_VTX_CNT);
821    tu6_emit_event_write(cmdbuf, cs, STAT_EVENT);
822 
823    tu_cs_emit_wfi(cs);
824 
825    tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
826    tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(REG_A6XX_RBBM_PRIMCTR_0_LO) |
827                   CP_REG_TO_MEM_0_CNT(STAT_COUNT * 2) |
828                   CP_REG_TO_MEM_0_64B);
829    tu_cs_emit_qw(cs, end_iova);
830 
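   /* Accumulate result[i] += end[i] - begin[i] on the GPU: CP_MEM_TO_MEM with
    * DOUBLE operates on 64-bit values and NEG_C subtracts the begin
    * snapshot. */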
831    for (int i = 0; i < STAT_COUNT; i++) {
832       result_iova = query_result_iova(pool, query, i);
833       stat_start_iova = pipeline_stat_query_iova(pool, query, begin[i]);
834       stat_stop_iova = pipeline_stat_query_iova(pool, query, end[i]);
835 
836       tu_cs_emit_pkt7(cs, CP_MEM_TO_MEM, 9);
837       tu_cs_emit(cs, CP_MEM_TO_MEM_0_WAIT_FOR_MEM_WRITES |
838                      CP_MEM_TO_MEM_0_DOUBLE |
839                      CP_MEM_TO_MEM_0_NEG_C);
840 
841       tu_cs_emit_qw(cs, result_iova);
842       tu_cs_emit_qw(cs, result_iova);
843       tu_cs_emit_qw(cs, stat_stop_iova);
844       tu_cs_emit_qw(cs, stat_start_iova);
845    }
846 
847    tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
848 
849    if (cmdbuf->state.pass)
850       cs = &cmdbuf->draw_epilogue_cs;
851 
852    /* Set the availability to 1 */
853    tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
854    tu_cs_emit_qw(cs, available_iova);
855    tu_cs_emit_qw(cs, 0x1);
856 }
857 
858 static void
859 emit_end_xfb_query(struct tu_cmd_buffer *cmdbuf,
860                    struct tu_query_pool *pool,
861                    uint32_t query,
862                    uint32_t stream_id)
863 {
864    struct tu_cs *cs = cmdbuf->state.pass ? &cmdbuf->draw_cs : &cmdbuf->cs;
865 
866    uint64_t end_iova = primitive_query_iova(pool, query, end[0], 0);
867    uint64_t result_written_iova = query_result_iova(pool, query, 0);
868    uint64_t result_generated_iova = query_result_iova(pool, query, 1);
869    uint64_t begin_written_iova = primitive_query_iova(pool, query, begin[stream_id], 0);
870    uint64_t begin_generated_iova = primitive_query_iova(pool, query, begin[stream_id], 1);
871    uint64_t end_written_iova = primitive_query_iova(pool, query, end[stream_id], 0);
872    uint64_t end_generated_iova = primitive_query_iova(pool, query, end[stream_id], 1);
873    uint64_t available_iova = query_available_iova(pool, query);
874 
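   /* Dump the end-of-query primitive counters into the slot's end[] area,
    * then wait and flush so the CP_MEM_TO_MEM accumulations below read the
    * freshly written values. */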
875    tu_cs_emit_regs(cs, A6XX_VPC_SO_STREAM_COUNTS(.qword = end_iova));
876    tu6_emit_event_write(cmdbuf, cs, WRITE_PRIMITIVE_COUNTS);
877 
878    tu_cs_emit_wfi(cs);
879    tu6_emit_event_write(cmdbuf, cs, CACHE_FLUSH_TS);
880 
881    /* Set the count of written primitives */
882    tu_cs_emit_pkt7(cs, CP_MEM_TO_MEM, 9);
883    tu_cs_emit(cs, CP_MEM_TO_MEM_0_DOUBLE | CP_MEM_TO_MEM_0_NEG_C |
884                   CP_MEM_TO_MEM_0_WAIT_FOR_MEM_WRITES | 0x80000000);
885    tu_cs_emit_qw(cs, result_written_iova);
886    tu_cs_emit_qw(cs, result_written_iova);
887    tu_cs_emit_qw(cs, end_written_iova);
888    tu_cs_emit_qw(cs, begin_written_iova);
889 
890    tu6_emit_event_write(cmdbuf, cs, CACHE_FLUSH_TS);
891 
892    /* Set the count of generated primitives */
893    tu_cs_emit_pkt7(cs, CP_MEM_TO_MEM, 9);
894    tu_cs_emit(cs, CP_MEM_TO_MEM_0_DOUBLE | CP_MEM_TO_MEM_0_NEG_C |
895                   CP_MEM_TO_MEM_0_WAIT_FOR_MEM_WRITES | 0x80000000);
896    tu_cs_emit_qw(cs, result_generated_iova);
897    tu_cs_emit_qw(cs, result_generated_iova);
898    tu_cs_emit_qw(cs, end_generated_iova);
899    tu_cs_emit_qw(cs, begin_generated_iova);
900 
901    /* Set the availability to 1 */
902    tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
903    tu_cs_emit_qw(cs, available_iova);
904    tu_cs_emit_qw(cs, 0x1);
905 }
906 
907 /* Implement this bit of spec text from section 17.2 "Query Operation":
908  *
909  *     If queries are used while executing a render pass instance that has
910  *     multiview enabled, the query uses N consecutive query indices in the
911  *     query pool (starting at query) where N is the number of bits set in the
912  *     view mask in the subpass the query is used in. How the numerical
913  *     results of the query are distributed among the queries is
914  *     implementation-dependent. For example, some implementations may write
915  *     each view’s results to a distinct query, while other implementations
916  *     may write the total result to the first query and write zero to the
917  *     other queries. However, the sum of the results in all the queries must
918  *     accurately reflect the total result of the query summed over all views.
919  *     Applications can sum the results from all the queries to compute the
920  *     total result.
921  *
922  * Since we execute all views at once, we write zero to the other queries.
923  * Furthermore, because queries must be reset before use, and we set the
924  * result to 0 in vkCmdResetQueryPool(), we just need to mark it as available.
925  */
926 
927 static void
928 handle_multiview_queries(struct tu_cmd_buffer *cmd,
929                          struct tu_query_pool *pool,
930                          uint32_t query)
931 {
932    if (!cmd->state.pass || !cmd->state.subpass->multiview_mask)
933       return;
934 
935    unsigned views = util_bitcount(cmd->state.subpass->multiview_mask);
936    struct tu_cs *cs = &cmd->draw_epilogue_cs;
937 
938    for (uint32_t i = 1; i < views; i++) {
939       tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
940       tu_cs_emit_qw(cs, query_available_iova(pool, query + i));
941       tu_cs_emit_qw(cs, 0x1);
942    }
943 }
944 
945 void
946 tu_CmdEndQuery(VkCommandBuffer commandBuffer,
947                VkQueryPool queryPool,
948                uint32_t query)
949 {
950    TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
951    TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
952    assert(query < pool->size);
953 
954    switch (pool->type) {
955    case VK_QUERY_TYPE_OCCLUSION:
956       emit_end_occlusion_query(cmdbuf, pool, query);
957       break;
958    case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
959       emit_end_xfb_query(cmdbuf, pool, query, 0);
960       break;
961    case VK_QUERY_TYPE_PIPELINE_STATISTICS:
962       emit_end_stat_query(cmdbuf, pool, query);
963       break;
964    case VK_QUERY_TYPE_TIMESTAMP:
965       unreachable("Unimplemented query type");
966    default:
967       assert(!"Invalid query type");
968    }
969 
970    handle_multiview_queries(cmdbuf, pool, query);
971 }
972 
973 void
974 tu_CmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer,
975                          VkQueryPool queryPool,
976                          uint32_t query,
977                          uint32_t index)
978 {
979    TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
980    TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
981    assert(query < pool->size);
982 
983    switch (pool->type) {
984    case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
985       assert(index < 4);
986       emit_end_xfb_query(cmdbuf, pool, query, index);
987       break;
988    default:
989       assert(!"Invalid query type");
990    }
991 }
992 
993 void
994 tu_CmdWriteTimestamp(VkCommandBuffer commandBuffer,
995                      VkPipelineStageFlagBits pipelineStage,
996                      VkQueryPool queryPool,
997                      uint32_t query)
998 {
999    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1000    TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
1001 
1002    /* Inside a render pass, just write the timestamp multiple times so that
1003     * the user gets the last one if we use GMEM. There isn't really much
1004     * better we can do, and this seems to be what the blob does too.
1005     */
1006    struct tu_cs *cs = cmd->state.pass ? &cmd->draw_cs : &cmd->cs;
1007 
1008    /* Stages that will already have been executed by the time the CP executes
1009     * the REG_TO_MEM. DrawIndirect parameters are read by the CP, so the draw
1010     * indirect stage counts as top-of-pipe too.
1011     */
1012    VkPipelineStageFlags top_of_pipe_flags =
1013       VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT |
1014       VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
1015 
1016    if (pipelineStage & ~top_of_pipe_flags) {
1017       /* Execute a WFI so that all commands complete. Note that CP_REG_TO_MEM
1018        * does CP_WAIT_FOR_ME internally, which will wait for the WFI to
1019        * complete.
1020        *
1021        * Stalling the CP like this is really unfortunate, but I don't think
1022        * there's a better solution that allows all 48 bits of precision
1023        * because CP_EVENT_WRITE doesn't support 64-bit timestamps.
1024        */
1025       tu_cs_emit_wfi(cs);
1026    }
1027 
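   /* Copy the 64-bit always-on counter into the query's result slot; CNT(2)
    * copies both the LO and HI dwords. */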
1028    tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
1029    tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(REG_A6XX_CP_ALWAYS_ON_COUNTER_LO) |
1030                   CP_REG_TO_MEM_0_CNT(2) |
1031                   CP_REG_TO_MEM_0_64B);
1032    tu_cs_emit_qw(cs, query_result_iova(pool, query, 0));
1033 
1034    /* Only flag availability once the entire renderpass is done, similar to
1035     * the begin/end path.
1036     */
1037    cs = cmd->state.pass ? &cmd->draw_epilogue_cs : &cmd->cs;
1038 
1039    tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
1040    tu_cs_emit_qw(cs, query_available_iova(pool, query));
1041    tu_cs_emit_qw(cs, 0x1);
1042 
1043    /* From the spec for vkCmdWriteTimestamp:
1044     *
1045     *    If vkCmdWriteTimestamp is called while executing a render pass
1046     *    instance that has multiview enabled, the timestamp uses N consecutive
1047     *    query indices in the query pool (starting at query) where N is the
1048     *    number of bits set in the view mask of the subpass the command is
1049     *    executed in. The resulting query values are determined by an
1050     *    implementation-dependent choice of one of the following behaviors:
1051     *
1052     *    -   The first query is a timestamp value and (if more than one bit is
1053     *        set in the view mask) zero is written to the remaining queries.
1054     *        If two timestamps are written in the same subpass, the sum of the
1055     *        execution time of all views between those commands is the
1056     *        difference between the first query written by each command.
1057     *
1058     *    -   All N queries are timestamp values. If two timestamps are written
1059     *        in the same subpass, the sum of the execution time of all views
1060     *        between those commands is the sum of the difference between
1061     *        corresponding queries written by each command. The difference
1062     *        between corresponding queries may be the execution time of a
1063     *        single view.
1064     *
1065     * We execute all views in the same draw call, so we implement the first
1066     * option, the same as regular queries.
1067     */
1068    handle_multiview_queries(cmd, pool, query);
1069 }
1070