1 /*
2  * Copyright © 2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 
30 #include "anv_private.h"
31 
32 #include "util/os_time.h"
33 
34 #include "genxml/gen_macros.h"
35 #include "genxml/genX_pack.h"
36 
37 #include "ds/intel_tracepoints.h"
38 
39 #include "anv_internal_kernels.h"
40 #include "genX_mi_builder.h"
41 
42 #if GFX_VERx10 >= 125
43 #define ANV_PIPELINE_STATISTICS_MASK 0x00001fff
44 #else
45 #define ANV_PIPELINE_STATISTICS_MASK 0x000007ff
46 #endif
47 
48 #include "perf/intel_perf.h"
49 #include "perf/intel_perf_mdapi.h"
50 #include "perf/intel_perf_regs.h"
51 
52 #include "vk_util.h"
53 
54 static struct anv_address
55 anv_query_address(struct anv_query_pool *pool, uint32_t query)
56 {
57    return (struct anv_address) {
58       .bo = pool->bo,
59       .offset = query * pool->stride,
60    };
61 }
62 
63 static void
64 emit_query_mi_flush_availability(struct anv_cmd_buffer *cmd_buffer,
65                                  struct anv_address addr,
66                                  bool available)
67 {
68    anv_batch_emit(&cmd_buffer->batch, GENX(MI_FLUSH_DW), flush) {
69       flush.PostSyncOperation = WriteImmediateData;
70       flush.Address = addr;
71       flush.ImmediateData = available;
72    }
73 }
74 
75 VkResult genX(CreateQueryPool)(
76     VkDevice                                    _device,
77     const VkQueryPoolCreateInfo*                pCreateInfo,
78     const VkAllocationCallbacks*                pAllocator,
79     VkQueryPool*                                pQueryPool)
80 {
81    ANV_FROM_HANDLE(anv_device, device, _device);
82    const struct anv_physical_device *pdevice = device->physical;
83    const VkQueryPoolPerformanceCreateInfoKHR *perf_query_info = NULL;
84    struct intel_perf_counter_pass *counter_pass;
85    struct intel_perf_query_info **pass_query;
86    uint32_t n_passes = 0;
87    uint32_t data_offset = 0;
88    VK_MULTIALLOC(ma);
89    VkResult result;
90 
91    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO);
92 
93    /* Query pool slots are made up of some number of 64-bit values packed
94     * tightly together. For most query types, the first 64-bit value is
95     * the "available" bit which is 0 when the query is unavailable and 1 when
96     * it is available. The 64-bit values that follow are determined by the
97     * type of query.
98     *
99     * For performance queries, OA reports must be aligned to 64 bytes, so we
100     * put those first and keep the "available" bit at the end, together with
101     * some other counters.
102     */
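   /* For example, the occlusion case below uses uint64s_per_slot = 1 + 2,
    * i.e. a 24-byte stride laid out as:
    *
    *    qword 0 : availability (0 = unavailable, 1 = available)
    *    qword 1 : PS depth count at vkCmdBeginQuery
    *    qword 2 : PS depth count at vkCmdEndQuery
    *
    * and GetQueryPoolResults() reports qword 2 - qword 1.
    */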
103    uint32_t uint64s_per_slot = 0;
104 
105    VK_MULTIALLOC_DECL(&ma, struct anv_query_pool, pool, 1);
106 
107    VkQueryPipelineStatisticFlags pipeline_statistics = 0;
108    switch (pCreateInfo->queryType) {
109    case VK_QUERY_TYPE_OCCLUSION:
110       /* Occlusion queries have two values: begin and end. */
111       uint64s_per_slot = 1 + 2;
112       break;
113    case VK_QUERY_TYPE_TIMESTAMP:
114       /* Timestamps just have the one timestamp value */
115       uint64s_per_slot = 1 + 1;
116       break;
117    case VK_QUERY_TYPE_PIPELINE_STATISTICS:
118       pipeline_statistics = pCreateInfo->pipelineStatistics;
119       /* We're going to trust this field implicitly so we need to ensure that
120        * no unhandled extension bits leak in.
121        */
122       pipeline_statistics &= ANV_PIPELINE_STATISTICS_MASK;
123 
124       /* Statistics queries have a begin and end value for every statistic */
125       uint64s_per_slot = 1 + 2 * util_bitcount(pipeline_statistics);
126       break;
127    case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
128       /* Transform feedback queries have 4 values: begin/end for primitives
129        * written and for primitive storage needed.
130        */
131       uint64s_per_slot = 1 + 4;
132       break;
133    case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL: {
134       const struct intel_perf_query_field_layout *layout =
135          &pdevice->perf->query_layout;
136 
137       uint64s_per_slot = 2; /* availability + marker */
138       /* Align to the requirement of the layout */
139       uint64s_per_slot = align(uint64s_per_slot,
140                                DIV_ROUND_UP(layout->alignment, sizeof(uint64_t)));
141       data_offset = uint64s_per_slot * sizeof(uint64_t);
142       /* Add the query data for begin & end commands */
143       uint64s_per_slot += 2 * DIV_ROUND_UP(layout->size, sizeof(uint64_t));
144       break;
145    }
146    case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: {
147       const struct intel_perf_query_field_layout *layout =
148          &pdevice->perf->query_layout;
149       const struct anv_queue_family *queue_family;
150 
151       perf_query_info = vk_find_struct_const(pCreateInfo->pNext,
152                                              QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR);
153       /* Same restriction as in EnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR() */
154       queue_family = &pdevice->queue.families[perf_query_info->queueFamilyIndex];
155       if (!queue_family->supports_perf)
156          return vk_error(device, VK_ERROR_UNKNOWN);
157 
158       n_passes = intel_perf_get_n_passes(pdevice->perf,
159                                          perf_query_info->pCounterIndices,
160                                          perf_query_info->counterIndexCount,
161                                          NULL);
162       vk_multialloc_add(&ma, &counter_pass, struct intel_perf_counter_pass,
163                              perf_query_info->counterIndexCount);
164       vk_multialloc_add(&ma, &pass_query, struct intel_perf_query_info *,
165                              n_passes);
166       uint64s_per_slot = 1 /* availability */;
167       /* Align to the requirement of the layout */
168       uint64s_per_slot = align(uint64s_per_slot,
169                                DIV_ROUND_UP(layout->alignment, sizeof(uint64_t)));
170       data_offset = uint64s_per_slot * sizeof(uint64_t);
171       /* Add the query data for begin & end commands */
172       uint64s_per_slot += 2 * DIV_ROUND_UP(layout->size, sizeof(uint64_t));
173       /* Multiply by the number of passes */
174       uint64s_per_slot *= n_passes;
175       break;
176    }
177    case VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT:
178       /* Query has two values: begin and end. */
179       uint64s_per_slot = 1 + 2;
180       break;
181 #if GFX_VERx10 >= 125
182    case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR:
183    case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR:
184       uint64s_per_slot = 1 + 1 /* availability + size (PostbuildInfoCurrentSize, PostbuildInfoCompactedSize) */;
185       break;
186 
187    case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR:
188    case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR:
189       uint64s_per_slot = 1 + 2 /* availability + size (PostbuildInfoSerializationDesc) */;
190       break;
191 
192    case VK_QUERY_TYPE_MESH_PRIMITIVES_GENERATED_EXT:
193       /* Query has two values: begin and end. */
194       uint64s_per_slot = 1 + 2;
195       break;
196 
197 #endif
198    case VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR:
199       uint64s_per_slot = 1;
200       break;
201    case VK_QUERY_TYPE_VIDEO_ENCODE_FEEDBACK_KHR:
202       uint64s_per_slot = 1 + 1; /* availability + length of written bitstream data */
203       break;
204    default:
205       assert(!"Invalid query type");
206    }
207 
208    if (!vk_multialloc_zalloc2(&ma, &device->vk.alloc, pAllocator,
209                               VK_SYSTEM_ALLOCATION_SCOPE_OBJECT))
210       return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
211 
212    vk_query_pool_init(&device->vk, &pool->vk, pCreateInfo);
213    pool->stride = uint64s_per_slot * sizeof(uint64_t);
214 
215    if (pool->vk.query_type == VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL) {
216       pool->data_offset = data_offset;
217       pool->snapshot_size = (pool->stride - data_offset) / 2;
218    }
219    else if (pool->vk.query_type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
220       pool->pass_size = pool->stride / n_passes;
221       pool->data_offset = data_offset;
222       pool->snapshot_size = (pool->pass_size - data_offset) / 2;
223       pool->n_counters = perf_query_info->counterIndexCount;
224       pool->counter_pass = counter_pass;
225       intel_perf_get_counters_passes(pdevice->perf,
226                                      perf_query_info->pCounterIndices,
227                                      perf_query_info->counterIndexCount,
228                                      pool->counter_pass);
229       pool->n_passes = n_passes;
230       pool->pass_query = pass_query;
231       intel_perf_get_n_passes(pdevice->perf,
232                               perf_query_info->pCounterIndices,
233                               perf_query_info->counterIndexCount,
234                               pool->pass_query);
235    } else if (pool->vk.query_type == VK_QUERY_TYPE_VIDEO_ENCODE_FEEDBACK_KHR) {
236       const VkVideoProfileInfoKHR* pVideoProfile = vk_find_struct_const(pCreateInfo->pNext, VIDEO_PROFILE_INFO_KHR);
237       assert (pVideoProfile);
238 
239       pool->codec = pVideoProfile->videoCodecOperation;
240    }
241 
242    uint64_t size = pool->vk.query_count * (uint64_t)pool->stride;
243 
244    /* For KHR_performance_query we need some space in the buffer for a small
245     * batch updating ANV_PERF_QUERY_OFFSET_REG.
246     */
247    if (pool->vk.query_type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
248       pool->khr_perf_preamble_stride = 32;
249       pool->khr_perf_preambles_offset = size;
250       size += (uint64_t)pool->n_passes * pool->khr_perf_preamble_stride;
251    }
252 
253    result = anv_device_alloc_bo(device, "query-pool", size,
254                                 ANV_BO_ALLOC_MAPPED |
255                                 ANV_BO_ALLOC_HOST_CACHED_COHERENT |
256                                 ANV_BO_ALLOC_CAPTURE,
257                                 0 /* explicit_address */,
258                                 &pool->bo);
259    if (result != VK_SUCCESS)
260       goto fail;
261 
262    if (pool->vk.query_type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
263       for (uint32_t p = 0; p < pool->n_passes; p++) {
264          struct mi_builder b;
265          struct anv_batch batch = {
266             .start = pool->bo->map + khr_perf_query_preamble_offset(pool, p),
267             .end = pool->bo->map + khr_perf_query_preamble_offset(pool, p) + pool->khr_perf_preamble_stride,
268          };
269          batch.next = batch.start;
270 
271          mi_builder_init(&b, device->info, &batch);
272          mi_store(&b, mi_reg64(ANV_PERF_QUERY_OFFSET_REG),
273                       mi_imm(p * (uint64_t)pool->pass_size));
274          anv_batch_emit(&batch, GENX(MI_BATCH_BUFFER_END), bbe);
275       }
276    }
277 
278    ANV_RMV(query_pool_create, device, pool, false);
279 
280    *pQueryPool = anv_query_pool_to_handle(pool);
281 
282    return VK_SUCCESS;
283 
284  fail:
285    vk_free2(&device->vk.alloc, pAllocator, pool);
286 
287    return result;
288 }
289 
290 void genX(DestroyQueryPool)(
291     VkDevice                                    _device,
292     VkQueryPool                                 _pool,
293     const VkAllocationCallbacks*                pAllocator)
294 {
295    ANV_FROM_HANDLE(anv_device, device, _device);
296    ANV_FROM_HANDLE(anv_query_pool, pool, _pool);
297 
298    if (!pool)
299       return;
300 
301    ANV_RMV(resource_destroy, device, pool);
302 
303    anv_device_release_bo(device, pool->bo);
304    vk_object_free(&device->vk, pAllocator, pool);
305 }
306 
307 /**
308  * VK_KHR_performance_query layout:
309  *
310  * --------------------------------------------
311  * |       availability (8b)       | |        |
312  * |-------------------------------| |        |
313  * |       some padding (see       | |        |
314  * | query_field_layout:alignment) | | Pass 0 |
315  * |-------------------------------| |        |
316  * |           query data          | |        |
317  * | (2 * query_field_layout:size) | |        |
318  * |-------------------------------|--        | Query 0
319  * |       availability (8b)       | |        |
320  * |-------------------------------| |        |
321  * |       some padding (see       | |        |
322  * | query_field_layout:alignment) | | Pass 1 |
323  * |-------------------------------| |        |
324  * |           query data          | |        |
325  * | (2 * query_field_layout:size) | |        |
326  * |-------------------------------|-----------
327  * |       availability (8b)       | |        |
328  * |-------------------------------| |        |
329  * |       some padding (see       | |        |
330  * | query_field_layout:alignment) | | Pass 0 |
331  * |-------------------------------| |        |
332  * |           query data          | |        |
333  * | (2 * query_field_layout:size) | |        |
334  * |-------------------------------|--        | Query 1
335  * |               ...             | |        |
336  * --------------------------------------------
337  */
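
/* Offset arithmetic matching the layout above (see the helpers below):
 *
 *    availability(query, pass) = query * stride + pass * pass_size
 *    data(query, pass, end)    = availability(query, pass) + data_offset +
 *                                (end ? snapshot_size : 0)
 *
 * with stride = pass_size * n_passes, data_offset the aligned start of the
 * query data within a pass, and snapshot_size half of that data (a begin
 * snapshot followed by an end snapshot).
 */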
338 
339 static uint64_t
340 khr_perf_query_availability_offset(struct anv_query_pool *pool, uint32_t query, uint32_t pass)
341 {
342    return (query * (uint64_t)pool->stride) + (pass * (uint64_t)pool->pass_size);
343 }
344 
345 static uint64_t
346 khr_perf_query_data_offset(struct anv_query_pool *pool, uint32_t query, uint32_t pass, bool end)
347 {
348    return khr_perf_query_availability_offset(pool, query, pass) +
349           pool->data_offset + (end ? pool->snapshot_size : 0);
350 }
351 
352 static struct anv_address
353 khr_perf_query_availability_address(struct anv_query_pool *pool, uint32_t query, uint32_t pass)
354 {
355    return anv_address_add(
356       (struct anv_address) { .bo = pool->bo, },
357       khr_perf_query_availability_offset(pool, query, pass));
358 }
359 
360 static struct anv_address
361 khr_perf_query_data_address(struct anv_query_pool *pool, uint32_t query, uint32_t pass, bool end)
362 {
363    return anv_address_add(
364       (struct anv_address) { .bo = pool->bo, },
365       khr_perf_query_data_offset(pool, query, pass, end));
366 }
367 
368 static bool
369 khr_perf_query_ensure_relocs(struct anv_cmd_buffer *cmd_buffer)
370 {
371    if (anv_batch_has_error(&cmd_buffer->batch))
372       return false;
373 
374    if (cmd_buffer->self_mod_locations)
375       return true;
376 
377    struct anv_device *device = cmd_buffer->device;
378    const struct anv_physical_device *pdevice = device->physical;
379 
380    cmd_buffer->self_mod_locations =
381       vk_alloc(&cmd_buffer->vk.pool->alloc,
382                pdevice->n_perf_query_commands * sizeof(*cmd_buffer->self_mod_locations), 8,
383                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
384 
385    if (!cmd_buffer->self_mod_locations) {
386       anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
387       return false;
388    }
389 
390    return true;
391 }
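
/* The self_mod_locations array holds relocation tokens for the query data
 * addresses that genX(CmdBeginQueryIndexedEXT) computes for
 * VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: each address is the pass-0 offset
 * plus the value of ANV_PERF_QUERY_OFFSET_REG, which the small preamble
 * batches stored in the pool BO set to pass * pass_size. This lets the same
 * command buffer be replayed once per pass while writing into that pass'
 * data slot.
 */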
392 
393 /**
394  * VK_INTEL_performance_query layout:
395  *
396  * ---------------------------------
397  * |       availability (8b)       |
398  * |-------------------------------|
399  * |          marker (8b)          |
400  * |-------------------------------|
401  * |       some padding (see       |
402  * | query_field_layout:alignment) |
403  * |-------------------------------|
404  * |           query data          |
405  * | (2 * query_field_layout:size) |
406  * ---------------------------------
407  */
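
/* For example, with the 64-byte OA report alignment mentioned above, the
 * availability and marker qwords pad out to data_offset = 64, so the begin
 * snapshot starts at byte 64 and the end snapshot at 64 + snapshot_size
 * (see intel_perf_query_data_offset() below).
 */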
408 
409 static uint32_t
410 intel_perf_marker_offset(void)
411 {
412    return 8;
413 }
414 
415 static uint32_t
416 intel_perf_query_data_offset(struct anv_query_pool *pool, bool end)
417 {
418    return pool->data_offset + (end ? pool->snapshot_size : 0);
419 }
420 
421 static void
422 cpu_write_query_result(void *dst_slot, VkQueryResultFlags flags,
423                        uint32_t value_index, uint64_t result)
424 {
425    if (flags & VK_QUERY_RESULT_64_BIT) {
426       uint64_t *dst64 = dst_slot;
427       dst64[value_index] = result;
428    } else {
429       uint32_t *dst32 = dst_slot;
430       dst32[value_index] = result;
431    }
432 }
433 
434 static void *
435 query_slot(struct anv_query_pool *pool, uint32_t query)
436 {
437    return pool->bo->map + query * pool->stride;
438 }
439 
440 static bool
441 query_is_available(struct anv_query_pool *pool, uint32_t query)
442 {
443    if (pool->vk.query_type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
444       for (uint32_t p = 0; p < pool->n_passes; p++) {
445          volatile uint64_t *slot =
446             pool->bo->map + khr_perf_query_availability_offset(pool, query, p);
447          if (!slot[0])
448             return false;
449       }
450       return true;
451    }
452 
453    return *(volatile uint64_t *)query_slot(pool, query);
454 }
455 
456 static VkResult
457 wait_for_available(struct anv_device *device,
458                    struct anv_query_pool *pool, uint32_t query)
459 {
460    /* By default we leave a 2s timeout before declaring the device lost. */
461    uint64_t rel_timeout = 2 * NSEC_PER_SEC;
462    if (pool->vk.query_type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
463       /* With performance queries, there is an additional 500us reconfiguration
464        * time in i915.
465        */
466       rel_timeout += 500 * 1000;
467       /* Additionally, a command buffer can be replayed N times, once per
468        * metric set, to capture all the counters requested.
469        */
470       rel_timeout *= pool->n_passes;
471    }
472    uint64_t abs_timeout_ns = os_time_get_absolute_timeout(rel_timeout);
473 
474    while (os_time_get_nano() < abs_timeout_ns) {
475       if (query_is_available(pool, query))
476          return VK_SUCCESS;
477       VkResult status = vk_device_check_status(&device->vk);
478       if (status != VK_SUCCESS)
479          return status;
480    }
481 
482    return vk_device_set_lost(&device->vk, "query timeout");
483 }
484 
485 VkResult genX(GetQueryPoolResults)(
486     VkDevice                                    _device,
487     VkQueryPool                                 queryPool,
488     uint32_t                                    firstQuery,
489     uint32_t                                    queryCount,
490     size_t                                      dataSize,
491     void*                                       pData,
492     VkDeviceSize                                stride,
493     VkQueryResultFlags                          flags)
494 {
495    ANV_FROM_HANDLE(anv_device, device, _device);
496    ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
497 
498    assert(
499 #if GFX_VERx10 >= 125
500    pool->vk.query_type == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR ||
501    pool->vk.query_type == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR ||
502    pool->vk.query_type == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR ||
503    pool->vk.query_type == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR ||
504    pool->vk.query_type == VK_QUERY_TYPE_MESH_PRIMITIVES_GENERATED_EXT ||
505 #endif
506    pool->vk.query_type == VK_QUERY_TYPE_OCCLUSION ||
507    pool->vk.query_type == VK_QUERY_TYPE_PIPELINE_STATISTICS ||
508    pool->vk.query_type == VK_QUERY_TYPE_TIMESTAMP ||
509    pool->vk.query_type == VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT ||
510    pool->vk.query_type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR ||
511    pool->vk.query_type == VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL ||
512    pool->vk.query_type == VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT ||
513    pool->vk.query_type == VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR ||
514    pool->vk.query_type == VK_QUERY_TYPE_VIDEO_ENCODE_FEEDBACK_KHR);
515 
516    if (vk_device_is_lost(&device->vk))
517       return VK_ERROR_DEVICE_LOST;
518 
519    if (pData == NULL)
520       return VK_SUCCESS;
521 
522    void *data_end = pData + dataSize;
523 
524    VkResult status = VK_SUCCESS;
525    for (uint32_t i = 0; i < queryCount; i++) {
526       bool available = query_is_available(pool, firstQuery + i);
527 
528       if (!available && (flags & VK_QUERY_RESULT_WAIT_BIT)) {
529          status = wait_for_available(device, pool, firstQuery + i);
530          if (status != VK_SUCCESS) {
531             return status;
532          }
533 
534          available = true;
535       }
536 
537       /* From the Vulkan 1.0.42 spec:
538        *
539        *    "If VK_QUERY_RESULT_WAIT_BIT and VK_QUERY_RESULT_PARTIAL_BIT are
540        *    both not set then no result values are written to pData for
541        *    queries that are in the unavailable state at the time of the call,
542        *    and vkGetQueryPoolResults returns VK_NOT_READY. However,
543        *    availability state is still written to pData for those queries if
544        *    VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set."
545        *
546        * From VK_KHR_performance_query :
547        *
548        *    "VK_QUERY_RESULT_PERFORMANCE_QUERY_RECORDED_COUNTERS_BIT_KHR specifies
549        *     that the result should contain the number of counters that were recorded
550        *     into a query pool of type ename:VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR"
551        */
552       bool write_results = available || (flags & VK_QUERY_RESULT_PARTIAL_BIT);
553 
554       uint32_t idx = 0;
555       switch (pool->vk.query_type) {
556       case VK_QUERY_TYPE_OCCLUSION:
557       case VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT:
558 #if GFX_VERx10 >= 125
559       case VK_QUERY_TYPE_MESH_PRIMITIVES_GENERATED_EXT:
560 #endif
561       {
562          uint64_t *slot = query_slot(pool, firstQuery + i);
563          if (write_results) {
564             /* From the Vulkan 1.2.132 spec:
565              *
566              *    "If VK_QUERY_RESULT_PARTIAL_BIT is set,
567              *    VK_QUERY_RESULT_WAIT_BIT is not set, and the query’s status
568              *    is unavailable, an intermediate result value between zero and
569              *    the final result value is written to pData for that query."
570              */
571             uint64_t result = available ? slot[2] - slot[1] : 0;
572             cpu_write_query_result(pData, flags, idx, result);
573          }
574          idx++;
575          break;
576       }
577 
578       case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
579          uint64_t *slot = query_slot(pool, firstQuery + i);
580          uint32_t statistics = pool->vk.pipeline_statistics;
581          while (statistics) {
582             UNUSED uint32_t stat = u_bit_scan(&statistics);
583             if (write_results) {
584                /* If a query is not available but VK_QUERY_RESULT_PARTIAL_BIT is set, write 0. */
585                uint64_t result = available ? slot[idx * 2 + 2] - slot[idx * 2 + 1] : 0;
586                cpu_write_query_result(pData, flags, idx, result);
587             }
588             idx++;
589          }
590          assert(idx == util_bitcount(pool->vk.pipeline_statistics));
591          break;
592       }
593 
594       case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT: {
595          uint64_t *slot = query_slot(pool, firstQuery + i);
596          if (write_results) {
597             /* If a query is not available but VK_QUERY_RESULT_PARTIAL_BIT is set, write 0. */
598             uint64_t result = available ? slot[2] - slot[1] : 0;
599             cpu_write_query_result(pData, flags, idx, result);
600          }
601          idx++;
602          if (write_results) {
603             /* If a query is not available but VK_QUERY_RESULT_PARTIAL_BIT is set, write 0. */
604             uint64_t result = available ? slot[4] - slot[3] : 0;
605             cpu_write_query_result(pData, flags, idx, result);
606          }
607          idx++;
608          break;
609       }
610 
611 #if GFX_VERx10 >= 125
612       case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR:
613       case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR:
614       case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR: {
615          uint64_t *slot = query_slot(pool, firstQuery + i);
616          if (write_results)
617             cpu_write_query_result(pData, flags, idx, slot[1]);
618          idx++;
619          break;
620       }
621 
622       case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR: {
623          uint64_t *slot = query_slot(pool, firstQuery + i);
624          if (write_results)
625             cpu_write_query_result(pData, flags, idx, slot[2]);
626          idx++;
627          break;
628       }
629 #endif
630 
631       case VK_QUERY_TYPE_TIMESTAMP: {
632          uint64_t *slot = query_slot(pool, firstQuery + i);
633          if (write_results)
634             cpu_write_query_result(pData, flags, idx, slot[1]);
635          idx++;
636          break;
637       }
638 
639       case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: {
640          const struct anv_physical_device *pdevice = device->physical;
641          assert((flags & (VK_QUERY_RESULT_WITH_AVAILABILITY_BIT |
642                           VK_QUERY_RESULT_PARTIAL_BIT)) == 0);
643          for (uint32_t p = 0; p < pool->n_passes; p++) {
644             const struct intel_perf_query_info *query = pool->pass_query[p];
645             struct intel_perf_query_result result;
646             intel_perf_query_result_clear(&result);
647             intel_perf_query_result_accumulate_fields(&result, query,
648                                                       pool->bo->map + khr_perf_query_data_offset(pool, firstQuery + i, p, false),
649                                                       pool->bo->map + khr_perf_query_data_offset(pool, firstQuery + i, p, true),
650                                                       false /* no_oa_accumulate */);
651             anv_perf_write_pass_results(pdevice->perf, pool, p, &result, pData);
652          }
653          break;
654       }
655 
656       case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL: {
657          if (!write_results)
658             break;
659          const void *query_data = query_slot(pool, firstQuery + i);
660          const struct intel_perf_query_info *query = &device->physical->perf->queries[0];
661          struct intel_perf_query_result result;
662          intel_perf_query_result_clear(&result);
663          intel_perf_query_result_accumulate_fields(&result, query,
664                                                    query_data + intel_perf_query_data_offset(pool, false),
665                                                    query_data + intel_perf_query_data_offset(pool, true),
666                                                    false /* no_oa_accumulate */);
667          intel_perf_query_result_write_mdapi(pData, stride,
668                                              device->info,
669                                              query, &result);
670          const uint64_t *marker = query_data + intel_perf_marker_offset();
671          intel_perf_query_mdapi_write_marker(pData, stride, device->info, *marker);
672          break;
673       }
674 
675       case VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR:
676          if (!write_results)
677             break;
678          const uint32_t *query_data = query_slot(pool, firstQuery + i);
679          uint32_t result = available ? *query_data : 0;
680          cpu_write_query_result(pData, flags, idx, result);
681          break;
682       case VK_QUERY_TYPE_VIDEO_ENCODE_FEEDBACK_KHR: {
683          if (!write_results)
684             break;
685 
686          /*
687           * Slot 0 : Availability.
688           * Slot 1 : Bitstream bytes written.
689           */
690          const uint64_t *slot = query_slot(pool, firstQuery + i);
691          /* Set 0 as offset. */
692          cpu_write_query_result(pData, flags, idx++, 0);
693          cpu_write_query_result(pData, flags, idx++, slot[1]);
694          break;
695       }
696 
697       default:
698          unreachable("invalid pool type");
699       }
700 
701       if (!write_results)
702          status = VK_NOT_READY;
703 
704       if (flags & (VK_QUERY_RESULT_WITH_AVAILABILITY_BIT |
705                    VK_QUERY_RESULT_WITH_STATUS_BIT_KHR))
706          cpu_write_query_result(pData, flags, idx, available);
707 
708       pData += stride;
709       if (pData >= data_end)
710          break;
711    }
712 
713    return status;
714 }
715 
716 static void
717 emit_ps_depth_count(struct anv_cmd_buffer *cmd_buffer,
718                     struct anv_address addr)
719 {
720    cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_POST_SYNC_BIT;
721    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
722 
723    bool cs_stall_needed = (GFX_VER == 9 && cmd_buffer->device->info->gt == 4);
724    genx_batch_emit_pipe_control_write
725       (&cmd_buffer->batch, cmd_buffer->device->info,
726        cmd_buffer->state.current_pipeline, WritePSDepthCount, addr, 0,
727        ANV_PIPE_DEPTH_STALL_BIT | (cs_stall_needed ? ANV_PIPE_CS_STALL_BIT : 0));
728 }
729 
730 static void
731 emit_query_mi_availability(struct mi_builder *b,
732                            struct anv_address addr,
733                            bool available)
734 {
735    mi_store(b, mi_mem64(addr), mi_imm(available));
736 }
737 
738 static void
739 emit_query_pc_availability(struct anv_cmd_buffer *cmd_buffer,
740                            struct anv_address addr,
741                            bool available)
742 {
743    cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_POST_SYNC_BIT;
744    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
745 
746    genx_batch_emit_pipe_control_write
747       (&cmd_buffer->batch, cmd_buffer->device->info,
748        cmd_buffer->state.current_pipeline, WriteImmediateData, addr,
749        available, 0);
750 }
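
/* Two availability-write paths are used: emit_query_pc_availability() writes
 * through a PIPE_CONTROL post-sync operation and pairs with query values
 * that are themselves written by PIPE_CONTROL (occlusion, timestamps), while
 * emit_query_mi_availability() uses an MI store and pairs with values
 * captured through MI commands. Keeping the availability write on the same
 * path as the value writes avoids synchronizing between the two.
 */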
751 
752 /**
753  * Goes through a series of consecutive query indices in the given pool
754  * setting all element values to 0 and marking them as available.
755  */
756 static void
757 emit_zero_queries(struct anv_cmd_buffer *cmd_buffer,
758                   struct mi_builder *b, struct anv_query_pool *pool,
759                   uint32_t first_index, uint32_t num_queries)
760 {
761    switch (pool->vk.query_type) {
762    case VK_QUERY_TYPE_OCCLUSION:
763    case VK_QUERY_TYPE_TIMESTAMP:
764       /* These queries are written with a PIPE_CONTROL, so clear them with a
765        * PIPE_CONTROL as well; that way we don't have to synchronize between
766        * two types of operations.
767        */
768       assert((pool->stride % 8) == 0);
769       for (uint32_t i = 0; i < num_queries; i++) {
770          struct anv_address slot_addr =
771             anv_query_address(pool, first_index + i);
772 
773          for (uint32_t qword = 1; qword < (pool->stride / 8); qword++) {
774             emit_query_pc_availability(cmd_buffer,
775                                        anv_address_add(slot_addr, qword * 8),
776                                        false);
777          }
778          emit_query_pc_availability(cmd_buffer, slot_addr, true);
779       }
780       break;
781 
782    case VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT:
783    case VK_QUERY_TYPE_PIPELINE_STATISTICS:
784    case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
785 #if GFX_VERx10 >= 125
786    case VK_QUERY_TYPE_MESH_PRIMITIVES_GENERATED_EXT:
787 #endif
788       for (uint32_t i = 0; i < num_queries; i++) {
789          struct anv_address slot_addr =
790             anv_query_address(pool, first_index + i);
791          mi_memset(b, anv_address_add(slot_addr, 8), 0, pool->stride - 8);
792          emit_query_mi_availability(b, slot_addr, true);
793       }
794       break;
795 
796    case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: {
797       for (uint32_t i = 0; i < num_queries; i++) {
798          for (uint32_t p = 0; p < pool->n_passes; p++) {
799             mi_memset(b, khr_perf_query_data_address(pool, first_index + i, p, false),
800                          0, 2 * pool->snapshot_size);
801             emit_query_mi_availability(b,
802                                        khr_perf_query_availability_address(pool, first_index + i, p),
803                                        true);
804          }
805       }
806       break;
807    }
808 
809    case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL:
810       for (uint32_t i = 0; i < num_queries; i++) {
811          struct anv_address slot_addr =
812             anv_query_address(pool, first_index + i);
813          mi_memset(b, anv_address_add(slot_addr, 8), 0, pool->stride - 8);
814          emit_query_mi_availability(b, slot_addr, true);
815       }
816       break;
817 
818    default:
819       unreachable("Unsupported query type");
820    }
821 }
822 
823 void genX(CmdResetQueryPool)(
824     VkCommandBuffer                             commandBuffer,
825     VkQueryPool                                 queryPool,
826     uint32_t                                    firstQuery,
827     uint32_t                                    queryCount)
828 {
829    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
830    ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
831    struct anv_physical_device *pdevice = cmd_buffer->device->physical;
832 
833    /* Shader clearing is only possible on render/compute when not in protected
834     * mode.
835     */
836    if (anv_cmd_buffer_is_render_or_compute_queue(cmd_buffer) &&
837        (cmd_buffer->vk.pool->flags & VK_COMMAND_POOL_CREATE_PROTECTED_BIT) == 0 &&
838        queryCount >= pdevice->instance->query_clear_with_blorp_threshold) {
839       trace_intel_begin_query_clear_blorp(&cmd_buffer->trace);
840 
841       anv_cmd_buffer_fill_area(cmd_buffer,
842                                anv_query_address(pool, firstQuery),
843                                queryCount * pool->stride,
844                                0, false);
845 
846       /* The pending clear writes come from compute if we're on the compute
847        * engine or in GPGPU mode on the render engine.
848        */
849       if (anv_cmd_buffer_is_compute_queue(cmd_buffer) ||
850           cmd_buffer->state.current_pipeline == pdevice->gpgpu_pipeline_value) {
851          cmd_buffer->state.queries.clear_bits =
852             ANV_QUERY_COMPUTE_WRITES_PENDING_BITS;
853       } else {
854          cmd_buffer->state.queries.clear_bits =
855             ANV_QUERY_RENDER_TARGET_WRITES_PENDING_BITS(&pdevice->info);
856       }
857 
858       trace_intel_end_query_clear_blorp(&cmd_buffer->trace, queryCount);
859       return;
860    }
861 
862    trace_intel_begin_query_clear_cs(&cmd_buffer->trace);
863 
864    switch (pool->vk.query_type) {
865    case VK_QUERY_TYPE_OCCLUSION:
866 #if GFX_VERx10 >= 125
867    case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR:
868    case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR:
869    case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR:
870    case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR:
871 #endif
872       for (uint32_t i = 0; i < queryCount; i++) {
873          emit_query_pc_availability(cmd_buffer,
874                                     anv_query_address(pool, firstQuery + i),
875                                     false);
876       }
877       break;
878 
879    case VK_QUERY_TYPE_TIMESTAMP: {
880       for (uint32_t i = 0; i < queryCount; i++) {
881          emit_query_pc_availability(cmd_buffer,
882                                     anv_query_address(pool, firstQuery + i),
883                                     false);
884       }
885 
886       /* Add a CS stall here to make sure the PIPE_CONTROL above has
887        * completed. Otherwise some timestamps written later with MI_STORE_*
888        * commands might race with the PIPE_CONTROL in the loop above.
889        */
890       anv_add_pending_pipe_bits(cmd_buffer, ANV_PIPE_CS_STALL_BIT,
891                                 "vkCmdResetQueryPool of timestamps");
892       genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
893       break;
894    }
895 
896    case VK_QUERY_TYPE_PIPELINE_STATISTICS:
897    case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
898    case VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT:
899    case VK_QUERY_TYPE_VIDEO_ENCODE_FEEDBACK_KHR:
900 #if GFX_VERx10 >= 125
901    case VK_QUERY_TYPE_MESH_PRIMITIVES_GENERATED_EXT:
902 #endif
903    {
904       struct mi_builder b;
905       mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
906 
907       for (uint32_t i = 0; i < queryCount; i++)
908          emit_query_mi_availability(&b, anv_query_address(pool, firstQuery + i), false);
909       break;
910    }
911 
912    case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: {
913       struct mi_builder b;
914       mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
915 
916       for (uint32_t i = 0; i < queryCount; i++) {
917          for (uint32_t p = 0; p < pool->n_passes; p++) {
918             emit_query_mi_availability(
919                &b,
920                khr_perf_query_availability_address(pool, firstQuery + i, p),
921                false);
922          }
923       }
924       break;
925    }
926 
927    case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL: {
928       struct mi_builder b;
929       mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
930 
931       for (uint32_t i = 0; i < queryCount; i++)
932          emit_query_mi_availability(&b, anv_query_address(pool, firstQuery + i), false);
933       break;
934    }
935    case VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR:
936       for (uint32_t i = 0; i < queryCount; i++)
937          emit_query_mi_flush_availability(cmd_buffer, anv_query_address(pool, firstQuery + i), false);
938       break;
939    default:
940       unreachable("Unsupported query type");
941    }
942 
943    trace_intel_end_query_clear_cs(&cmd_buffer->trace, queryCount);
944 }
945 
946 void genX(ResetQueryPool)(
947     VkDevice                                    _device,
948     VkQueryPool                                 queryPool,
949     uint32_t                                    firstQuery,
950     uint32_t                                    queryCount)
951 {
952    ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
953 
954    for (uint32_t i = 0; i < queryCount; i++) {
955       if (pool->vk.query_type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
956          for (uint32_t p = 0; p < pool->n_passes; p++) {
957             uint64_t *pass_slot = pool->bo->map +
958                khr_perf_query_availability_offset(pool, firstQuery + i, p);
959             *pass_slot = 0;
960          }
961       } else {
962          uint64_t *slot = query_slot(pool, firstQuery + i);
963          *slot = 0;
964       }
965    }
966 }
967 
968 static const uint32_t vk_pipeline_stat_to_reg[] = {
969    GENX(IA_VERTICES_COUNT_num),
970    GENX(IA_PRIMITIVES_COUNT_num),
971    GENX(VS_INVOCATION_COUNT_num),
972    GENX(GS_INVOCATION_COUNT_num),
973    GENX(GS_PRIMITIVES_COUNT_num),
974    GENX(CL_INVOCATION_COUNT_num),
975    GENX(CL_PRIMITIVES_COUNT_num),
976    GENX(PS_INVOCATION_COUNT_num),
977    GENX(HS_INVOCATION_COUNT_num),
978    GENX(DS_INVOCATION_COUNT_num),
979    GENX(CS_INVOCATION_COUNT_num),
980 #if GFX_VERx10 >= 125
981    GENX(TASK_INVOCATION_COUNT_num),
982    GENX(MESH_INVOCATION_COUNT_num)
983 #endif
984 };
985 
986 static void
987 emit_pipeline_stat(struct mi_builder *b, uint32_t stat,
988                    struct anv_address addr)
989 {
990    STATIC_ASSERT(ANV_PIPELINE_STATISTICS_MASK ==
991                  (1 << ARRAY_SIZE(vk_pipeline_stat_to_reg)) - 1);
992 
993    assert(stat < ARRAY_SIZE(vk_pipeline_stat_to_reg));
994    mi_store(b, mi_mem64(addr), mi_reg64(vk_pipeline_stat_to_reg[stat]));
995 }
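
/* Pipeline statistics slots are written with a begin value at byte offset 8
 * and an end value at offset 16 for the first enabled statistic, each
 * further statistic advancing by 16 bytes (see the Begin/End entrypoints).
 * In qwords: slot[0] = availability, slot[2n + 1] = begin, slot[2n + 2] =
 * end for the n-th enabled statistic, which is what GetQueryPoolResults()
 * subtracts.
 */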
996 
997 static void
998 emit_xfb_query(struct mi_builder *b, uint32_t stream,
999                struct anv_address addr)
1000 {
1001    assert(stream < MAX_XFB_STREAMS);
1002 
1003    mi_store(b, mi_mem64(anv_address_add(addr, 0)),
1004                mi_reg64(GENX(SO_NUM_PRIMS_WRITTEN0_num) + stream * 8));
1005    mi_store(b, mi_mem64(anv_address_add(addr, 16)),
1006                mi_reg64(GENX(SO_PRIM_STORAGE_NEEDED0_num) + stream * 8));
1007 }
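
/* emit_xfb_query() is called with the slot address + 8 at begin time and
 * + 16 at end time, so a transform feedback slot ends up as:
 *
 *    qword 0 : availability
 *    qword 1 : SO_NUM_PRIMS_WRITTEN at begin,   qword 2 : at end
 *    qword 3 : SO_PRIM_STORAGE_NEEDED at begin, qword 4 : at end
 *
 * GetQueryPoolResults() reports slot[2] - slot[1] (primitives written) and
 * slot[4] - slot[3] (primitive storage needed).
 */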
1008 
1009 static void
1010 emit_perf_intel_query(struct anv_cmd_buffer *cmd_buffer,
1011                       struct anv_query_pool *pool,
1012                       struct mi_builder *b,
1013                       struct anv_address query_addr,
1014                       bool end)
1015 {
1016    const struct intel_perf_query_field_layout *layout =
1017       &cmd_buffer->device->physical->perf->query_layout;
1018    struct anv_address data_addr =
1019       anv_address_add(query_addr, intel_perf_query_data_offset(pool, end));
1020 
1021    for (uint32_t f = 0; f < layout->n_fields; f++) {
1022       const struct intel_perf_query_field *field =
1023          &layout->fields[end ? f : (layout->n_fields - 1 - f)];
1024 
1025       switch (field->type) {
1026       case INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC:
1027          anv_batch_emit(&cmd_buffer->batch, GENX(MI_REPORT_PERF_COUNT), rpc) {
1028             rpc.MemoryAddress = anv_address_add(data_addr, field->location);
1029          }
1030          break;
1031 
1032       case INTEL_PERF_QUERY_FIELD_TYPE_SRM_PERFCNT:
1033       case INTEL_PERF_QUERY_FIELD_TYPE_SRM_RPSTAT:
1034       case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_A:
1035       case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B:
1036       case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C:
1037       case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_PEC: {
1038          struct anv_address addr = anv_address_add(data_addr, field->location);
1039          struct mi_value src = field->size == 8 ?
1040             mi_reg64(field->mmio_offset) :
1041             mi_reg32(field->mmio_offset);
1042          struct mi_value dst = field->size == 8 ?
1043             mi_mem64(addr) : mi_mem32(addr);
1044          mi_store(b, dst, src);
1045          break;
1046       }
1047 
1048       default:
1049          unreachable("Invalid query field");
1050          break;
1051       }
1052    }
1053 }
1054 
1055 static bool
1056 append_query_clear_flush(struct anv_cmd_buffer *cmd_buffer,
1057                          struct anv_query_pool *pool,
1058                          const char *reason)
1059 {
1060    if (cmd_buffer->state.queries.clear_bits == 0)
1061       return false;
1062 
1063    anv_add_pending_pipe_bits(cmd_buffer,
1064                              ANV_PIPE_QUERY_BITS(
1065                                 cmd_buffer->state.queries.clear_bits),
1066                              reason);
1067    return true;
1068 }
1069 
1070 
1071 void genX(CmdBeginQueryIndexedEXT)(
1072     VkCommandBuffer                             commandBuffer,
1073     VkQueryPool                                 queryPool,
1074     uint32_t                                    query,
1075     VkQueryControlFlags                         flags,
1076     uint32_t                                    index)
1077 {
1078    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1079    ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1080    struct anv_address query_addr = anv_query_address(pool, query);
1081 
1082    if (append_query_clear_flush(cmd_buffer, pool,
1083                                 "CmdBeginQuery* flush query clears"))
1084       genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
1085 
1086    struct mi_builder b;
1087    mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
1088    const uint32_t mocs = anv_mocs_for_address(cmd_buffer->device, &query_addr);
1089    mi_builder_set_mocs(&b, mocs);
1090 
1091    switch (pool->vk.query_type) {
1092    case VK_QUERY_TYPE_OCCLUSION:
1093       cmd_buffer->state.gfx.n_occlusion_queries++;
1094       cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_OCCLUSION_QUERY_ACTIVE;
1095       emit_ps_depth_count(cmd_buffer, anv_address_add(query_addr, 8));
1096       break;
1097 
1098    case VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT:
1099       genx_batch_emit_pipe_control(&cmd_buffer->batch,
1100                                    cmd_buffer->device->info,
1101                                    cmd_buffer->state.current_pipeline,
1102                                    ANV_PIPE_CS_STALL_BIT |
1103                                    ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1104       mi_store(&b, mi_mem64(anv_address_add(query_addr, 8)),
1105                    mi_reg64(GENX(CL_INVOCATION_COUNT_num)));
1106       break;
1107 
1108 #if GFX_VERx10 >= 125
1109    case VK_QUERY_TYPE_MESH_PRIMITIVES_GENERATED_EXT:
1110       genx_batch_emit_pipe_control(&cmd_buffer->batch,
1111                                    cmd_buffer->device->info,
1112                                    cmd_buffer->state.current_pipeline,
1113                                    ANV_PIPE_CS_STALL_BIT |
1114                                    ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1115       mi_store(&b, mi_mem64(anv_address_add(query_addr, 8)),
1116                    mi_reg64(GENX(MESH_PRIMITIVE_COUNT_num)));
1117       break;
1118 #endif
1119 
1120    case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
1121       /* TODO: This might only be necessary for certain stats */
1122       genx_batch_emit_pipe_control(&cmd_buffer->batch,
1123                                    cmd_buffer->device->info,
1124                                    cmd_buffer->state.current_pipeline,
1125                                    ANV_PIPE_CS_STALL_BIT |
1126                                    ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1127 
1128       uint32_t statistics = pool->vk.pipeline_statistics;
1129       uint32_t offset = 8;
1130       while (statistics) {
1131          uint32_t stat = u_bit_scan(&statistics);
1132          emit_pipeline_stat(&b, stat, anv_address_add(query_addr, offset));
1133          offset += 16;
1134       }
1135       break;
1136    }
1137 
1138    case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
1139       genx_batch_emit_pipe_control(&cmd_buffer->batch,
1140                                    cmd_buffer->device->info,
1141                                    cmd_buffer->state.current_pipeline,
1142                                    ANV_PIPE_CS_STALL_BIT |
1143                                    ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1144       emit_xfb_query(&b, index, anv_address_add(query_addr, 8));
1145       break;
1146 
1147    case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: {
1148       if (!khr_perf_query_ensure_relocs(cmd_buffer))
1149          return;
1150 
1151       const struct anv_physical_device *pdevice = cmd_buffer->device->physical;
1152       const struct intel_perf_query_field_layout *layout = &pdevice->perf->query_layout;
1153 
1154       uint32_t reloc_idx = 0;
1155       for (uint32_t end = 0; end < 2; end++) {
1156          for (uint32_t r = 0; r < layout->n_fields; r++) {
1157             const struct intel_perf_query_field *field =
1158                &layout->fields[end ? r : (layout->n_fields - 1 - r)];
1159             struct mi_value reg_addr =
1160                mi_iadd(
1161                   &b,
1162                   mi_imm(intel_canonical_address(pool->bo->offset +
1163                                                  khr_perf_query_data_offset(pool, query, 0, end) +
1164                                                  field->location)),
1165                   mi_reg64(ANV_PERF_QUERY_OFFSET_REG));
1166             cmd_buffer->self_mod_locations[reloc_idx++] =
1167                mi_store_relocated_address_reg64(&b, reg_addr);
1168 
1169             if (field->type != INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC &&
1170                 field->size == 8) {
1171                reg_addr =
1172                   mi_iadd(
1173                      &b,
1174                      mi_imm(intel_canonical_address(pool->bo->offset +
1175                                                     khr_perf_query_data_offset(pool, query, 0, end) +
1176                                                     field->location + 4)),
1177                      mi_reg64(ANV_PERF_QUERY_OFFSET_REG));
1178                cmd_buffer->self_mod_locations[reloc_idx++] =
1179                   mi_store_relocated_address_reg64(&b, reg_addr);
1180             }
1181          }
1182       }
1183 
1184       struct mi_value availability_write_offset =
1185          mi_iadd(
1186             &b,
1187             mi_imm(
1188                intel_canonical_address(
1189                   pool->bo->offset +
1190                   khr_perf_query_availability_offset(pool, query, 0 /* pass */))),
1191             mi_reg64(ANV_PERF_QUERY_OFFSET_REG));
1192       cmd_buffer->self_mod_locations[reloc_idx++] =
1193          mi_store_relocated_address_reg64(&b, availability_write_offset);
1194 
1195       assert(reloc_idx == pdevice->n_perf_query_commands);
1196 
1197       const struct intel_device_info *devinfo = cmd_buffer->device->info;
1198       const enum intel_engine_class engine_class = cmd_buffer->queue_family->engine_class;
1199       mi_self_mod_barrier(&b, devinfo->engine_class_prefetch[engine_class]);
1200 
1201       genx_batch_emit_pipe_control(&cmd_buffer->batch,
1202                                    cmd_buffer->device->info,
1203                                    cmd_buffer->state.current_pipeline,
1204                                    ANV_PIPE_CS_STALL_BIT |
1205                                    ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1206       cmd_buffer->perf_query_pool = pool;
1207 
1208       cmd_buffer->perf_reloc_idx = 0;
1209       for (uint32_t r = 0; r < layout->n_fields; r++) {
1210          const struct intel_perf_query_field *field =
1211             &layout->fields[layout->n_fields - 1 - r];
1212          void *dws;
1213 
1214          switch (field->type) {
1215          case INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC:
1216             dws = anv_batch_emitn(&cmd_buffer->batch,
1217                                   GENX(MI_REPORT_PERF_COUNT_length),
1218                                   GENX(MI_REPORT_PERF_COUNT),
1219                                   .MemoryAddress = query_addr /* Will be overwritten */);
1220             mi_resolve_relocated_address_token(
1221                &b,
1222                cmd_buffer->self_mod_locations[cmd_buffer->perf_reloc_idx++],
1223                dws + GENX(MI_REPORT_PERF_COUNT_MemoryAddress_start) / 8);
1224             break;
1225 
1226          case INTEL_PERF_QUERY_FIELD_TYPE_SRM_PERFCNT:
1227          case INTEL_PERF_QUERY_FIELD_TYPE_SRM_RPSTAT:
1228          case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_A:
1229          case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B:
1230          case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C:
1231          case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_PEC:
1232             dws =
1233                anv_batch_emitn(&cmd_buffer->batch,
1234                                GENX(MI_STORE_REGISTER_MEM_length),
1235                                GENX(MI_STORE_REGISTER_MEM),
1236                                .RegisterAddress = field->mmio_offset,
1237                                .MemoryAddress = query_addr /* Will be overwritten */ );
1238             mi_resolve_relocated_address_token(
1239                &b,
1240                cmd_buffer->self_mod_locations[cmd_buffer->perf_reloc_idx++],
1241                dws + GENX(MI_STORE_REGISTER_MEM_MemoryAddress_start) / 8);
1242             if (field->size == 8) {
1243                dws =
1244                   anv_batch_emitn(&cmd_buffer->batch,
1245                                   GENX(MI_STORE_REGISTER_MEM_length),
1246                                   GENX(MI_STORE_REGISTER_MEM),
1247                                   .RegisterAddress = field->mmio_offset + 4,
1248                                   .MemoryAddress = query_addr /* Will be overwritten */ );
1249                mi_resolve_relocated_address_token(
1250                   &b,
1251                   cmd_buffer->self_mod_locations[cmd_buffer->perf_reloc_idx++],
1252                   dws + GENX(MI_STORE_REGISTER_MEM_MemoryAddress_start) / 8);
1253             }
1254             break;
1255 
1256          default:
1257             unreachable("Invalid query field");
1258             break;
1259          }
1260       }
1261       break;
1262    }
1263 
1264    case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL: {
1265       genx_batch_emit_pipe_control(&cmd_buffer->batch,
1266                                    cmd_buffer->device->info,
1267                                    cmd_buffer->state.current_pipeline,
1268                                    ANV_PIPE_CS_STALL_BIT |
1269                                    ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1270       emit_perf_intel_query(cmd_buffer, pool, &b, query_addr, false);
1271       break;
1272    }
1273    case VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR:
1274       emit_query_mi_flush_availability(cmd_buffer, query_addr, false);
1275       break;
1276    case VK_QUERY_TYPE_VIDEO_ENCODE_FEEDBACK_KHR:
1277       emit_query_mi_availability(&b, query_addr, false);
1278       break;
1279    default:
1280       unreachable("");
1281    }
1282 }
1283 
1284 void genX(CmdEndQueryIndexedEXT)(
1285     VkCommandBuffer                             commandBuffer,
1286     VkQueryPool                                 queryPool,
1287     uint32_t                                    query,
1288     uint32_t                                    index)
1289 {
1290    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1291    ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1292    struct anv_address query_addr = anv_query_address(pool, query);
1293 
1294    struct mi_builder b;
1295    mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
1296 
1297    switch (pool->vk.query_type) {
1298    case VK_QUERY_TYPE_OCCLUSION:
1299       emit_ps_depth_count(cmd_buffer, anv_address_add(query_addr, 16));
1300       emit_query_pc_availability(cmd_buffer, query_addr, true);
1301       cmd_buffer->state.gfx.n_occlusion_queries--;
1302       cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_OCCLUSION_QUERY_ACTIVE;
1303       break;
1304 
1305    case VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT:
1306       /* Ensure previous commands have completed before capturing the register
1307        * value.
1308        */
1309       genx_batch_emit_pipe_control(&cmd_buffer->batch,
1310                                    cmd_buffer->device->info,
1311                                    cmd_buffer->state.current_pipeline,
1312                                    ANV_PIPE_CS_STALL_BIT |
1313                                    ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1314 
1315       mi_store(&b, mi_mem64(anv_address_add(query_addr, 16)),
1316                    mi_reg64(GENX(CL_INVOCATION_COUNT_num)));
1317       emit_query_mi_availability(&b, query_addr, true);
1318       break;
1319 
1320 #if GFX_VERx10 >= 125
1321    case VK_QUERY_TYPE_MESH_PRIMITIVES_GENERATED_EXT:
1322       genx_batch_emit_pipe_control(&cmd_buffer->batch,
1323                                    cmd_buffer->device->info,
1324                                    cmd_buffer->state.current_pipeline,
1325                                    ANV_PIPE_CS_STALL_BIT |
1326                                    ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1327       mi_store(&b, mi_mem64(anv_address_add(query_addr, 16)),
1328                    mi_reg64(GENX(MESH_PRIMITIVE_COUNT_num)));
1329       emit_query_mi_availability(&b, query_addr, true);
1330       break;
1331 #endif
1332 
1333    case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
1334       /* TODO: This might only be necessary for certain stats */
1335       genx_batch_emit_pipe_control(&cmd_buffer->batch,
1336                                    cmd_buffer->device->info,
1337                                    cmd_buffer->state.current_pipeline,
1338                                    ANV_PIPE_CS_STALL_BIT |
1339                                    ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1340 
1341       uint32_t statistics = pool->vk.pipeline_statistics;
1342       uint32_t offset = 16;
1343       while (statistics) {
1344          uint32_t stat = u_bit_scan(&statistics);
1345          emit_pipeline_stat(&b, stat, anv_address_add(query_addr, offset));
1346          offset += 16;
1347       }
1348 
1349       emit_query_mi_availability(&b, query_addr, true);
1350       break;
1351    }
1352 
1353    case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
1354       genx_batch_emit_pipe_control(&cmd_buffer->batch,
1355                                    cmd_buffer->device->info,
1356                                    cmd_buffer->state.current_pipeline,
1357                                    ANV_PIPE_CS_STALL_BIT |
1358                                    ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1359       emit_xfb_query(&b, index, anv_address_add(query_addr, 16));
1360 #if GFX_VER == 11
1361       /* Running the following CTS pattern on ICL will likely report a failure:
1362        *
1363        * dEQP-VK.transform_feedback.primitives_generated_query.get.queue_reset.32bit.geom.*
1364        *
1365        * If you dump the returned values in genX(GetQueryPoolResults)(), you
1366        * will notice that the last 64bit value is 0 and rereading the value
1367        * once more will return a non-zero value. This seems to indicate that
1368        * the memory writes are not ordered somehow... Otherwise the
1369        * availability write below would ensure the previous writes above have
1370        * completed.
1371        *
1372        * So as a workaround, we stall CS to make sure the previous writes have
1373        * landed before emitting the availability.
1374        */
1375       genx_batch_emit_pipe_control(&cmd_buffer->batch,
1376                                    cmd_buffer->device->info,
1377                                    cmd_buffer->state.current_pipeline,
1378                                    ANV_PIPE_CS_STALL_BIT);
1379 #endif
1380       emit_query_mi_availability(&b, query_addr, true);
1381       break;
1382 
1383    case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: {
1384       genx_batch_emit_pipe_control(&cmd_buffer->batch,
1385                                    cmd_buffer->device->info,
1386                                    cmd_buffer->state.current_pipeline,
1387                                    ANV_PIPE_CS_STALL_BIT |
1388                                    ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1389       cmd_buffer->perf_query_pool = pool;
1390 
1391       if (!khr_perf_query_ensure_relocs(cmd_buffer))
1392          return;
1393 
1394       const struct anv_physical_device *pdevice = cmd_buffer->device->physical;
1395       const struct intel_perf_query_field_layout *layout = &pdevice->perf->query_layout;
1396 
1397       void *dws;
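      /* A rough sketch of the relocation scheme used below: for
       * VK_KHR_performance_query the counter pass is only selected at submit
       * time (VkPerformanceQuerySubmitInfoKHR), so the final report address
       * within the query slot is not known while recording. Each
       * MemoryAddress below is therefore programmed with a placeholder, and
       * the location of the address dwords is recorded in
       * cmd_buffer->self_mod_locations[] so that
       * mi_resolve_relocated_address_token() can have them patched with the
       * real address before the batch executes.
       */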
1398       for (uint32_t r = 0; r < layout->n_fields; r++) {
1399          const struct intel_perf_query_field *field = &layout->fields[r];
1400 
1401          switch (field->type) {
1402          case INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC:
1403             dws = anv_batch_emitn(&cmd_buffer->batch,
1404                                   GENX(MI_REPORT_PERF_COUNT_length),
1405                                   GENX(MI_REPORT_PERF_COUNT),
1406                                   .MemoryAddress = query_addr /* Will be overwritten */);
1407             mi_resolve_relocated_address_token(
1408                &b,
1409                cmd_buffer->self_mod_locations[cmd_buffer->perf_reloc_idx++],
1410                dws + GENX(MI_REPORT_PERF_COUNT_MemoryAddress_start) / 8);
1411             break;
1412 
1413          case INTEL_PERF_QUERY_FIELD_TYPE_SRM_PERFCNT:
1414          case INTEL_PERF_QUERY_FIELD_TYPE_SRM_RPSTAT:
1415          case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_A:
1416          case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B:
1417          case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C:
1418          case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_PEC:
1419             dws =
1420                anv_batch_emitn(&cmd_buffer->batch,
1421                                GENX(MI_STORE_REGISTER_MEM_length),
1422                                GENX(MI_STORE_REGISTER_MEM),
1423                                .RegisterAddress = field->mmio_offset,
1424                                .MemoryAddress = query_addr /* Will be overwritten */ );
1425             mi_resolve_relocated_address_token(
1426                &b,
1427                cmd_buffer->self_mod_locations[cmd_buffer->perf_reloc_idx++],
1428                dws + GENX(MI_STORE_REGISTER_MEM_MemoryAddress_start) / 8);
1429             if (field->size == 8) {
1430                dws =
1431                   anv_batch_emitn(&cmd_buffer->batch,
1432                                   GENX(MI_STORE_REGISTER_MEM_length),
1433                                   GENX(MI_STORE_REGISTER_MEM),
1434                                   .RegisterAddress = field->mmio_offset + 4,
1435                                   .MemoryAddress = query_addr /* Will be overwritten */ );
1436                mi_resolve_relocated_address_token(
1437                   &b,
1438                   cmd_buffer->self_mod_locations[cmd_buffer->perf_reloc_idx++],
1439                   dws + GENX(MI_STORE_REGISTER_MEM_MemoryAddress_start) / 8);
1440             }
1441             break;
1442 
1443          default:
1444             unreachable("Invalid query field");
1445             break;
1446          }
1447       }
1448 
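      /* Finally mark the query available. The destination address of this
       * MI_STORE_DATA_IMM goes through the same relocation mechanism as the
       * report writes above, which is why it consumes one more
       * self_mod_locations[] entry.
       */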
1449       dws =
1450          anv_batch_emitn(&cmd_buffer->batch,
1451                          GENX(MI_STORE_DATA_IMM_length),
1452                          GENX(MI_STORE_DATA_IMM),
1453                          .ImmediateData = true);
1454       mi_resolve_relocated_address_token(
1455          &b,
1456          cmd_buffer->self_mod_locations[cmd_buffer->perf_reloc_idx++],
1457          dws + GENX(MI_STORE_DATA_IMM_Address_start) / 8);
1458 
1459       assert(cmd_buffer->perf_reloc_idx == pdevice->n_perf_query_commands);
1460       break;
1461    }
1462 
1463    case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL: {
1464       genx_batch_emit_pipe_control(&cmd_buffer->batch,
1465                                    cmd_buffer->device->info,
1466                                    cmd_buffer->state.current_pipeline,
1467                                    ANV_PIPE_CS_STALL_BIT |
1468                                    ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1469       uint32_t marker_offset = intel_perf_marker_offset();
1470       mi_store(&b, mi_mem64(anv_address_add(query_addr, marker_offset)),
1471                    mi_imm(cmd_buffer->intel_perf_marker));
1472       emit_perf_intel_query(cmd_buffer, pool, &b, query_addr, true);
1473       emit_query_mi_availability(&b, query_addr, true);
1474       break;
1475    }
1476    case VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR:
1477       emit_query_mi_flush_availability(cmd_buffer, query_addr, true);
1478       break;
1479 
1480 #if GFX_VER < 11
1481 #define MFC_BITSTREAM_BYTECOUNT_FRAME_REG       0x128A0
1482 #define HCP_BITSTREAM_BYTECOUNT_FRAME_REG       0x1E9A0
1483 #elif GFX_VER >= 11
1484 #define MFC_BITSTREAM_BYTECOUNT_FRAME_REG       0x1C08A0
1485 #define HCP_BITSTREAM_BYTECOUNT_FRAME_REG       0x1C28A0
1486 #endif
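/* Per the register names, these MMIO registers hold the number of bitstream
 * bytes written for the current frame by the MFC (AVC) and HCP (HEVC) encode
 * engines; the encode feedback query below simply snapshots the relevant one
 * into the query slot. The offsets moved between pre-Gfx11 and Gfx11+ parts,
 * hence the two sets of defines.
 */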
1487 
1488    case VK_QUERY_TYPE_VIDEO_ENCODE_FEEDBACK_KHR: {
1489       uint32_t reg_addr;
1490 
1491       if (pool->codec & VK_VIDEO_CODEC_OPERATION_ENCODE_H264_BIT_KHR) {
1492          reg_addr = MFC_BITSTREAM_BYTECOUNT_FRAME_REG;
1493       } else if (pool->codec & VK_VIDEO_CODEC_OPERATION_ENCODE_H265_BIT_KHR) {
1494          reg_addr = HCP_BITSTREAM_BYTECOUNT_FRAME_REG;
1495       } else {
1496          unreachable("Invalid codec operation");
1497       }
1498 
1499       mi_store(&b, mi_mem64(anv_address_add(query_addr, 8)), mi_reg32(reg_addr));
1500       emit_query_mi_availability(&b, query_addr, true);
1501       break;
1502    }
1503    default:
1504       unreachable("");
1505    }
1506 
1507    /* When multiview is active the spec requires that N consecutive query
1508     * indices are used, where N is the number of active views in the subpass.
1509     * The spec allows us to write results to only one of the queries,
1510     * but we still need to manage result availability for all the query indices.
1511     * Since we only emit a single query for all active views in the
1512     * first index, mark the other query indices as being already available
1513     * with result 0.
1514     */
1515    if (cmd_buffer->state.gfx.view_mask) {
1516       const uint32_t num_queries =
1517          util_bitcount(cmd_buffer->state.gfx.view_mask);
1518       if (num_queries > 1)
1519          emit_zero_queries(cmd_buffer, &b, pool, query + 1, num_queries - 1);
1520    }
1521 }
1522 
1523 #define TIMESTAMP 0x2358
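/* MMIO offset of the command streamer TIMESTAMP register, sampled below with
 * mi_reg64() for the TOP_OF_PIPE case; the bottom-of-pipe case uses a
 * PIPE_CONTROL / MI_FLUSH_DW post-sync timestamp write instead.
 */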
1524 
1525 void genX(CmdWriteTimestamp2)(
1526     VkCommandBuffer                             commandBuffer,
1527     VkPipelineStageFlags2                       stage,
1528     VkQueryPool                                 queryPool,
1529     uint32_t                                    query)
1530 {
1531    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1532    ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1533    struct anv_address query_addr = anv_query_address(pool, query);
1534 
1535    assert(pool->vk.query_type == VK_QUERY_TYPE_TIMESTAMP);
1536 
1537    if (append_query_clear_flush(cmd_buffer, pool,
1538                                 "CmdWriteTimestamp flush query clears"))
1539       genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
1540 
1541    struct mi_builder b;
1542    mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
1543 
1544    if (stage == VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT) {
1545       mi_store(&b, mi_mem64(anv_address_add(query_addr, 8)),
1546                    mi_reg64(TIMESTAMP));
1547       emit_query_mi_availability(&b, query_addr, true);
1548    } else {
1549       /* Everything else is bottom-of-pipe */
1550       cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_POST_SYNC_BIT;
1551       genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
1552 
1553       bool cs_stall_needed =
1554          (GFX_VER == 9 && cmd_buffer->device->info->gt == 4);
1555 
1556       if (anv_cmd_buffer_is_blitter_queue(cmd_buffer) ||
1557           anv_cmd_buffer_is_video_queue(cmd_buffer)) {
1558          /* Wa_16018063123 - emit fast color dummy blit before MI_FLUSH_DW. */
1559          if (intel_needs_workaround(cmd_buffer->device->info, 16018063123)) {
1560             genX(batch_emit_fast_color_dummy_blit)(&cmd_buffer->batch,
1561                                                    cmd_buffer->device);
1562          }
1563          anv_batch_emit(&cmd_buffer->batch, GENX(MI_FLUSH_DW), dw) {
1564             dw.Address = anv_address_add(query_addr, 8);
1565             dw.PostSyncOperation = WriteTimestamp;
1566          }
1567          emit_query_mi_flush_availability(cmd_buffer, query_addr, true);
1568       } else {
1569          genx_batch_emit_pipe_control_write
1570             (&cmd_buffer->batch, cmd_buffer->device->info,
1571              cmd_buffer->state.current_pipeline, WriteTimestamp,
1572              anv_address_add(query_addr, 8), 0,
1573              cs_stall_needed ? ANV_PIPE_CS_STALL_BIT : 0);
1574          emit_query_pc_availability(cmd_buffer, query_addr, true);
1575       }
1576 
1577    }
1578 
1579 
1580    /* When multiview is active the spec requires that N consecutive query
1581     * indices are used, where N is the number of active views in the subpass.
1582     * The spec allows us to write results to only one of the queries,
1583     * but we still need to manage result availability for all the query indices.
1584     * Since we only emit a single query for all active views in the
1585     * first index, mark the other query indices as being already available
1586     * with result 0.
1587     */
1588    if (cmd_buffer->state.gfx.view_mask) {
1589       const uint32_t num_queries =
1590          util_bitcount(cmd_buffer->state.gfx.view_mask);
1591       if (num_queries > 1)
1592          emit_zero_queries(cmd_buffer, &b, pool, query + 1, num_queries - 1);
1593    }
1594 }
1595 
1596 #define MI_PREDICATE_SRC0    0x2400
1597 #define MI_PREDICATE_SRC1    0x2408
1598 #define MI_PREDICATE_RESULT  0x2418
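/* MI predication registers: gpu_write_query_result_cond() below loads the
 * availability value and a reference value into SRC0/SRC1, emits
 * MI_PREDICATE to compare them, and then uses mi_store_if() so the result
 * write only happens when the comparison matched.
 */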
1599 
1600 /**
1601  * Writes the result of a query to dst_addr if the value at poll_addr is
1602  * equal to the reference value.
1603  */
1604 static void
1605 gpu_write_query_result_cond(struct anv_cmd_buffer *cmd_buffer,
1606                             struct mi_builder *b,
1607                             struct anv_address poll_addr,
1608                             struct anv_address dst_addr,
1609                             uint64_t ref_value,
1610                             VkQueryResultFlags flags,
1611                             uint32_t value_index,
1612                             struct mi_value query_result)
1613 {
1614    mi_store(b, mi_reg64(MI_PREDICATE_SRC0), mi_mem64(poll_addr));
1615    mi_store(b, mi_reg64(MI_PREDICATE_SRC1), mi_imm(ref_value));
1616    anv_batch_emit(&cmd_buffer->batch, GENX(MI_PREDICATE), mip) {
1617       mip.LoadOperation    = LOAD_LOAD;
1618       mip.CombineOperation = COMBINE_SET;
1619       mip.CompareOperation = COMPARE_SRCS_EQUAL;
1620    }
1621 
1622    if (flags & VK_QUERY_RESULT_64_BIT) {
1623       struct anv_address res_addr = anv_address_add(dst_addr, value_index * 8);
1624       mi_store_if(b, mi_mem64(res_addr), query_result);
1625    } else {
1626       struct anv_address res_addr = anv_address_add(dst_addr, value_index * 4);
1627       mi_store_if(b, mi_mem32(res_addr), query_result);
1628    }
1629 }
1630 
1631 static void
1632 gpu_write_query_result(struct mi_builder *b,
1633                        struct anv_address dst_addr,
1634                        VkQueryResultFlags flags,
1635                        uint32_t value_index,
1636                        struct mi_value query_result)
1637 {
1638    if (flags & VK_QUERY_RESULT_64_BIT) {
1639       struct anv_address res_addr = anv_address_add(dst_addr, value_index * 8);
1640       mi_store(b, mi_mem64(res_addr), query_result);
1641    } else {
1642       struct anv_address res_addr = anv_address_add(dst_addr, value_index * 4);
1643       mi_store(b, mi_mem32(res_addr), query_result);
1644    }
1645 }
1646 
1647 static struct mi_value
1648 compute_query_result(struct mi_builder *b, struct anv_address addr)
1649 {
1650    return mi_isub(b, mi_mem64(anv_address_add(addr, 8)),
1651                      mi_mem64(anv_address_add(addr, 0)));
1652 }
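/* compute_query_result() above reads a begin/end pair out of a query slot.
 * For the delta-style queries handled below, the slot layout (in bytes) is:
 *
 *    0: availability (64-bit)
 *    8: begin value  (written at vkCmdBeginQuery* time)
 *   16: end value    (written at vkCmdEndQuery* time)
 *
 * so callers pass (query_addr + 8) and get end - begin. Pipeline statistics
 * queries repeat the begin/end pair every 16 bytes, one pair per enabled
 * statistic.
 */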
1653 
1654 static void
1655 copy_query_results_with_cs(struct anv_cmd_buffer *cmd_buffer,
1656                            struct anv_query_pool *pool,
1657                            struct anv_address dest_addr,
1658                            uint64_t dest_stride,
1659                            uint32_t first_query,
1660                            uint32_t query_count,
1661                            VkQueryResultFlags flags)
1662 {
1663    enum anv_pipe_bits needed_flushes = 0;
1664 
1665    trace_intel_begin_query_copy_cs(&cmd_buffer->trace);
1666 
1667    /* If render target writes are ongoing, request a render target cache flush
1668     * to ensure proper ordering of the commands from the 3d pipe and the
1669     * command streamer.
1670     */
1671 
1672    const enum anv_query_bits query_bits =
1673       cmd_buffer->state.queries.buffer_write_bits |
1674       cmd_buffer->state.queries.clear_bits;
1675 
1676    needed_flushes |= ANV_PIPE_QUERY_BITS(query_bits);
1677 
1678    /* Occlusion & timestamp queries are written using a PIPE_CONTROL, and
1679     * because we're about to copy those values with MI commands, we need to
1680     * stall the command streamer to make sure the PIPE_CONTROL writes have
1681     * landed, otherwise we could see inconsistent values & availability.
1682     *
1683     *  From the vulkan spec:
1684     *
1685     *     "vkCmdCopyQueryPoolResults is guaranteed to see the effect of
1686     *     previous uses of vkCmdResetQueryPool in the same queue, without any
1687     *     additional synchronization."
1688     */
1689    if (pool->vk.query_type == VK_QUERY_TYPE_OCCLUSION ||
1690        pool->vk.query_type == VK_QUERY_TYPE_TIMESTAMP)
1691       needed_flushes |= ANV_PIPE_CS_STALL_BIT;
1692 
1693    if (needed_flushes) {
1694       anv_add_pending_pipe_bits(cmd_buffer,
1695                                 needed_flushes,
1696                                 "CopyQueryPoolResults");
1697       genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
1698    }
1699 
1700    struct mi_builder b;
1701    mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
1702    mi_builder_set_mocs(&b, anv_mocs_for_address(
1703                           cmd_buffer->device,
1704                           &(struct anv_address) { .bo = pool->bo }));
1705 
1706    for (uint32_t i = 0; i < query_count; i++) {
1707       struct anv_address query_addr = anv_query_address(pool, first_query + i);
1708       struct mi_value result;
1709 
1710       /* Wait for the availability write to land before we go read the data */
1711       if (flags & VK_QUERY_RESULT_WAIT_BIT) {
1712          anv_batch_emit(&cmd_buffer->batch, GENX(MI_SEMAPHORE_WAIT), sem) {
1713             sem.WaitMode            = PollingMode;
1714             sem.CompareOperation    = COMPARE_SAD_EQUAL_SDD;
1715             sem.SemaphoreDataDword  = true;
1716             sem.SemaphoreAddress    = query_addr;
1717          }
1718       }
1719 
1720       uint32_t idx = 0;
1721       switch (pool->vk.query_type) {
1722       case VK_QUERY_TYPE_OCCLUSION:
1723       case VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT:
1724 #if GFX_VERx10 >= 125
1725       case VK_QUERY_TYPE_MESH_PRIMITIVES_GENERATED_EXT:
1726 #endif
1727          result = compute_query_result(&b, anv_address_add(query_addr, 8));
1728          /* Like in the case of vkGetQueryPoolResults, if the query is
1729           * unavailable and the VK_QUERY_RESULT_PARTIAL_BIT flag is set,
1730           * conservatively write 0 as the query result. If the
1731           * VK_QUERY_RESULT_PARTIAL_BIT isn't set, don't write any value.
1732           */
1733          gpu_write_query_result_cond(cmd_buffer, &b, query_addr, dest_addr,
1734                                      1 /* available */, flags, idx, result);
1735          if (flags & VK_QUERY_RESULT_PARTIAL_BIT) {
1736             gpu_write_query_result_cond(cmd_buffer, &b, query_addr, dest_addr,
1737                                         0 /* unavailable */, flags, idx, mi_imm(0));
1738          }
1739          idx++;
1740          break;
1741 
1742       case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
1743          uint32_t statistics = pool->vk.pipeline_statistics;
1744          while (statistics) {
1745             UNUSED uint32_t stat = u_bit_scan(&statistics);
1746             result = compute_query_result(&b, anv_address_add(query_addr,
1747                                                               idx * 16 + 8));
1748             gpu_write_query_result(&b, dest_addr, flags, idx++, result);
1749          }
1750          assert(idx == util_bitcount(pool->vk.pipeline_statistics));
1751          break;
1752       }
1753 
1754       case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
1755          result = compute_query_result(&b, anv_address_add(query_addr, 8));
1756          gpu_write_query_result(&b, dest_addr, flags, idx++, result);
1757          result = compute_query_result(&b, anv_address_add(query_addr, 24));
1758          gpu_write_query_result(&b, dest_addr, flags, idx++, result);
1759          break;
1760 
1761       case VK_QUERY_TYPE_TIMESTAMP:
1762          result = mi_mem64(anv_address_add(query_addr, 8));
1763          gpu_write_query_result(&b, dest_addr, flags, idx++, result);
1764          break;
1765 
1766 #if GFX_VERx10 >= 125
1767       case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR:
1768       case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR:
1769       case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR:
1770          result = mi_mem64(anv_address_add(query_addr, 8));
1771          gpu_write_query_result(&b, dest_addr, flags, idx++, result);
1772          break;
1773 
1774       case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR:
1775          result = mi_mem64(anv_address_add(query_addr, 16));
1776          gpu_write_query_result(&b, dest_addr, flags, idx++, result);
1777          break;
1778 #endif
1779 
1780       case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR:
1781          unreachable("Copy KHR performance query results not implemented");
1782          break;
1783 
1784       default:
1785          unreachable("unhandled query type");
1786       }
1787 
1788       if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
1789          gpu_write_query_result(&b, dest_addr, flags, idx,
1790                                 mi_mem64(query_addr));
1791       }
1792 
1793       dest_addr = anv_address_add(dest_addr, dest_stride);
1794    }
1795 
1796    trace_intel_end_query_copy_cs(&cmd_buffer->trace, query_count);
1797 }
1798 
1799 static void
1800 copy_query_results_with_shader(struct anv_cmd_buffer *cmd_buffer,
1801                                struct anv_query_pool *pool,
1802                                struct anv_address dest_addr,
1803                                uint64_t dest_stride,
1804                                uint32_t first_query,
1805                                uint32_t query_count,
1806                                VkQueryResultFlags flags)
1807 {
1808    struct anv_device *device = cmd_buffer->device;
1809    enum anv_pipe_bits needed_flushes = 0;
1810 
1811    trace_intel_begin_query_copy_shader(&cmd_buffer->trace);
1812 
1813    /* Ensure all query MI writes are visible to the shader */
1814    struct mi_builder b;
1815    mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
1816    mi_ensure_write_fence(&b);
1817 
1818    /* If this is the first command in the batch buffer, make sure we have a
1819     * consistent pipeline mode.
1820     */
1821    if (cmd_buffer->state.current_pipeline == UINT32_MAX)
1822       genX(flush_pipeline_select_3d)(cmd_buffer);
1823 
1824    if ((cmd_buffer->state.queries.buffer_write_bits |
1825         cmd_buffer->state.queries.clear_bits) & ANV_QUERY_WRITES_RT_FLUSH)
1826       needed_flushes |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
1827 
1828    if ((cmd_buffer->state.queries.buffer_write_bits |
1829         cmd_buffer->state.queries.clear_bits) & ANV_QUERY_WRITES_DATA_FLUSH) {
1830       needed_flushes |= (ANV_PIPE_HDC_PIPELINE_FLUSH_BIT |
1831                          ANV_PIPE_UNTYPED_DATAPORT_CACHE_FLUSH_BIT);
1832    }
1833 
1834    /* Flushes for the queries to complete */
1835    if (flags & VK_QUERY_RESULT_WAIT_BIT) {
1836       /* Some queries are done with shaders, so we need to have them flush
1837        * their higher level cache writes. The L3 should be shared across the GPU.
1838        */
1839       if (pool->vk.query_type == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR ||
1840           pool->vk.query_type == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR ||
1841           pool->vk.query_type == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR ||
1842           pool->vk.query_type == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR) {
1843          needed_flushes |= ANV_PIPE_UNTYPED_DATAPORT_CACHE_FLUSH_BIT;
1844       }
1845       /* And we need to stall for previous CS writes to land or the flushes to
1846        * complete.
1847        */
1848       needed_flushes |= ANV_PIPE_CS_STALL_BIT;
1849    }
1850 
1851    /* Occlusion & timestamp queries are written using a PIPE_CONTROL, and
1852     * because we're about to copy those values, we need to stall the
1853     * command streamer to make sure the PIPE_CONTROL writes have landed,
1854     * otherwise we could see inconsistent values & availability.
1855     *
1856     *  From the vulkan spec:
1857     *
1858     *     "vkCmdCopyQueryPoolResults is guaranteed to see the effect of
1859     *     previous uses of vkCmdResetQueryPool in the same queue, without any
1860     *     additional synchronization."
1861     */
1862    if (pool->vk.query_type == VK_QUERY_TYPE_OCCLUSION ||
1863        pool->vk.query_type == VK_QUERY_TYPE_TIMESTAMP)
1864       needed_flushes |= ANV_PIPE_CS_STALL_BIT;
1865 
1866    if (needed_flushes) {
1867       anv_add_pending_pipe_bits(cmd_buffer,
1868                                 needed_flushes | ANV_PIPE_END_OF_PIPE_SYNC_BIT,
1869                                 "CopyQueryPoolResults");
1870       genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
1871    }
1872 
1873    struct anv_shader_bin *copy_kernel;
1874    VkResult ret =
1875       anv_device_get_internal_shader(
1876          cmd_buffer->device,
1877          cmd_buffer->state.current_pipeline == GPGPU ?
1878          ANV_INTERNAL_KERNEL_COPY_QUERY_RESULTS_COMPUTE :
1879          ANV_INTERNAL_KERNEL_COPY_QUERY_RESULTS_FRAGMENT,
1880          &copy_kernel);
1881    if (ret != VK_SUCCESS) {
1882       anv_batch_set_error(&cmd_buffer->batch, ret);
1883       return;
1884    }
1885 
1886    struct anv_simple_shader state = {
1887       .device               = cmd_buffer->device,
1888       .cmd_buffer           = cmd_buffer,
1889       .dynamic_state_stream = &cmd_buffer->dynamic_state_stream,
1890       .general_state_stream = &cmd_buffer->general_state_stream,
1891       .batch                = &cmd_buffer->batch,
1892       .kernel               = copy_kernel,
1893       .l3_config            = device->internal_kernels_l3_config,
1894       .urb_cfg              = &cmd_buffer->state.gfx.urb_cfg,
1895    };
1896    genX(emit_simple_shader_init)(&state);
1897 
1898    struct anv_state push_data_state =
1899       genX(simple_shader_alloc_push)(&state,
1900                                      sizeof(struct anv_query_copy_params));
1901    if (push_data_state.map == NULL)
1902       return;
1903 
1904    struct anv_query_copy_params *params = push_data_state.map;
1905 
1906    uint32_t copy_flags =
1907       ((flags & VK_QUERY_RESULT_64_BIT) ? ANV_COPY_QUERY_FLAG_RESULT64 : 0) |
1908       ((flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) ? ANV_COPY_QUERY_FLAG_AVAILABLE : 0);
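   /* These flags are consumed by the internal copy kernel: RESULT64 selects
    * 32- vs 64-bit destination values and AVAILABLE requests that the
    * availability word also be written out, matching the semantics of the
    * corresponding VkQueryResultFlagBits.
    */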
1909 
1910    uint32_t num_items = 1;
1911    uint32_t data_offset = 8 /* behind availability */;
1912    switch (pool->vk.query_type) {
1913    case VK_QUERY_TYPE_OCCLUSION:
1914       copy_flags |= ANV_COPY_QUERY_FLAG_DELTA;
1915       /* Occlusion and timestamp queries are the only ones where we could have partial data
1916        * because they are captured with a PIPE_CONTROL post sync operation. The
1917        * other ones are captured with MI_STORE_REGISTER_MEM so their data is always
1918        * available by the time we reach the copy command.
1919        */
1920       copy_flags |= (flags & VK_QUERY_RESULT_PARTIAL_BIT) ? ANV_COPY_QUERY_FLAG_PARTIAL : 0;
1921       break;
1922 
1923    case VK_QUERY_TYPE_TIMESTAMP:
1924       copy_flags |= (flags & VK_QUERY_RESULT_PARTIAL_BIT) ? ANV_COPY_QUERY_FLAG_PARTIAL : 0;
1925       break;
1926 
1927    case VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT:
1928 #if GFX_VERx10 >= 125
1929    case VK_QUERY_TYPE_MESH_PRIMITIVES_GENERATED_EXT:
1930 #endif
1931       copy_flags |= ANV_COPY_QUERY_FLAG_DELTA;
1932       break;
1933 
1934    case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1935       num_items = util_bitcount(pool->vk.pipeline_statistics);
1936       copy_flags |= ANV_COPY_QUERY_FLAG_DELTA;
1937       break;
1938 
1939    case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
1940       num_items = 2;
1941       copy_flags |= ANV_COPY_QUERY_FLAG_DELTA;
1942       break;
1943 
1944    case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR:
1945    case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR:
1946    case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR:
1947       break;
1948 
1949    case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR:
1950       data_offset += 8;
1951       break;
1952 
1953    default:
1954       unreachable("unhandled query type");
1955    }
1956 
1957    *params = (struct anv_query_copy_params) {
1958       .flags              = copy_flags,
1959       .num_queries        = query_count,
1960       .num_items          = num_items,
1961       .query_base         = first_query,
1962       .query_stride       = pool->stride,
1963       .query_data_offset  = data_offset,
1964       .destination_stride = dest_stride,
1965       .query_data_addr    = anv_address_physical(
1966          (struct anv_address) {
1967             .bo = pool->bo,
1968          }),
1969       .destination_addr   = anv_address_physical(dest_addr),
1970    };
1971 
1972    genX(emit_simple_shader_dispatch)(&state, query_count, push_data_state);
1973 
1974    /* The query copy shader writes its results using the dataport, so flush
1975     * the HDC/data cache depending on the generation. Also stall at pixel
1976     * scoreboard in case we're doing the copy with a fragment shader.
1977     */
1978    cmd_buffer->state.queries.buffer_write_bits |= ANV_QUERY_WRITES_DATA_FLUSH;
1979 
1980    trace_intel_end_query_copy_shader(&cmd_buffer->trace, query_count);
1981 }
1982 
1983 void genX(CmdCopyQueryPoolResults)(
1984     VkCommandBuffer                             commandBuffer,
1985     VkQueryPool                                 queryPool,
1986     uint32_t                                    firstQuery,
1987     uint32_t                                    queryCount,
1988     VkBuffer                                    destBuffer,
1989     VkDeviceSize                                destOffset,
1990     VkDeviceSize                                destStride,
1991     VkQueryResultFlags                          flags)
1992 {
1993    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1994    ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1995    ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
1996    struct anv_device *device = cmd_buffer->device;
1997    struct anv_physical_device *pdevice = device->physical;
1998 
1999    if (queryCount > pdevice->instance->query_copy_with_shader_threshold) {
2000       copy_query_results_with_shader(cmd_buffer, pool,
2001                                      anv_address_add(buffer->address,
2002                                                      destOffset),
2003                                      destStride,
2004                                      firstQuery,
2005                                      queryCount,
2006                                      flags);
2007    } else {
2008       copy_query_results_with_cs(cmd_buffer, pool,
2009                                  anv_address_add(buffer->address,
2010                                                  destOffset),
2011                                  destStride,
2012                                  firstQuery,
2013                                  queryCount,
2014                                  flags);
2015    }
2016 }
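/* Two copy paths: a queryCount at or below the instance's
 * query_copy_with_shader_threshold is handled with MI commands on the
 * command streamer, anything larger is handed to an internal copy shader.
 * The intent is presumably that small copies avoid the fixed cost of a
 * shader dispatch while large batches benefit from its throughput.
 */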
2017 
2018 #if GFX_VERx10 >= 125 && ANV_SUPPORT_RT
2019 
2020 #if ANV_SUPPORT_RT_GRL
2021 #include "grl/include/GRLRTASCommon.h"
2022 #include "grl/grl_metakernel_postbuild_info.h"
2023 #else
2024 #include "bvh/anv_bvh.h"
2025 #endif
2026 
2027 void
2028 genX(CmdWriteAccelerationStructuresPropertiesKHR)(
2029     VkCommandBuffer                             commandBuffer,
2030     uint32_t                                    accelerationStructureCount,
2031     const VkAccelerationStructureKHR*           pAccelerationStructures,
2032     VkQueryType                                 queryType,
2033     VkQueryPool                                 queryPool,
2034     uint32_t                                    firstQuery)
2035 {
2036    assert(queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR ||
2037           queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR ||
2038           queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR ||
2039           queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR);
2040 
2041    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
2042    ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
2043 
2044 #if !ANV_SUPPORT_RT_GRL
2045    anv_add_pending_pipe_bits(cmd_buffer,
2046                              ANV_PIPE_END_OF_PIPE_SYNC_BIT |
2047                              ANV_PIPE_DATA_CACHE_FLUSH_BIT,
2048                              "read BVH data using CS");
2049 #endif
2050 
2051    if (append_query_clear_flush(
2052           cmd_buffer, pool,
2053           "CmdWriteAccelerationStructuresPropertiesKHR flush query clears") ||
2054        !ANV_SUPPORT_RT_GRL)
2055       genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
2056 
2057    struct mi_builder b;
2058    mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
2059 
2060 #if ANV_SUPPORT_RT_GRL
2061    for (uint32_t i = 0; i < accelerationStructureCount; i++) {
2062       ANV_FROM_HANDLE(vk_acceleration_structure, accel, pAccelerationStructures[i]);
2063       struct anv_address query_addr =
2064          anv_address_add(anv_query_address(pool, firstQuery + i), 8);
2065 
2066       switch (queryType) {
2067       case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR:
2068          genX(grl_postbuild_info_compacted_size)(cmd_buffer,
2069                                                  vk_acceleration_structure_get_va(accel),
2070                                                  anv_address_physical(query_addr));
2071          break;
2072 
2073       case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR:
2074          genX(grl_postbuild_info_current_size)(cmd_buffer,
2075                                                vk_acceleration_structure_get_va(accel),
2076                                                anv_address_physical(query_addr));
2077          break;
2078 
2079       case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR:
2080       case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR:
2081          genX(grl_postbuild_info_serialized_size)(cmd_buffer,
2082                                                   vk_acceleration_structure_get_va(accel),
2083                                                   anv_address_physical(query_addr));
2084          break;
2085 
2086       default:
2087          unreachable("unhandled query type");
2088       }
2089    }
2090 
2091    /* TODO: Figure out why MTL needs ANV_PIPE_DATA_CACHE_FLUSH_BIT in order
2092     * to not lose the availability bit.
2093     */
2094    anv_add_pending_pipe_bits(cmd_buffer,
2095                              ANV_PIPE_END_OF_PIPE_SYNC_BIT |
2096                              ANV_PIPE_DATA_CACHE_FLUSH_BIT,
2097                              "after write acceleration struct props");
2098    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
2099 
2100    for (uint32_t i = 0; i < accelerationStructureCount; i++)
2101       emit_query_mi_availability(&b, anv_query_address(pool, firstQuery + i), true);
2102 
2103 #else
2104    for (uint32_t i = 0; i < accelerationStructureCount; i++) {
2105       ANV_FROM_HANDLE(vk_acceleration_structure, accel, pAccelerationStructures[i]);
2106       struct anv_address query_addr =
2107          anv_address_add(anv_query_address(pool, firstQuery + i), 8);
2108       uint64_t va = vk_acceleration_structure_get_va(accel);
2109 
2110       mi_builder_set_write_check(&b, (i == (accelerationStructureCount - 1)));
2111 
2112       switch (queryType) {
2113       case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR:
2114          va += offsetof(struct anv_accel_struct_header, compacted_size);
2115          break;
2116       case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR:
2117          va += offsetof(struct anv_accel_struct_header, size);
2118          break;
2119       case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR:
2120          va += offsetof(struct anv_accel_struct_header, serialization_size);
2121          break;
2122       case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR:
2123          va += offsetof(struct anv_accel_struct_header, instance_count);
2124          /* To respect the current setup tailored for GRL, the numBlasPtrs value is
2125           * stored in the second slot (third slot, if you count availability).
2126           */
2127          query_addr = anv_address_add(query_addr, 8);
2128          break;
2129       default:
2130          unreachable("unhandled query type");
2131       }
2132 
2133       mi_store(&b, mi_mem64(query_addr), mi_mem64(anv_address_from_u64(va)));
2134    }
2135 
2136    struct mi_builder b1;
2137    mi_builder_init(&b1, cmd_buffer->device->info, &cmd_buffer->batch);
2138 
2139    for (uint32_t i = 0; i < accelerationStructureCount; i++) {
2140       mi_builder_set_write_check(&b1, (i == (accelerationStructureCount - 1)));
2141       emit_query_mi_availability(&b1, anv_query_address(pool, firstQuery + i), true);
2142    }
2143 #endif /* ANV_SUPPORT_RT_GRL */
2144 }
2145 #endif /* GFX_VERx10 >= 125 && ANV_SUPPORT_RT */
2146