1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "anv_private.h"
31
32 #include "util/os_time.h"
33
34 #include "genxml/gen_macros.h"
35 #include "genxml/genX_pack.h"
36
37 #include "ds/intel_tracepoints.h"
38
39 #include "anv_internal_kernels.h"
40
41 /* We reserve:
42 * - GPR 14 for perf queries
43 * - GPR 15 for conditional rendering
44 */
45 #define MI_BUILDER_NUM_ALLOC_GPRS 14
46 #define MI_BUILDER_CAN_WRITE_BATCH true
47 #define __gen_get_batch_dwords anv_batch_emit_dwords
48 #define __gen_address_offset anv_address_add
49 #define __gen_get_batch_address(b, a) anv_batch_address(b, a)
50 #include "common/mi_builder.h"
51 #include "perf/intel_perf.h"
52 #include "perf/intel_perf_mdapi.h"
53 #include "perf/intel_perf_regs.h"
54
55 #include "vk_util.h"
56
57 static struct anv_address
58 anv_query_address(struct anv_query_pool *pool, uint32_t query)
59 {
60 return (struct anv_address) {
61 .bo = pool->bo,
62 .offset = query * pool->stride,
63 };
64 }
65
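/* Write the availability value with an MI_FLUSH_DW post-sync write. Used for
 * VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR pools and for timestamp availability
 * on the video/blitter queues.
 */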
66 static void
67 emit_query_mi_flush_availability(struct anv_cmd_buffer *cmd_buffer,
68 struct anv_address addr,
69 bool available)
70 {
71 anv_batch_emit(&cmd_buffer->batch, GENX(MI_FLUSH_DW), flush) {
72 flush.PostSyncOperation = WriteImmediateData;
73 flush.Address = addr;
74 flush.ImmediateData = available;
75 }
76 }
77
78 VkResult genX(CreateQueryPool)(
79 VkDevice _device,
80 const VkQueryPoolCreateInfo* pCreateInfo,
81 const VkAllocationCallbacks* pAllocator,
82 VkQueryPool* pQueryPool)
83 {
84 ANV_FROM_HANDLE(anv_device, device, _device);
85 const struct anv_physical_device *pdevice = device->physical;
86 const VkQueryPoolPerformanceCreateInfoKHR *perf_query_info = NULL;
87 struct intel_perf_counter_pass *counter_pass;
88 struct intel_perf_query_info **pass_query;
89 uint32_t n_passes = 0;
90 uint32_t data_offset = 0;
91 VK_MULTIALLOC(ma);
92 VkResult result;
93
94 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO);
95
96 /* Query pool slots are made up of some number of 64-bit values packed
97 * tightly together. For most query types, the first 64-bit value is
98 * the "available" bit which is 0 when the query is unavailable and 1 when
99 * it is available. The 64-bit values that follow are determined by the
100 * type of query.
101 *
102 * For performance queries, we have a requirement to align OA reports to
103 * 64 bytes, so we put those first and have the "available" bit behind,
104 * together with some other counters.
105 */
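/* For example (illustrative): an occlusion slot below is 1 + 2 = 3 uint64s,
 * i.e. a 24-byte stride, while a pipeline statistics slot tracking 4
 * statistics is 1 + 2 * 4 = 9 uint64s (72 bytes).
 */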
106 uint32_t uint64s_per_slot = 0;
107
108 VK_MULTIALLOC_DECL(&ma, struct anv_query_pool, pool, 1);
109
110 VkQueryPipelineStatisticFlags pipeline_statistics = 0;
111 switch (pCreateInfo->queryType) {
112 case VK_QUERY_TYPE_OCCLUSION:
113 /* Occlusion queries have two values: begin and end. */
114 uint64s_per_slot = 1 + 2;
115 break;
116 case VK_QUERY_TYPE_TIMESTAMP:
117 /* Timestamps just have the one timestamp value */
118 uint64s_per_slot = 1 + 1;
119 break;
120 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
121 pipeline_statistics = pCreateInfo->pipelineStatistics;
122 /* We're going to trust this field implicitly so we need to ensure that
123 * no unhandled extension bits leak in.
124 */
125 pipeline_statistics &= ANV_PIPELINE_STATISTICS_MASK;
126
127 /* Statistics queries have a min and max for every statistic */
128 uint64s_per_slot = 1 + 2 * util_bitcount(pipeline_statistics);
129 break;
130 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
131 /* Transform feedback queries are 4 values, begin/end for
132 * written/available.
133 */
134 uint64s_per_slot = 1 + 4;
135 break;
136 case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL: {
137 const struct intel_perf_query_field_layout *layout =
138 &pdevice->perf->query_layout;
139
140 uint64s_per_slot = 2; /* availability + marker */
141 /* Align to the requirement of the layout */
142 uint64s_per_slot = align(uint64s_per_slot,
143 DIV_ROUND_UP(layout->alignment, sizeof(uint64_t)));
144 data_offset = uint64s_per_slot * sizeof(uint64_t);
145 /* Add the query data for begin & end commands */
146 uint64s_per_slot += 2 * DIV_ROUND_UP(layout->size, sizeof(uint64_t));
147 break;
148 }
149 case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: {
150 const struct intel_perf_query_field_layout *layout =
151 &pdevice->perf->query_layout;
152
153 perf_query_info = vk_find_struct_const(pCreateInfo->pNext,
154 QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR);
155 n_passes = intel_perf_get_n_passes(pdevice->perf,
156 perf_query_info->pCounterIndices,
157 perf_query_info->counterIndexCount,
158 NULL);
159 vk_multialloc_add(&ma, &counter_pass, struct intel_perf_counter_pass,
160 perf_query_info->counterIndexCount);
161 vk_multialloc_add(&ma, &pass_query, struct intel_perf_query_info *,
162 n_passes);
163 uint64s_per_slot = 1 /* availability */;
164 /* Align to the requirement of the layout */
165 uint64s_per_slot = align(uint64s_per_slot,
166 DIV_ROUND_UP(layout->alignment, sizeof(uint64_t)));
167 data_offset = uint64s_per_slot * sizeof(uint64_t);
168 /* Add the query data for begin & end commands */
169 uint64s_per_slot += 2 * DIV_ROUND_UP(layout->size, sizeof(uint64_t));
170 /* Multiply by the number of passes */
171 uint64s_per_slot *= n_passes;
172 break;
173 }
174 case VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT:
175 /* Query has two values: begin and end. */
176 uint64s_per_slot = 1 + 2;
177 break;
178 #if GFX_VERx10 >= 125
179 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR:
180 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR:
181 uint64s_per_slot = 1 + 1 /* availability + size (PostbuildInfoCurrentSize, PostbuildInfoCompactedSize) */;
182 break;
183
184 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR:
185 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR:
186 uint64s_per_slot = 1 + 2 /* availability + size (PostbuildInfoSerializationDesc) */;
187 break;
188
189 #endif
190 case VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR:
191 uint64s_per_slot = 1;
192 break;
193 default:
194 assert(!"Invalid query type");
195 }
196
197 if (!vk_multialloc_zalloc2(&ma, &device->vk.alloc, pAllocator,
198 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT))
199 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
200
201 vk_query_pool_init(&device->vk, &pool->vk, pCreateInfo);
202 pool->stride = uint64s_per_slot * sizeof(uint64_t);
203
204 if (pool->vk.query_type == VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL) {
205 pool->data_offset = data_offset;
206 pool->snapshot_size = (pool->stride - data_offset) / 2;
207 }
208 else if (pool->vk.query_type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
209 pool->pass_size = pool->stride / n_passes;
210 pool->data_offset = data_offset;
211 pool->snapshot_size = (pool->pass_size - data_offset) / 2;
212 pool->n_counters = perf_query_info->counterIndexCount;
213 pool->counter_pass = counter_pass;
214 intel_perf_get_counters_passes(pdevice->perf,
215 perf_query_info->pCounterIndices,
216 perf_query_info->counterIndexCount,
217 pool->counter_pass);
218 pool->n_passes = n_passes;
219 pool->pass_query = pass_query;
220 intel_perf_get_n_passes(pdevice->perf,
221 perf_query_info->pCounterIndices,
222 perf_query_info->counterIndexCount,
223 pool->pass_query);
224 }
225
226 uint64_t size = pool->vk.query_count * (uint64_t)pool->stride;
227
228 /* For KHR_performance_query we need some space in the buffer for a small
229 * batch updating ANV_PERF_QUERY_OFFSET_REG.
230 */
231 if (pool->vk.query_type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
232 pool->khr_perf_preamble_stride = 32;
233 pool->khr_perf_preambles_offset = size;
234 size += (uint64_t)pool->n_passes * pool->khr_perf_preamble_stride;
235 }
236
237 result = anv_device_alloc_bo(device, "query-pool", size,
238 ANV_BO_ALLOC_MAPPED |
239 ANV_BO_ALLOC_HOST_CACHED_COHERENT,
240 0 /* explicit_address */,
241 &pool->bo);
242 if (result != VK_SUCCESS)
243 goto fail;
244
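/* For KHR performance queries, write one small self-contained batch per pass
 * that loads the pass offset (pass * pass_size) into ANV_PERF_QUERY_OFFSET_REG
 * and ends with MI_BATCH_BUFFER_END. The self-modified query commands emitted
 * at vkCmdBeginQuery/vkCmdEndQuery time add this register to their base
 * addresses to select the right pass.
 */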
245 if (pool->vk.query_type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
246 for (uint32_t p = 0; p < pool->n_passes; p++) {
247 struct mi_builder b;
248 struct anv_batch batch = {
249 .start = pool->bo->map + khr_perf_query_preamble_offset(pool, p),
250 .end = pool->bo->map + khr_perf_query_preamble_offset(pool, p) + pool->data_offset,
251 };
252 batch.next = batch.start;
253
254 mi_builder_init(&b, device->info, &batch);
255 mi_store(&b, mi_reg64(ANV_PERF_QUERY_OFFSET_REG),
256 mi_imm(p * (uint64_t)pool->pass_size));
257 anv_batch_emit(&batch, GENX(MI_BATCH_BUFFER_END), bbe);
258 }
259 }
260
261 ANV_RMV(query_pool_create, device, pool, false);
262
263 *pQueryPool = anv_query_pool_to_handle(pool);
264
265 return VK_SUCCESS;
266
267 fail:
268 vk_free2(&device->vk.alloc, pAllocator, pool);
269
270 return result;
271 }
272
273 void genX(DestroyQueryPool)(
274 VkDevice _device,
275 VkQueryPool _pool,
276 const VkAllocationCallbacks* pAllocator)
277 {
278 ANV_FROM_HANDLE(anv_device, device, _device);
279 ANV_FROM_HANDLE(anv_query_pool, pool, _pool);
280
281 if (!pool)
282 return;
283
284 ANV_RMV(resource_destroy, device, pool);
285
286 anv_device_release_bo(device, pool->bo);
287 vk_object_free(&device->vk, pAllocator, pool);
288 }
289
290 /**
291 * VK_KHR_performance_query layout:
292 *
293 * --------------------------------------------
294 * | availability (8b) | | |
295 * |-------------------------------| | |
296 * | some padding (see | | |
297 * | query_field_layout:alignment) | | Pass 0 |
298 * |-------------------------------| | |
299 * | query data | | |
300 * | (2 * query_field_layout:size) | | |
301 * |-------------------------------|-- | Query 0
302 * | availability (8b) | | |
303 * |-------------------------------| | |
304 * | some padding (see | | |
305 * | query_field_layout:alignment) | | Pass 1 |
306 * |-------------------------------| | |
307 * | query data | | |
308 * | (2 * query_field_layout:size) | | |
309 * |-------------------------------|-----------
310 * | availability (8b) | | |
311 * |-------------------------------| | |
312 * | some padding (see | | |
313 * | query_field_layout:alignment) | | Pass 0 |
314 * |-------------------------------| | |
315 * | query data | | |
316 * | (2 * query_field_layout:size) | | |
317 * |-------------------------------|-- | Query 1
318 * | ... | | |
319 * --------------------------------------------
320 */
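/* Illustrative offset math (see the helpers below): for query q and pass p,
 * the availability qword lives at q * stride + p * pass_size, the begin
 * snapshot at that offset + data_offset, and the end snapshot a further
 * snapshot_size bytes in.
 */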
321
322 static uint64_t
323 khr_perf_query_availability_offset(struct anv_query_pool *pool, uint32_t query, uint32_t pass)
324 {
325 return query * (uint64_t)pool->stride + pass * (uint64_t)pool->pass_size;
326 }
327
328 static uint64_t
329 khr_perf_query_data_offset(struct anv_query_pool *pool, uint32_t query, uint32_t pass, bool end)
330 {
331 return query * (uint64_t)pool->stride + pass * (uint64_t)pool->pass_size +
332 pool->data_offset + (end ? pool->snapshot_size : 0);
333 }
334
335 static struct anv_address
336 khr_perf_query_availability_address(struct anv_query_pool *pool, uint32_t query, uint32_t pass)
337 {
338 return anv_address_add(
339 (struct anv_address) { .bo = pool->bo, },
340 khr_perf_query_availability_offset(pool, query, pass));
341 }
342
343 static struct anv_address
344 khr_perf_query_data_address(struct anv_query_pool *pool, uint32_t query, uint32_t pass, bool end)
345 {
346 return anv_address_add(
347 (struct anv_address) { .bo = pool->bo, },
348 khr_perf_query_data_offset(pool, query, pass, end));
349 }
350
351 static bool
352 khr_perf_query_ensure_relocs(struct anv_cmd_buffer *cmd_buffer)
353 {
354 if (anv_batch_has_error(&cmd_buffer->batch))
355 return false;
356
357 if (cmd_buffer->self_mod_locations)
358 return true;
359
360 struct anv_device *device = cmd_buffer->device;
361 const struct anv_physical_device *pdevice = device->physical;
362
363 cmd_buffer->self_mod_locations =
364 vk_alloc(&cmd_buffer->vk.pool->alloc,
365 pdevice->n_perf_query_commands * sizeof(*cmd_buffer->self_mod_locations), 8,
366 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
367
368 if (!cmd_buffer->self_mod_locations) {
369 anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
370 return false;
371 }
372
373 return true;
374 }
375
376 /**
377 * VK_INTEL_performance_query layout:
378 *
379 * ---------------------------------
380 * | availability (8b) |
381 * |-------------------------------|
382 * | marker (8b) |
383 * |-------------------------------|
384 * | some padding (see |
385 * | query_field_layout:alignment) |
386 * |-------------------------------|
387 * | query data |
388 * | (2 * query_field_layout:size) |
389 * ---------------------------------
390 */
391
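/* The marker qword sits right after the 8-byte availability word. */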
392 static uint32_t
393 intel_perf_marker_offset(void)
394 {
395 return 8;
396 }
397
398 static uint32_t
399 intel_perf_query_data_offset(struct anv_query_pool *pool, bool end)
400 {
401 return pool->data_offset + (end ? pool->snapshot_size : 0);
402 }
403
404 static void
405 cpu_write_query_result(void *dst_slot, VkQueryResultFlags flags,
406 uint32_t value_index, uint64_t result)
407 {
408 if (flags & VK_QUERY_RESULT_64_BIT) {
409 uint64_t *dst64 = dst_slot;
410 dst64[value_index] = result;
411 } else {
412 uint32_t *dst32 = dst_slot;
413 dst32[value_index] = result;
414 }
415 }
416
417 static void *
418 query_slot(struct anv_query_pool *pool, uint32_t query)
419 {
420 return pool->bo->map + query * pool->stride;
421 }
422
423 static bool
424 query_is_available(struct anv_query_pool *pool, uint32_t query)
425 {
426 if (pool->vk.query_type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
427 for (uint32_t p = 0; p < pool->n_passes; p++) {
428 volatile uint64_t *slot =
429 pool->bo->map + khr_perf_query_availability_offset(pool, query, p);
430 if (!slot[0])
431 return false;
432 }
433 return true;
434 }
435
436 return *(volatile uint64_t *)query_slot(pool, query);
437 }
438
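/* CPU-side wait used to implement VK_QUERY_RESULT_WAIT_BIT: poll the
 * availability word(s) until they become non-zero, the timeout expires, or
 * the device is declared lost.
 */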
439 static VkResult
440 wait_for_available(struct anv_device *device,
441 struct anv_query_pool *pool, uint32_t query)
442 {
443 /* By default we leave a 2s timeout before declaring the device lost. */
444 uint64_t rel_timeout = 2 * NSEC_PER_SEC;
445 if (pool->vk.query_type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
446 /* With performance queries, there is an additional 500us reconfiguration
447 * time in i915.
448 */
449 rel_timeout += 500 * 1000;
450 /* Additionally a command buffer can be replayed N times to gather data
451 * for each of the metric sets to capture all the counters requested.
452 */
453 rel_timeout *= pool->n_passes;
454 }
455 uint64_t abs_timeout_ns = os_time_get_absolute_timeout(rel_timeout);
456
457 while (os_time_get_nano() < abs_timeout_ns) {
458 if (query_is_available(pool, query))
459 return VK_SUCCESS;
460 VkResult status = vk_device_check_status(&device->vk);
461 if (status != VK_SUCCESS)
462 return status;
463 }
464
465 return vk_device_set_lost(&device->vk, "query timeout");
466 }
467
468 VkResult genX(GetQueryPoolResults)(
469 VkDevice _device,
470 VkQueryPool queryPool,
471 uint32_t firstQuery,
472 uint32_t queryCount,
473 size_t dataSize,
474 void* pData,
475 VkDeviceSize stride,
476 VkQueryResultFlags flags)
477 {
478 ANV_FROM_HANDLE(anv_device, device, _device);
479 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
480
481 assert(
482 #if GFX_VERx10 >= 125
483 pool->vk.query_type == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR ||
484 pool->vk.query_type == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR ||
485 pool->vk.query_type == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR ||
486 pool->vk.query_type == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR ||
487 #endif
488 pool->vk.query_type == VK_QUERY_TYPE_OCCLUSION ||
489 pool->vk.query_type == VK_QUERY_TYPE_PIPELINE_STATISTICS ||
490 pool->vk.query_type == VK_QUERY_TYPE_TIMESTAMP ||
491 pool->vk.query_type == VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT ||
492 pool->vk.query_type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR ||
493 pool->vk.query_type == VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL ||
494 pool->vk.query_type == VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT ||
495 pool->vk.query_type == VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR);
496
497 if (vk_device_is_lost(&device->vk))
498 return VK_ERROR_DEVICE_LOST;
499
500 if (pData == NULL)
501 return VK_SUCCESS;
502
503 void *data_end = pData + dataSize;
504
505 VkResult status = VK_SUCCESS;
506 for (uint32_t i = 0; i < queryCount; i++) {
507 bool available = query_is_available(pool, firstQuery + i);
508
509 if (!available && (flags & VK_QUERY_RESULT_WAIT_BIT)) {
510 status = wait_for_available(device, pool, firstQuery + i);
511 if (status != VK_SUCCESS) {
512 return status;
513 }
514
515 available = true;
516 }
517
518 /* From the Vulkan 1.0.42 spec:
519 *
520 * "If VK_QUERY_RESULT_WAIT_BIT and VK_QUERY_RESULT_PARTIAL_BIT are
521 * both not set then no result values are written to pData for
522 * queries that are in the unavailable state at the time of the call,
523 * and vkGetQueryPoolResults returns VK_NOT_READY. However,
524 * availability state is still written to pData for those queries if
525 * VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set."
526 *
527 * From VK_KHR_performance_query:
528 *
529 * "VK_QUERY_RESULT_PERFORMANCE_QUERY_RECORDED_COUNTERS_BIT_KHR specifies
530 * that the result should contain the number of counters that were recorded
531 * into a query pool of type ename:VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR"
532 */
533 bool write_results = available || (flags & VK_QUERY_RESULT_PARTIAL_BIT);
534
535 uint32_t idx = 0;
536 switch (pool->vk.query_type) {
537 case VK_QUERY_TYPE_OCCLUSION:
538 case VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT: {
539 uint64_t *slot = query_slot(pool, firstQuery + i);
540 if (write_results) {
541 /* From the Vulkan 1.2.132 spec:
542 *
543 * "If VK_QUERY_RESULT_PARTIAL_BIT is set,
544 * VK_QUERY_RESULT_WAIT_BIT is not set, and the query’s status
545 * is unavailable, an intermediate result value between zero and
546 * the final result value is written to pData for that query."
547 */
548 uint64_t result = available ? slot[2] - slot[1] : 0;
549 cpu_write_query_result(pData, flags, idx, result);
550 }
551 idx++;
552 break;
553 }
554
555 case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
556 uint64_t *slot = query_slot(pool, firstQuery + i);
557 uint32_t statistics = pool->vk.pipeline_statistics;
558 while (statistics) {
559 UNUSED uint32_t stat = u_bit_scan(&statistics);
560 if (write_results) {
561 uint64_t result = slot[idx * 2 + 2] - slot[idx * 2 + 1];
562 cpu_write_query_result(pData, flags, idx, result);
563 }
564 idx++;
565 }
566 assert(idx == util_bitcount(pool->vk.pipeline_statistics));
567 break;
568 }
569
570 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT: {
571 uint64_t *slot = query_slot(pool, firstQuery + i);
572 if (write_results)
573 cpu_write_query_result(pData, flags, idx, slot[2] - slot[1]);
574 idx++;
575 if (write_results)
576 cpu_write_query_result(pData, flags, idx, slot[4] - slot[3]);
577 idx++;
578 break;
579 }
580
581 #if GFX_VERx10 >= 125
582 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR:
583 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR:
584 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR: {
585 uint64_t *slot = query_slot(pool, firstQuery + i);
586 if (write_results)
587 cpu_write_query_result(pData, flags, idx, slot[1]);
588 idx++;
589 break;
590 }
591
592 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR: {
593 uint64_t *slot = query_slot(pool, firstQuery + i);
594 if (write_results)
595 cpu_write_query_result(pData, flags, idx, slot[2]);
596 idx++;
597 break;
598 }
599 #endif
600
601 case VK_QUERY_TYPE_TIMESTAMP: {
602 uint64_t *slot = query_slot(pool, firstQuery + i);
603 if (write_results)
604 cpu_write_query_result(pData, flags, idx, slot[1]);
605 idx++;
606 break;
607 }
608
609 case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: {
610 const struct anv_physical_device *pdevice = device->physical;
611 assert((flags & (VK_QUERY_RESULT_WITH_AVAILABILITY_BIT |
612 VK_QUERY_RESULT_PARTIAL_BIT)) == 0);
613 for (uint32_t p = 0; p < pool->n_passes; p++) {
614 const struct intel_perf_query_info *query = pool->pass_query[p];
615 struct intel_perf_query_result result;
616 intel_perf_query_result_clear(&result);
617 intel_perf_query_result_accumulate_fields(&result, query,
618 pool->bo->map + khr_perf_query_data_offset(pool, firstQuery + i, p, false),
619 pool->bo->map + khr_perf_query_data_offset(pool, firstQuery + i, p, true),
620 false /* no_oa_accumulate */);
621 anv_perf_write_pass_results(pdevice->perf, pool, p, &result, pData);
622 }
623 break;
624 }
625
626 case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL: {
627 if (!write_results)
628 break;
629 const void *query_data = query_slot(pool, firstQuery + i);
630 const struct intel_perf_query_info *query = &device->physical->perf->queries[0];
631 struct intel_perf_query_result result;
632 intel_perf_query_result_clear(&result);
633 intel_perf_query_result_accumulate_fields(&result, query,
634 query_data + intel_perf_query_data_offset(pool, false),
635 query_data + intel_perf_query_data_offset(pool, true),
636 false /* no_oa_accumulate */);
637 intel_perf_query_result_write_mdapi(pData, stride,
638 device->info,
639 query, &result);
640 const uint64_t *marker = query_data + intel_perf_marker_offset();
641 intel_perf_query_mdapi_write_marker(pData, stride, device->info, *marker);
642 break;
643 }
644
645 case VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR:
646 if (!write_results)
647 break;
648 const uint32_t *query_data = query_slot(pool, firstQuery + i);
649 uint32_t result = available ? *query_data : 0;
650 cpu_write_query_result(pData, flags, idx, result);
651 break;
652
653 default:
654 unreachable("invalid pool type");
655 }
656
657 if (!write_results)
658 status = VK_NOT_READY;
659
660 if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
661 cpu_write_query_result(pData, flags, idx, available);
662
663 pData += stride;
664 if (pData >= data_end)
665 break;
666 }
667
668 return status;
669 }
670
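/* Snapshot the pixel pipe's depth/stencil pass count (used for occlusion
 * queries) with a depth-stalled PIPE_CONTROL post-sync write. Gfx9 GT4
 * additionally needs a CS stall.
 */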
671 static void
672 emit_ps_depth_count(struct anv_cmd_buffer *cmd_buffer,
673 struct anv_address addr)
674 {
675 cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_POST_SYNC_BIT;
676 genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
677
678 bool cs_stall_needed = (GFX_VER == 9 && cmd_buffer->device->info->gt == 4);
679 genx_batch_emit_pipe_control_write
680 (&cmd_buffer->batch, cmd_buffer->device->info,
681 cmd_buffer->state.current_pipeline, WritePSDepthCount, addr, 0,
682 ANV_PIPE_DEPTH_STALL_BIT | (cs_stall_needed ? ANV_PIPE_CS_STALL_BIT : 0));
683 }
684
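/* Write the availability value from the command streamer (an MI store through
 * the MI builder). Used for query types whose data is also captured with MI
 * commands.
 */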
685 static void
686 emit_query_mi_availability(struct mi_builder *b,
687 struct anv_address addr,
688 bool available)
689 {
690 mi_store(b, mi_mem64(addr), mi_imm(available));
691 }
692
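/* Write the availability value with a PIPE_CONTROL post-sync write. Used for
 * query types (occlusion, timestamp) whose values are written by the 3D pipe,
 * so that availability lands in order with the query data.
 */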
693 static void
694 emit_query_pc_availability(struct anv_cmd_buffer *cmd_buffer,
695 struct anv_address addr,
696 bool available)
697 {
698 cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_POST_SYNC_BIT;
699 genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
700
701 genx_batch_emit_pipe_control_write
702 (&cmd_buffer->batch, cmd_buffer->device->info,
703 cmd_buffer->state.current_pipeline, WriteImmediateData, addr,
704 available, 0);
705 }
706
707 /**
708 * Goes through a series of consecutive query indices in the given pool,
709 * setting all element values to 0 and marking them as available.
710 */
711 static void
712 emit_zero_queries(struct anv_cmd_buffer *cmd_buffer,
713 struct mi_builder *b, struct anv_query_pool *pool,
714 uint32_t first_index, uint32_t num_queries)
715 {
716 switch (pool->vk.query_type) {
717 case VK_QUERY_TYPE_OCCLUSION:
718 case VK_QUERY_TYPE_TIMESTAMP:
719 /* These queries are written with a PIPE_CONTROL so clear them using the
720 * PIPE_CONTROL as well so we don't have to synchronize between 2 types
721 * of operations.
722 */
723 assert((pool->stride % 8) == 0);
724 for (uint32_t i = 0; i < num_queries; i++) {
725 struct anv_address slot_addr =
726 anv_query_address(pool, first_index + i);
727
728 for (uint32_t qword = 1; qword < (pool->stride / 8); qword++) {
729 emit_query_pc_availability(cmd_buffer,
730 anv_address_add(slot_addr, qword * 8),
731 false);
732 }
733 emit_query_pc_availability(cmd_buffer, slot_addr, true);
734 }
735 break;
736
737 case VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT:
738 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
739 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
740 for (uint32_t i = 0; i < num_queries; i++) {
741 struct anv_address slot_addr =
742 anv_query_address(pool, first_index + i);
743 mi_memset(b, anv_address_add(slot_addr, 8), 0, pool->stride - 8);
744 emit_query_mi_availability(b, slot_addr, true);
745 }
746 break;
747
748 case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: {
749 for (uint32_t i = 0; i < num_queries; i++) {
750 for (uint32_t p = 0; p < pool->n_passes; p++) {
751 mi_memset(b, khr_perf_query_data_address(pool, first_index + i, p, false),
752 0, 2 * pool->snapshot_size);
753 emit_query_mi_availability(b,
754 khr_perf_query_availability_address(pool, first_index + i, p),
755 true);
756 }
757 }
758 break;
759 }
760
761 case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL:
762 for (uint32_t i = 0; i < num_queries; i++) {
763 struct anv_address slot_addr =
764 anv_query_address(pool, first_index + i);
765 mi_memset(b, anv_address_add(slot_addr, 8), 0, pool->stride - 8);
766 emit_query_mi_availability(b, slot_addr, true);
767 }
768 break;
769
770 default:
771 unreachable("Unsupported query type");
772 }
773 }
774
775 void genX(CmdResetQueryPool)(
776 VkCommandBuffer commandBuffer,
777 VkQueryPool queryPool,
778 uint32_t firstQuery,
779 uint32_t queryCount)
780 {
781 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
782 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
783 struct anv_physical_device *pdevice = cmd_buffer->device->physical;
784
785 if (queryCount >= pdevice->instance->query_clear_with_blorp_threshold) {
786 trace_intel_begin_query_clear_blorp(&cmd_buffer->trace);
787
788 anv_cmd_buffer_fill_area(cmd_buffer,
789 anv_query_address(pool, firstQuery),
790 queryCount * pool->stride,
791 0);
792
793 cmd_buffer->state.queries.clear_bits =
794 (cmd_buffer->queue_family->queueFlags & VK_QUEUE_GRAPHICS_BIT) == 0 ?
795 ANV_QUERY_COMPUTE_WRITES_PENDING_BITS :
796 ANV_QUERY_RENDER_TARGET_WRITES_PENDING_BITS(cmd_buffer->device->info);
797
798 trace_intel_end_query_clear_blorp(&cmd_buffer->trace, queryCount);
799 return;
800 }
801
802 trace_intel_begin_query_clear_cs(&cmd_buffer->trace);
803
804 switch (pool->vk.query_type) {
805 case VK_QUERY_TYPE_OCCLUSION:
806 #if GFX_VERx10 >= 125
807 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR:
808 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR:
809 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR:
810 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR:
811 #endif
812 for (uint32_t i = 0; i < queryCount; i++) {
813 emit_query_pc_availability(cmd_buffer,
814 anv_query_address(pool, firstQuery + i),
815 false);
816 }
817 break;
818
819 case VK_QUERY_TYPE_TIMESTAMP: {
820 for (uint32_t i = 0; i < queryCount; i++) {
821 emit_query_pc_availability(cmd_buffer,
822 anv_query_address(pool, firstQuery + i),
823 false);
824 }
825
826 /* Add a CS stall here to make sure the PIPE_CONTROL above has
827 * completed. Otherwise some timestamps written later with MI_STORE_*
828 * commands might race with the PIPE_CONTROL in the loop above.
829 */
830 anv_add_pending_pipe_bits(cmd_buffer, ANV_PIPE_CS_STALL_BIT,
831 "vkCmdResetQueryPool of timestamps");
832 genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
833 break;
834 }
835
836 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
837 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
838 case VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT: {
839 struct mi_builder b;
840 mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
841
842 for (uint32_t i = 0; i < queryCount; i++)
843 emit_query_mi_availability(&b, anv_query_address(pool, firstQuery + i), false);
844 break;
845 }
846
847 case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: {
848 struct mi_builder b;
849 mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
850
851 for (uint32_t i = 0; i < queryCount; i++) {
852 for (uint32_t p = 0; p < pool->n_passes; p++) {
853 emit_query_mi_availability(
854 &b,
855 khr_perf_query_availability_address(pool, firstQuery + i, p),
856 false);
857 }
858 }
859 break;
860 }
861
862 case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL: {
863 struct mi_builder b;
864 mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
865
866 for (uint32_t i = 0; i < queryCount; i++)
867 emit_query_mi_availability(&b, anv_query_address(pool, firstQuery + i), false);
868 break;
869 }
870 case VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR:
871 for (uint32_t i = 0; i < queryCount; i++)
872 emit_query_mi_flush_availability(cmd_buffer, anv_query_address(pool, firstQuery + i), false);
873 break;
874 default:
875 unreachable("Unsupported query type");
876 }
877
878 trace_intel_end_query_clear_cs(&cmd_buffer->trace, queryCount);
879 }
880
881 void genX(ResetQueryPool)(
882 VkDevice _device,
883 VkQueryPool queryPool,
884 uint32_t firstQuery,
885 uint32_t queryCount)
886 {
887 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
888
889 for (uint32_t i = 0; i < queryCount; i++) {
890 if (pool->vk.query_type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
891 for (uint32_t p = 0; p < pool->n_passes; p++) {
892 uint64_t *pass_slot = pool->bo->map +
893 khr_perf_query_availability_offset(pool, firstQuery + i, p);
894 *pass_slot = 0;
895 }
896 } else {
897 uint64_t *slot = query_slot(pool, firstQuery + i);
898 *slot = 0;
899 }
900 }
901 }
902
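/* Statistic registers indexed by the bit position of the corresponding
 * VkQueryPipelineStatisticFlagBits (see emit_pipeline_stat below).
 */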
903 static const uint32_t vk_pipeline_stat_to_reg[] = {
904 GENX(IA_VERTICES_COUNT_num),
905 GENX(IA_PRIMITIVES_COUNT_num),
906 GENX(VS_INVOCATION_COUNT_num),
907 GENX(GS_INVOCATION_COUNT_num),
908 GENX(GS_PRIMITIVES_COUNT_num),
909 GENX(CL_INVOCATION_COUNT_num),
910 GENX(CL_PRIMITIVES_COUNT_num),
911 GENX(PS_INVOCATION_COUNT_num),
912 GENX(HS_INVOCATION_COUNT_num),
913 GENX(DS_INVOCATION_COUNT_num),
914 GENX(CS_INVOCATION_COUNT_num),
915 };
916
917 static void
918 emit_pipeline_stat(struct mi_builder *b, uint32_t stat,
919 struct anv_address addr)
920 {
921 STATIC_ASSERT(ANV_PIPELINE_STATISTICS_MASK ==
922 (1 << ARRAY_SIZE(vk_pipeline_stat_to_reg)) - 1);
923
924 assert(stat < ARRAY_SIZE(vk_pipeline_stat_to_reg));
925 mi_store(b, mi_mem64(addr), mi_reg64(vk_pipeline_stat_to_reg[stat]));
926 }
927
928 static void
929 emit_xfb_query(struct mi_builder *b, uint32_t stream,
930 struct anv_address addr)
931 {
932 assert(stream < MAX_XFB_STREAMS);
933
934 mi_store(b, mi_mem64(anv_address_add(addr, 0)),
935 mi_reg64(GENX(SO_NUM_PRIMS_WRITTEN0_num) + stream * 8));
936 mi_store(b, mi_mem64(anv_address_add(addr, 16)),
937 mi_reg64(GENX(SO_PRIM_STORAGE_NEEDED0_num) + stream * 8));
938 }
939
940 static void
941 emit_perf_intel_query(struct anv_cmd_buffer *cmd_buffer,
942 struct anv_query_pool *pool,
943 struct mi_builder *b,
944 struct anv_address query_addr,
945 bool end)
946 {
947 const struct intel_perf_query_field_layout *layout =
948 &cmd_buffer->device->physical->perf->query_layout;
949 struct anv_address data_addr =
950 anv_address_add(query_addr, intel_perf_query_data_offset(pool, end));
951
952 for (uint32_t f = 0; f < layout->n_fields; f++) {
953 const struct intel_perf_query_field *field =
954 &layout->fields[end ? f : (layout->n_fields - 1 - f)];
955
956 switch (field->type) {
957 case INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC:
958 anv_batch_emit(&cmd_buffer->batch, GENX(MI_REPORT_PERF_COUNT), rpc) {
959 rpc.MemoryAddress = anv_address_add(data_addr, field->location);
960 }
961 break;
962
963 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_PERFCNT:
964 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_RPSTAT:
965 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_A:
966 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B:
967 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C: {
968 struct anv_address addr = anv_address_add(data_addr, field->location);
969 struct mi_value src = field->size == 8 ?
970 mi_reg64(field->mmio_offset) :
971 mi_reg32(field->mmio_offset);
972 struct mi_value dst = field->size == 8 ?
973 mi_mem64(addr) : mi_mem32(addr);
974 mi_store(b, dst, src);
975 break;
976 }
977
978 default:
979 unreachable("Invalid query field");
980 break;
981 }
982 }
983 }
984
985 static void
986 emit_query_clear_flush(struct anv_cmd_buffer *cmd_buffer,
987 struct anv_query_pool *pool,
988 const char *reason)
989 {
990 if (cmd_buffer->state.queries.clear_bits == 0)
991 return;
992
993 anv_add_pending_pipe_bits(cmd_buffer,
994 ANV_PIPE_QUERY_BITS(
995 cmd_buffer->state.queries.clear_bits),
996 reason);
997 genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
998 }
999
1000
1001 void genX(CmdBeginQueryIndexedEXT)(
1002 VkCommandBuffer commandBuffer,
1003 VkQueryPool queryPool,
1004 uint32_t query,
1005 VkQueryControlFlags flags,
1006 uint32_t index)
1007 {
1008 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1009 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1010 struct anv_address query_addr = anv_query_address(pool, query);
1011
1012 emit_query_clear_flush(cmd_buffer, pool, "CmdBeginQuery* flush query clears");
1013
1014 struct mi_builder b;
1015 mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
1016 const uint32_t mocs = anv_mocs_for_address(cmd_buffer->device, &query_addr);
1017 mi_builder_set_mocs(&b, mocs);
1018
1019 switch (pool->vk.query_type) {
1020 case VK_QUERY_TYPE_OCCLUSION:
1021 cmd_buffer->state.gfx.n_occlusion_queries++;
1022 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_OCCLUSION_QUERY_ACTIVE;
1023 emit_ps_depth_count(cmd_buffer, anv_address_add(query_addr, 8));
1024 break;
1025
1026 case VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT:
1027 genx_batch_emit_pipe_control(&cmd_buffer->batch,
1028 cmd_buffer->device->info,
1029 cmd_buffer->state.current_pipeline,
1030 ANV_PIPE_CS_STALL_BIT |
1031 ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1032 mi_store(&b, mi_mem64(anv_address_add(query_addr, 8)),
1033 mi_reg64(GENX(CL_INVOCATION_COUNT_num)));
1034 break;
1035
1036 case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
1037 /* TODO: This might only be necessary for certain stats */
1038 genx_batch_emit_pipe_control(&cmd_buffer->batch,
1039 cmd_buffer->device->info,
1040 cmd_buffer->state.current_pipeline,
1041 ANV_PIPE_CS_STALL_BIT |
1042 ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1043
1044 uint32_t statistics = pool->vk.pipeline_statistics;
1045 uint32_t offset = 8;
1046 while (statistics) {
1047 uint32_t stat = u_bit_scan(&statistics);
1048 emit_pipeline_stat(&b, stat, anv_address_add(query_addr, offset));
1049 offset += 16;
1050 }
1051 break;
1052 }
1053
1054 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
1055 genx_batch_emit_pipe_control(&cmd_buffer->batch,
1056 cmd_buffer->device->info,
1057 cmd_buffer->state.current_pipeline,
1058 ANV_PIPE_CS_STALL_BIT |
1059 ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1060 emit_xfb_query(&b, index, anv_address_add(query_addr, 8));
1061 break;
1062
1063 case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: {
1064 if (!khr_perf_query_ensure_relocs(cmd_buffer))
1065 return;
1066
1067 const struct anv_physical_device *pdevice = cmd_buffer->device->physical;
1068 const struct intel_perf_query_field_layout *layout = &pdevice->perf->query_layout;
1069
1070 uint32_t reloc_idx = 0;
1071 for (uint32_t end = 0; end < 2; end++) {
1072 for (uint32_t r = 0; r < layout->n_fields; r++) {
1073 const struct intel_perf_query_field *field =
1074 &layout->fields[end ? r : (layout->n_fields - 1 - r)];
1075 struct mi_value reg_addr =
1076 mi_iadd(
1077 &b,
1078 mi_imm(intel_canonical_address(pool->bo->offset +
1079 khr_perf_query_data_offset(pool, query, 0, end) +
1080 field->location)),
1081 mi_reg64(ANV_PERF_QUERY_OFFSET_REG));
1082 cmd_buffer->self_mod_locations[reloc_idx++] = mi_store_address(&b, reg_addr);
1083
1084 if (field->type != INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC &&
1085 field->size == 8) {
1086 reg_addr =
1087 mi_iadd(
1088 &b,
1089 mi_imm(intel_canonical_address(pool->bo->offset +
1090 khr_perf_query_data_offset(pool, query, 0, end) +
1091 field->location + 4)),
1092 mi_reg64(ANV_PERF_QUERY_OFFSET_REG));
1093 cmd_buffer->self_mod_locations[reloc_idx++] = mi_store_address(&b, reg_addr);
1094 }
1095 }
1096 }
1097
1098 struct mi_value availability_write_offset =
1099 mi_iadd(
1100 &b,
1101 mi_imm(
1102 intel_canonical_address(
1103 pool->bo->offset +
1104 khr_perf_query_availability_offset(pool, query, 0 /* pass */))),
1105 mi_reg64(ANV_PERF_QUERY_OFFSET_REG));
1106 cmd_buffer->self_mod_locations[reloc_idx++] =
1107 mi_store_address(&b, availability_write_offset);
1108
1109 assert(reloc_idx == pdevice->n_perf_query_commands);
1110
1111 const struct intel_device_info *devinfo = cmd_buffer->device->info;
1112 const enum intel_engine_class engine_class = cmd_buffer->queue_family->engine_class;
1113 mi_self_mod_barrier(&b, devinfo->engine_class_prefetch[engine_class]);
1114
1115 genx_batch_emit_pipe_control(&cmd_buffer->batch,
1116 cmd_buffer->device->info,
1117 cmd_buffer->state.current_pipeline,
1118 ANV_PIPE_CS_STALL_BIT |
1119 ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1120 cmd_buffer->perf_query_pool = pool;
1121
1122 cmd_buffer->perf_reloc_idx = 0;
1123 for (uint32_t r = 0; r < layout->n_fields; r++) {
1124 const struct intel_perf_query_field *field =
1125 &layout->fields[layout->n_fields - 1 - r];
1126 void *dws;
1127
1128 switch (field->type) {
1129 case INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC:
1130 dws = anv_batch_emitn(&cmd_buffer->batch,
1131 GENX(MI_REPORT_PERF_COUNT_length),
1132 GENX(MI_REPORT_PERF_COUNT),
1133 .MemoryAddress = query_addr /* Will be overwritten */);
1134 _mi_resolve_address_token(&b,
1135 cmd_buffer->self_mod_locations[cmd_buffer->perf_reloc_idx++],
1136 dws +
1137 GENX(MI_REPORT_PERF_COUNT_MemoryAddress_start) / 8);
1138 break;
1139
1140 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_PERFCNT:
1141 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_RPSTAT:
1142 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_A:
1143 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B:
1144 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C:
1145 dws =
1146 anv_batch_emitn(&cmd_buffer->batch,
1147 GENX(MI_STORE_REGISTER_MEM_length),
1148 GENX(MI_STORE_REGISTER_MEM),
1149 .RegisterAddress = field->mmio_offset,
1150 .MemoryAddress = query_addr /* Will be overwritten */ );
1151 _mi_resolve_address_token(&b,
1152 cmd_buffer->self_mod_locations[cmd_buffer->perf_reloc_idx++],
1153 dws +
1154 GENX(MI_STORE_REGISTER_MEM_MemoryAddress_start) / 8);
1155 if (field->size == 8) {
1156 dws =
1157 anv_batch_emitn(&cmd_buffer->batch,
1158 GENX(MI_STORE_REGISTER_MEM_length),
1159 GENX(MI_STORE_REGISTER_MEM),
1160 .RegisterAddress = field->mmio_offset + 4,
1161 .MemoryAddress = query_addr /* Will be overwritten */ );
1162 _mi_resolve_address_token(&b,
1163 cmd_buffer->self_mod_locations[cmd_buffer->perf_reloc_idx++],
1164 dws +
1165 GENX(MI_STORE_REGISTER_MEM_MemoryAddress_start) / 8);
1166 }
1167 break;
1168
1169 default:
1170 unreachable("Invalid query field");
1171 break;
1172 }
1173 }
1174 break;
1175 }
1176
1177 case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL: {
1178 genx_batch_emit_pipe_control(&cmd_buffer->batch,
1179 cmd_buffer->device->info,
1180 cmd_buffer->state.current_pipeline,
1181 ANV_PIPE_CS_STALL_BIT |
1182 ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1183 emit_perf_intel_query(cmd_buffer, pool, &b, query_addr, false);
1184 break;
1185 }
1186 case VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR:
1187 emit_query_mi_flush_availability(cmd_buffer, query_addr, false);
1188 break;
1189 default:
1190 unreachable("");
1191 }
1192 }
1193
1194 void genX(CmdEndQueryIndexedEXT)(
1195 VkCommandBuffer commandBuffer,
1196 VkQueryPool queryPool,
1197 uint32_t query,
1198 uint32_t index)
1199 {
1200 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1201 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1202 struct anv_address query_addr = anv_query_address(pool, query);
1203
1204 struct mi_builder b;
1205 mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
1206
1207 switch (pool->vk.query_type) {
1208 case VK_QUERY_TYPE_OCCLUSION:
1209 emit_ps_depth_count(cmd_buffer, anv_address_add(query_addr, 16));
1210 emit_query_pc_availability(cmd_buffer, query_addr, true);
1211 cmd_buffer->state.gfx.n_occlusion_queries--;
1212 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_OCCLUSION_QUERY_ACTIVE;
1213 break;
1214
1215 case VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT:
1216 /* Ensure previous commands have completed before capturing the register
1217 * value.
1218 */
1219 genx_batch_emit_pipe_control(&cmd_buffer->batch,
1220 cmd_buffer->device->info,
1221 cmd_buffer->state.current_pipeline,
1222 ANV_PIPE_CS_STALL_BIT |
1223 ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1224
1225 mi_store(&b, mi_mem64(anv_address_add(query_addr, 16)),
1226 mi_reg64(GENX(CL_INVOCATION_COUNT_num)));
1227 emit_query_mi_availability(&b, query_addr, true);
1228 break;
1229
1230 case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
1231 /* TODO: This might only be necessary for certain stats */
1232 genx_batch_emit_pipe_control(&cmd_buffer->batch,
1233 cmd_buffer->device->info,
1234 cmd_buffer->state.current_pipeline,
1235 ANV_PIPE_CS_STALL_BIT |
1236 ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1237
1238 uint32_t statistics = pool->vk.pipeline_statistics;
1239 uint32_t offset = 16;
1240 while (statistics) {
1241 uint32_t stat = u_bit_scan(&statistics);
1242 emit_pipeline_stat(&b, stat, anv_address_add(query_addr, offset));
1243 offset += 16;
1244 }
1245
1246 emit_query_mi_availability(&b, query_addr, true);
1247 break;
1248 }
1249
1250 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
1251 genx_batch_emit_pipe_control(&cmd_buffer->batch,
1252 cmd_buffer->device->info,
1253 cmd_buffer->state.current_pipeline,
1254 ANV_PIPE_CS_STALL_BIT |
1255 ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1256 emit_xfb_query(&b, index, anv_address_add(query_addr, 16));
1257 emit_query_mi_availability(&b, query_addr, true);
1258 break;
1259
1260 case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: {
1261 genx_batch_emit_pipe_control(&cmd_buffer->batch,
1262 cmd_buffer->device->info,
1263 cmd_buffer->state.current_pipeline,
1264 ANV_PIPE_CS_STALL_BIT |
1265 ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1266 cmd_buffer->perf_query_pool = pool;
1267
1268 if (!khr_perf_query_ensure_relocs(cmd_buffer))
1269 return;
1270
1271 const struct anv_physical_device *pdevice = cmd_buffer->device->physical;
1272 const struct intel_perf_query_field_layout *layout = &pdevice->perf->query_layout;
1273
1274 void *dws;
1275 for (uint32_t r = 0; r < layout->n_fields; r++) {
1276 const struct intel_perf_query_field *field = &layout->fields[r];
1277
1278 switch (field->type) {
1279 case INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC:
1280 dws = anv_batch_emitn(&cmd_buffer->batch,
1281 GENX(MI_REPORT_PERF_COUNT_length),
1282 GENX(MI_REPORT_PERF_COUNT),
1283 .MemoryAddress = query_addr /* Will be overwritten */);
1284 _mi_resolve_address_token(&b,
1285 cmd_buffer->self_mod_locations[cmd_buffer->perf_reloc_idx++],
1286 dws +
1287 GENX(MI_REPORT_PERF_COUNT_MemoryAddress_start) / 8);
1288 break;
1289
1290 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_PERFCNT:
1291 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_RPSTAT:
1292 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_A:
1293 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B:
1294 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C:
1295 dws =
1296 anv_batch_emitn(&cmd_buffer->batch,
1297 GENX(MI_STORE_REGISTER_MEM_length),
1298 GENX(MI_STORE_REGISTER_MEM),
1299 .RegisterAddress = field->mmio_offset,
1300 .MemoryAddress = query_addr /* Will be overwritten */ );
1301 _mi_resolve_address_token(&b,
1302 cmd_buffer->self_mod_locations[cmd_buffer->perf_reloc_idx++],
1303 dws +
1304 GENX(MI_STORE_REGISTER_MEM_MemoryAddress_start) / 8);
1305 if (field->size == 8) {
1306 dws =
1307 anv_batch_emitn(&cmd_buffer->batch,
1308 GENX(MI_STORE_REGISTER_MEM_length),
1309 GENX(MI_STORE_REGISTER_MEM),
1310 .RegisterAddress = field->mmio_offset + 4,
1311 .MemoryAddress = query_addr /* Will be overwritten */ );
1312 _mi_resolve_address_token(&b,
1313 cmd_buffer->self_mod_locations[cmd_buffer->perf_reloc_idx++],
1314 dws +
1315 GENX(MI_STORE_REGISTER_MEM_MemoryAddress_start) / 8);
1316 }
1317 break;
1318
1319 default:
1320 unreachable("Invalid query field");
1321 break;
1322 }
1323 }
1324
1325 dws =
1326 anv_batch_emitn(&cmd_buffer->batch,
1327 GENX(MI_STORE_DATA_IMM_length),
1328 GENX(MI_STORE_DATA_IMM),
1329 .ImmediateData = true);
1330 _mi_resolve_address_token(&b,
1331 cmd_buffer->self_mod_locations[cmd_buffer->perf_reloc_idx++],
1332 dws +
1333 GENX(MI_STORE_DATA_IMM_Address_start) / 8);
1334
1335 assert(cmd_buffer->perf_reloc_idx == pdevice->n_perf_query_commands);
1336 break;
1337 }
1338
1339 case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL: {
1340 genx_batch_emit_pipe_control(&cmd_buffer->batch,
1341 cmd_buffer->device->info,
1342 cmd_buffer->state.current_pipeline,
1343 ANV_PIPE_CS_STALL_BIT |
1344 ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1345 uint32_t marker_offset = intel_perf_marker_offset();
1346 mi_store(&b, mi_mem64(anv_address_add(query_addr, marker_offset)),
1347 mi_imm(cmd_buffer->intel_perf_marker));
1348 emit_perf_intel_query(cmd_buffer, pool, &b, query_addr, true);
1349 emit_query_mi_availability(&b, query_addr, true);
1350 break;
1351 }
1352 case VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR:
1353 emit_query_mi_flush_availability(cmd_buffer, query_addr, true);
1354 break;
1355
1356 default:
1357 unreachable("");
1358 }
1359
1360 /* When multiview is active the spec requires that N consecutive query
1361 * indices are used, where N is the number of active views in the subpass.
1362 * The spec allows us to write the results to only one of the queries,
1363 * but we still need to manage result availability for all the query indices.
1364 * Since we only emit a single query for all active views in the
1365 * first index, mark the other query indices as being already available
1366 * with result 0.
1367 */
1368 if (cmd_buffer->state.gfx.view_mask) {
1369 const uint32_t num_queries =
1370 util_bitcount(cmd_buffer->state.gfx.view_mask);
1371 if (num_queries > 1)
1372 emit_zero_queries(cmd_buffer, &b, pool, query + 1, num_queries - 1);
1373 }
1374 }
1375
1376 #define TIMESTAMP 0x2358
1377
1378 void genX(CmdWriteTimestamp2)(
1379 VkCommandBuffer commandBuffer,
1380 VkPipelineStageFlags2 stage,
1381 VkQueryPool queryPool,
1382 uint32_t query)
1383 {
1384 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1385 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1386 struct anv_address query_addr = anv_query_address(pool, query);
1387
1388 assert(pool->vk.query_type == VK_QUERY_TYPE_TIMESTAMP);
1389
1390 emit_query_clear_flush(cmd_buffer, pool,
1391 "CmdWriteTimestamp flush query clears");
1392
1393 struct mi_builder b;
1394 mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
1395
1396 if (stage == VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT) {
1397 mi_store(&b, mi_mem64(anv_address_add(query_addr, 8)),
1398 mi_reg64(TIMESTAMP));
1399 emit_query_mi_availability(&b, query_addr, true);
1400 } else {
1401 /* Everything else is bottom-of-pipe */
1402 cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_POST_SYNC_BIT;
1403 genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
1404
1405 bool cs_stall_needed =
1406 (GFX_VER == 9 && cmd_buffer->device->info->gt == 4);
1407
1408 if (anv_cmd_buffer_is_blitter_queue(cmd_buffer) ||
1409 anv_cmd_buffer_is_video_queue(cmd_buffer)) {
1410 /* Wa_16018063123 - emit fast color dummy blit before MI_FLUSH_DW. */
1411 if (intel_needs_workaround(cmd_buffer->device->info, 16018063123)) {
1412 genX(batch_emit_fast_color_dummy_blit)(&cmd_buffer->batch,
1413 cmd_buffer->device);
1414 }
1415 anv_batch_emit(&cmd_buffer->batch, GENX(MI_FLUSH_DW), dw) {
1416 dw.Address = anv_address_add(query_addr, 8);
1417 dw.PostSyncOperation = WriteTimestamp;
1418 }
1419 emit_query_mi_flush_availability(cmd_buffer, query_addr, true);
1420 } else {
1421 genx_batch_emit_pipe_control_write
1422 (&cmd_buffer->batch, cmd_buffer->device->info,
1423 cmd_buffer->state.current_pipeline, WriteTimestamp,
1424 anv_address_add(query_addr, 8), 0,
1425 cs_stall_needed ? ANV_PIPE_CS_STALL_BIT : 0);
1426 emit_query_pc_availability(cmd_buffer, query_addr, true);
1427 }
1428
1429 }
1430
1431
1432 /* When multiview is active the spec requires that N consecutive query
1433 * indices are used, where N is the number of active views in the subpass.
1434 * The spec allows us to write the results to only one of the queries,
1435 * but we still need to manage result availability for all the query indices.
1436 * Since we only emit a single query for all active views in the
1437 * first index, mark the other query indices as being already available
1438 * with result 0.
1439 */
1440 if (cmd_buffer->state.gfx.view_mask) {
1441 const uint32_t num_queries =
1442 util_bitcount(cmd_buffer->state.gfx.view_mask);
1443 if (num_queries > 1)
1444 emit_zero_queries(cmd_buffer, &b, pool, query + 1, num_queries - 1);
1445 }
1446 }
1447
1448 #define MI_PREDICATE_SRC0 0x2400
1449 #define MI_PREDICATE_SRC1 0x2408
1450 #define MI_PREDICATE_RESULT 0x2418
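/* MI_PREDICATE registers used below to make the MI result stores conditional
 * on query availability.
 */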
1451
1452 /**
1453 * Writes the results of a query to dst_addr if the value at poll_addr is equal
1454 * to the reference value.
1455 */
1456 static void
1457 gpu_write_query_result_cond(struct anv_cmd_buffer *cmd_buffer,
1458 struct mi_builder *b,
1459 struct anv_address poll_addr,
1460 struct anv_address dst_addr,
1461 uint64_t ref_value,
1462 VkQueryResultFlags flags,
1463 uint32_t value_index,
1464 struct mi_value query_result)
1465 {
1466 mi_store(b, mi_reg64(MI_PREDICATE_SRC0), mi_mem64(poll_addr));
1467 mi_store(b, mi_reg64(MI_PREDICATE_SRC1), mi_imm(ref_value));
1468 anv_batch_emit(&cmd_buffer->batch, GENX(MI_PREDICATE), mip) {
1469 mip.LoadOperation = LOAD_LOAD;
1470 mip.CombineOperation = COMBINE_SET;
1471 mip.CompareOperation = COMPARE_SRCS_EQUAL;
1472 }
1473
1474 if (flags & VK_QUERY_RESULT_64_BIT) {
1475 struct anv_address res_addr = anv_address_add(dst_addr, value_index * 8);
1476 mi_store_if(b, mi_mem64(res_addr), query_result);
1477 } else {
1478 struct anv_address res_addr = anv_address_add(dst_addr, value_index * 4);
1479 mi_store_if(b, mi_mem32(res_addr), query_result);
1480 }
1481 }
1482
1483 static void
1484 gpu_write_query_result(struct mi_builder *b,
1485 struct anv_address dst_addr,
1486 VkQueryResultFlags flags,
1487 uint32_t value_index,
1488 struct mi_value query_result)
1489 {
1490 if (flags & VK_QUERY_RESULT_64_BIT) {
1491 struct anv_address res_addr = anv_address_add(dst_addr, value_index * 8);
1492 mi_store(b, mi_mem64(res_addr), query_result);
1493 } else {
1494 struct anv_address res_addr = anv_address_add(dst_addr, value_index * 4);
1495 mi_store(b, mi_mem32(res_addr), query_result);
1496 }
1497 }
1498
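/* end - begin of a 64-bit value pair, computed on the command streamer. */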
1499 static struct mi_value
1500 compute_query_result(struct mi_builder *b, struct anv_address addr)
1501 {
1502 return mi_isub(b, mi_mem64(anv_address_add(addr, 8)),
1503 mi_mem64(anv_address_add(addr, 0)));
1504 }
1505
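/* GPU-side copy of query results into a destination buffer using MI commands
 * on the command streamer (backs vkCmdCopyQueryPoolResults).
 */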
1506 static void
1507 copy_query_results_with_cs(struct anv_cmd_buffer *cmd_buffer,
1508 struct anv_query_pool *pool,
1509 struct anv_address dest_addr,
1510 uint64_t dest_stride,
1511 uint32_t first_query,
1512 uint32_t query_count,
1513 VkQueryResultFlags flags)
1514 {
1515 enum anv_pipe_bits needed_flushes = 0;
1516
1517 trace_intel_begin_query_copy_cs(&cmd_buffer->trace);
1518
1519 /* If render target writes are ongoing, request a render target cache flush
1520 * to ensure proper ordering of the commands from the 3d pipe and the
1521 * command streamer.
1522 */
1523 if ((cmd_buffer->state.queries.buffer_write_bits |
1524 cmd_buffer->state.queries.clear_bits) &
1525 ANV_QUERY_WRITES_RT_FLUSH)
1526 needed_flushes |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
1527
1528 if ((cmd_buffer->state.queries.buffer_write_bits |
1529 cmd_buffer->state.queries.clear_bits) &
1530 ANV_QUERY_WRITES_TILE_FLUSH)
1531 needed_flushes |= ANV_PIPE_TILE_CACHE_FLUSH_BIT;
1532
1533 if ((cmd_buffer->state.queries.buffer_write_bits |
1534 cmd_buffer->state.queries.clear_bits) &
1535 ANV_QUERY_WRITES_DATA_FLUSH) {
1536 needed_flushes |= (ANV_PIPE_DATA_CACHE_FLUSH_BIT |
1537 ANV_PIPE_HDC_PIPELINE_FLUSH_BIT |
1538 ANV_PIPE_UNTYPED_DATAPORT_CACHE_FLUSH_BIT);
1539 }
1540
1541 if ((cmd_buffer->state.queries.buffer_write_bits |
1542 cmd_buffer->state.queries.clear_bits) &
1543 ANV_QUERY_WRITES_CS_STALL)
1544 needed_flushes |= ANV_PIPE_CS_STALL_BIT;
1545
1546 * Occlusion & timestamp queries are written using a PIPE_CONTROL and,
1547 * because we're about to copy their values with MI commands, we need to stall
1548 * the command streamer to make sure the PIPE_CONTROL values have
1549 * landed, otherwise we could see inconsistent values & availability.
1550 *
1551 * From the Vulkan spec:
1552 *
1553 * "vkCmdCopyQueryPoolResults is guaranteed to see the effect of
1554 * previous uses of vkCmdResetQueryPool in the same queue, without any
1555 * additional synchronization."
1556 */
1557 if (pool->vk.query_type == VK_QUERY_TYPE_OCCLUSION ||
1558 pool->vk.query_type == VK_QUERY_TYPE_TIMESTAMP)
1559 needed_flushes |= ANV_PIPE_CS_STALL_BIT;
1560
1561 if (needed_flushes) {
1562 anv_add_pending_pipe_bits(cmd_buffer,
1563 needed_flushes,
1564 "CopyQueryPoolResults");
1565 genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
1566 }
1567
1568 struct mi_builder b;
1569 mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
1570 struct mi_value result;
1571
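/* Process one query slot per iteration: optionally wait for the
 * availability write, compute the results with MI commands and store
 * them into the destination buffer, advancing by dest_stride.
 */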
1572 for (uint32_t i = 0; i < query_count; i++) {
1573 struct anv_address query_addr = anv_query_address(pool, first_query + i);
1574 const uint32_t mocs = anv_mocs_for_address(cmd_buffer->device, &query_addr);
1575
1576 mi_builder_set_mocs(&b, mocs);
1577
1578 /* Wait for the availability write to land before we go read the data */
1579 if (flags & VK_QUERY_RESULT_WAIT_BIT) {
1580 anv_batch_emit(&cmd_buffer->batch, GENX(MI_SEMAPHORE_WAIT), sem) {
1581 sem.WaitMode = PollingMode;
1582 sem.CompareOperation = COMPARE_SAD_EQUAL_SDD;
1583 sem.SemaphoreDataDword = true;
1584 sem.SemaphoreAddress = query_addr;
1585 }
1586 }
1587
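/* idx counts the result values written to the destination for this query. */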
1588 uint32_t idx = 0;
1589 switch (pool->vk.query_type) {
1590 case VK_QUERY_TYPE_OCCLUSION:
1591 case VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT:
1592 result = compute_query_result(&b, anv_address_add(query_addr, 8));
1593 /* Like in the case of vkGetQueryPoolResults, if the query is
1594 * unavailable and the VK_QUERY_RESULT_PARTIAL_BIT flag is set,
1595 * conservatively write 0 as the query result. If the
1596 * VK_QUERY_RESULT_PARTIAL_BIT isn't set, don't write any value.
1597 */
1598 gpu_write_query_result_cond(cmd_buffer, &b, query_addr, dest_addr,
1599 1 /* available */, flags, idx, result);
1600 if (flags & VK_QUERY_RESULT_PARTIAL_BIT) {
1601 gpu_write_query_result_cond(cmd_buffer, &b, query_addr, dest_addr,
1602 0 /* unavailable */, flags, idx, mi_imm(0));
1603 }
1604 idx++;
1605 break;
1606
1607 case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
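/* Each enabled statistic occupies a 16-byte (begin, end) pair after
 * the availability qword; walk the enabled bits in order and emit
 * one result per statistic.
 */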
1608 uint32_t statistics = pool->vk.pipeline_statistics;
1609 while (statistics) {
1610 UNUSED uint32_t stat = u_bit_scan(&statistics);
1611 result = compute_query_result(&b, anv_address_add(query_addr,
1612 idx * 16 + 8));
1613 gpu_write_query_result(&b, dest_addr, flags, idx++, result);
1614 }
1615 assert(idx == util_bitcount(pool->vk.pipeline_statistics));
1616 break;
1617 }
1618
1619 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
1620 result = compute_query_result(&b, anv_address_add(query_addr, 8));
1621 gpu_write_query_result(&b, dest_addr, flags, idx++, result);
1622 result = compute_query_result(&b, anv_address_add(query_addr, 24));
1623 gpu_write_query_result(&b, dest_addr, flags, idx++, result);
1624 break;
1625
1626 case VK_QUERY_TYPE_TIMESTAMP:
1627 result = mi_mem64(anv_address_add(query_addr, 8));
1628 gpu_write_query_result(&b, dest_addr, flags, idx++, result);
1629 break;
1630
1631 #if GFX_VERx10 >= 125
1632 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR:
1633 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR:
1634 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR:
1635 result = mi_mem64(anv_address_add(query_addr, 8));
1636 gpu_write_query_result(&b, dest_addr, flags, idx++, result);
1637 break;
1638
1639 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR:
1640 result = mi_mem64(anv_address_add(query_addr, 16));
1641 gpu_write_query_result(&b, dest_addr, flags, idx++, result);
1642 break;
1643 #endif
1644
1645 case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR:
1646 unreachable("Copy KHR performance query results not implemented");
1647 break;
1648
1649 default:
1650 unreachable("unhandled query type");
1651 }
1652
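/* When VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set, append the query's
 * availability value after the result values.
 */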
1653 if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
1654 gpu_write_query_result(&b, dest_addr, flags, idx,
1655 mi_mem64(query_addr));
1656 }
1657
1658 dest_addr = anv_address_add(dest_addr, dest_stride);
1659 }
1660
1661 trace_intel_end_query_copy_cs(&cmd_buffer->trace, query_count);
1662 }
1663
1664 static void
1665 copy_query_results_with_shader(struct anv_cmd_buffer *cmd_buffer,
1666 struct anv_query_pool *pool,
1667 struct anv_address dest_addr,
1668 uint64_t dest_stride,
1669 uint32_t first_query,
1670 uint32_t query_count,
1671 VkQueryResultFlags flags)
1672 {
1673 struct anv_device *device = cmd_buffer->device;
1674 enum anv_pipe_bits needed_flushes = 0;
1675
1676 trace_intel_begin_query_copy_shader(&cmd_buffer->trace);
1677
1678 /* If this is the first command in the batch buffer, make sure we have a
1679 * consistent pipeline mode.
1680 */
1681 if (cmd_buffer->state.current_pipeline == UINT32_MAX)
1682 genX(flush_pipeline_select_3d)(cmd_buffer);
1683
1684 if ((cmd_buffer->state.queries.buffer_write_bits |
1685 cmd_buffer->state.queries.clear_bits) & ANV_QUERY_WRITES_RT_FLUSH)
1686 needed_flushes |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
1687
1688 if ((cmd_buffer->state.queries.buffer_write_bits |
1689 cmd_buffer->state.queries.clear_bits) & ANV_QUERY_WRITES_DATA_FLUSH) {
1690 needed_flushes |= (ANV_PIPE_HDC_PIPELINE_FLUSH_BIT |
1691 ANV_PIPE_UNTYPED_DATAPORT_CACHE_FLUSH_BIT);
1692 }
1693
1694 /* Flushes for the queries to complete */
1695 if (flags & VK_QUERY_RESULT_WAIT_BIT) {
1696 /* Some queries are computed with shaders, so we need them to flush their
1697 * high-level cache writes. The L3 cache should be shared across the GPU.
1698 */
1699 if (pool->vk.query_type == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR ||
1700 pool->vk.query_type == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR ||
1701 pool->vk.query_type == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR ||
1702 pool->vk.query_type == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR) {
1703 needed_flushes |= ANV_PIPE_UNTYPED_DATAPORT_CACHE_FLUSH_BIT;
1704 }
1705 /* And we need to stall for previous CS writes to land or the flushes to
1706 * complete.
1707 */
1708 needed_flushes |= ANV_PIPE_CS_STALL_BIT;
1709 }
1710
1711 /* Occlusion & timestamp queries are written using a PIPE_CONTROL, and
1712 * because we're about to copy their values with MI commands, we need to
1713 * stall the command streamer to make sure the PIPE_CONTROL values have
1714 * landed; otherwise we could see inconsistent values & availability.
1715 *
1716 * From the vulkan spec:
1717 *
1718 * "vkCmdCopyQueryPoolResults is guaranteed to see the effect of
1719 * previous uses of vkCmdResetQueryPool in the same queue, without any
1720 * additional synchronization."
1721 */
1722 if (pool->vk.query_type == VK_QUERY_TYPE_OCCLUSION ||
1723 pool->vk.query_type == VK_QUERY_TYPE_TIMESTAMP)
1724 needed_flushes |= ANV_PIPE_CS_STALL_BIT;
1725
1726 if (needed_flushes) {
1727 anv_add_pending_pipe_bits(cmd_buffer,
1728 needed_flushes | ANV_PIPE_END_OF_PIPE_SYNC_BIT,
1729 "CopyQueryPoolResults");
1730 genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
1731 }
1732
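/* The copy itself is done by an internal kernel, dispatched as a compute
 * shader when the GPGPU pipeline is current and as a fragment shader
 * otherwise.
 */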
1733 struct anv_simple_shader state = {
1734 .device = cmd_buffer->device,
1735 .cmd_buffer = cmd_buffer,
1736 .dynamic_state_stream = &cmd_buffer->dynamic_state_stream,
1737 .general_state_stream = &cmd_buffer->general_state_stream,
1738 .batch = &cmd_buffer->batch,
1739 .kernel = device->internal_kernels[
1740 cmd_buffer->state.current_pipeline == GPGPU ?
1741 ANV_INTERNAL_KERNEL_COPY_QUERY_RESULTS_COMPUTE :
1742 ANV_INTERNAL_KERNEL_COPY_QUERY_RESULTS_FRAGMENT],
1743 .l3_config = device->internal_kernels_l3_config,
1744 .urb_cfg = &cmd_buffer->state.gfx.urb_cfg,
1745 };
1746 genX(emit_simple_shader_init)(&state);
1747
1748 struct anv_state push_data_state =
1749 genX(simple_shader_alloc_push)(&state,
1750 sizeof(struct anv_query_copy_params));
1751 if (push_data_state.map == NULL)
1752 return;
1753
1754 struct anv_query_copy_params *params = push_data_state.map;
1755
1756 uint32_t copy_flags =
1757 ((flags & VK_QUERY_RESULT_64_BIT) ? ANV_COPY_QUERY_FLAG_RESULT64 : 0) |
1758 ((flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) ? ANV_COPY_QUERY_FLAG_AVAILABLE : 0);
1759
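/* num_items is the number of result values copied per query and
 * data_offset is where those values start within a query slot, past the
 * availability qword.
 */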
1760 uint32_t num_items = 1;
1761 uint32_t data_offset = 8 /* behind availability */;
1762 switch (pool->vk.query_type) {
1763 case VK_QUERY_TYPE_OCCLUSION:
1764 case VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT:
1765 copy_flags |= ANV_COPY_QUERY_FLAG_DELTA;
1766 /* These two queries are the only ones where we could have partial data,
1767 * because they are captured with a PIPE_CONTROL post-sync operation. The
1768 * other ones are captured with MI_STORE_REGISTER_DATA, so their data is
1769 * always available by the time we reach the copy command.
1770 */
1771 copy_flags |= (flags & VK_QUERY_RESULT_PARTIAL_BIT) ? ANV_COPY_QUERY_FLAG_PARTIAL : 0;
1772 break;
1773
1774 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1775 num_items = util_bitcount(pool->vk.pipeline_statistics);
1776 copy_flags |= ANV_COPY_QUERY_FLAG_DELTA;
1777 break;
1778
1779 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
1780 num_items = 2;
1781 copy_flags |= ANV_COPY_QUERY_FLAG_DELTA;
1782 break;
1783
1784 case VK_QUERY_TYPE_TIMESTAMP:
1785 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR:
1786 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR:
1787 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR:
1788 break;
1789
1790 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR:
1791 data_offset += 8;
1792 break;
1793
1794 default:
1795 unreachable("unhandled query type");
1796 }
1797
1798 *params = (struct anv_query_copy_params) {
1799 .flags = copy_flags,
1800 .num_queries = query_count,
1801 .num_items = num_items,
1802 .query_base = first_query,
1803 .query_stride = pool->stride,
1804 .query_data_offset = data_offset,
1805 .destination_stride = dest_stride,
1806 .query_data_addr = anv_address_physical(
1807 (struct anv_address) {
1808 .bo = pool->bo,
1809 }),
1810 .destination_addr = anv_address_physical(dest_addr),
1811 };
1812
1813 genX(emit_simple_shader_dispatch)(&state, query_count, push_data_state);
1814
1815 /* The query copy shader writes through the dataport, so flush the
1816 * HDC/data cache depending on the generation. Also stall at the pixel
1817 * scoreboard in case we're doing the copy with a fragment shader.
1818 */
1819 cmd_buffer->state.queries.buffer_write_bits |= ANV_QUERY_WRITES_DATA_FLUSH;
1820
1821 trace_intel_end_query_copy_shader(&cmd_buffer->trace, query_count);
1822 }
1823
1824 void genX(CmdCopyQueryPoolResults)(
1825 VkCommandBuffer commandBuffer,
1826 VkQueryPool queryPool,
1827 uint32_t firstQuery,
1828 uint32_t queryCount,
1829 VkBuffer destBuffer,
1830 VkDeviceSize destOffset,
1831 VkDeviceSize destStride,
1832 VkQueryResultFlags flags)
1833 {
1834 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1835 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1836 ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
1837 struct anv_device *device = cmd_buffer->device;
1838 struct anv_physical_device *pdevice = device->physical;
1839
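/* Copies above the instance's query_copy_with_shader_threshold go through
 * the internal copy shader, smaller ones are emitted inline with MI
 * commands on the command streamer.
 */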
1840 if (queryCount > pdevice->instance->query_copy_with_shader_threshold) {
1841 copy_query_results_with_shader(cmd_buffer, pool,
1842 anv_address_add(buffer->address,
1843 destOffset),
1844 destStride,
1845 firstQuery,
1846 queryCount,
1847 flags);
1848 } else {
1849 copy_query_results_with_cs(cmd_buffer, pool,
1850 anv_address_add(buffer->address,
1851 destOffset),
1852 destStride,
1853 firstQuery,
1854 queryCount,
1855 flags);
1856 }
1857 }
1858
1859 #if GFX_VERx10 == 125 && ANV_SUPPORT_RT
1860
1861 #include "grl/include/GRLRTASCommon.h"
1862 #include "grl/grl_metakernel_postbuild_info.h"
1863
1864 void
1865 genX(CmdWriteAccelerationStructuresPropertiesKHR)(
1866 VkCommandBuffer commandBuffer,
1867 uint32_t accelerationStructureCount,
1868 const VkAccelerationStructureKHR* pAccelerationStructures,
1869 VkQueryType queryType,
1870 VkQueryPool queryPool,
1871 uint32_t firstQuery)
1872 {
1873 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1874 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1875
1876 assert(queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR ||
1877 queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR ||
1878 queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR ||
1879 queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR);
1880
1881 emit_query_clear_flush(cmd_buffer, pool,
1882 "CmdWriteAccelerationStructuresPropertiesKHR flush query clears");
1883
1884 struct mi_builder b;
1885 mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
1886
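/* Each requested property is written by a GRL postbuild-info metakernel
 * directly into the query slot's data area, past the availability qword.
 */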
1887 for (uint32_t i = 0; i < accelerationStructureCount; i++) {
1888 ANV_FROM_HANDLE(vk_acceleration_structure, accel, pAccelerationStructures[i]);
1889 struct anv_address query_addr =
1890 anv_address_add(anv_query_address(pool, firstQuery + i), 8);
1891
1892 switch (queryType) {
1893 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR:
1894 genX(grl_postbuild_info_compacted_size)(cmd_buffer,
1895 vk_acceleration_structure_get_va(accel),
1896 anv_address_physical(query_addr));
1897 break;
1898
1899 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR:
1900 genX(grl_postbuild_info_current_size)(cmd_buffer,
1901 vk_acceleration_structure_get_va(accel),
1902 anv_address_physical(query_addr));
1903 break;
1904
1905 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR:
1906 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR:
1907 genX(grl_postbuild_info_serialized_size)(cmd_buffer,
1908 vk_acceleration_structure_get_va(accel),
1909 anv_address_physical(query_addr));
1910 break;
1911
1912 default:
1913 unreachable("unhandled query type");
1914 }
1915 }
1916
1917 /* TODO: Figure out why MTL needs ANV_PIPE_DATA_CACHE_FLUSH_BIT in order
1918 * to not lose the availability bit.
1919 */
1920 anv_add_pending_pipe_bits(cmd_buffer,
1921 ANV_PIPE_END_OF_PIPE_SYNC_BIT |
1922 ANV_PIPE_DATA_CACHE_FLUSH_BIT,
1923 "after write acceleration struct props");
1924 genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
1925
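/* With the metakernel writes flushed, mark each query as available. */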
1926 for (uint32_t i = 0; i < accelerationStructureCount; i++)
1927 emit_query_mi_availability(&b, anv_query_address(pool, firstQuery + i), true);
1928 }
1929 #endif
1930