• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright © 2018 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <stdint.h>
27 
28 #include "anv_private.h"
29 #include "vk_util.h"
30 
31 #include "perf/intel_perf.h"
32 #include "perf/intel_perf_mdapi.h"
33 
34 #include "util/mesa-sha1.h"
35 
36 void
anv_physical_device_init_perf(struct anv_physical_device * device,int fd)37 anv_physical_device_init_perf(struct anv_physical_device *device, int fd)
38 {
39    struct intel_perf_config *perf = intel_perf_new(NULL);
40 
41    intel_perf_init_metrics(perf, &device->info, fd,
42                            false /* pipeline statistics */,
43                            true /* register snapshots */);
44 
45    if (!perf->n_queries)
46       goto err;
47 
48    /* We need DRM_I915_PERF_PROP_HOLD_PREEMPTION support, only available in
49     * perf revision 2.
50     */
51    if (!INTEL_DEBUG(DEBUG_NO_OACONFIG)) {
52       if (!intel_perf_has_hold_preemption(perf))
53          goto err;
54    }
55 
56    device->perf = perf;
57 
58    /* Compute the number of commands we need to implement a performance
59     * query.
60     */
61    const struct intel_perf_query_field_layout *layout = &perf->query_layout;
62    device->n_perf_query_commands = 0;
63    for (uint32_t f = 0; f < layout->n_fields; f++) {
64       struct intel_perf_query_field *field = &layout->fields[f];
65 
66       switch (field->type) {
67       case INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC:
68          device->n_perf_query_commands++;
69          break;
70       case INTEL_PERF_QUERY_FIELD_TYPE_SRM_PERFCNT:
71       case INTEL_PERF_QUERY_FIELD_TYPE_SRM_RPSTAT:
72       case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_A:
73       case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B:
74       case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C:
75       case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_PEC:
76          device->n_perf_query_commands += field->size / 4;
77          break;
78       default:
79          unreachable("Unhandled register type");
80       }
81    }
82    device->n_perf_query_commands *= 2; /* Begin & End */
83    device->n_perf_query_commands += 1; /* availability */
84 
85    return;
86 
87 err:
88    intel_perf_free(perf);
89 }
90 
/* Reset per-device performance state: no i915-perf/Xe-observation stream is
 * open and no queue owns the stream yet.
 */
void
anv_device_perf_init(struct anv_device *device)
{
   device->perf_fd = -1;    /* -1 == no perf stream opened */
   device->perf_queue = NULL;
}
97 
98 void
anv_device_perf_close(struct anv_device * device)99 anv_device_perf_close(struct anv_device *device)
100 {
101    if (device->perf_fd == -1)
102       return;
103 
104    if (intel_bind_timeline_get_syncobj(&device->perf_timeline))
105       intel_bind_timeline_finish(&device->perf_timeline, device->fd);
106    close(device->perf_fd);
107    device->perf_fd = -1;
108 }
109 
110 static uint32_t
anv_device_perf_get_queue_context_or_exec_queue_id(struct anv_queue * queue)111 anv_device_perf_get_queue_context_or_exec_queue_id(struct anv_queue *queue)
112 {
113    struct anv_device *device = queue->device;
114    uint32_t context_or_exec_queue_id;
115 
116    switch (device->physical->info.kmd_type) {
117    case INTEL_KMD_TYPE_I915:
118       context_or_exec_queue_id = device->physical->has_vm_control ?
119                                  queue->context_id : device->context_id;
120       break;
121    case INTEL_KMD_TYPE_XE:
122       context_or_exec_queue_id = queue->exec_queue_id;
123       break;
124    default:
125       unreachable("missing");
126       context_or_exec_queue_id = 0;
127    }
128 
129    return context_or_exec_queue_id;
130 }
131 
132 static int
anv_device_perf_open(struct anv_device * device,struct anv_queue * queue,uint64_t metric_id)133 anv_device_perf_open(struct anv_device *device, struct anv_queue *queue, uint64_t metric_id)
134 {
135    uint64_t period_exponent = 31; /* slowest sampling period */
136    int ret;
137 
138    if (intel_perf_has_metric_sync(device->physical->perf)) {
139       if (!intel_bind_timeline_init(&device->perf_timeline, device->fd))
140          return -1;
141    }
142 
143    ret = intel_perf_stream_open(device->physical->perf, device->fd,
144                                 anv_device_perf_get_queue_context_or_exec_queue_id(queue),
145                                 metric_id, period_exponent, true, true,
146                                 &device->perf_timeline);
147    if (ret >= 0)
148       device->perf_queue = queue;
149    else
150       intel_bind_timeline_finish(&device->perf_timeline, device->fd);
151 
152    return ret;
153 }
154 
155 /* VK_INTEL_performance_query */
anv_InitializePerformanceApiINTEL(VkDevice _device,const VkInitializePerformanceApiInfoINTEL * pInitializeInfo)156 VkResult anv_InitializePerformanceApiINTEL(
157     VkDevice                                    _device,
158     const VkInitializePerformanceApiInfoINTEL*  pInitializeInfo)
159 {
160    ANV_FROM_HANDLE(anv_device, device, _device);
161 
162    if (!device->physical->perf)
163       return VK_ERROR_EXTENSION_NOT_PRESENT;
164 
165    /* Not much to do here */
166    return VK_SUCCESS;
167 }
168 
anv_GetPerformanceParameterINTEL(VkDevice _device,VkPerformanceParameterTypeINTEL parameter,VkPerformanceValueINTEL * pValue)169 VkResult anv_GetPerformanceParameterINTEL(
170     VkDevice                                    _device,
171     VkPerformanceParameterTypeINTEL             parameter,
172     VkPerformanceValueINTEL*                    pValue)
173 {
174       ANV_FROM_HANDLE(anv_device, device, _device);
175 
176       if (!device->physical->perf)
177          return VK_ERROR_EXTENSION_NOT_PRESENT;
178 
179       VkResult result = VK_SUCCESS;
180       switch (parameter) {
181       case VK_PERFORMANCE_PARAMETER_TYPE_HW_COUNTERS_SUPPORTED_INTEL:
182          pValue->type = VK_PERFORMANCE_VALUE_TYPE_BOOL_INTEL;
183          pValue->data.valueBool = VK_TRUE;
184          break;
185 
186       case VK_PERFORMANCE_PARAMETER_TYPE_STREAM_MARKER_VALID_BITS_INTEL:
187          pValue->type = VK_PERFORMANCE_VALUE_TYPE_UINT32_INTEL;
188          pValue->data.value32 = 25;
189          break;
190 
191       default:
192          result = VK_ERROR_FEATURE_NOT_PRESENT;
193          break;
194       }
195 
196       return result;
197 }
198 
/* Record the application-provided marker on the command buffer; it is
 * emitted into the perf stream when the command buffer executes.
 */
VkResult anv_CmdSetPerformanceMarkerINTEL(
    VkCommandBuffer                             commandBuffer,
    const VkPerformanceMarkerInfoINTEL*         pMarkerInfo)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->intel_perf_marker = pMarkerInfo->marker;

   return VK_SUCCESS;
}
209 
anv_AcquirePerformanceConfigurationINTEL(VkDevice _device,const VkPerformanceConfigurationAcquireInfoINTEL * pAcquireInfo,VkPerformanceConfigurationINTEL * pConfiguration)210 VkResult anv_AcquirePerformanceConfigurationINTEL(
211     VkDevice                                    _device,
212     const VkPerformanceConfigurationAcquireInfoINTEL* pAcquireInfo,
213     VkPerformanceConfigurationINTEL*            pConfiguration)
214 {
215    ANV_FROM_HANDLE(anv_device, device, _device);
216    struct anv_performance_configuration_intel *config;
217 
218    config = vk_object_alloc(&device->vk, NULL, sizeof(*config),
219                             VK_OBJECT_TYPE_PERFORMANCE_CONFIGURATION_INTEL);
220    if (!config)
221       return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
222 
223    if (!INTEL_DEBUG(DEBUG_NO_OACONFIG)) {
224       config->register_config =
225          intel_perf_load_configuration(device->physical->perf, device->fd,
226                                      INTEL_PERF_QUERY_GUID_MDAPI);
227       if (!config->register_config) {
228          vk_object_free(&device->vk, NULL, config);
229          return VK_INCOMPLETE;
230       }
231 
232       uint64_t ret =
233          intel_perf_store_configuration(device->physical->perf, device->fd,
234                                       config->register_config, NULL /* guid */);
235       if (ret == 0) {
236          ralloc_free(config->register_config);
237          vk_object_free(&device->vk, NULL, config);
238          return VK_INCOMPLETE;
239       }
240 
241       config->config_id = ret;
242    }
243 
244    *pConfiguration = anv_performance_configuration_intel_to_handle(config);
245 
246    return VK_SUCCESS;
247 }
248 
anv_ReleasePerformanceConfigurationINTEL(VkDevice _device,VkPerformanceConfigurationINTEL _configuration)249 VkResult anv_ReleasePerformanceConfigurationINTEL(
250     VkDevice                                    _device,
251     VkPerformanceConfigurationINTEL             _configuration)
252 {
253    ANV_FROM_HANDLE(anv_device, device, _device);
254    ANV_FROM_HANDLE(anv_performance_configuration_intel, config, _configuration);
255 
256    if (!INTEL_DEBUG(DEBUG_NO_OACONFIG))
257       intel_perf_remove_configuration(device->physical->perf, device->fd, config->config_id);
258 
259    ralloc_free(config->register_config);
260 
261    vk_object_free(&device->vk, NULL, config);
262 
263    return VK_SUCCESS;
264 }
265 
266 static struct anv_queue *
anv_device_get_perf_queue(struct anv_device * device)267 anv_device_get_perf_queue(struct anv_device *device)
268 {
269    for (uint32_t i = 0; i < device->queue_count; i++) {
270       struct anv_queue *queue = &device->queues[i];
271       const struct anv_queue_family *family = queue->family;
272 
273       if (family->supports_perf)
274          return queue;
275    }
276 
277    return NULL;
278 }
279 
anv_QueueSetPerformanceConfigurationINTEL(VkQueue _queue,VkPerformanceConfigurationINTEL _configuration)280 VkResult anv_QueueSetPerformanceConfigurationINTEL(
281     VkQueue                                     _queue,
282     VkPerformanceConfigurationINTEL             _configuration)
283 {
284    ANV_FROM_HANDLE(anv_queue, queue, _queue);
285    ANV_FROM_HANDLE(anv_performance_configuration_intel, config, _configuration);
286    struct anv_device *device = queue->device;
287 
288    if (queue != anv_device_get_perf_queue(device))
289       return VK_ERROR_UNKNOWN;
290 
291    if (!INTEL_DEBUG(DEBUG_NO_OACONFIG)) {
292       if (device->perf_fd < 0) {
293          device->perf_fd = anv_device_perf_open(device, queue, config->config_id);
294          if (device->perf_fd < 0)
295             return VK_ERROR_INITIALIZATION_FAILED;
296       } else {
297          uint32_t context_or_exec_queue = anv_device_perf_get_queue_context_or_exec_queue_id(device->perf_queue);
298          int ret = intel_perf_stream_set_metrics_id(device->physical->perf,
299                                                     device->fd,
300                                                     device->perf_fd,
301                                                     context_or_exec_queue,
302                                                     config->config_id,
303                                                     &device->perf_timeline);
304          if (ret < 0)
305             return vk_device_set_lost(&device->vk, "i915-perf config failed: %m");
306       }
307    }
308 
309    return VK_SUCCESS;
310 }
311 
/* Counterpart to anv_InitializePerformanceApiINTEL: close any open perf
 * stream on the device.
 */
void anv_UninitializePerformanceApiINTEL(
    VkDevice                                    _device)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   anv_device_perf_close(device);
}
319 
320 /* VK_KHR_performance_query */
/* Map intel_perf counter units onto the VK_KHR_performance_query unit enum.
 * Units Vulkan has no equivalent for are reported as GENERIC.
 */
static const VkPerformanceCounterUnitKHR
intel_perf_counter_unit_to_vk_unit[] = {
   [INTEL_PERF_COUNTER_UNITS_BYTES]                                = VK_PERFORMANCE_COUNTER_UNIT_BYTES_KHR,
   [INTEL_PERF_COUNTER_UNITS_HZ]                                   = VK_PERFORMANCE_COUNTER_UNIT_HERTZ_KHR,
   [INTEL_PERF_COUNTER_UNITS_NS]                                   = VK_PERFORMANCE_COUNTER_UNIT_NANOSECONDS_KHR,
   /* Microsecond counters are advertised as nanoseconds; the value is
    * scaled by 1000 in anv_perf_write_pass_results.
    */
   [INTEL_PERF_COUNTER_UNITS_US]                                   = VK_PERFORMANCE_COUNTER_UNIT_NANOSECONDS_KHR, /* todo */
   [INTEL_PERF_COUNTER_UNITS_PIXELS]                               = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [INTEL_PERF_COUNTER_UNITS_TEXELS]                               = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [INTEL_PERF_COUNTER_UNITS_THREADS]                              = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [INTEL_PERF_COUNTER_UNITS_PERCENT]                              = VK_PERFORMANCE_COUNTER_UNIT_PERCENTAGE_KHR,
   [INTEL_PERF_COUNTER_UNITS_MESSAGES]                             = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [INTEL_PERF_COUNTER_UNITS_NUMBER]                               = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [INTEL_PERF_COUNTER_UNITS_CYCLES]                               = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [INTEL_PERF_COUNTER_UNITS_EVENTS]                               = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [INTEL_PERF_COUNTER_UNITS_UTILIZATION]                          = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [INTEL_PERF_COUNTER_UNITS_EU_SENDS_TO_L3_CACHE_LINES]           = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [INTEL_PERF_COUNTER_UNITS_EU_ATOMIC_REQUESTS_TO_L3_CACHE_LINES] = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [INTEL_PERF_COUNTER_UNITS_EU_REQUESTS_TO_L3_CACHE_LINES]        = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [INTEL_PERF_COUNTER_UNITS_EU_BYTES_PER_L3_CACHE_LINE]           = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
};
341 
/* Map intel_perf counter data types onto VK_KHR_performance_query storage
 * types (BOOL32 is reported as UINT32 storage).
 */
static const VkPerformanceCounterStorageKHR
intel_perf_counter_data_type_to_vk_storage[] = {
   [INTEL_PERF_COUNTER_DATA_TYPE_BOOL32] = VK_PERFORMANCE_COUNTER_STORAGE_UINT32_KHR,
   [INTEL_PERF_COUNTER_DATA_TYPE_UINT32] = VK_PERFORMANCE_COUNTER_STORAGE_UINT32_KHR,
   [INTEL_PERF_COUNTER_DATA_TYPE_UINT64] = VK_PERFORMANCE_COUNTER_STORAGE_UINT64_KHR,
   [INTEL_PERF_COUNTER_DATA_TYPE_FLOAT]  = VK_PERFORMANCE_COUNTER_STORAGE_FLOAT32_KHR,
   [INTEL_PERF_COUNTER_DATA_TYPE_DOUBLE] = VK_PERFORMANCE_COUNTER_STORAGE_FLOAT64_KHR,
};
350 
/* Enumerate the performance counters (and optional descriptions) exposed for
 * a queue family.  Both output arrays follow the standard Vulkan two-call
 * count/fill idiom via VK_OUTARRAY.
 */
VkResult anv_EnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(
    VkPhysicalDevice                            physicalDevice,
    uint32_t                                    queueFamilyIndex,
    uint32_t*                                   pCounterCount,
    VkPerformanceCounterKHR*                    pCounters,
    VkPerformanceCounterDescriptionKHR*         pCounterDescriptions)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
   struct intel_perf_config *perf = pdevice->perf;

   /* Descriptions use a separate count so the two outarrays can be filled
    * independently; only the counter array's count is written back.
    */
   uint32_t desc_count = *pCounterCount;

   VK_OUTARRAY_MAKE_TYPED(VkPerformanceCounterKHR, out, pCounters, pCounterCount);
   VK_OUTARRAY_MAKE_TYPED(VkPerformanceCounterDescriptionKHR, out_desc,
                          pCounterDescriptions, &desc_count);

   /* We cannot support performance queries on anything other than RCS,
    * because the MI_REPORT_PERF_COUNT command is not available on other
    * engines.
    */
   struct anv_queue_family *queue_family =
      &pdevice->queue.families[queueFamilyIndex];
   if (queue_family->engine_class != INTEL_ENGINE_CLASS_RENDER)
      return vk_outarray_status(&out);

   /* perf may be NULL when metric discovery failed; report zero counters. */
   for (int c = 0; c < (perf ? perf->n_counters : 0); c++) {
      const struct intel_perf_query_counter *intel_counter = perf->counter_infos[c].counter;

      vk_outarray_append_typed(VkPerformanceCounterKHR, &out, counter) {
         counter->unit = intel_perf_counter_unit_to_vk_unit[intel_counter->units];
         counter->scope = VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_KHR;
         counter->storage = intel_perf_counter_data_type_to_vk_storage[intel_counter->data_type];

         /* Stable UUID: SHA-1 of the counter's symbol name, truncated to
          * the 16-byte uuid field.
          */
         unsigned char sha1_result[20];
         _mesa_sha1_compute(intel_counter->symbol_name,
                            strlen(intel_counter->symbol_name),
                            sha1_result);
         memcpy(counter->uuid, sha1_result, sizeof(counter->uuid));
      }

      vk_outarray_append_typed(VkPerformanceCounterDescriptionKHR, &out_desc, desc) {
         desc->flags = 0; /* None so far. */
         snprintf(desc->name, sizeof(desc->name), "%s",
                  INTEL_DEBUG(DEBUG_PERF_SYMBOL_NAMES) ?
                  intel_counter->symbol_name :
                  intel_counter->name);
         snprintf(desc->category, sizeof(desc->category), "%s", intel_counter->category);
         snprintf(desc->description, sizeof(desc->description), "%s", intel_counter->desc);
      }
   }

   return vk_outarray_status(&out);
}
404 
void anv_GetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR(
    VkPhysicalDevice                            physicalDevice,
    const VkQueryPoolPerformanceCreateInfoKHR*  pPerformanceQueryCreateInfo,
    uint32_t*                                   pNumPasses)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
   struct intel_perf_config *perf = pdevice->perf;

   /* No metrics available -> no passes needed. */
   if (perf == NULL) {
      *pNumPasses = 0;
      return;
   }

   *pNumPasses =
      intel_perf_get_n_passes(perf,
                              pPerformanceQueryCreateInfo->pCounterIndices,
                              pPerformanceQueryCreateInfo->counterIndexCount,
                              NULL);
}
423 
anv_AcquireProfilingLockKHR(VkDevice _device,const VkAcquireProfilingLockInfoKHR * pInfo)424 VkResult anv_AcquireProfilingLockKHR(
425     VkDevice                                    _device,
426     const VkAcquireProfilingLockInfoKHR*        pInfo)
427 {
428    ANV_FROM_HANDLE(anv_device, device, _device);
429    struct intel_perf_config *perf = device->physical->perf;
430    struct intel_perf_query_info *first_metric_set = &perf->queries[0];
431    int fd = -1;
432 
433    assert(device->perf_fd == -1);
434 
435    if (!INTEL_DEBUG(DEBUG_NO_OACONFIG)) {
436       struct anv_queue *queue = anv_device_get_perf_queue(device);
437 
438       if (queue == NULL)
439          return VK_ERROR_UNKNOWN;
440       fd = anv_device_perf_open(device, queue, first_metric_set->oa_metrics_set_id);
441       if (fd < 0)
442          return VK_TIMEOUT;
443    }
444 
445    device->perf_fd = fd;
446    return VK_SUCCESS;
447 }
448 
/* Counterpart to anv_AcquireProfilingLockKHR: close the perf stream opened
 * when the profiling lock was acquired.
 */
void anv_ReleaseProfilingLockKHR(
    VkDevice                                    _device)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   anv_device_perf_close(device);
}
456 
/* Convert the accumulated HW results of one query pass into the
 * VkPerformanceCounterResultKHR array handed back to the application.
 * Only counters belonging to this pass' query are written; other entries
 * of @results are left untouched.
 */
void
anv_perf_write_pass_results(struct intel_perf_config *perf,
                            struct anv_query_pool *pool, uint32_t pass,
                            const struct intel_perf_query_result *accumulated_results,
                            union VkPerformanceCounterResultKHR *results)
{
   const struct intel_perf_query_info *query = pool->pass_query[pass];

   for (uint32_t c = 0; c < pool->n_counters; c++) {
      const struct intel_perf_counter_pass *counter_pass = &pool->counter_pass[c];

      /* Skip counters gathered by a different pass' query. */
      if (counter_pass->query != query)
         continue;

      switch (pool->pass_query[pass]->kind) {
      case INTEL_PERF_QUERY_TYPE_PIPELINE: {
         /* Pipeline statistics are raw 64-bit accumulator values indexed
          * by the counter's byte offset.
          */
         assert(counter_pass->counter->data_type == INTEL_PERF_COUNTER_DATA_TYPE_UINT64);
         uint32_t accu_offset = counter_pass->counter->offset / sizeof(uint64_t);
         results[c].uint64 = accumulated_results->accumulator[accu_offset];
         break;
      }

      case INTEL_PERF_QUERY_TYPE_OA:
      case INTEL_PERF_QUERY_TYPE_RAW:
         /* OA counters are computed through per-counter read callbacks. */
         switch (counter_pass->counter->data_type) {
         case INTEL_PERF_COUNTER_DATA_TYPE_UINT64:
            results[c].uint64 =
               counter_pass->counter->oa_counter_read_uint64(perf,
                                                             counter_pass->query,
                                                             accumulated_results);
            break;
         case INTEL_PERF_COUNTER_DATA_TYPE_FLOAT:
            results[c].float32 =
               counter_pass->counter->oa_counter_read_float(perf,
                                                            counter_pass->query,
                                                            accumulated_results);
            break;
         default:
            /* So far we aren't using uint32, double or bool32... */
            unreachable("unexpected counter data type");
         }
         break;

      default:
         unreachable("invalid query type");
      }

      /* The Vulkan extension only has nanoseconds as a unit */
      if (counter_pass->counter->units == INTEL_PERF_COUNTER_UNITS_US) {
         assert(counter_pass->counter->data_type == INTEL_PERF_COUNTER_DATA_TYPE_UINT64);
         results[c].uint64 *= 1000;
      }
   }
}
511