• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright © 2018 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <stdint.h>
27 
28 #include "anv_private.h"
29 #include "vk_util.h"
30 
31 #include "perf/intel_perf.h"
32 #include "perf/intel_perf_mdapi.h"
33 
34 #include "util/mesa-sha1.h"
35 
/* Probe the kernel's i915-perf support for this physical device.
 *
 * On success, device->perf holds a freshly allocated intel_perf_config and
 * device->n_perf_query_commands the number of batch commands needed to
 * implement one performance query. On any failure device->perf stays NULL
 * (performance queries unsupported) and the config is freed.
 *
 * \param device  physical device being initialized
 * \param fd      DRM fd used to query kernel perf capabilities
 */
void
anv_physical_device_init_perf(struct anv_physical_device *device, int fd)
{
   const struct intel_device_info *devinfo = &device->info;

   device->perf = NULL;

   /* We need self modifying batches. The i915 parser prevents it on
    * Gfx7.5 :( maybe one day.
    */
   if (devinfo->ver < 8)
      return;

   struct intel_perf_config *perf = intel_perf_new(NULL);

   intel_perf_init_metrics(perf, &device->info, fd,
                           false /* pipeline statistics */,
                           true /* register snapshots */);

   /* No metric sets found in the kernel means no query support. */
   if (!perf->n_queries)
      goto err;

   /* We need DRM_I915_PERF_PROP_HOLD_PREEMPTION support, only available in
    * perf revision 2.
    */
   if (!INTEL_DEBUG(DEBUG_NO_OACONFIG)) {
      if (!intel_perf_has_hold_preemption(perf))
         goto err;
   }

   device->perf = perf;

   /* Compute the number of commands we need to implement a performance
    * query.
    */
   const struct intel_perf_query_field_layout *layout = &perf->query_layout;
   device->n_perf_query_commands = 0;
   for (uint32_t f = 0; f < layout->n_fields; f++) {
      struct intel_perf_query_field *field = &layout->fields[f];

      switch (field->type) {
      case INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC:
         /* One MI_REPORT_PERF_COUNT for the whole field. */
         device->n_perf_query_commands++;
         break;
      case INTEL_PERF_QUERY_FIELD_TYPE_SRM_PERFCNT:
      case INTEL_PERF_QUERY_FIELD_TYPE_SRM_RPSTAT:
      case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_A:
      case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B:
      case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C:
         /* One register-snapshot command per dword of the field. */
         device->n_perf_query_commands += field->size / 4;
         break;
      default:
         unreachable("Unhandled register type");
      }
   }
   device->n_perf_query_commands *= 2; /* Begin & End */
   device->n_perf_query_commands += 1; /* availability */

   return;

 err:
   ralloc_free(perf);
}
99 
100 void
anv_device_perf_init(struct anv_device * device)101 anv_device_perf_init(struct anv_device *device)
102 {
103    device->perf_fd = -1;
104 }
105 
/* Open an i915-perf stream programmed with the given kernel metrics set,
 * bound to this device's GPU context.
 *
 * Returns the stream fd on success, a negative value on failure (the raw
 * ioctl return — callers only test for < 0).
 */
static int
anv_device_perf_open(struct anv_device *device, uint64_t metric_id)
{
   /* Properties are passed to the kernel as flat (key, value) u64 pairs. */
   uint64_t properties[DRM_I915_PERF_PROP_MAX * 2];
   struct drm_i915_perf_open_param param;
   int p = 0, stream_fd;

   /* Include raw OA reports in the stream samples. */
   properties[p++] = DRM_I915_PERF_PROP_SAMPLE_OA;
   properties[p++] = true;

   /* Kernel id of the metrics configuration to load into the OA unit. */
   properties[p++] = DRM_I915_PERF_PROP_OA_METRICS_SET;
   properties[p++] = metric_id;

   /* OA report format; Gfx8+ uses the larger A32u40 layout. */
   properties[p++] = DRM_I915_PERF_PROP_OA_FORMAT;
   properties[p++] = device->info.ver >= 8 ?
      I915_OA_FORMAT_A32u40_A4u32_B8_C8 :
      I915_OA_FORMAT_A45_B8_C8;

   properties[p++] = DRM_I915_PERF_PROP_OA_EXPONENT;
   properties[p++] = 31; /* slowest sampling period */

   /* Scope the stream to our context rather than the whole GPU. */
   properties[p++] = DRM_I915_PERF_PROP_CTX_HANDLE;
   properties[p++] = device->context_id;

   /* Keep our workload from being preempted mid-query (perf revision 2+,
    * checked at physical-device init).
    */
   properties[p++] = DRM_I915_PERF_PROP_HOLD_PREEMPTION;
   properties[p++] = true;

   /* If global SSEU is available, pin it to the default. This will ensure on
    * Gfx11 for instance we use the full EU array. Initially when perf was
    * enabled we would use only half on Gfx11 because of functional
    * requirements.
    *
    * Temporary disable this option on Gfx12.5+, kernel doesn't appear to
    * support it.
    */
   if (intel_perf_has_global_sseu(device->physical->perf) &&
       device->info.verx10 < 125) {
      properties[p++] = DRM_I915_PERF_PROP_GLOBAL_SSEU;
      properties[p++] = (uintptr_t) &device->physical->perf->sseu;
   }

   memset(&param, 0, sizeof(param));
   param.flags = 0;
   param.flags |= I915_PERF_FLAG_FD_CLOEXEC | I915_PERF_FLAG_FD_NONBLOCK;
   param.properties_ptr = (uintptr_t)properties;
   param.num_properties = p / 2; /* pairs, not array entries */

   stream_fd = intel_ioctl(device->fd, DRM_IOCTL_I915_PERF_OPEN, &param);
   return stream_fd;
}
156 
157 /* VK_INTEL_performance_query */
anv_InitializePerformanceApiINTEL(VkDevice _device,const VkInitializePerformanceApiInfoINTEL * pInitializeInfo)158 VkResult anv_InitializePerformanceApiINTEL(
159     VkDevice                                    _device,
160     const VkInitializePerformanceApiInfoINTEL*  pInitializeInfo)
161 {
162    ANV_FROM_HANDLE(anv_device, device, _device);
163 
164    if (!device->physical->perf)
165       return VK_ERROR_EXTENSION_NOT_PRESENT;
166 
167    /* Not much to do here */
168    return VK_SUCCESS;
169 }
170 
anv_GetPerformanceParameterINTEL(VkDevice _device,VkPerformanceParameterTypeINTEL parameter,VkPerformanceValueINTEL * pValue)171 VkResult anv_GetPerformanceParameterINTEL(
172     VkDevice                                    _device,
173     VkPerformanceParameterTypeINTEL             parameter,
174     VkPerformanceValueINTEL*                    pValue)
175 {
176       ANV_FROM_HANDLE(anv_device, device, _device);
177 
178       if (!device->physical->perf)
179          return VK_ERROR_EXTENSION_NOT_PRESENT;
180 
181       VkResult result = VK_SUCCESS;
182       switch (parameter) {
183       case VK_PERFORMANCE_PARAMETER_TYPE_HW_COUNTERS_SUPPORTED_INTEL:
184          pValue->type = VK_PERFORMANCE_VALUE_TYPE_BOOL_INTEL;
185          pValue->data.valueBool = VK_TRUE;
186          break;
187 
188       case VK_PERFORMANCE_PARAMETER_TYPE_STREAM_MARKER_VALID_BITS_INTEL:
189          pValue->type = VK_PERFORMANCE_VALUE_TYPE_UINT32_INTEL;
190          pValue->data.value32 = 25;
191          break;
192 
193       default:
194          result = VK_ERROR_FEATURE_NOT_PRESENT;
195          break;
196       }
197 
198       return result;
199 }
200 
/* Record the marker value on the command buffer; it is emitted into the
 * stream when the command buffer executes.
 */
VkResult anv_CmdSetPerformanceMarkerINTEL(
    VkCommandBuffer                             commandBuffer,
    const VkPerformanceMarkerInfoINTEL*         pMarkerInfo)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->intel_perf_marker = pMarkerInfo->marker;

   return VK_SUCCESS;
}
211 
/* Create a performance configuration object backed by the MDAPI metrics
 * set registered with the kernel.
 *
 * On error paths, partially-constructed state is torn down in reverse
 * order of construction before returning.
 */
VkResult anv_AcquirePerformanceConfigurationINTEL(
    VkDevice                                    _device,
    const VkPerformanceConfigurationAcquireInfoINTEL* pAcquireInfo,
    VkPerformanceConfigurationINTEL*            pConfiguration)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_performance_configuration_intel *config;

   config = vk_object_alloc(&device->vk, NULL, sizeof(*config),
                            VK_OBJECT_TYPE_PERFORMANCE_CONFIGURATION_INTEL);
   if (!config)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (!INTEL_DEBUG(DEBUG_NO_OACONFIG)) {
      /* Read back the register configuration under the MDAPI GUID. */
      config->register_config =
         intel_perf_load_configuration(device->physical->perf, device->fd,
                                     INTEL_PERF_QUERY_GUID_MDAPI);
      if (!config->register_config) {
         vk_object_free(&device->vk, NULL, config);
         return VK_INCOMPLETE;
      }

      /* Register it with the kernel; the returned id is what we later
       * program through the perf stream.
       */
      int ret =
         intel_perf_store_configuration(device->physical->perf, device->fd,
                                      config->register_config, NULL /* guid */);
      if (ret < 0) {
         ralloc_free(config->register_config);
         vk_object_free(&device->vk, NULL, config);
         return VK_INCOMPLETE;
      }

      config->config_id = ret;
   }

   *pConfiguration = anv_performance_configuration_intel_to_handle(config);

   return VK_SUCCESS;
}
250 
/* Destroy a performance configuration: unregister its metrics set from the
 * kernel, then free the register config and the Vulkan object.
 */
VkResult anv_ReleasePerformanceConfigurationINTEL(
    VkDevice                                    _device,
    VkPerformanceConfigurationINTEL             _configuration)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_performance_configuration_intel, config, _configuration);

   /* With NO_OACONFIG no kernel config was ever registered. */
   if (!INTEL_DEBUG(DEBUG_NO_OACONFIG))
      intel_ioctl(device->fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG, &config->config_id);

   ralloc_free(config->register_config);

   vk_object_free(&device->vk, NULL, config);

   return VK_SUCCESS;
}
267 
/* Make a previously acquired configuration active on the queue's device.
 *
 * The first call opens the i915-perf stream with the configuration; later
 * calls just swap the configuration on the already-open stream. A failed
 * swap marks the device lost, per the extension's error contract.
 */
VkResult anv_QueueSetPerformanceConfigurationINTEL(
    VkQueue                                     _queue,
    VkPerformanceConfigurationINTEL             _configuration)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);
   ANV_FROM_HANDLE(anv_performance_configuration_intel, config, _configuration);
   struct anv_device *device = queue->device;

   if (!INTEL_DEBUG(DEBUG_NO_OACONFIG)) {
      if (device->perf_fd < 0) {
         device->perf_fd = anv_device_perf_open(device, config->config_id);
         if (device->perf_fd < 0)
            return VK_ERROR_INITIALIZATION_FAILED;
      } else {
         int ret = intel_ioctl(device->perf_fd, I915_PERF_IOCTL_CONFIG,
                               (void *)(uintptr_t) config->config_id);
         if (ret < 0)
            return vk_device_set_lost(&device->vk, "i915-perf config failed: %m");
      }
   }

   return VK_SUCCESS;
}
291 
/* Tear down VK_INTEL_performance_query state: close the i915-perf stream
 * if one was opened.
 */
void anv_UninitializePerformanceApiINTEL(
    VkDevice                                    _device)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   if (device->perf_fd < 0)
      return;

   close(device->perf_fd);
   device->perf_fd = -1;
}
302 
/* VK_KHR_performance_query */

/* Map intel_perf counter units onto the Vulkan unit enum. Units Vulkan has
 * no equivalent for fall back to GENERIC.
 */
static const VkPerformanceCounterUnitKHR
intel_perf_counter_unit_to_vk_unit[] = {
   [INTEL_PERF_COUNTER_UNITS_BYTES]                                = VK_PERFORMANCE_COUNTER_UNIT_BYTES_KHR,
   [INTEL_PERF_COUNTER_UNITS_HZ]                                   = VK_PERFORMANCE_COUNTER_UNIT_HERTZ_KHR,
   [INTEL_PERF_COUNTER_UNITS_NS]                                   = VK_PERFORMANCE_COUNTER_UNIT_NANOSECONDS_KHR,
   /* Vulkan has no microseconds unit; anv_perf_write_pass_results scales
    * US counter values to nanoseconds to match this mapping.
    */
   [INTEL_PERF_COUNTER_UNITS_US]                                   = VK_PERFORMANCE_COUNTER_UNIT_NANOSECONDS_KHR, /* todo */
   [INTEL_PERF_COUNTER_UNITS_PIXELS]                               = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [INTEL_PERF_COUNTER_UNITS_TEXELS]                               = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [INTEL_PERF_COUNTER_UNITS_THREADS]                              = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [INTEL_PERF_COUNTER_UNITS_PERCENT]                              = VK_PERFORMANCE_COUNTER_UNIT_PERCENTAGE_KHR,
   [INTEL_PERF_COUNTER_UNITS_MESSAGES]                             = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [INTEL_PERF_COUNTER_UNITS_NUMBER]                               = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [INTEL_PERF_COUNTER_UNITS_CYCLES]                               = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [INTEL_PERF_COUNTER_UNITS_EVENTS]                               = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [INTEL_PERF_COUNTER_UNITS_UTILIZATION]                          = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [INTEL_PERF_COUNTER_UNITS_EU_SENDS_TO_L3_CACHE_LINES]           = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [INTEL_PERF_COUNTER_UNITS_EU_ATOMIC_REQUESTS_TO_L3_CACHE_LINES] = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [INTEL_PERF_COUNTER_UNITS_EU_REQUESTS_TO_L3_CACHE_LINES]        = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [INTEL_PERF_COUNTER_UNITS_EU_BYTES_PER_L3_CACHE_LINE]           = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
};
324 
/* Map intel_perf counter data types onto Vulkan storage types. BOOL32 is
 * reported as UINT32 since both are 32-bit values.
 */
static const VkPerformanceCounterStorageKHR
intel_perf_counter_data_type_to_vk_storage[] = {
   [INTEL_PERF_COUNTER_DATA_TYPE_BOOL32] = VK_PERFORMANCE_COUNTER_STORAGE_UINT32_KHR,
   [INTEL_PERF_COUNTER_DATA_TYPE_UINT32] = VK_PERFORMANCE_COUNTER_STORAGE_UINT32_KHR,
   [INTEL_PERF_COUNTER_DATA_TYPE_UINT64] = VK_PERFORMANCE_COUNTER_STORAGE_UINT64_KHR,
   [INTEL_PERF_COUNTER_DATA_TYPE_FLOAT]  = VK_PERFORMANCE_COUNTER_STORAGE_FLOAT32_KHR,
   [INTEL_PERF_COUNTER_DATA_TYPE_DOUBLE] = VK_PERFORMANCE_COUNTER_STORAGE_FLOAT64_KHR,
};
333 
/* Enumerate the performance counters (and their descriptions) exposed on a
 * queue family, using the standard Vulkan two-call out-array idiom for
 * both pCounters and pCounterDescriptions.
 */
VkResult anv_EnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(
    VkPhysicalDevice                            physicalDevice,
    uint32_t                                    queueFamilyIndex,
    uint32_t*                                   pCounterCount,
    VkPerformanceCounterKHR*                    pCounters,
    VkPerformanceCounterDescriptionKHR*         pCounterDescriptions)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
   struct intel_perf_config *perf = pdevice->perf;

   /* Both out-arrays must advance in lockstep; track the description
    * capacity separately since only *pCounterCount is the API's in/out.
    */
   uint32_t desc_count = *pCounterCount;

   VK_OUTARRAY_MAKE_TYPED(VkPerformanceCounterKHR, out, pCounters, pCounterCount);
   VK_OUTARRAY_MAKE_TYPED(VkPerformanceCounterDescriptionKHR, out_desc,
                          pCounterDescriptions, &desc_count);

   /* We cannot support performance queries on anything other than RCS,
    * because the MI_REPORT_PERF_COUNT command is not available on other
    * engines.
    */
   struct anv_queue_family *queue_family =
      &pdevice->queue.families[queueFamilyIndex];
   if (queue_family->engine_class != I915_ENGINE_CLASS_RENDER)
      return vk_outarray_status(&out);

   /* perf may be NULL when i915-perf support was not found. */
   for (int c = 0; c < (perf ? perf->n_counters : 0); c++) {
      const struct intel_perf_query_counter *intel_counter = perf->counter_infos[c].counter;

      vk_outarray_append_typed(VkPerformanceCounterKHR, &out, counter) {
         counter->unit = intel_perf_counter_unit_to_vk_unit[intel_counter->units];
         counter->scope = VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_KHR;
         counter->storage = intel_perf_counter_data_type_to_vk_storage[intel_counter->data_type];

         /* Derive a stable UUID from the counter's symbol name. */
         unsigned char sha1_result[20];
         _mesa_sha1_compute(intel_counter->symbol_name,
                            strlen(intel_counter->symbol_name),
                            sha1_result);
         memcpy(counter->uuid, sha1_result, sizeof(counter->uuid));
      }

      vk_outarray_append_typed(VkPerformanceCounterDescriptionKHR, &out_desc, desc) {
         desc->flags = 0; /* None so far. */
         snprintf(desc->name, sizeof(desc->name), "%s", intel_counter->name);
         snprintf(desc->category, sizeof(desc->category), "%s", intel_counter->category);
         snprintf(desc->description, sizeof(desc->description), "%s", intel_counter->desc);
      }
   }

   return vk_outarray_status(&out);
}
384 
/* Report how many submission passes are required to capture the requested
 * set of counters.
 */
void anv_GetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR(
    VkPhysicalDevice                            physicalDevice,
    const VkQueryPoolPerformanceCreateInfoKHR*  pPerformanceQueryCreateInfo,
    uint32_t*                                   pNumPasses)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
   struct intel_perf_config *perf = pdevice->perf;

   /* Without i915-perf support there are no counters, hence no passes. */
   *pNumPasses = perf == NULL ? 0 :
      intel_perf_get_n_passes(perf,
                              pPerformanceQueryCreateInfo->pCounterIndices,
                              pPerformanceQueryCreateInfo->counterIndexCount,
                              NULL);
}
403 
/* Acquire the profiling lock by opening an i915-perf stream with the first
 * available metric set; opening the stream is what keeps the OA unit
 * configured for our queries.
 *
 * Returns VK_TIMEOUT if the stream cannot be opened (e.g. another process
 * holds it), per the VK_KHR_performance_query contract.
 */
VkResult anv_AcquireProfilingLockKHR(
    VkDevice                                    _device,
    const VkAcquireProfilingLockInfoKHR*        pInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct intel_perf_config *perf = device->physical->perf;
   struct intel_perf_query_info *first_metric_set = &perf->queries[0];
   int fd = -1;

   /* The lock must not already be held. */
   assert(device->perf_fd == -1);

   if (!INTEL_DEBUG(DEBUG_NO_OACONFIG)) {
      fd = anv_device_perf_open(device, first_metric_set->oa_metrics_set_id);
      if (fd < 0)
         return VK_TIMEOUT;
   }

   device->perf_fd = fd;
   return VK_SUCCESS;
}
424 
/* Release the profiling lock by closing the i915-perf stream opened in
 * anv_AcquireProfilingLockKHR (no stream exists under NO_OACONFIG).
 */
void anv_ReleaseProfilingLockKHR(
    VkDevice                                    _device)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   const int fd = device->perf_fd;
   device->perf_fd = -1;

   if (INTEL_DEBUG(DEBUG_NO_OACONFIG))
      return;

   assert(fd >= 0);
   close(fd);
}
436 
/* Convert accumulated query results for one pass into the Vulkan result
 * array.
 *
 * \param perf                 perf configuration used to read OA counters
 * \param pool                 query pool describing the selected counters
 * \param pass                 pass index; counters from other passes are skipped
 * \param accumulated_results  raw accumulated counter deltas for this pass
 * \param results              output array indexed by counter (all passes share it)
 */
void
anv_perf_write_pass_results(struct intel_perf_config *perf,
                            struct anv_query_pool *pool, uint32_t pass,
                            const struct intel_perf_query_result *accumulated_results,
                            union VkPerformanceCounterResultKHR *results)
{
   for (uint32_t c = 0; c < pool->n_counters; c++) {
      const struct intel_perf_counter_pass *counter_pass = &pool->counter_pass[c];

      /* Each counter is captured in exactly one pass. */
      if (counter_pass->pass != pass)
         continue;

      switch (pool->pass_query[pass]->kind) {
      case INTEL_PERF_QUERY_TYPE_PIPELINE: {
         /* Pipeline statistics are plain u64 accumulator slots. */
         assert(counter_pass->counter->data_type == INTEL_PERF_COUNTER_DATA_TYPE_UINT64);
         uint32_t accu_offset = counter_pass->counter->offset / sizeof(uint64_t);
         results[c].uint64 = accumulated_results->accumulator[accu_offset];
         break;
      }

      case INTEL_PERF_QUERY_TYPE_OA:
      case INTEL_PERF_QUERY_TYPE_RAW:
         /* OA counters are read through per-counter callbacks that may
          * combine several accumulator values.
          */
         switch (counter_pass->counter->data_type) {
         case INTEL_PERF_COUNTER_DATA_TYPE_UINT64:
            results[c].uint64 =
               counter_pass->counter->oa_counter_read_uint64(perf,
                                                             counter_pass->query,
                                                             accumulated_results);
            break;
         case INTEL_PERF_COUNTER_DATA_TYPE_FLOAT:
            results[c].float32 =
               counter_pass->counter->oa_counter_read_float(perf,
                                                            counter_pass->query,
                                                            accumulated_results);
            break;
         default:
            /* So far we aren't using uint32, double or bool32... */
            unreachable("unexpected counter data type");
         }
         break;

      default:
         unreachable("invalid query type");
      }

      /* The Vulkan extension only has nanoseconds as a unit */
      if (counter_pass->counter->units == INTEL_PERF_COUNTER_UNITS_US) {
         assert(counter_pass->counter->data_type == INTEL_PERF_COUNTER_DATA_TYPE_UINT64);
         results[c].uint64 *= 1000;
      }
   }
}
489