/*
 * Copyright © 2020 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vk_device.h"

#include "vk_common_entrypoints.h"
#include "vk_instance.h"
#include "vk_log.h"
#include "vk_physical_device.h"
#include "vk_queue.h"
#include "vk_sync.h"
#include "vk_sync_timeline.h"
#include "vk_util.h"
#include "util/u_debug.h"
#include "util/hash_table.h"
#include "util/perf/cpu_trace.h"
#include "util/ralloc.h"
#include "util/timespec.h"

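/* Choose the device's timeline semaphore mode based on the sync types
 * exposed by the physical device: NONE if no timeline sync type exists,
 * EMULATED if timelines are provided by vk_sync_timeline, NATIVE if the
 * timeline sync type supports wait-before-signal itself, and ASSISTED
 * otherwise.
 */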
static enum vk_device_timeline_mode
get_timeline_mode(struct vk_physical_device *physical_device)
{
   if (physical_device->supported_sync_types == NULL)
      return VK_DEVICE_TIMELINE_MODE_NONE;

   const struct vk_sync_type *timeline_type = NULL;
   for (const struct vk_sync_type *const *t =
        physical_device->supported_sync_types; *t; t++) {
      if ((*t)->features & VK_SYNC_FEATURE_TIMELINE) {
         /* We can only have one timeline sync type */
         assert(timeline_type == NULL);
         timeline_type = *t;
      }
   }

   if (timeline_type == NULL)
      return VK_DEVICE_TIMELINE_MODE_NONE;

   if (vk_sync_type_is_vk_sync_timeline(timeline_type))
      return VK_DEVICE_TIMELINE_MODE_EMULATED;

   if (timeline_type->features & VK_SYNC_FEATURE_WAIT_BEFORE_SIGNAL)
      return VK_DEVICE_TIMELINE_MODE_NATIVE;

   /* For assisted mode, we require a few additional things of all sync
    * types which may be used as semaphores.
    */
   for (const struct vk_sync_type *const *t =
        physical_device->supported_sync_types; *t; t++) {
      if ((*t)->features & VK_SYNC_FEATURE_GPU_WAIT) {
         assert((*t)->features & VK_SYNC_FEATURE_WAIT_PENDING);
         if ((*t)->features & VK_SYNC_FEATURE_BINARY)
            assert((*t)->features & VK_SYNC_FEATURE_CPU_RESET);
      }
   }

   return VK_DEVICE_TIMELINE_MODE_ASSISTED;
}

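/* Collect the features enabled at device creation, merging the legacy
 * pEnabledFeatures struct with any feature structs chained into pNext.
 */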
static void
collect_enabled_features(struct vk_device *device,
                         const VkDeviceCreateInfo *pCreateInfo)
{
   if (pCreateInfo->pEnabledFeatures)
      vk_set_physical_device_features_1_0(&device->enabled_features,
                                          pCreateInfo->pEnabledFeatures);
   vk_set_physical_device_features(&device->enabled_features,
                                   pCreateInfo->pNext);
}

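/* Initialize the common device structure from a VkDeviceCreateInfo:
 * set up the dispatch table, validate the requested extensions and
 * features, and choose the timeline and queue-submit modes.
 */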
VkResult
vk_device_init(struct vk_device *device,
               struct vk_physical_device *physical_device,
               const struct vk_device_dispatch_table *dispatch_table,
               const VkDeviceCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *alloc)
{
   memset(device, 0, sizeof(*device));
   vk_object_base_init(device, &device->base, VK_OBJECT_TYPE_DEVICE);
   if (alloc != NULL)
      device->alloc = *alloc;
   else
      device->alloc = physical_device->instance->alloc;

   device->physical = physical_device;

   if (dispatch_table) {
      device->dispatch_table = *dispatch_table;

      /* Add common entrypoints without overwriting driver-provided ones. */
      vk_device_dispatch_table_from_entrypoints(
         &device->dispatch_table, &vk_common_device_entrypoints, false);
   }

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      int idx;
      for (idx = 0; idx < VK_DEVICE_EXTENSION_COUNT; idx++) {
         if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
                    vk_device_extensions[idx].extensionName) == 0)
            break;
      }

      if (idx >= VK_DEVICE_EXTENSION_COUNT)
         return vk_errorf(physical_device, VK_ERROR_EXTENSION_NOT_PRESENT,
                          "%s not supported",
                          pCreateInfo->ppEnabledExtensionNames[i]);

      if (!physical_device->supported_extensions.extensions[idx])
         return vk_errorf(physical_device, VK_ERROR_EXTENSION_NOT_PRESENT,
                          "%s not supported",
                          pCreateInfo->ppEnabledExtensionNames[i]);

#ifdef ANDROID_STRICT
      if (!vk_android_allowed_device_extensions.extensions[idx])
         return vk_errorf(physical_device, VK_ERROR_EXTENSION_NOT_PRESENT,
                          "%s not supported",
                          pCreateInfo->ppEnabledExtensionNames[i]);
#endif

      device->enabled_extensions.extensions[idx] = true;
   }

   VkResult result =
      vk_physical_device_check_device_features(physical_device,
                                               pCreateInfo);
   if (result != VK_SUCCESS)
      return result;

   collect_enabled_features(device, pCreateInfo);

   p_atomic_set(&device->private_data_next_index, 0);

   list_inithead(&device->queues);

   device->drm_fd = -1;
   device->mem_cache = NULL;

   device->timeline_mode = get_timeline_mode(physical_device);

   switch (device->timeline_mode) {
   case VK_DEVICE_TIMELINE_MODE_NONE:
   case VK_DEVICE_TIMELINE_MODE_NATIVE:
      device->submit_mode = VK_QUEUE_SUBMIT_MODE_IMMEDIATE;
      break;

   case VK_DEVICE_TIMELINE_MODE_EMULATED:
      device->submit_mode = VK_QUEUE_SUBMIT_MODE_DEFERRED;
      break;

   case VK_DEVICE_TIMELINE_MODE_ASSISTED:
      if (os_get_option("MESA_VK_ENABLE_SUBMIT_THREAD")) {
         if (debug_get_bool_option("MESA_VK_ENABLE_SUBMIT_THREAD", false)) {
            device->submit_mode = VK_QUEUE_SUBMIT_MODE_THREADED;
         } else {
            device->submit_mode = VK_QUEUE_SUBMIT_MODE_IMMEDIATE;
         }
      } else {
         device->submit_mode = VK_QUEUE_SUBMIT_MODE_THREADED_ON_DEMAND;
      }
      break;

   default:
      unreachable("Invalid timeline mode");
   }

#if DETECT_OS_ANDROID
   mtx_init(&device->swapchain_private_mtx, mtx_plain);
   device->swapchain_private = NULL;
#endif /* DETECT_OS_ANDROID */

   simple_mtx_init(&device->trace_mtx, mtx_plain);

   vk_foreach_struct_const (ext, pCreateInfo->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_DEVICE_PIPELINE_BINARY_INTERNAL_CACHE_CONTROL_KHR: {
         const VkDevicePipelineBinaryInternalCacheControlKHR *cache_control =
            (const void *)ext;
         if (cache_control->disableInternalCache)
            device->disable_internal_cache = true;
         break;
      }
      default:
         break;
      }
   }

   if (device->enabled_extensions.KHR_calibrated_timestamps ||
       device->enabled_extensions.EXT_calibrated_timestamps) {
      /* sorted by preference */
      const VkTimeDomainKHR calibrate_domains[] = {
         VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_KHR,
         VK_TIME_DOMAIN_CLOCK_MONOTONIC_KHR,
      };
      for (uint32_t i = 0; i < ARRAY_SIZE(calibrate_domains); i++) {
         const VkTimeDomainKHR domain = calibrate_domains[i];
         uint64_t ts;
         if (vk_device_get_timestamp(NULL, domain, &ts) == VK_SUCCESS) {
            device->calibrate_time_domain = domain;
            break;
         }
      }

      assert(device->calibrate_time_domain != VK_TIME_DOMAIN_DEVICE_KHR);
      device->device_time_domain_period =
         (uint64_t)ceilf(device->physical->properties.timestampPeriod);
   }

   return VK_SUCCESS;
}

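/* Tear down the common device structure.  The driver must have destroyed
 * all of its queues first.
 */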
void
vk_device_finish(struct vk_device *device)
{
   /* Drivers should tear down their own queues */
   assert(list_is_empty(&device->queues));

   vk_memory_trace_finish(device);

#if DETECT_OS_ANDROID
   if (device->swapchain_private) {
      hash_table_foreach(device->swapchain_private, entry)
         util_sparse_array_finish(entry->data);
      ralloc_free(device->swapchain_private);
   }
#endif /* DETECT_OS_ANDROID */

   simple_mtx_destroy(&device->trace_mtx);

   vk_object_base_finish(&device->base);
}

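/* Route queue submissions through a submit thread, at least on demand.
 * Threaded submit requires that every sync type usable as a wait fence
 * for vkQueueSubmit() support WAIT_PENDING, which the asserts below
 * check.
 */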
void
vk_device_enable_threaded_submit(struct vk_device *device)
{
   /* This must be called before any queues are created */
   assert(list_is_empty(&device->queues));

   /* In order to use threaded submit, we need every sync type that can be
    * used as a wait fence for vkQueueSubmit() to support WAIT_PENDING.
    * It's required for cross-thread/process submit re-ordering.
    */
   for (const struct vk_sync_type *const *t =
        device->physical->supported_sync_types; *t; t++) {
      if ((*t)->features & VK_SYNC_FEATURE_GPU_WAIT)
         assert((*t)->features & VK_SYNC_FEATURE_WAIT_PENDING);
   }

   /* Any binary vk_sync types which will be used as permanent semaphore
    * payloads also need to support vk_sync_type::move, but that's a lot
    * harder to assert since it only applies to permanent semaphore payloads.
    */

   if (device->submit_mode != VK_QUEUE_SUBMIT_MODE_THREADED)
      device->submit_mode = VK_QUEUE_SUBMIT_MODE_THREADED_ON_DEMAND;
}

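/* Flush any submits deferred by VK_QUEUE_SUBMIT_MODE_DEFERRED, looping
 * over the device's queues until none of them makes further progress,
 * since flushing one queue may unblock submits on another.
 */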
VkResult
vk_device_flush(struct vk_device *device)
{
   if (device->submit_mode != VK_QUEUE_SUBMIT_MODE_DEFERRED)
      return VK_SUCCESS;

   bool progress;
   do {
      progress = false;

      vk_foreach_queue(queue, device) {
         uint32_t queue_submit_count;
         VkResult result = vk_queue_flush(queue, &queue_submit_count);
         if (unlikely(result != VK_SUCCESS))
            return result;

         if (queue_submit_count)
            progress = true;
      }
   } while (progress);

   return VK_SUCCESS;
}

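/* Human-readable name of the device's timeline mode, for logging. */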
static const char *
timeline_mode_str(struct vk_device *device)
{
   switch (device->timeline_mode) {
#define CASE(X) case VK_DEVICE_TIMELINE_MODE_##X: return #X;
   CASE(NONE)
   CASE(EMULATED)
   CASE(ASSISTED)
   CASE(NATIVE)
#undef CASE
   default: return "UNKNOWN";
   }
}

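/* Report a previously recorded device loss: replay any per-queue error
 * messages and log the timeline mode, which is useful context when
 * debugging a lost device.
 */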
void
_vk_device_report_lost(struct vk_device *device)
{
   assert(p_atomic_read(&device->_lost.lost) > 0);

   device->_lost.reported = true;

   vk_foreach_queue(queue, device) {
      if (queue->_lost.lost) {
         __vk_errorf(queue, VK_ERROR_DEVICE_LOST,
                     queue->_lost.error_file, queue->_lost.error_line,
                     "%s", queue->_lost.error_msg);
      }
   }

   vk_logd(VK_LOG_OBJS(device), "Timeline mode is %s.",
           timeline_mode_str(device));
}

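/* Mark the device lost and log the reason.  Always returns
 * VK_ERROR_DEVICE_LOST so callers can simply return the result.  Setting
 * MESA_VK_ABORT_ON_DEVICE_LOSS aborts instead, which makes it easier to
 * catch the loss in a debugger.
 */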
VkResult
_vk_device_set_lost(struct vk_device *device,
                    const char *file, int line,
                    const char *msg, ...)
{
   /* This flushes out any per-queue device lost messages */
   if (vk_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   p_atomic_inc(&device->_lost.lost);
   device->_lost.reported = true;

   va_list ap;
   va_start(ap, msg);
   __vk_errorv(device, VK_ERROR_DEVICE_LOST, file, line, msg, ap);
   va_end(ap);

   vk_logd(VK_LOG_OBJS(device), "Timeline mode is %s.",
           timeline_mode_str(device));

   if (debug_get_bool_option("MESA_VK_ABORT_ON_DEVICE_LOSS", false))
      abort();

   return VK_ERROR_DEVICE_LOST;
}

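/* Look up a device-level entrypoint, returning NULL for entrypoints not
 * supported by the instance's API version and the enabled instance and
 * device extensions.
 */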
PFN_vkVoidFunction
vk_device_get_proc_addr(const struct vk_device *device,
                        const char *name)
{
   if (device == NULL || name == NULL)
      return NULL;

   struct vk_instance *instance = device->physical->instance;
   return vk_device_dispatch_table_get_if_supported(&device->dispatch_table,
                                                    name,
                                                    instance->app_info.api_version,
                                                    &instance->enabled_extensions,
                                                    &device->enabled_extensions);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_common_GetDeviceProcAddr(VkDevice _device,
                            const char *pName)
{
   VK_FROM_HANDLE(vk_device, device, _device);
   return vk_device_get_proc_addr(device, pName);
}

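/* The common vkGetDeviceQueue is implemented in terms of
 * vkGetDeviceQueue2 with flags = 0, so drivers only need to implement
 * the latter.
 */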
VKAPI_ATTR void VKAPI_CALL
vk_common_GetDeviceQueue(VkDevice _device,
                         uint32_t queueFamilyIndex,
                         uint32_t queueIndex,
                         VkQueue *pQueue)
{
   VK_FROM_HANDLE(vk_device, device, _device);

   const VkDeviceQueueInfo2 info = {
      .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
      .pNext = NULL,
      /* flags = 0 because (Vulkan spec 1.2.170 - vkGetDeviceQueue):
       *
       *    "vkGetDeviceQueue must only be used to get queues that were
       *     created with the flags parameter of VkDeviceQueueCreateInfo set
       *     to zero. To get queues that were created with a non-zero flags
       *     parameter use vkGetDeviceQueue2."
       */
      .flags = 0,
      .queueFamilyIndex = queueFamilyIndex,
      .queueIndex = queueIndex,
   };

   device->dispatch_table.GetDeviceQueue2(_device, &info, pQueue);
}

VKAPI_ATTR void VKAPI_CALL
vk_common_GetDeviceQueue2(VkDevice _device,
                          const VkDeviceQueueInfo2 *pQueueInfo,
                          VkQueue *pQueue)
{
   VK_FROM_HANDLE(vk_device, device, _device);

   struct vk_queue *queue = NULL;
   vk_foreach_queue(iter, device) {
      if (iter->queue_family_index == pQueueInfo->queueFamilyIndex &&
          iter->index_in_family == pQueueInfo->queueIndex) {
         queue = iter;
         break;
      }
   }

   /* From the Vulkan 1.1.70 spec:
    *
    *    "The queue returned by vkGetDeviceQueue2 must have the same flags
    *    value from this structure as that used at device creation time in a
    *    VkDeviceQueueCreateInfo instance. If no matching flags were specified
    *    at device creation time then pQueue will return VK_NULL_HANDLE."
    */
   if (queue && queue->flags == pQueueInfo->flags)
      *pQueue = vk_queue_to_handle(queue);
   else
      *pQueue = VK_NULL_HANDLE;
}

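/* The common vkMapMemory and vkUnmapMemory are implemented on top of the
 * VK_KHR_map_memory2 entrypoints, so drivers only need to implement
 * MapMemory2KHR and UnmapMemory2KHR.
 */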
VKAPI_ATTR VkResult VKAPI_CALL
vk_common_MapMemory(VkDevice _device,
                    VkDeviceMemory memory,
                    VkDeviceSize offset,
                    VkDeviceSize size,
                    VkMemoryMapFlags flags,
                    void **ppData)
{
   VK_FROM_HANDLE(vk_device, device, _device);

   const VkMemoryMapInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_MAP_INFO_KHR,
      .flags = flags,
      .memory = memory,
      .offset = offset,
      .size = size,
   };

   return device->dispatch_table.MapMemory2KHR(_device, &info, ppData);
}

VKAPI_ATTR void VKAPI_CALL
vk_common_UnmapMemory(VkDevice _device,
                      VkDeviceMemory memory)
{
   VK_FROM_HANDLE(vk_device, device, _device);
   ASSERTED VkResult result;

   const VkMemoryUnmapInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_UNMAP_INFO_KHR,
      .memory = memory,
   };

   result = device->dispatch_table.UnmapMemory2KHR(_device, &info);
   assert(result == VK_SUCCESS);
}

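/* The common implementation assumes a device group of size one, in which
 * case all peer-memory features are trivially supported.
 */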
VKAPI_ATTR void VKAPI_CALL
vk_common_GetDeviceGroupPeerMemoryFeatures(
   VkDevice device,
   uint32_t heapIndex,
   uint32_t localDeviceIndex,
   uint32_t remoteDeviceIndex,
   VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
{
   assert(localDeviceIndex == 0 && remoteDeviceIndex == 0);
   *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
}

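/* Likewise, the 1.0 image memory entrypoints below are implemented on
 * top of their *2 counterparts.
 */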
VKAPI_ATTR void VKAPI_CALL
vk_common_GetImageMemoryRequirements(VkDevice _device,
                                     VkImage image,
                                     VkMemoryRequirements *pMemoryRequirements)
{
   VK_FROM_HANDLE(vk_device, device, _device);

   VkImageMemoryRequirementsInfo2 info = {
      .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2,
      .image = image,
   };
   VkMemoryRequirements2 reqs = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
   };
   device->dispatch_table.GetImageMemoryRequirements2(_device, &info, &reqs);

   *pMemoryRequirements = reqs.memoryRequirements;
}

VKAPI_ATTR VkResult VKAPI_CALL
vk_common_BindImageMemory(VkDevice _device,
                          VkImage image,
                          VkDeviceMemory memory,
                          VkDeviceSize memoryOffset)
{
   VK_FROM_HANDLE(vk_device, device, _device);

   VkBindImageMemoryInfo bind = {
      .sType         = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
      .image         = image,
      .memory        = memory,
      .memoryOffset  = memoryOffset,
   };

   return device->dispatch_table.BindImageMemory2(_device, 1, &bind);
}

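/* Wrap GetImageSparseMemoryRequirements2, translating the returned
 * VkSparseImageMemoryRequirements2 array back into 1.0 structs.  A NULL
 * pSparseMemoryRequirements is a count-only query and is forwarded
 * directly.
 */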
VKAPI_ATTR void VKAPI_CALL
vk_common_GetImageSparseMemoryRequirements(VkDevice _device,
                                           VkImage image,
                                           uint32_t *pSparseMemoryRequirementCount,
                                           VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
{
   VK_FROM_HANDLE(vk_device, device, _device);

   VkImageSparseMemoryRequirementsInfo2 info = {
      .sType = VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2,
      .image = image,
   };

   if (!pSparseMemoryRequirements) {
      device->dispatch_table.GetImageSparseMemoryRequirements2(_device,
                                                               &info,
                                                               pSparseMemoryRequirementCount,
                                                               NULL);
      return;
   }

   STACK_ARRAY(VkSparseImageMemoryRequirements2, mem_reqs2,
               *pSparseMemoryRequirementCount);

   for (unsigned i = 0; i < *pSparseMemoryRequirementCount; ++i) {
      mem_reqs2[i].sType = VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2;
      mem_reqs2[i].pNext = NULL;
   }

   device->dispatch_table.GetImageSparseMemoryRequirements2(_device,
                                                            &info,
                                                            pSparseMemoryRequirementCount,
                                                            mem_reqs2);

   for (unsigned i = 0; i < *pSparseMemoryRequirementCount; ++i)
      pSparseMemoryRequirements[i] = mem_reqs2[i].memoryRequirements;

   STACK_ARRAY_FINISH(mem_reqs2);
}

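/* vkDeviceWaitIdle is equivalent to waiting on every queue, so the
 * common implementation calls QueueWaitIdle on each queue in turn.
 */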
VKAPI_ATTR VkResult VKAPI_CALL
vk_common_DeviceWaitIdle(VkDevice _device)
{
   MESA_TRACE_FUNC();

   VK_FROM_HANDLE(vk_device, device, _device);
   const struct vk_device_dispatch_table *disp = &device->dispatch_table;

   vk_foreach_queue(queue, device) {
      VkResult result = disp->QueueWaitIdle(vk_queue_to_handle(queue));
      if (result != VK_SUCCESS)
         return result;
   }

   return VK_SUCCESS;
}

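/* Sample a timestamp from the given time domain.  The device domain
 * dispatches to the driver's get_timestamp hook; host clock domains read
 * the corresponding CLOCK_* clock and permit a NULL device.  Unsupported
 * domains return VK_ERROR_FEATURE_NOT_PRESENT.
 */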
VkResult
vk_device_get_timestamp(struct vk_device *device, VkTimeDomainKHR domain,
                        uint64_t *timestamp)
{
   if (domain == VK_TIME_DOMAIN_DEVICE_KHR) {
      assert(device && device->get_timestamp);
      return device->get_timestamp(device, timestamp);
   }

   /* device is not used for host time domains */
#ifndef _WIN32
   clockid_t clockid;
   struct timespec ts;

   switch (domain) {
   case VK_TIME_DOMAIN_CLOCK_MONOTONIC_KHR:
      clockid = CLOCK_MONOTONIC;
      break;
   case VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_KHR:
      /* The "RAW" clocks on Linux are called "FAST" on FreeBSD */
#if defined(CLOCK_MONOTONIC_RAW)
      clockid = CLOCK_MONOTONIC_RAW;
      break;
#elif defined(CLOCK_MONOTONIC_FAST)
      clockid = CLOCK_MONOTONIC_FAST;
      break;
#else
      FALLTHROUGH;
#endif
   default:
      goto fail;
   }

   if (clock_gettime(clockid, &ts) < 0)
      goto fail;

   *timestamp = (uint64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
   return VK_SUCCESS;

fail:
#endif /* !_WIN32 */
   return VK_ERROR_FEATURE_NOT_PRESENT;
}

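/* Common vkGetCalibratedTimestampsKHR: all requested timestamps are
 * sampled between two reads of the device's calibration domain, and the
 * maximum deviation is computed from that window and the largest clock
 * period involved.
 */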
VKAPI_ATTR VkResult VKAPI_CALL
vk_common_GetCalibratedTimestampsKHR(
   VkDevice _device, uint32_t timestampCount,
   const VkCalibratedTimestampInfoKHR *pTimestampInfos, uint64_t *pTimestamps,
   uint64_t *pMaxDeviation)
{
   VK_FROM_HANDLE(vk_device, device, _device);
   uint64_t begin, end;
   VkResult result;

   /* Collect the timestamps as tightly as possible.  VK_SUCCESS is zero,
    * so OR-ing VkResults together accumulates any failure.
    */
   result =
      vk_device_get_timestamp(device, device->calibrate_time_domain, &begin);
   for (uint32_t i = 0; i < timestampCount; i++) {
      const VkTimeDomainKHR domain = pTimestampInfos[i].timeDomain;
      if (domain == device->calibrate_time_domain)
         pTimestamps[i] = begin;
      else
         result |= vk_device_get_timestamp(device, domain, &pTimestamps[i]);
   }
   result |=
      vk_device_get_timestamp(device, device->calibrate_time_domain, &end);

   if (result != VK_SUCCESS)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   uint64_t max_clock_period = 0;
   for (uint32_t i = 0; i < timestampCount; i++) {
      const VkTimeDomainKHR domain = pTimestampInfos[i].timeDomain;
      const uint64_t period = domain == VK_TIME_DOMAIN_DEVICE_KHR
         ? device->device_time_domain_period
         : domain != device->calibrate_time_domain ? 1 : 0;
      max_clock_period = MAX2(max_clock_period, period);
   }

   *pMaxDeviation = vk_time_max_deviation(begin, end, max_clock_period);

   return VK_SUCCESS;
}

#ifndef _WIN32

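/* Read clock_id in nanoseconds, falling back from CLOCK_MONOTONIC_RAW to
 * CLOCK_MONOTONIC where the former is unavailable.  Returns 0 on
 * failure.
 */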
uint64_t
vk_clock_gettime(clockid_t clock_id)
{
   struct timespec current;
   int ret;

   ret = clock_gettime(clock_id, &current);
#ifdef CLOCK_MONOTONIC_RAW
   if (ret < 0 && clock_id == CLOCK_MONOTONIC_RAW)
      ret = clock_gettime(CLOCK_MONOTONIC, &current);
#endif
   if (ret < 0)
      return 0;

   return (uint64_t)current.tv_sec * 1000000000ULL + current.tv_nsec;
}

#endif /* !_WIN32 */