/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#include "vn_buffer.h"

#include "venus-protocol/vn_protocol_driver_buffer.h"
#include "venus-protocol/vn_protocol_driver_buffer_view.h"

#include "vn_android.h"
#include "vn_device.h"
#include "vn_physical_device.h"
#include "vn_device_memory.h"

/* buffer commands */

/* mandatory buffer create infos to cache */
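/* Each entry uses a unit size: at lookup time, the actual requirement size
 * is derived from the cached alignment via align64(create_info->size,
 * alignment), so one entry per flags/usage combination suffices.
 */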
static const VkBufferCreateInfo cache_infos[] = {
   {
      .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
      .size = 1,
      .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
      .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
   },
   {
      .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
      .size = 1,
      .usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
      .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
   },
   {
      .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
      .size = 1,
      .usage =
         VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT,
      .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
   },
   {
      .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
      .size = 1,
      .usage =
         VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
      .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
   },
   {
      /* mainly for layering clients like angle and zink */
      .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
      .size = 1,
      .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
               VK_BUFFER_USAGE_TRANSFER_DST_BIT |
               VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT |
               VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT |
               VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
               VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
               VK_BUFFER_USAGE_INDEX_BUFFER_BIT |
               VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
               VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT |
               VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT |
               VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT |
               VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT,
      .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
   },
};

static inline bool
vn_buffer_create_info_can_be_cached(const VkBufferCreateInfo *create_info)
{
   /* for simplicity, only cache create infos with VK_SHARING_MODE_EXCLUSIVE
    * and no pNext chain
    */
   return (create_info->pNext == NULL) &&
          (create_info->sharingMode == VK_SHARING_MODE_EXCLUSIVE);
}

static VkResult
vn_buffer_cache_entries_create(struct vn_device *dev,
                               struct vn_buffer_cache_entry **out_entries,
                               uint32_t *out_entry_count)
{
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;
   const struct vk_device_extension_table *app_exts =
      &dev->base.base.enabled_extensions;
   VkDevice dev_handle = vn_device_to_handle(dev);
   struct vn_buffer_cache_entry *entries;
   const uint32_t entry_count = ARRAY_SIZE(cache_infos);
   VkResult result;

   entries = vk_zalloc(alloc, sizeof(*entries) * entry_count,
                       VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!entries)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   for (uint32_t i = 0; i < entry_count; i++) {
      VkBuffer buf_handle = VK_NULL_HANDLE;
      struct vn_buffer *buf = NULL;
      VkBufferCreateInfo local_info = cache_infos[i];

      assert(vn_buffer_create_info_can_be_cached(&cache_infos[i]));

      /* Mask out usage bits from extensions not enabled by the app before
       * creating the buffer. Note that the cache entry's create_info still
       * points at the unmasked info for code simplicity; matching against a
       * usage superset is safe.
       */
      if (!app_exts->EXT_transform_feedback) {
         local_info.usage &=
            ~(VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT |
              VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT);
      }
      if (!app_exts->EXT_conditional_rendering)
         local_info.usage &= ~VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT;

      result = vn_CreateBuffer(dev_handle, &local_info, alloc, &buf_handle);
      if (result != VK_SUCCESS) {
         vk_free(alloc, entries);
         return result;
      }

      buf = vn_buffer_from_handle(buf_handle);

      /* TODO remove below after VK_KHR_maintenance4 becomes a requirement */
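      /* If even a unit-size buffer reports a size larger than its alignment,
       * the align64-based size derivation used at cache lookup would
       * under-estimate requirements, so give up on caching entirely.
       */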
      if (buf->requirements.memory.memoryRequirements.alignment <
          buf->requirements.memory.memoryRequirements.size) {
         vn_DestroyBuffer(dev_handle, buf_handle, alloc);
         vk_free(alloc, entries);
         *out_entries = NULL;
         *out_entry_count = 0;
         return VK_SUCCESS;
      }

      entries[i].create_info = &cache_infos[i];
      entries[i].requirements.memory = buf->requirements.memory;
      entries[i].requirements.dedicated = buf->requirements.dedicated;

      vn_DestroyBuffer(dev_handle, buf_handle, alloc);
   }

   *out_entries = entries;
   *out_entry_count = entry_count;
   return VK_SUCCESS;
}

static void
vn_buffer_cache_entries_destroy(struct vn_device *dev,
                                struct vn_buffer_cache_entry *entries)
{
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

   if (entries)
      vk_free(alloc, entries);
}

static VkResult
vn_buffer_get_max_buffer_size(struct vn_device *dev,
                              uint64_t *out_max_buffer_size)
{
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;
   struct vn_physical_device *pdev = dev->physical_device;
   VkDevice dev_handle = vn_device_to_handle(dev);
   VkBuffer buf_handle;
   VkBufferCreateInfo create_info = {
      .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
      .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
      .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
   };
   uint64_t max_buffer_size = 0;
   uint8_t begin = 0;
   uint8_t end = 64;

   if (pdev->features.maintenance4.maintenance4) {
      *out_max_buffer_size = pdev->properties.maintenance4.maxBufferSize;
      return VK_SUCCESS;
   }

   /* For drivers that don't support VK_KHR_maintenance4, we try to estimate
    * the maxBufferSize using binary search.
    * TODO remove all the search code after VK_KHR_maintenance4 becomes
    * a requirement.
    */
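   /* Binary-search the size exponent: each probe creates (and destroys) a
    * buffer of 2^mid bytes. Success raises the lower bound, failure lowers
    * the upper bound, so the loop converges within at most six probes.
    */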
   while (begin < end) {
      uint8_t mid = (begin + end) >> 1;
      create_info.size = 1ull << mid;
      if (vn_CreateBuffer(dev_handle, &create_info, alloc, &buf_handle) ==
          VK_SUCCESS) {
         vn_DestroyBuffer(dev_handle, buf_handle, alloc);
         max_buffer_size = create_info.size;
         begin = mid + 1;
      } else {
         end = mid;
      }
   }

   *out_max_buffer_size = max_buffer_size;
   return VK_SUCCESS;
}

VkResult
vn_buffer_cache_init(struct vn_device *dev)
{
   uint32_t ahb_mem_type_bits = 0;
   uint64_t max_buffer_size = 0;
   struct vn_buffer_cache_entry *entries = NULL;
   uint32_t entry_count = 0;
   VkResult result;

   if (dev->base.base.enabled_extensions
          .ANDROID_external_memory_android_hardware_buffer) {
      result =
         vn_android_get_ahb_buffer_memory_type_bits(dev, &ahb_mem_type_bits);
      if (result != VK_SUCCESS)
         return result;
   }

   result = vn_buffer_get_max_buffer_size(dev, &max_buffer_size);
   if (result != VK_SUCCESS)
      return result;

   result = vn_buffer_cache_entries_create(dev, &entries, &entry_count);
   if (result != VK_SUCCESS)
      return result;

   dev->buffer_cache.ahb_mem_type_bits = ahb_mem_type_bits;
   dev->buffer_cache.max_buffer_size = max_buffer_size;
   dev->buffer_cache.entries = entries;
   dev->buffer_cache.entry_count = entry_count;
   return VK_SUCCESS;
}

void
vn_buffer_cache_fini(struct vn_device *dev)
{
   vn_buffer_cache_entries_destroy(dev, dev->buffer_cache.entries);
}

static bool
vn_buffer_cache_get_memory_requirements(
   struct vn_buffer_cache *cache,
   const VkBufferCreateInfo *create_info,
   struct vn_buffer_memory_requirements *out)
{
   if (VN_PERF(NO_ASYNC_BUFFER_CREATE))
      return false;

   if (create_info->size > cache->max_buffer_size)
      return false;

   if (!vn_buffer_create_info_can_be_cached(create_info))
      return false;

   /* 12.7. Resource Memory Association
    *
    * The memoryTypeBits member is identical for all VkBuffer objects created
    * with the same value for the flags and usage members in the
    * VkBufferCreateInfo structure and the handleTypes member of the
    * VkExternalMemoryBufferCreateInfo structure passed to vkCreateBuffer.
    * Further, if usage1 and usage2 of type VkBufferUsageFlags are such that
    * the bits set in usage2 are a subset of the bits set in usage1, and they
    * have the same flags and VkExternalMemoryBufferCreateInfo::handleTypes,
    * then the bits set in memoryTypeBits returned for usage1 must be a subset
    * of the bits set in memoryTypeBits returned for usage2, for all values of
    * flags.
    */
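   /* Per the spec text above, a cache entry whose flags match and whose
    * usage is a superset of the requested usage reports memoryTypeBits that
    * are a subset of, and therefore valid for, the requested buffer.
    */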
   for (uint32_t i = 0; i < cache->entry_count; i++) {
      const struct vn_buffer_cache_entry *entry = &cache->entries[i];
      /* TODO: Fix the spec regarding the usage and alignment behavior */
      if ((entry->create_info->flags == create_info->flags) &&
          ((entry->create_info->usage & create_info->usage) ==
           create_info->usage)) {
         *out = entry->requirements;

         /* TODO remove this after VK_KHR_maintenance4 becomes a requirement
          *
          * This is based on the implementation-defined behavior below:
          *
          *    req.size <= align64(info.size, req.alignment)
          */
         out->memory.memoryRequirements.size = align64(
            create_info->size, out->memory.memoryRequirements.alignment);
         return true;
      }
   }

   return false;
}

static VkResult
vn_buffer_init(struct vn_device *dev,
               const VkBufferCreateInfo *create_info,
               struct vn_buffer *buf)
{
   VkDevice dev_handle = vn_device_to_handle(dev);
   VkBuffer buf_handle = vn_buffer_to_handle(buf);
   VkResult result;

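   /* On a cache hit the memory requirements are already known, so buffer
    * creation can be sent to the host asynchronously without waiting for a
    * reply. Otherwise, create synchronously and query the requirements once.
    */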
   if (vn_buffer_cache_get_memory_requirements(
          &dev->buffer_cache, create_info, &buf->requirements)) {
      vn_async_vkCreateBuffer(dev->instance, dev_handle, create_info, NULL,
                              &buf_handle);
      return VK_SUCCESS;
   }

   result = vn_call_vkCreateBuffer(dev->instance, dev_handle, create_info,
                                   NULL, &buf_handle);
   if (result != VK_SUCCESS)
      return result;

   buf->requirements.memory.sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2;
   buf->requirements.memory.pNext = &buf->requirements.dedicated;
   buf->requirements.dedicated.sType =
      VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS;
   buf->requirements.dedicated.pNext = NULL;

   vn_call_vkGetBufferMemoryRequirements2(
      dev->instance, dev_handle,
      &(VkBufferMemoryRequirementsInfo2){
         .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2,
         .buffer = buf_handle,
      },
      &buf->requirements.memory);

   return VK_SUCCESS;
}

VkResult
vn_buffer_create(struct vn_device *dev,
                 const VkBufferCreateInfo *create_info,
                 const VkAllocationCallbacks *alloc,
                 struct vn_buffer **out_buf)
{
   struct vn_buffer *buf = NULL;
   VkResult result;

   buf = vk_zalloc(alloc, sizeof(*buf), VN_DEFAULT_ALIGN,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!buf)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   vn_object_base_init(&buf->base, VK_OBJECT_TYPE_BUFFER, &dev->base);

   result = vn_buffer_init(dev, create_info, buf);
   if (result != VK_SUCCESS) {
      vn_object_base_fini(&buf->base);
      vk_free(alloc, buf);
      return result;
   }

   *out_buf = buf;

   return VK_SUCCESS;
}

VkResult
vn_CreateBuffer(VkDevice device,
                const VkBufferCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkBuffer *pBuffer)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;
   struct vn_buffer *buf = NULL;
   VkResult result;

   const VkExternalMemoryBufferCreateInfo *external_info =
      vk_find_struct_const(pCreateInfo->pNext,
                           EXTERNAL_MEMORY_BUFFER_CREATE_INFO);
   const bool ahb_info =
      external_info &&
      external_info->handleTypes ==
         VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;

   if (ahb_info)
      result = vn_android_buffer_from_ahb(dev, pCreateInfo, alloc, &buf);
   else
      result = vn_buffer_create(dev, pCreateInfo, alloc, &buf);

   if (result != VK_SUCCESS)
      return vn_error(dev->instance, result);

   *pBuffer = vn_buffer_to_handle(buf);

   return VK_SUCCESS;
}

void
vn_DestroyBuffer(VkDevice device,
                 VkBuffer buffer,
                 const VkAllocationCallbacks *pAllocator)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_buffer *buf = vn_buffer_from_handle(buffer);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!buf)
      return;

   vn_async_vkDestroyBuffer(dev->instance, device, buffer, NULL);

   vn_object_base_fini(&buf->base);
   vk_free(alloc, buf);
}

VkDeviceAddress
vn_GetBufferDeviceAddress(VkDevice device,
                          const VkBufferDeviceAddressInfo *pInfo)
{
   struct vn_device *dev = vn_device_from_handle(device);

   return vn_call_vkGetBufferDeviceAddress(dev->instance, device, pInfo);
}

uint64_t
vn_GetBufferOpaqueCaptureAddress(VkDevice device,
                                 const VkBufferDeviceAddressInfo *pInfo)
{
   struct vn_device *dev = vn_device_from_handle(device);

   return vn_call_vkGetBufferOpaqueCaptureAddress(dev->instance, device,
                                                  pInfo);
}

void
vn_GetBufferMemoryRequirements2(VkDevice device,
                                const VkBufferMemoryRequirementsInfo2 *pInfo,
                                VkMemoryRequirements2 *pMemoryRequirements)
{
   const struct vn_buffer *buf = vn_buffer_from_handle(pInfo->buffer);
   union {
      VkBaseOutStructure *pnext;
      VkMemoryRequirements2 *two;
      VkMemoryDedicatedRequirements *dedicated;
   } u = { .two = pMemoryRequirements };

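   /* Walk the caller's pNext chain and fill each recognized struct from the
    * requirements cached at buffer creation; no host round trip is needed.
    */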
   while (u.pnext) {
      switch (u.pnext->sType) {
      case VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2:
         u.two->memoryRequirements =
            buf->requirements.memory.memoryRequirements;
         break;
      case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS:
         u.dedicated->prefersDedicatedAllocation =
            buf->requirements.dedicated.prefersDedicatedAllocation;
         u.dedicated->requiresDedicatedAllocation =
            buf->requirements.dedicated.requiresDedicatedAllocation;
         break;
      default:
         break;
      }
      u.pnext = u.pnext->pNext;
   }
}

VkResult
vn_BindBufferMemory2(VkDevice device,
                     uint32_t bindInfoCount,
                     const VkBindBufferMemoryInfo *pBindInfos)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

   VkBindBufferMemoryInfo *local_infos = NULL;
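   /* If a memory object is suballocated from a base memory, clone the bind
    * infos and rewrite them to bind against the base memory at the combined
    * offset before forwarding to the host.
    */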
   for (uint32_t i = 0; i < bindInfoCount; i++) {
      const VkBindBufferMemoryInfo *info = &pBindInfos[i];
      struct vn_device_memory *mem =
         vn_device_memory_from_handle(info->memory);
      if (!mem->base_memory)
         continue;

      if (!local_infos) {
         const size_t size = sizeof(*local_infos) * bindInfoCount;
         local_infos = vk_alloc(alloc, size, VN_DEFAULT_ALIGN,
                                VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
         if (!local_infos)
            return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

         memcpy(local_infos, pBindInfos, size);
      }

      local_infos[i].memory = vn_device_memory_to_handle(mem->base_memory);
      local_infos[i].memoryOffset += mem->base_offset;
   }
   if (local_infos)
      pBindInfos = local_infos;

   vn_async_vkBindBufferMemory2(dev->instance, device, bindInfoCount,
                                pBindInfos);

   vk_free(alloc, local_infos);

   return VK_SUCCESS;
}

/* buffer view commands */

VkResult
vn_CreateBufferView(VkDevice device,
                    const VkBufferViewCreateInfo *pCreateInfo,
                    const VkAllocationCallbacks *pAllocator,
                    VkBufferView *pView)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_buffer_view *view =
      vk_zalloc(alloc, sizeof(*view), VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!view)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&view->base, VK_OBJECT_TYPE_BUFFER_VIEW, &dev->base);

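   /* The object and its handle are created client-side, so the host-side
    * creation needs no reply and can be sent asynchronously.
    */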
   VkBufferView view_handle = vn_buffer_view_to_handle(view);
   vn_async_vkCreateBufferView(dev->instance, device, pCreateInfo, NULL,
                               &view_handle);

   *pView = view_handle;

   return VK_SUCCESS;
}

void
vn_DestroyBufferView(VkDevice device,
                     VkBufferView bufferView,
                     const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_buffer_view *view = vn_buffer_view_from_handle(bufferView);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!view)
      return;

   vn_async_vkDestroyBufferView(dev->instance, device, bufferView, NULL);

   vn_object_base_fini(&view->base);
   vk_free(alloc, view);
}

void
vn_GetDeviceBufferMemoryRequirements(
   VkDevice device,
   const VkDeviceBufferMemoryRequirements *pInfo,
   VkMemoryRequirements2 *pMemoryRequirements)
{
   struct vn_device *dev = vn_device_from_handle(device);

   /* TODO per-device cache */
   vn_call_vkGetDeviceBufferMemoryRequirements(dev->instance, device, pInfo,
                                               pMemoryRequirements);
}