/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#include "vn_device_memory.h"

#include "venus-protocol/vn_protocol_driver_device_memory.h"
#include "venus-protocol/vn_protocol_driver_transport.h"

#include "vn_android.h"
#include "vn_buffer.h"
#include "vn_device.h"
#include "vn_image.h"
#include "vn_physical_device.h"

/* device memory commands */

static VkResult
vn_device_memory_pool_grow_alloc(struct vn_device *dev,
                                 uint32_t mem_type_index,
                                 VkDeviceSize size,
                                 struct vn_device_memory **out_mem)
{
   VkDevice dev_handle = vn_device_to_handle(dev);
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;
   const VkPhysicalDeviceMemoryProperties *mem_props =
      &dev->physical_device->memory_properties.memoryProperties;
   const VkMemoryPropertyFlags mem_flags =
      mem_props->memoryTypes[mem_type_index].propertyFlags;
   struct vn_device_memory *mem = NULL;
   VkDeviceMemory mem_handle = VK_NULL_HANDLE;
   VkResult result;

   mem = vk_zalloc(alloc, sizeof(*mem), VN_DEFAULT_ALIGN,
                   VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!mem)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   vn_object_base_init(&mem->base, VK_OBJECT_TYPE_DEVICE_MEMORY, &dev->base);
   mem->size = size;
   mem->flags = mem_flags;

   mem_handle = vn_device_memory_to_handle(mem);
   result = vn_call_vkAllocateMemory(
      dev->instance, dev_handle,
      &(const VkMemoryAllocateInfo){
         .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
         .allocationSize = size,
         .memoryTypeIndex = mem_type_index,
      },
      NULL, &mem_handle);
   if (result != VK_SUCCESS) {
      mem_handle = VK_NULL_HANDLE;
      goto fail;
   }

   result = vn_renderer_bo_create_from_device_memory(
      dev->renderer, mem->size, mem->base.id, mem->flags, 0, &mem->base_bo);
   if (result != VK_SUCCESS) {
      assert(!mem->base_bo);
      goto fail;
   }

   result =
      vn_instance_submit_roundtrip(dev->instance, &mem->bo_roundtrip_seqno);
   if (result != VK_SUCCESS)
      goto fail;

   mem->bo_roundtrip_seqno_valid = true;
   *out_mem = mem;

   return VK_SUCCESS;

fail:
   if (mem->base_bo)
      vn_renderer_bo_unref(dev->renderer, mem->base_bo);
   if (mem_handle != VK_NULL_HANDLE)
      vn_async_vkFreeMemory(dev->instance, dev_handle, mem_handle, NULL);
   vn_object_base_fini(&mem->base);
   vk_free(alloc, mem);
   return result;
}

static struct vn_device_memory *
vn_device_memory_pool_ref(struct vn_device *dev,
                          struct vn_device_memory *pool_mem)
{
   assert(pool_mem->base_bo);

   vn_renderer_bo_ref(dev->renderer, pool_mem->base_bo);

   return pool_mem;
}

static void
vn_device_memory_pool_unref(struct vn_device *dev,
                            struct vn_device_memory *pool_mem)
{
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

   assert(pool_mem->base_bo);

   if (!vn_renderer_bo_unref(dev->renderer, pool_mem->base_bo))
      return;

   /* wait on a valid bo_roundtrip_seqno before vkFreeMemory, so that the
    * renderer has finished creating the bo before the memory backing it is
    * freed
    */
   if (pool_mem->bo_roundtrip_seqno_valid)
      vn_instance_wait_roundtrip(dev->instance, pool_mem->bo_roundtrip_seqno);

   vn_async_vkFreeMemory(dev->instance, vn_device_to_handle(dev),
                         vn_device_memory_to_handle(pool_mem), NULL);
   vn_object_base_fini(&pool_mem->base);
   vk_free(alloc, pool_mem);
}

void
vn_device_memory_pool_fini(struct vn_device *dev, uint32_t mem_type_index)
{
   struct vn_device_memory_pool *pool = &dev->memory_pools[mem_type_index];
   if (pool->memory)
      vn_device_memory_pool_unref(dev, pool->memory);
   mtx_destroy(&pool->mutex);
}

static VkResult
vn_device_memory_pool_grow_locked(struct vn_device *dev,
                                  uint32_t mem_type_index,
                                  VkDeviceSize size)
{
   struct vn_device_memory *mem;
   VkResult result =
      vn_device_memory_pool_grow_alloc(dev, mem_type_index, size, &mem);
   if (result != VK_SUCCESS)
      return result;

   struct vn_device_memory_pool *pool = &dev->memory_pools[mem_type_index];
   if (pool->memory)
      vn_device_memory_pool_unref(dev, pool->memory);

   pool->memory = mem;
   pool->used = 0;

   return VK_SUCCESS;
}

static VkResult
vn_device_memory_pool_suballocate(struct vn_device *dev,
                                  struct vn_device_memory *mem,
                                  uint32_t mem_type_index)
{
   const VkDeviceSize pool_size = 16 * 1024 * 1024;
   /* XXX We don't know the alignment requirement.  Use 64K because some GPUs
    * have 64K pages.  It is also required by newer Intel GPUs.  But really we
    * should require kernel 5.12+, where there is no KVM memslot limit, and
    * remove this whole thing.
    */
   const VkDeviceSize pool_align = 64 * 1024;
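   /* with these sizes, a pool serves at most 16M / 64K = 256 suballocations
    * before it has to be replaced via vn_device_memory_pool_grow_locked
    */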
   struct vn_device_memory_pool *pool = &dev->memory_pools[mem_type_index];

   assert(mem->size <= pool_size);

   mtx_lock(&pool->mutex);

   if (!pool->memory || pool->used + mem->size > pool_size) {
      VkResult result =
         vn_device_memory_pool_grow_locked(dev, mem_type_index, pool_size);
      if (result != VK_SUCCESS) {
         mtx_unlock(&pool->mutex);
         return result;
      }
   }

   mem->base_memory = vn_device_memory_pool_ref(dev, pool->memory);

   /* point mem->base_bo at the pool's base_bo and assign base_offset to the
    * current pool offset; round the size up to pool_align so that the next
    * suballocation stays 64K-aligned
    */
   mem->base_bo = pool->memory->base_bo;
   mem->base_offset = pool->used;
   pool->used += align64(mem->size, pool_align);

   mtx_unlock(&pool->mutex);

   return VK_SUCCESS;
}

static bool
vn_device_memory_should_suballocate(const struct vn_device *dev,
                                    const VkMemoryAllocateInfo *alloc_info,
                                    const VkMemoryPropertyFlags flags)
{
   const struct vn_instance *instance = dev->physical_device->instance;
   const struct vn_renderer_info *renderer = &instance->renderer->info;

   if (renderer->has_guest_vram)
      return false;

   /* We should not support suballocations because apps can do better.  But
    * each BO currently takes up a KVM memslot and some CTS tests exhaust
    * them.  This might not be needed on newer (host) kernels where there are
    * many more KVM memslots.
    */
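   /* e.g. a 4K HOST_VISIBLE allocation with an empty pNext chain is
    * suballocated by the checks below, while one larger than 64K, or one
    * requiring a dedicated allocation, gets its own memory object
    */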

   /* consider host-visible memory only */
   if (!(flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))
      return false;

   /* reject larger allocations */
   if (alloc_info->allocationSize > 64 * 1024)
      return false;

   /* reject if there is any pnext struct other than
    * VkMemoryDedicatedAllocateInfo, or if dedicated allocation is required
    */
   if (alloc_info->pNext) {
      const VkMemoryDedicatedAllocateInfo *dedicated = alloc_info->pNext;
      if (dedicated->sType !=
             VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO ||
          dedicated->pNext)
         return false;

      const struct vn_image *img = vn_image_from_handle(dedicated->image);
      if (img) {
         for (uint32_t i = 0; i < ARRAY_SIZE(img->requirements); i++) {
            if (img->requirements[i].dedicated.requiresDedicatedAllocation)
               return false;
         }
      }

      const struct vn_buffer *buf = vn_buffer_from_handle(dedicated->buffer);
      if (buf && buf->requirements.dedicated.requiresDedicatedAllocation)
         return false;
   }

   return true;
}

VkResult
vn_device_memory_import_dma_buf(struct vn_device *dev,
                                struct vn_device_memory *mem,
                                const VkMemoryAllocateInfo *alloc_info,
                                bool force_unmappable,
                                int fd)
{
   VkDevice device = vn_device_to_handle(dev);
   VkDeviceMemory memory = vn_device_memory_to_handle(mem);
   const VkPhysicalDeviceMemoryProperties *mem_props =
      &dev->physical_device->memory_properties.memoryProperties;
   VkMemoryPropertyFlags mem_flags =
      mem_props->memoryTypes[alloc_info->memoryTypeIndex].propertyFlags;
   struct vn_renderer_bo *bo;
   VkResult result = VK_SUCCESS;

   if (force_unmappable)
      mem_flags &= ~VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;

   result = vn_renderer_bo_create_from_dma_buf(
      dev->renderer, alloc_info->allocationSize, fd, mem_flags, &bo);
   if (result != VK_SUCCESS)
      return result;

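   /* presumably to ensure the renderer has created the bo, and thus a valid
    * res_id, before the allocation below imports the resource
    */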
   vn_instance_roundtrip(dev->instance);

   /* XXX fix VkImportMemoryResourceInfoMESA to support memory planes */
   const VkImportMemoryResourceInfoMESA import_memory_resource_info = {
      .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_RESOURCE_INFO_MESA,
      .pNext = alloc_info->pNext,
      .resourceId = bo->res_id,
   };
   const VkMemoryAllocateInfo memory_allocate_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
      .pNext = &import_memory_resource_info,
      .allocationSize = alloc_info->allocationSize,
      .memoryTypeIndex = alloc_info->memoryTypeIndex,
   };
   result = vn_call_vkAllocateMemory(dev->instance, device,
                                     &memory_allocate_info, NULL, &memory);
   if (result != VK_SUCCESS) {
      vn_renderer_bo_unref(dev->renderer, bo);
      return result;
   }

   /* a successful import takes ownership of the fd; close it here to avoid
    * an fd leak
    */
   close(fd);
   mem->base_bo = bo;

   return VK_SUCCESS;
}


static VkResult
vn_device_memory_alloc_guest_vram(
   struct vn_device *dev,
   struct vn_device_memory *mem,
   const VkMemoryAllocateInfo *alloc_info,
   VkExternalMemoryHandleTypeFlags external_handles)
{
   VkDevice dev_handle = vn_device_to_handle(dev);
   VkDeviceMemory mem_handle = vn_device_memory_to_handle(mem);
   VkResult result = VK_SUCCESS;

   result = vn_renderer_bo_create_from_device_memory(
      dev->renderer, mem->size, 0, mem->flags, external_handles,
      &mem->base_bo);
   if (result != VK_SUCCESS) {
      return result;
   }

   const VkImportMemoryResourceInfoMESA import_memory_resource_info = {
      .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_RESOURCE_INFO_MESA,
      .pNext = alloc_info->pNext,
      .resourceId = mem->base_bo->res_id,
   };

   const VkMemoryAllocateInfo memory_allocate_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
      .pNext = &import_memory_resource_info,
      .allocationSize = alloc_info->allocationSize,
      .memoryTypeIndex = alloc_info->memoryTypeIndex,
   };

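   /* same reasoning as in vn_device_memory_import_dma_buf: let the renderer
    * catch up before the host-side allocation imports res_id
    */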
   vn_instance_roundtrip(dev->instance);

   result = vn_call_vkAllocateMemory(
      dev->instance, dev_handle, &memory_allocate_info, NULL, &mem_handle);
   if (result != VK_SUCCESS) {
      vn_renderer_bo_unref(dev->renderer, mem->base_bo);
      return result;
   }

   result =
      vn_instance_submit_roundtrip(dev->instance, &mem->bo_roundtrip_seqno);
   if (result != VK_SUCCESS) {
      vn_renderer_bo_unref(dev->renderer, mem->base_bo);
      vn_async_vkFreeMemory(dev->instance, dev_handle, mem_handle, NULL);
      return result;
   }

   mem->bo_roundtrip_seqno_valid = true;

   return VK_SUCCESS;
}

static VkResult
vn_device_memory_alloc_generic(
   struct vn_device *dev,
   struct vn_device_memory *mem,
   const VkMemoryAllocateInfo *alloc_info,
   VkExternalMemoryHandleTypeFlags external_handles)
{
   VkDevice dev_handle = vn_device_to_handle(dev);
   VkDeviceMemory mem_handle = vn_device_memory_to_handle(mem);
   VkResult result = VK_SUCCESS;

   result = vn_call_vkAllocateMemory(dev->instance, dev_handle, alloc_info,
                                     NULL, &mem_handle);
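   /* when no external handle types are requested, skip bo creation here;
    * vn_MapMemory creates the bo lazily if the memory ever gets mapped
    */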
   if (result != VK_SUCCESS || !external_handles)
      return result;

   result = vn_renderer_bo_create_from_device_memory(
      dev->renderer, mem->size, mem->base.id, mem->flags, external_handles,
      &mem->base_bo);
   if (result != VK_SUCCESS) {
      vn_async_vkFreeMemory(dev->instance, dev_handle, mem_handle, NULL);
      return result;
   }

   result =
      vn_instance_submit_roundtrip(dev->instance, &mem->bo_roundtrip_seqno);
   if (result != VK_SUCCESS) {
      vn_renderer_bo_unref(dev->renderer, mem->base_bo);
      vn_async_vkFreeMemory(dev->instance, dev_handle, mem_handle, NULL);
      return result;
   }

   mem->bo_roundtrip_seqno_valid = true;

   return VK_SUCCESS;
}

static VkResult
vn_device_memory_alloc(struct vn_device *dev,
                       struct vn_device_memory *mem,
                       const VkMemoryAllocateInfo *alloc_info,
                       VkExternalMemoryHandleTypeFlags external_handles)
{
   const struct vn_instance *instance = dev->physical_device->instance;
   const struct vn_renderer_info *renderer_info = &instance->renderer->info;

   if (renderer_info->has_guest_vram) {
      return vn_device_memory_alloc_guest_vram(dev, mem, alloc_info,
                                               external_handles);
   }

   return vn_device_memory_alloc_generic(dev, mem, alloc_info,
                                         external_handles);
}

VkResult
vn_AllocateMemory(VkDevice device,
                  const VkMemoryAllocateInfo *pAllocateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkDeviceMemory *pMemory)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   const VkPhysicalDeviceMemoryProperties *mem_props =
      &dev->physical_device->memory_properties.memoryProperties;
   const VkMemoryPropertyFlags mem_flags =
      mem_props->memoryTypes[pAllocateInfo->memoryTypeIndex].propertyFlags;

   const VkExportMemoryAllocateInfo *export_info = NULL;
   const VkImportAndroidHardwareBufferInfoANDROID *import_ahb_info = NULL;
   const VkImportMemoryFdInfoKHR *import_fd_info = NULL;
   bool export_ahb = false;

   vk_foreach_struct_const(pnext, pAllocateInfo->pNext) {
      switch (pnext->sType) {
      case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO:
         export_info = (void *)pnext;
         if (export_info->handleTypes &
             VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID)
            export_ahb = true;
         else if (!export_info->handleTypes)
            export_info = NULL;
         break;
      case VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID:
         import_ahb_info = (void *)pnext;
         break;
      case VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR:
         import_fd_info = (void *)pnext;
         break;
      default:
         break;
      }
   }

   struct vn_device_memory *mem =
      vk_zalloc(alloc, sizeof(*mem), VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!mem)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&mem->base, VK_OBJECT_TYPE_DEVICE_MEMORY, &dev->base);
   mem->size = pAllocateInfo->allocationSize;
   mem->flags = mem_flags;

   VkDeviceMemory mem_handle = vn_device_memory_to_handle(mem);
   VkResult result;
   if (import_ahb_info) {
      result = vn_android_device_import_ahb(dev, mem, pAllocateInfo, alloc,
                                            import_ahb_info->buffer, false);
   } else if (export_ahb) {
      result = vn_android_device_allocate_ahb(dev, mem, pAllocateInfo, alloc);
   } else if (import_fd_info) {
      result = vn_device_memory_import_dma_buf(dev, mem, pAllocateInfo, false,
                                               import_fd_info->fd);
   } else if (export_info) {
      result = vn_device_memory_alloc(dev, mem, pAllocateInfo,
                                      export_info->handleTypes);
   } else if (vn_device_memory_should_suballocate(dev, pAllocateInfo,
                                                  mem_flags)) {
      result = vn_device_memory_pool_suballocate(
         dev, mem, pAllocateInfo->memoryTypeIndex);
   } else {
      result = vn_device_memory_alloc(dev, mem, pAllocateInfo, 0);
   }
   if (result != VK_SUCCESS) {
      vn_object_base_fini(&mem->base);
      vk_free(alloc, mem);
      return vn_error(dev->instance, result);
   }

   *pMemory = mem_handle;

   return VK_SUCCESS;
}

void
vn_FreeMemory(VkDevice device,
              VkDeviceMemory memory,
              const VkAllocationCallbacks *pAllocator)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_device_memory *mem = vn_device_memory_from_handle(memory);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!mem)
      return;

   if (mem->base_memory) {
      vn_device_memory_pool_unref(dev, mem->base_memory);
   } else {
      if (mem->base_bo)
         vn_renderer_bo_unref(dev->renderer, mem->base_bo);

      /* wait on a valid bo_roundtrip_seqno before vkFreeMemory, as in
       * vn_device_memory_pool_unref
       */
      if (mem->bo_roundtrip_seqno_valid)
         vn_instance_wait_roundtrip(dev->instance, mem->bo_roundtrip_seqno);

      vn_async_vkFreeMemory(dev->instance, device, memory, NULL);
   }

   if (mem->ahb)
      vn_android_release_ahb(mem->ahb);

   vn_object_base_fini(&mem->base);
   vk_free(alloc, mem);
}

uint64_t
vn_GetDeviceMemoryOpaqueCaptureAddress(
   VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo *pInfo)
{
   struct vn_device *dev = vn_device_from_handle(device);
   ASSERTED struct vn_device_memory *mem =
      vn_device_memory_from_handle(pInfo->memory);

   assert(!mem->base_memory);
   return vn_call_vkGetDeviceMemoryOpaqueCaptureAddress(dev->instance, device,
                                                        pInfo);
}

VkResult
vn_MapMemory(VkDevice device,
             VkDeviceMemory memory,
             VkDeviceSize offset,
             VkDeviceSize size,
             VkMemoryMapFlags flags,
             void **ppData)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_device_memory *mem = vn_device_memory_from_handle(memory);
   const bool need_bo = !mem->base_bo;
   void *ptr = NULL;
   VkResult result;

   assert(mem->flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);

   /* We don't want to blindly create a bo for each HOST_VISIBLE memory as
    * that has a cost.  By deferring bo creation until now, we can avoid the
    * cost unless a bo is really needed.  However, that means
    * vn_renderer_bo_map will block until the renderer creates the resource
    * and injects the pages into the guest.
    */
   if (need_bo) {
      result = vn_renderer_bo_create_from_device_memory(
         dev->renderer, mem->size, mem->base.id, mem->flags, 0,
         &mem->base_bo);
      if (result != VK_SUCCESS)
         return vn_error(dev->instance, result);
   }

   ptr = vn_renderer_bo_map(dev->renderer, mem->base_bo);
   if (!ptr) {
      /* vn_renderer_bo_map implies a roundtrip on success.  Since the map
       * failed, submit a roundtrip for the bo we just created so that
       * vn_FreeMemory can wait for its creation before freeing the memory.
       */
      if (need_bo) {
         result = vn_instance_submit_roundtrip(dev->instance,
                                               &mem->bo_roundtrip_seqno);
         if (result != VK_SUCCESS)
            return vn_error(dev->instance, result);

         mem->bo_roundtrip_seqno_valid = true;
      }

      return vn_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);
   }

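   /* record the end of the mapped range; vn_FlushMappedMemoryRanges and
    * vn_InvalidateMappedMemoryRanges use it to resolve VK_WHOLE_SIZE
    */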
   mem->map_end = size == VK_WHOLE_SIZE ? mem->size : offset + size;

   *ppData = ptr + mem->base_offset + offset;

   return VK_SUCCESS;
}

void
vn_UnmapMemory(VkDevice device, VkDeviceMemory memory)
{
   VN_TRACE_FUNC();
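   /* intentionally a no-op: the mapping is assumed to be cached by the
    * renderer bo and torn down only when the bo itself is destroyed
    */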
}

VkResult
vn_FlushMappedMemoryRanges(VkDevice device,
                           uint32_t memoryRangeCount,
                           const VkMappedMemoryRange *pMemoryRanges)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);

   for (uint32_t i = 0; i < memoryRangeCount; i++) {
      const VkMappedMemoryRange *range = &pMemoryRanges[i];
      struct vn_device_memory *mem =
         vn_device_memory_from_handle(range->memory);

      const VkDeviceSize size = range->size == VK_WHOLE_SIZE
                                   ? mem->map_end - range->offset
                                   : range->size;
      vn_renderer_bo_flush(dev->renderer, mem->base_bo,
                           mem->base_offset + range->offset, size);
   }

   return VK_SUCCESS;
}

VkResult
vn_InvalidateMappedMemoryRanges(VkDevice device,
                                uint32_t memoryRangeCount,
                                const VkMappedMemoryRange *pMemoryRanges)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);

   for (uint32_t i = 0; i < memoryRangeCount; i++) {
      const VkMappedMemoryRange *range = &pMemoryRanges[i];
      struct vn_device_memory *mem =
         vn_device_memory_from_handle(range->memory);

      const VkDeviceSize size = range->size == VK_WHOLE_SIZE
                                   ? mem->map_end - range->offset
                                   : range->size;
      vn_renderer_bo_invalidate(dev->renderer, mem->base_bo,
                                mem->base_offset + range->offset, size);
   }

   return VK_SUCCESS;
}

void
vn_GetDeviceMemoryCommitment(VkDevice device,
                             VkDeviceMemory memory,
                             VkDeviceSize *pCommittedMemoryInBytes)
{
   struct vn_device *dev = vn_device_from_handle(device);
   ASSERTED struct vn_device_memory *mem =
      vn_device_memory_from_handle(memory);

   assert(!mem->base_memory);
   vn_call_vkGetDeviceMemoryCommitment(dev->instance, device, memory,
                                       pCommittedMemoryInBytes);
}

VkResult
vn_GetMemoryFdKHR(VkDevice device,
                  const VkMemoryGetFdInfoKHR *pGetFdInfo,
                  int *pFd)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_device_memory *mem =
      vn_device_memory_from_handle(pGetFdInfo->memory);

   /* at the moment, only the handle types below are supported */
   assert(pGetFdInfo->handleType &
          (VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
           VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT));
   assert(!mem->base_memory && mem->base_bo);
   *pFd = vn_renderer_bo_export_dma_buf(dev->renderer, mem->base_bo);
   if (*pFd < 0)
      return vn_error(dev->instance, VK_ERROR_TOO_MANY_OBJECTS);

   return VK_SUCCESS;
}

VkResult
vn_get_memory_dma_buf_properties(struct vn_device *dev,
                                 int fd,
                                 uint64_t *out_alloc_size,
                                 uint32_t *out_mem_type_bits)
{
   VkDevice device = vn_device_to_handle(dev);
   struct vn_renderer_bo *bo = NULL;
   VkResult result = VK_SUCCESS;

   result = vn_renderer_bo_create_from_dma_buf(dev->renderer, 0 /* size */,
                                               fd, 0 /* flags */, &bo);
   if (result != VK_SUCCESS)
      return result;

   vn_instance_roundtrip(dev->instance);

   VkMemoryResourceAllocationSizeProperties100000MESA alloc_size_props = {
      .sType =
         VK_STRUCTURE_TYPE_MEMORY_RESOURCE_ALLOCATION_SIZE_PROPERTIES_100000_MESA,
      .pNext = NULL,
      .allocationSize = 0,
   };
   VkMemoryResourcePropertiesMESA props = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_RESOURCE_PROPERTIES_MESA,
      .pNext =
         dev->instance->experimental.memoryResourceAllocationSize == VK_TRUE
            ? &alloc_size_props
            : NULL,
      .memoryTypeBits = 0,
   };
   result = vn_call_vkGetMemoryResourcePropertiesMESA(dev->instance, device,
                                                      bo->res_id, &props);
   vn_renderer_bo_unref(dev->renderer, bo);
   if (result != VK_SUCCESS)
      return result;

   *out_alloc_size = alloc_size_props.allocationSize;
   *out_mem_type_bits = props.memoryTypeBits;

   return VK_SUCCESS;
}

VkResult
vn_GetMemoryFdPropertiesKHR(VkDevice device,
                            VkExternalMemoryHandleTypeFlagBits handleType,
                            int fd,
                            VkMemoryFdPropertiesKHR *pMemoryFdProperties)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   uint64_t alloc_size = 0;
   uint32_t mem_type_bits = 0;
   VkResult result = VK_SUCCESS;

   if (handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)
      return vn_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);

   result =
      vn_get_memory_dma_buf_properties(dev, fd, &alloc_size, &mem_type_bits);
   if (result != VK_SUCCESS)
      return vn_error(dev->instance, result);

   pMemoryFdProperties->memoryTypeBits = mem_type_bits;

   return VK_SUCCESS;
}