/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#include "vn_device_memory.h"

#include "venus-protocol/vn_protocol_driver_device_memory.h"
#include "venus-protocol/vn_protocol_driver_transport.h"

#include "vn_android.h"
#include "vn_buffer.h"
#include "vn_device.h"
#include "vn_image.h"
#include "vn_instance.h"
#include "vn_physical_device.h"

/* device memory commands */

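/* Allocates the renderer-side memory object. Unless the NO_ASYNC_MEM_ALLOC
 * perf option is set, vkAllocateMemory is submitted asynchronously and the
 * resulting ring seqno is recorded so that later bo creation can wait on it.
 */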
static inline VkResult
vn_device_memory_alloc_simple(struct vn_device *dev,
                              struct vn_device_memory *mem,
                              const VkMemoryAllocateInfo *alloc_info)
{
   VkDevice dev_handle = vn_device_to_handle(dev);
   VkDeviceMemory mem_handle = vn_device_memory_to_handle(mem);
   if (VN_PERF(NO_ASYNC_MEM_ALLOC)) {
      return vn_call_vkAllocateMemory(dev->primary_ring, dev_handle,
                                      alloc_info, NULL, &mem_handle);
   }

   struct vn_ring_submit_command instance_submit;
   vn_submit_vkAllocateMemory(dev->primary_ring, 0, dev_handle, alloc_info,
                              NULL, &mem_handle, &instance_submit);
   if (!instance_submit.ring_seqno_valid)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   mem->bo_ring_seqno_valid = true;
   mem->bo_ring_seqno = instance_submit.ring_seqno;
   return VK_SUCCESS;
}

static inline void
vn_device_memory_free_simple(struct vn_device *dev,
                             struct vn_device_memory *mem)
{
   VkDevice dev_handle = vn_device_to_handle(dev);
   VkDeviceMemory mem_handle = vn_device_memory_to_handle(mem);
   vn_async_vkFreeMemory(dev->primary_ring, dev_handle, mem_handle, NULL);
}

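/* Ensures the async vkAllocateMemory tracked by bo_ring_seqno has been
 * processed by the renderer ring before the bo backing this memory is
 * created or destroyed.
 */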
static VkResult
vn_device_memory_wait_alloc(struct vn_device *dev,
                            struct vn_device_memory *mem)
{
   if (!mem->bo_ring_seqno_valid)
      return VK_SUCCESS;

   /* no need to wait for ring if
    * - mem alloc is done upon bo map or export
    * - mem import is done upon bo destroy
    */
   if (vn_ring_get_seqno_status(dev->primary_ring, mem->bo_ring_seqno))
      return VK_SUCCESS;

   /* fine to clear the flag here since renderer submission failure is fatal */
   mem->bo_ring_seqno_valid = false;

   const uint64_t ring_id = vn_ring_get_id(dev->primary_ring);
   uint32_t local_data[8];
   struct vn_cs_encoder local_enc =
      VN_CS_ENCODER_INITIALIZER_LOCAL(local_data, sizeof(local_data));
   vn_encode_vkWaitRingSeqnoMESA(&local_enc, 0, ring_id, mem->bo_ring_seqno);
   return vn_renderer_submit_simple(dev->renderer, local_data,
                                    vn_cs_encoder_get_len(&local_enc));
}

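/* Creates mem->base_bo, a renderer bo backing the whole VkDeviceMemory, after
 * making sure the renderer-side allocation has completed.
 */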
static inline VkResult
vn_device_memory_bo_init(struct vn_device *dev, struct vn_device_memory *mem)
{
   VkResult result = vn_device_memory_wait_alloc(dev, mem);
   if (result != VK_SUCCESS)
      return result;

   const struct vk_device_memory *mem_vk = &mem->base.base;
   const VkMemoryType *mem_type = &dev->physical_device->memory_properties
                                      .memoryTypes[mem_vk->memory_type_index];
   return vn_renderer_bo_create_from_device_memory(
      dev->renderer, mem_vk->size, mem->base.id, mem_type->propertyFlags,
      mem_vk->export_handle_types, &mem->base_bo);
}

static inline void
vn_device_memory_bo_fini(struct vn_device *dev, struct vn_device_memory *mem)
{
   if (mem->base_bo) {
      vn_device_memory_wait_alloc(dev, mem);
      vn_renderer_bo_unref(dev->renderer, mem->base_bo);
   }
}

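/* Allocates a fresh pool block of the given size and memory type, backs it
 * with a bo, and issues a roundtrip whose seqno is waited on before the
 * block is eventually freed.
 */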
static VkResult
vn_device_memory_pool_grow_alloc(struct vn_device *dev,
                                 uint32_t mem_type_index,
                                 VkDeviceSize size,
                                 struct vn_device_memory **out_mem)
{
   const VkMemoryAllocateInfo alloc_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
      .allocationSize = size,
      .memoryTypeIndex = mem_type_index,
   };
   struct vn_device_memory *mem = vk_device_memory_create(
      &dev->base.base, &alloc_info, NULL, sizeof(*mem));
   if (!mem)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   vn_object_set_id(mem, vn_get_next_obj_id(), VK_OBJECT_TYPE_DEVICE_MEMORY);

   VkResult result = vn_device_memory_alloc_simple(dev, mem, &alloc_info);
   if (result != VK_SUCCESS)
      goto obj_fini;

   result = vn_device_memory_bo_init(dev, mem);
   if (result != VK_SUCCESS)
      goto mem_free;

   result =
      vn_instance_submit_roundtrip(dev->instance, &mem->bo_roundtrip_seqno);
   if (result != VK_SUCCESS)
      goto bo_unref;

   mem->bo_roundtrip_seqno_valid = true;
   *out_mem = mem;

   return VK_SUCCESS;

bo_unref:
   vn_renderer_bo_unref(dev->renderer, mem->base_bo);
mem_free:
   vn_device_memory_free_simple(dev, mem);
obj_fini:
   vk_device_memory_destroy(&dev->base.base, NULL, &mem->base.base);
   return result;
}

static struct vn_device_memory *
vn_device_memory_pool_ref(struct vn_device *dev,
                          struct vn_device_memory *pool_mem)
{
   assert(pool_mem->base_bo);

   vn_renderer_bo_ref(dev->renderer, pool_mem->base_bo);

   return pool_mem;
}

static void
vn_device_memory_pool_unref(struct vn_device *dev,
                            struct vn_device_memory *pool_mem)
{
   assert(pool_mem->base_bo);

   if (!vn_renderer_bo_unref(dev->renderer, pool_mem->base_bo))
      return;

   /* wait on valid bo_roundtrip_seqno before vkFreeMemory */
   if (pool_mem->bo_roundtrip_seqno_valid)
      vn_instance_wait_roundtrip(dev->instance, pool_mem->bo_roundtrip_seqno);

   vn_device_memory_free_simple(dev, pool_mem);
   vk_device_memory_destroy(&dev->base.base, NULL, &pool_mem->base.base);
}

void
vn_device_memory_pool_fini(struct vn_device *dev, uint32_t mem_type_index)
{
   struct vn_device_memory_pool *pool = &dev->memory_pools[mem_type_index];
   if (pool->memory)
      vn_device_memory_pool_unref(dev, pool->memory);
   mtx_destroy(&pool->mutex);
}

static VkResult
vn_device_memory_pool_grow_locked(struct vn_device *dev,
                                  uint32_t mem_type_index,
                                  VkDeviceSize size)
{
   struct vn_device_memory *mem;
   VkResult result =
      vn_device_memory_pool_grow_alloc(dev, mem_type_index, size, &mem);
   if (result != VK_SUCCESS)
      return result;

   struct vn_device_memory_pool *pool = &dev->memory_pools[mem_type_index];
   if (pool->memory)
      vn_device_memory_pool_unref(dev, pool->memory);

   pool->memory = mem;
   pool->used = 0;

   return VK_SUCCESS;
}

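/* Carves a small allocation out of the per-memory-type pool. The pool block
 * is reference-counted through its bo: the suballocation keeps a ref on the
 * pool memory and shares its base_bo at base_offset.
 */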
static VkResult
vn_device_memory_pool_suballocate(struct vn_device *dev,
                                  struct vn_device_memory *mem)
{
   static const VkDeviceSize pool_size = 16 * 1024 * 1024;
   /* TODO fix https://gitlab.freedesktop.org/mesa/mesa/-/issues/9351
    * Until then, default to 64K alignment because some GPUs have 64K pages
    * and newer Intel GPUs require it. Keep the prior 4K alignment on
    * implementations known to work with it.
    */
   const bool is_renderer_mali = dev->physical_device->renderer_driver_id ==
                                 VK_DRIVER_ID_ARM_PROPRIETARY;
   const VkDeviceSize pool_align = is_renderer_mali ? 4096 : 64 * 1024;
   const struct vk_device_memory *mem_vk = &mem->base.base;
   struct vn_device_memory_pool *pool =
      &dev->memory_pools[mem_vk->memory_type_index];

   assert(mem_vk->size <= pool_size);

   mtx_lock(&pool->mutex);

   if (!pool->memory || pool->used + mem_vk->size > pool_size) {
      VkResult result = vn_device_memory_pool_grow_locked(
         dev, mem_vk->memory_type_index, pool_size);
      if (result != VK_SUCCESS) {
         mtx_unlock(&pool->mutex);
         return result;
      }
   }

   mem->base_memory = vn_device_memory_pool_ref(dev, pool->memory);

   /* point mem->base_bo at pool base_bo and assign base_offset accordingly */
   mem->base_bo = pool->memory->base_bo;
   mem->base_offset = pool->used;
   pool->used += align64(mem_vk->size, pool_align);

   mtx_unlock(&pool->mutex);

   return VK_SUCCESS;
}

static bool
vn_device_memory_should_suballocate(const struct vn_device *dev,
                                    const VkMemoryAllocateInfo *alloc_info)
{
   if (VN_PERF(NO_MEMORY_SUBALLOC))
      return false;

   if (dev->renderer->info.has_guest_vram)
      return false;

   /* We should not support suballocations because apps can do better. But
    * each BO currently takes up a KVM memslot and some CTS tests exhaust
    * them. This might not be needed on newer (host) kernels where there are
    * many more KVM memslots.
    */

   /* consider host-visible memory only */
   const VkMemoryType *mem_type =
      &dev->physical_device->memory_properties
          .memoryTypes[alloc_info->memoryTypeIndex];
   if (!(mem_type->propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))
      return false;

   /* reject larger allocations */
   if (alloc_info->allocationSize > 64 * 1024)
      return false;

   /* reject if there is any pnext struct other than
    * VkMemoryDedicatedAllocateInfo, or if dedicated allocation is required
    */
   if (alloc_info->pNext) {
      const VkMemoryDedicatedAllocateInfo *dedicated = alloc_info->pNext;
      if (dedicated->sType !=
             VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO ||
          dedicated->pNext)
         return false;

      const struct vn_image *img = vn_image_from_handle(dedicated->image);
      if (img) {
         for (uint32_t i = 0; i < ARRAY_SIZE(img->requirements); i++) {
            if (img->requirements[i].dedicated.requiresDedicatedAllocation)
               return false;
         }
      }

      const struct vn_buffer *buf = vn_buffer_from_handle(dedicated->buffer);
      if (buf && buf->requirements.dedicated.requiresDedicatedAllocation)
         return false;
   }

   return true;
}

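/* Imports a dma-buf fd as VkDeviceMemory: the fd is first turned into a
 * renderer bo, then the renderer-side allocation imports that resource via
 * VkImportMemoryResourceInfoMESA.
 */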
VkResult
vn_device_memory_import_dma_buf(struct vn_device *dev,
                                struct vn_device_memory *mem,
                                const VkMemoryAllocateInfo *alloc_info,
                                bool force_unmappable,
                                int fd)
{
   const VkMemoryType *mem_type =
      &dev->physical_device->memory_properties
          .memoryTypes[alloc_info->memoryTypeIndex];

   struct vn_renderer_bo *bo;
   VkResult result = vn_renderer_bo_create_from_dma_buf(
      dev->renderer, alloc_info->allocationSize, fd,
      force_unmappable ? 0 : mem_type->propertyFlags, &bo);
   if (result != VK_SUCCESS)
      return result;

   vn_instance_roundtrip(dev->instance);

   const VkImportMemoryResourceInfoMESA import_memory_resource_info = {
      .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_RESOURCE_INFO_MESA,
      .pNext = alloc_info->pNext,
      .resourceId = bo->res_id,
   };
   const VkMemoryAllocateInfo memory_allocate_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
      .pNext = &import_memory_resource_info,
      .allocationSize = alloc_info->allocationSize,
      .memoryTypeIndex = alloc_info->memoryTypeIndex,
   };
   result = vn_device_memory_alloc_simple(dev, mem, &memory_allocate_info);
   if (result != VK_SUCCESS) {
      vn_renderer_bo_unref(dev->renderer, bo);
      return result;
   }

   /* need to close import fd on success to avoid fd leak */
   close(fd);
   mem->base_bo = bo;

   return VK_SUCCESS;
}

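/* For guest vram, the bo is created first and the renderer-side allocation
 * then imports it as a resource, rather than exporting after allocation.
 */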
static VkResult
vn_device_memory_alloc_guest_vram(struct vn_device *dev,
                                  struct vn_device_memory *mem,
                                  const VkMemoryAllocateInfo *alloc_info)
{
   const struct vk_device_memory *mem_vk = &mem->base.base;
   const VkMemoryType *mem_type = &dev->physical_device->memory_properties
                                      .memoryTypes[mem_vk->memory_type_index];
   VkMemoryPropertyFlags flags = mem_type->propertyFlags;

   /* External allocation handles may be requested for non-mappable memory.
    * VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT must still be set so that the
    * virtio-gpu driver sends the address of the allocated blob to the host
    * with the RESOURCE_MAP_BLOB command.
    */
   if (mem_vk->export_handle_types)
      flags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;

   VkResult result = vn_renderer_bo_create_from_device_memory(
      dev->renderer, mem_vk->size, mem->base.id, flags,
      mem_vk->export_handle_types, &mem->base_bo);
   if (result != VK_SUCCESS) {
      return result;
   }

   const VkImportMemoryResourceInfoMESA import_memory_resource_info = {
      .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_RESOURCE_INFO_MESA,
      .pNext = alloc_info->pNext,
      .resourceId = mem->base_bo->res_id,
   };

   const VkMemoryAllocateInfo memory_allocate_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
      .pNext = &import_memory_resource_info,
      .allocationSize = alloc_info->allocationSize,
      .memoryTypeIndex = alloc_info->memoryTypeIndex,
   };

   vn_instance_roundtrip(dev->instance);

   result = vn_device_memory_alloc_simple(dev, mem, &memory_allocate_info);
   if (result != VK_SUCCESS) {
      vn_renderer_bo_unref(dev->renderer, mem->base_bo);
      return result;
   }

   return VK_SUCCESS;
}

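/* Exportable allocation without guest vram: allocate renderer memory, create
 * a bo from it for export, and issue a roundtrip so the eventual free can be
 * ordered against the bo creation on the renderer side.
 */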
static VkResult
vn_device_memory_alloc_export(struct vn_device *dev,
                              struct vn_device_memory *mem,
                              const VkMemoryAllocateInfo *alloc_info)
{
   VkResult result = vn_device_memory_alloc_simple(dev, mem, alloc_info);
   if (result != VK_SUCCESS)
      return result;

   result = vn_device_memory_bo_init(dev, mem);
   if (result != VK_SUCCESS) {
      vn_device_memory_free_simple(dev, mem);
      return result;
   }

   result =
      vn_instance_submit_roundtrip(dev->instance, &mem->bo_roundtrip_seqno);
   if (result != VK_SUCCESS) {
      vn_renderer_bo_unref(dev->renderer, mem->base_bo);
      vn_device_memory_free_simple(dev, mem);
      return result;
   }

   mem->bo_roundtrip_seqno_valid = true;

   return VK_SUCCESS;
}

struct vn_device_memory_alloc_info {
   VkMemoryAllocateInfo alloc;
   VkExportMemoryAllocateInfo export;
   VkMemoryAllocateFlagsInfo flags;
   VkMemoryDedicatedAllocateInfo dedicated;
   VkMemoryOpaqueCaptureAddressAllocateInfo capture;
};

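/* Copies alloc_info and the recognized pNext structs into local_info, chains
 * them back together, and rewrites the export handle types to the renderer's
 * handle type (or drops the export info entirely for guest vram).
 */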
static const VkMemoryAllocateInfo *
vn_device_memory_fix_alloc_info(
   const VkMemoryAllocateInfo *alloc_info,
   const VkExternalMemoryHandleTypeFlagBits renderer_handle_type,
   bool has_guest_vram,
   struct vn_device_memory_alloc_info *local_info)
{
   local_info->alloc = *alloc_info;
   VkBaseOutStructure *cur = (void *)&local_info->alloc;

   vk_foreach_struct_const(src, alloc_info->pNext) {
      void *next = NULL;
      switch (src->sType) {
      case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO:
         /* guest vram turns export alloc into import, so drop export info */
         if (has_guest_vram)
            break;
         memcpy(&local_info->export, src, sizeof(local_info->export));
         local_info->export.handleTypes = renderer_handle_type;
         next = &local_info->export;
         break;
      case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO:
         memcpy(&local_info->flags, src, sizeof(local_info->flags));
         next = &local_info->flags;
         break;
      case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO:
         memcpy(&local_info->dedicated, src, sizeof(local_info->dedicated));
         next = &local_info->dedicated;
         break;
      case VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO:
         memcpy(&local_info->capture, src, sizeof(local_info->capture));
         next = &local_info->capture;
         break;
      default:
         break;
      }

      if (next) {
         cur->pNext = next;
         cur = next;
      }
   }

   cur->pNext = NULL;

   return &local_info->alloc;
}

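/* Allocation entry point for non-imported memory: fixes up export handle
 * types for the renderer if needed, then picks the guest vram, export, or
 * plain allocation path.
 */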
static VkResult
vn_device_memory_alloc(struct vn_device *dev,
                       struct vn_device_memory *mem,
                       const VkMemoryAllocateInfo *alloc_info)
{
   struct vk_device_memory *mem_vk = &mem->base.base;
   const VkMemoryType *mem_type = &dev->physical_device->memory_properties
                                     .memoryTypes[mem_vk->memory_type_index];

   const bool has_guest_vram = dev->renderer->info.has_guest_vram;
   const bool host_visible =
      mem_type->propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
   const bool export_alloc = mem_vk->export_handle_types;

   const VkExternalMemoryHandleTypeFlagBits renderer_handle_type =
      dev->physical_device->external_memory.renderer_handle_type;
   struct vn_device_memory_alloc_info local_info;
   if (mem_vk->export_handle_types &&
       mem_vk->export_handle_types != renderer_handle_type) {
      alloc_info = vn_device_memory_fix_alloc_info(
         alloc_info, renderer_handle_type, has_guest_vram, &local_info);

      /* ensure correct blob flags */
      mem_vk->export_handle_types = renderer_handle_type;
   }

   if (has_guest_vram && (host_visible || export_alloc)) {
      return vn_device_memory_alloc_guest_vram(dev, mem, alloc_info);
   } else if (export_alloc) {
      return vn_device_memory_alloc_export(dev, mem, alloc_info);
   } else {
      return vn_device_memory_alloc_simple(dev, mem, alloc_info);
   }
}

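/* Emits a VK_EXT_device_memory_report event for this allocation or free when
 * the app has registered memory report callbacks.
 */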
static void
vn_device_memory_emit_report(struct vn_device *dev,
                             struct vn_device_memory *mem,
                             bool is_alloc,
                             VkResult result)
{
   if (likely(!dev->memory_reports))
      return;

   const struct vk_device_memory *mem_vk = &mem->base.base;
   VkDeviceMemoryReportEventTypeEXT type;
   if (result != VK_SUCCESS) {
      type = VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT;
   } else if (is_alloc) {
      type = mem_vk->import_handle_type
                ? VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT
                : VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT;
   } else {
      type = mem_vk->import_handle_type
                ? VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_UNIMPORT_EXT
                : VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT;
   }
   const uint64_t mem_obj_id =
      (mem_vk->import_handle_type | mem_vk->export_handle_types)
         ? mem->base_bo->res_id
         : mem->base.id;
   const VkMemoryType *mem_type = &dev->physical_device->memory_properties
                                     .memoryTypes[mem_vk->memory_type_index];
   vn_device_emit_device_memory_report(dev, type, mem_obj_id, mem_vk->size,
                                       VK_OBJECT_TYPE_DEVICE_MEMORY,
                                       (uintptr_t)mem, mem_type->heapIndex);
}

VkResult
vn_AllocateMemory(VkDevice device,
                  const VkMemoryAllocateInfo *pAllocateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkDeviceMemory *pMemory)
{
   struct vn_device *dev = vn_device_from_handle(device);

   /* see vn_physical_device_init_memory_properties */
   VkMemoryAllocateInfo local_info;
   if (pAllocateInfo->memoryTypeIndex ==
       dev->physical_device->incoherent_cached) {
      local_info = *pAllocateInfo;
      local_info.memoryTypeIndex = dev->physical_device->coherent_uncached;
      pAllocateInfo = &local_info;
   }

   const VkImportMemoryFdInfoKHR *import_fd_info = NULL;
   const VkMemoryDedicatedAllocateInfo *dedicated_info = NULL;
   vk_foreach_struct_const(pnext, pAllocateInfo->pNext) {
      switch (pnext->sType) {
      case VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR:
         import_fd_info = (const void *)pnext;
         break;
      case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO:
         dedicated_info = (const void *)pnext;
         break;
      default:
         break;
      }
   }

   struct vn_device_memory *mem = vk_device_memory_create(
      &dev->base.base, pAllocateInfo, pAllocator, sizeof(*mem));
   if (!mem)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_set_id(mem, vn_get_next_obj_id(), VK_OBJECT_TYPE_DEVICE_MEMORY);

   VkResult result;
   if (mem->base.base.ahardware_buffer) {
      result = vn_android_device_import_ahb(dev, mem, dedicated_info);
   } else if (import_fd_info) {
      result = vn_device_memory_import_dma_buf(dev, mem, pAllocateInfo, false,
                                               import_fd_info->fd);
   } else if (vn_device_memory_should_suballocate(dev, pAllocateInfo)) {
      result = vn_device_memory_pool_suballocate(dev, mem);
   } else {
      result = vn_device_memory_alloc(dev, mem, pAllocateInfo);
   }

   vn_device_memory_emit_report(dev, mem, /* is_alloc */ true, result);

   if (result != VK_SUCCESS) {
      vk_device_memory_destroy(&dev->base.base, pAllocator, &mem->base.base);
      return vn_error(dev->instance, result);
   }

   *pMemory = vn_device_memory_to_handle(mem);

   return VK_SUCCESS;
}

void
vn_FreeMemory(VkDevice device,
              VkDeviceMemory memory,
              const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_device_memory *mem = vn_device_memory_from_handle(memory);
   if (!mem)
      return;

   vn_device_memory_emit_report(dev, mem, /* is_alloc */ false, VK_SUCCESS);

   if (mem->base_memory) {
      vn_device_memory_pool_unref(dev, mem->base_memory);
   } else {
      /* ensure renderer side import still sees the resource */
      vn_device_memory_bo_fini(dev, mem);

      if (mem->bo_roundtrip_seqno_valid)
         vn_instance_wait_roundtrip(dev->instance, mem->bo_roundtrip_seqno);

      vn_device_memory_free_simple(dev, mem);
   }

   vk_device_memory_destroy(&dev->base.base, pAllocator, &mem->base.base);
}

uint64_t
vn_GetDeviceMemoryOpaqueCaptureAddress(
   VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo *pInfo)
{
   struct vn_device *dev = vn_device_from_handle(device);
   ASSERTED struct vn_device_memory *mem =
      vn_device_memory_from_handle(pInfo->memory);

   assert(!mem->base_memory);
   return vn_call_vkGetDeviceMemoryOpaqueCaptureAddress(dev->primary_ring,
                                                        device, pInfo);
}

VkResult
vn_MapMemory(VkDevice device,
             VkDeviceMemory memory,
             VkDeviceSize offset,
             VkDeviceSize size,
             VkMemoryMapFlags flags,
             void **ppData)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_device_memory *mem = vn_device_memory_from_handle(memory);
   const struct vk_device_memory *mem_vk = &mem->base.base;
   const bool need_bo = !mem->base_bo;
   void *ptr = NULL;
   VkResult result;

   /* We don't want to blindly create a bo for each HOST_VISIBLE memory as
    * that has a cost. By deferring bo creation until now, we can avoid the
    * cost unless a bo is really needed. However, that means
    * vn_renderer_bo_map will block until the renderer creates the resource
    * and injects the pages into the guest.
    *
    * XXX We also assume that a vn_renderer_bo can be created as long as the
    * renderer VkDeviceMemory has a mappable memory type. That is plain
    * wrong. It is impossible to fix though until a new extension is created
    * and supported by the driver, and the renderer switches to that
    * extension.
    */
   if (need_bo) {
      result = vn_device_memory_bo_init(dev, mem);
      if (result != VK_SUCCESS)
         return vn_error(dev->instance, result);
   }

   ptr = vn_renderer_bo_map(dev->renderer, mem->base_bo);
   if (!ptr) {
      /* vn_renderer_bo_map implies a roundtrip on success, but not here. */
      if (need_bo) {
         result = vn_instance_submit_roundtrip(dev->instance,
                                               &mem->bo_roundtrip_seqno);
         if (result != VK_SUCCESS)
            return vn_error(dev->instance, result);

         mem->bo_roundtrip_seqno_valid = true;
      }

      return vn_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);
   }

   mem->map_end = size == VK_WHOLE_SIZE ? mem_vk->size : offset + size;

   *ppData = ptr + mem->base_offset + offset;

   return VK_SUCCESS;
}

void
vn_UnmapMemory(VkDevice device, VkDeviceMemory memory)
{
}

VkResult
vn_FlushMappedMemoryRanges(VkDevice device,
                           uint32_t memoryRangeCount,
                           const VkMappedMemoryRange *pMemoryRanges)
{
   struct vn_device *dev = vn_device_from_handle(device);

   for (uint32_t i = 0; i < memoryRangeCount; i++) {
      const VkMappedMemoryRange *range = &pMemoryRanges[i];
      struct vn_device_memory *mem =
         vn_device_memory_from_handle(range->memory);

      const VkDeviceSize size = range->size == VK_WHOLE_SIZE
                                   ? mem->map_end - range->offset
                                   : range->size;
      vn_renderer_bo_flush(dev->renderer, mem->base_bo,
                           mem->base_offset + range->offset, size);
   }

   return VK_SUCCESS;
}

VkResult
vn_InvalidateMappedMemoryRanges(VkDevice device,
                                uint32_t memoryRangeCount,
                                const VkMappedMemoryRange *pMemoryRanges)
{
   struct vn_device *dev = vn_device_from_handle(device);

   for (uint32_t i = 0; i < memoryRangeCount; i++) {
      const VkMappedMemoryRange *range = &pMemoryRanges[i];
      struct vn_device_memory *mem =
         vn_device_memory_from_handle(range->memory);

      const VkDeviceSize size = range->size == VK_WHOLE_SIZE
                                   ? mem->map_end - range->offset
                                   : range->size;
      vn_renderer_bo_invalidate(dev->renderer, mem->base_bo,
                                mem->base_offset + range->offset, size);
   }

   return VK_SUCCESS;
}

void
vn_GetDeviceMemoryCommitment(VkDevice device,
                             VkDeviceMemory memory,
                             VkDeviceSize *pCommittedMemoryInBytes)
{
   struct vn_device *dev = vn_device_from_handle(device);
   ASSERTED struct vn_device_memory *mem =
      vn_device_memory_from_handle(memory);

   assert(!mem->base_memory);
   vn_call_vkGetDeviceMemoryCommitment(dev->primary_ring, device, memory,
                                       pCommittedMemoryInBytes);
}

VkResult
vn_GetMemoryFdKHR(VkDevice device,
                  const VkMemoryGetFdInfoKHR *pGetFdInfo,
                  int *pFd)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_device_memory *mem =
      vn_device_memory_from_handle(pGetFdInfo->memory);

   /* At the moment, we support only the below handle types. */
   assert(pGetFdInfo->handleType &
          (VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
           VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT));
   assert(!mem->base_memory && mem->base_bo);
   *pFd = vn_renderer_bo_export_dma_buf(dev->renderer, mem->base_bo);
   if (*pFd < 0)
      return vn_error(dev->instance, VK_ERROR_TOO_MANY_OBJECTS);

   return VK_SUCCESS;
}

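/* Looks up the renderer-side allocation size and memory type bits for a
 * dma-buf by importing it into a temporary bo and querying the resource with
 * vkGetMemoryResourcePropertiesMESA.
 */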
VkResult
vn_get_memory_dma_buf_properties(struct vn_device *dev,
                                 int fd,
                                 uint64_t *out_alloc_size,
                                 uint32_t *out_mem_type_bits)
{
   VkDevice device = vn_device_to_handle(dev);

   struct vn_renderer_bo *bo;
   VkResult result = vn_renderer_bo_create_from_dma_buf(
      dev->renderer, 0 /* size */, fd, 0 /* flags */, &bo);
   if (result != VK_SUCCESS)
      return result;

   vn_instance_roundtrip(dev->instance);

   VkMemoryResourceAllocationSizePropertiesMESA alloc_size_props = {
      .sType =
         VK_STRUCTURE_TYPE_MEMORY_RESOURCE_ALLOCATION_SIZE_PROPERTIES_MESA,
   };
   VkMemoryResourcePropertiesMESA props = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_RESOURCE_PROPERTIES_MESA,
      .pNext = &alloc_size_props,
   };
   result = vn_call_vkGetMemoryResourcePropertiesMESA(
      dev->primary_ring, device, bo->res_id, &props);
   vn_renderer_bo_unref(dev->renderer, bo);
   if (result != VK_SUCCESS)
      return result;

   *out_alloc_size = alloc_size_props.allocationSize;
   *out_mem_type_bits = props.memoryTypeBits;

   return VK_SUCCESS;
}

VkResult
vn_GetMemoryFdPropertiesKHR(VkDevice device,
                            VkExternalMemoryHandleTypeFlagBits handleType,
                            int fd,
                            VkMemoryFdPropertiesKHR *pMemoryFdProperties)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   uint64_t alloc_size = 0;
   uint32_t mem_type_bits = 0;
   VkResult result = VK_SUCCESS;

   if (handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)
      return vn_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);

   result =
      vn_get_memory_dma_buf_properties(dev, fd, &alloc_size, &mem_type_bits);
   if (result != VK_SUCCESS)
      return vn_error(dev->instance, result);

   pMemoryFdProperties->memoryTypeBits = mem_type_bits;

   return VK_SUCCESS;
}