/*
 * Copyright 2024 Valve Corporation
 * Copyright 2024 Alyssa Rosenzweig
 * Copyright 2022-2023 Collabora Ltd. and Red Hat Inc.
 * SPDX-License-Identifier: MIT
 */
#include "hk_device_memory.h"

#include "hk_device.h"
#include "hk_entrypoints.h"
#include "hk_image.h"
#include "hk_physical_device.h"

#include "asahi/lib/agx_bo.h"
#include "util/u_atomic.h"

#include <inttypes.h>
#include <sys/mman.h>

/* Supports opaque fd only */
const VkExternalMemoryProperties hk_opaque_fd_mem_props = {
   .externalMemoryFeatures = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT |
                             VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT,
   .exportFromImportedHandleTypes =
      VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT,
   .compatibleHandleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT,
};

/* Supports opaque fd and dma_buf. */
const VkExternalMemoryProperties hk_dma_buf_mem_props = {
   .externalMemoryFeatures = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT |
                             VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT,
   .exportFromImportedHandleTypes =
      VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
      VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
   .compatibleHandleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
                            VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
};

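/*
 * Translate a memory type plus requested external handle types into kernel
 * BO flags. The memory type itself does not influence the flags yet; any
 * external handle type forces the BO to be shared and shareable.
 */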
static enum agx_bo_flags
hk_memory_type_flags(const VkMemoryType *type,
                     VkExternalMemoryHandleTypeFlagBits handle_types)
{
   unsigned flags = 0;

   if (handle_types)
      flags |= AGX_BO_SHARED | AGX_BO_SHAREABLE;

   return flags;
}

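/*
 * Track an external BO in the device-wide list forwarded with virtio
 * submissions. Entries are reference counted, so re-adding an already
 * tracked resource ID just bumps its count.
 */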
static void
hk_add_ext_bo_locked(struct hk_device *dev, struct agx_bo *bo)
{
   uint32_t id = bo->vbo_res_id;

   unsigned count = util_dynarray_num_elements(&dev->external_bos.list,
                                               struct asahi_ccmd_submit_res);

   for (unsigned i = 0; i < count; i++) {
      struct asahi_ccmd_submit_res *p = util_dynarray_element(
         &dev->external_bos.list, struct asahi_ccmd_submit_res, i);

      if (p->res_id == id) {
         ++*util_dynarray_element(&dev->external_bos.counts, unsigned, i);
         return;
      }
   }

   struct asahi_ccmd_submit_res res = {
      .res_id = id,
      .flags = ASAHI_EXTRES_READ | ASAHI_EXTRES_WRITE,
   };
   util_dynarray_append(&dev->external_bos.list, struct asahi_ccmd_submit_res,
                        res);
   util_dynarray_append(&dev->external_bos.counts, unsigned, 1);
}

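/*
 * Locking wrapper: external BO tracking is only needed on virtio; on native
 * DRM devices this is a no-op.
 */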
static void
hk_add_ext_bo(struct hk_device *dev, struct agx_bo *bo)
{
   if (dev->dev.is_virtio) {
      u_rwlock_wrlock(&dev->external_bos.lock);
      hk_add_ext_bo_locked(dev, bo);
      u_rwlock_wrunlock(&dev->external_bos.lock);
   }
}

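/*
 * Drop one reference on a tracked external BO, swap-removing the entry
 * (overwriting it with the popped tail element) once its count hits zero.
 */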
static void
hk_remove_ext_bo_locked(struct hk_device *dev, struct agx_bo *bo)
{
   uint32_t id = bo->vbo_res_id;
   unsigned count = util_dynarray_num_elements(&dev->external_bos.list,
                                               struct asahi_ccmd_submit_res);

   for (unsigned i = 0; i < count; i++) {
      struct asahi_ccmd_submit_res *p = util_dynarray_element(
         &dev->external_bos.list, struct asahi_ccmd_submit_res, i);

      if (p->res_id == id) {
         unsigned *ctr =
            util_dynarray_element(&dev->external_bos.counts, unsigned, i);
         if (!--*ctr) {
            *ctr = util_dynarray_pop(&dev->external_bos.counts, unsigned);
            *p = util_dynarray_pop(&dev->external_bos.list,
                                   struct asahi_ccmd_submit_res);
         }
         return;
      }
   }

   unreachable("BO not found");
}

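/* Locking wrapper; like hk_add_ext_bo, this is a no-op outside virtio. */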
static void
hk_remove_ext_bo(struct hk_device *dev, struct agx_bo *bo)
{
   if (dev->dev.is_virtio) {
      u_rwlock_wrlock(&dev->external_bos.lock);
      hk_remove_ext_bo_locked(dev, bo);
      u_rwlock_wrunlock(&dev->external_bos.lock);
   }
}

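/*
 * Import the fd just long enough to read the BO's flags, then report every
 * memory type whose required flags the BO already satisfies.
 */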
VKAPI_ATTR VkResult VKAPI_CALL
hk_GetMemoryFdPropertiesKHR(VkDevice device,
                            VkExternalMemoryHandleTypeFlagBits handleType,
                            int fd,
                            VkMemoryFdPropertiesKHR *pMemoryFdProperties)
{
   VK_FROM_HANDLE(hk_device, dev, device);
   struct hk_physical_device *pdev = hk_device_physical(dev);
   struct agx_bo *bo;

   switch (handleType) {
   case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT:
   case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
      bo = agx_bo_import(&dev->dev, fd);
      if (bo == NULL)
         return vk_error(dev, VK_ERROR_INVALID_EXTERNAL_HANDLE);
      break;
   default:
      return vk_error(dev, VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   uint32_t type_bits = 0;
   for (unsigned t = 0; t < ARRAY_SIZE(pdev->mem_types); t++) {
      const unsigned flags =
         hk_memory_type_flags(&pdev->mem_types[t], handleType);
      if (!(flags & ~bo->flags))
         type_bits |= (1 << t);
   }

   pMemoryFdProperties->memoryTypeBits = type_bits;

   agx_bo_unreference(&dev->dev, bo);

   return VK_SUCCESS;
}

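/*
 * Allocate device memory, either by importing an opaque fd / dma-buf or by
 * creating a fresh BO. Sizes are aligned to the 16 KiB Apple page size,
 * heap usage is tracked atomically, and shareable BOs are registered with
 * the external-BO list for virtio submission.
 */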
VKAPI_ATTR VkResult VKAPI_CALL
hk_AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
                  const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMem)
{
   VK_FROM_HANDLE(hk_device, dev, device);
   struct hk_physical_device *pdev = hk_device_physical(dev);
   struct hk_device_memory *mem;
   VkResult result = VK_SUCCESS;

   const VkImportMemoryFdInfoKHR *fd_info =
      vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
   const VkExportMemoryAllocateInfo *export_info =
      vk_find_struct_const(pAllocateInfo->pNext, EXPORT_MEMORY_ALLOCATE_INFO);
   const VkMemoryType *type = &pdev->mem_types[pAllocateInfo->memoryTypeIndex];

   VkExternalMemoryHandleTypeFlagBits handle_types = 0;
   if (export_info != NULL)
      handle_types |= export_info->handleTypes;
   if (fd_info != NULL)
      handle_types |= fd_info->handleType;

   const unsigned flags = hk_memory_type_flags(type, handle_types);

   uint32_t alignment = 16384; /* Apple page size */

   struct hk_memory_heap *heap = &pdev->mem_heaps[type->heapIndex];
   if (p_atomic_read(&heap->used) > heap->size)
      return vk_error(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   const uint64_t aligned_size =
      align64(pAllocateInfo->allocationSize, alignment);

   mem = vk_device_memory_create(&dev->vk, pAllocateInfo, pAllocator,
                                 sizeof(*mem));
   if (!mem)
      return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);

   mem->map = NULL;
   if (fd_info && fd_info->handleType) {
      assert(
         fd_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
         fd_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);

      mem->bo = agx_bo_import(&dev->dev, fd_info->fd);
      if (mem->bo == NULL) {
         result = vk_error(dev, VK_ERROR_INVALID_EXTERNAL_HANDLE);
         goto fail_alloc;
      }
      assert(!(flags & ~mem->bo->flags));
   } else {
      enum agx_bo_flags flags = 0;
      if (handle_types)
         flags |= AGX_BO_SHAREABLE;

      mem->bo = agx_bo_create(&dev->dev, aligned_size, 0, flags, "App memory");
      if (!mem->bo) {
         result = vk_error(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY);
         goto fail_alloc;
      }
   }

   if (mem->bo->flags & (AGX_BO_SHAREABLE | AGX_BO_SHARED))
      hk_add_ext_bo(dev, mem->bo);

   if (fd_info && fd_info->handleType) {
      /* From the Vulkan spec:
       *
       *    "Importing memory from a file descriptor transfers ownership of
       *    the file descriptor from the application to the Vulkan
       *    implementation. The application must not perform any operations on
       *    the file descriptor after a successful import."
       *
       * If the import fails, we leave the file descriptor open.
       */
      close(fd_info->fd);
   }

   uint64_t heap_used = p_atomic_add_return(&heap->used, mem->bo->size);
   if (heap_used > heap->size) {
      hk_FreeMemory(device, hk_device_memory_to_handle(mem), pAllocator);
      return vk_errorf(device, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                       "Out of heap memory");
   }

   *pMem = hk_device_memory_to_handle(mem);

   return VK_SUCCESS;

fail_alloc:
   vk_device_memory_destroy(&dev->vk, pAllocator, &mem->vk);
   return result;
}

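/*
 * Tear-down mirrors hk_AllocateMemory: undo the heap accounting, drop any
 * external-BO tracking entry, and unreference the BO.
 */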
VKAPI_ATTR void VKAPI_CALL
hk_FreeMemory(VkDevice device, VkDeviceMemory _mem,
              const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(hk_device, dev, device);
   VK_FROM_HANDLE(hk_device_memory, mem, _mem);
   struct hk_physical_device *pdev = hk_device_physical(dev);

   if (!mem)
      return;

   const VkMemoryType *type = &pdev->mem_types[mem->vk.memory_type_index];
   struct hk_memory_heap *heap = &pdev->mem_heaps[type->heapIndex];
   p_atomic_add(&heap->used, -((int64_t)mem->bo->size));

   if (mem->bo->flags & (AGX_BO_SHAREABLE | AGX_BO_SHARED))
      hk_remove_ext_bo(dev, mem->bo);

   agx_bo_unreference(&dev->dev, mem->bo);

   vk_device_memory_destroy(&dev->vk, pAllocator, &mem->vk);
}

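/*
 * Map the whole BO and return a pointer at the requested offset. Note that
 * for placed maps (VK_EXT_map_memory_placed), the requested fixed address
 * is parsed but not currently passed through to agx_bo_map.
 */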
VKAPI_ATTR VkResult VKAPI_CALL
hk_MapMemory2KHR(VkDevice device, const VkMemoryMapInfoKHR *pMemoryMapInfo,
                 void **ppData)
{
   VK_FROM_HANDLE(hk_device, dev, device);
   VK_FROM_HANDLE(hk_device_memory, mem, pMemoryMapInfo->memory);

   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   const VkDeviceSize offset = pMemoryMapInfo->offset;
   const VkDeviceSize size = vk_device_memory_range(
      &mem->vk, pMemoryMapInfo->offset, pMemoryMapInfo->size);

   UNUSED void *fixed_addr = NULL;
   if (pMemoryMapInfo->flags & VK_MEMORY_MAP_PLACED_BIT_EXT) {
      const VkMemoryMapPlacedInfoEXT *placed_info = vk_find_struct_const(
         pMemoryMapInfo->pNext, MEMORY_MAP_PLACED_INFO_EXT);
      fixed_addr = placed_info->pPlacedAddress;
   }

   /* From the Vulkan spec version 1.0.32 docs for MapMemory:
    *
    *  * If size is not equal to VK_WHOLE_SIZE, size must be greater than 0
    *  * If size is not equal to VK_WHOLE_SIZE, size must be less than or
    *    equal to the size of the memory minus offset
    */
   assert(size > 0);
   assert(offset + size <= mem->bo->size);

   if (size != (size_t)size) {
      return vk_errorf(dev, VK_ERROR_MEMORY_MAP_FAILED,
                       "requested size 0x%" PRIx64 " does not fit in %u bits",
                       size, (unsigned)(sizeof(size_t) * 8));
   }

   /* From the Vulkan 1.2.194 spec:
    *
    *    "memory must not be currently host mapped"
    */
   if (mem->map != NULL) {
      return vk_errorf(dev, VK_ERROR_MEMORY_MAP_FAILED,
                       "Memory object already mapped.");
   }

   mem->map = agx_bo_map(mem->bo);
   *ppData = mem->map + offset;

   return VK_SUCCESS;
}

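/*
 * Unmapping is currently only bookkeeping: the reserve path of
 * VK_EXT_map_memory_placed is not implemented yet, and the normal path just
 * clears the cached CPU pointer.
 */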
VKAPI_ATTR VkResult VKAPI_CALL
hk_UnmapMemory2KHR(VkDevice device,
                   const VkMemoryUnmapInfoKHR *pMemoryUnmapInfo)
{
   VK_FROM_HANDLE(hk_device_memory, mem, pMemoryUnmapInfo->memory);

   if (mem == NULL)
      return VK_SUCCESS;

   if (pMemoryUnmapInfo->flags & VK_MEMORY_UNMAP_RESERVE_BIT_EXT) {
      unreachable("todo");
#if 0
      VK_FROM_HANDLE(hk_device, dev, device);

      int err = agx_bo_overmap(mem->bo, mem->map);
      if (err) {
         return vk_errorf(dev, VK_ERROR_MEMORY_MAP_FAILED,
                          "Failed to map over original mapping");
      }
#endif
   } else {
      /* TODO: agx_bo_unmap(mem->bo, mem->map); */
   }

   mem->map = NULL;

   return VK_SUCCESS;
}

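/*
 * Mapped memory appears to be host-coherent on this hardware, so flushing
 * mapped ranges requires no work.
 */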
VKAPI_ATTR VkResult VKAPI_CALL
hk_FlushMappedMemoryRanges(VkDevice device, uint32_t memoryRangeCount,
                           const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

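/* Likewise, nothing to do to invalidate host-coherent mappings. */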
VKAPI_ATTR VkResult VKAPI_CALL
hk_InvalidateMappedMemoryRanges(VkDevice device, uint32_t memoryRangeCount,
                                const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

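/* Memory is fully committed at allocation time; report the BO size. */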
VKAPI_ATTR void VKAPI_CALL
hk_GetDeviceMemoryCommitment(VkDevice device, VkDeviceMemory _mem,
                             VkDeviceSize *pCommittedMemoryInBytes)
{
   VK_FROM_HANDLE(hk_device_memory, mem, _mem);

   *pCommittedMemoryInBytes = mem->bo->size;
}

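/* Export the BO backing an allocation as an opaque fd or dma-buf fd. */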
VKAPI_ATTR VkResult VKAPI_CALL
hk_GetMemoryFdKHR(VkDevice device, const VkMemoryGetFdInfoKHR *pGetFdInfo,
                  int *pFD)
{
   VK_FROM_HANDLE(hk_device, dev, device);
   VK_FROM_HANDLE(hk_device_memory, memory, pGetFdInfo->memory);

   switch (pGetFdInfo->handleType) {
   case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT:
   case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
      *pFD = agx_bo_export(&dev->dev, memory->bo);
      return VK_SUCCESS;
   default:
      assert(!"unsupported handle type");
      return vk_error(dev, VK_ERROR_FEATURE_NOT_PRESENT);
   }
}

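/*
 * The BO's fixed GPU virtual address doubles as the opaque capture address
 * for capture/replay.
 */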
VKAPI_ATTR uint64_t VKAPI_CALL
hk_GetDeviceMemoryOpaqueCaptureAddress(
   UNUSED VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo *pInfo)
{
   VK_FROM_HANDLE(hk_device_memory, mem, pInfo->memory);

   return mem->bo->va->addr;
}