/*
 * Copyright © 2022 Collabora Ltd. and Red Hat Inc.
 * SPDX-License-Identifier: MIT
 */
#include "nvk_device_memory.h"

#include "nouveau_bo.h"

#include "nvk_device.h"
#include "nvk_entrypoints.h"
#include "nvk_image.h"
#include "nvk_physical_device.h"

#include "nv_push.h"
#include "util/u_atomic.h"

#include <inttypes.h>
#include <sys/mman.h>

#include "nvtypes.h"
#include "nvk_cl90b5.h"

/* Supports opaque fd only */
const VkExternalMemoryProperties nvk_opaque_fd_mem_props = {
   .externalMemoryFeatures =
      VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT |
      VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT,
   .exportFromImportedHandleTypes =
      VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT,
   .compatibleHandleTypes =
      VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT,
};

/* Supports opaque fd and dma_buf. */
const VkExternalMemoryProperties nvk_dma_buf_mem_props = {
   .externalMemoryFeatures =
      VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT |
      VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT,
   .exportFromImportedHandleTypes =
      VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
      VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
   .compatibleHandleTypes =
      VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
      VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
};

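/* Fills a BO with zeros using the NV90B5 copy-engine class: the remap unit
 * broadcasts a 32-bit constant (zero) into every destination component of a
 * pitch-linear transfer, so no source buffer is needed.
 */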
static VkResult
zero_vram(struct nvk_device *dev, struct nouveau_ws_bo *bo)
{
   uint32_t push_data[256];
   struct nv_push push;
   nv_push_init(&push, push_data, ARRAY_SIZE(push_data));
   struct nv_push *p = &push;

   uint64_t addr = bo->offset;

   assert((bo->size % 4096) == 0);
   assert((bo->size / 4096) < (1 << 15));

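   /* Express the fill as one 4096-byte line per page: each element is four
    * 4-byte components (16 bytes), so LINE_LENGTH_IN is 4096 / 16 and
    * LINE_COUNT is the number of 4 KiB pages, which the asserts above keep
    * below 1 << 15.
    */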
   P_MTHD(p, NV90B5, OFFSET_OUT_UPPER);
   P_NV90B5_OFFSET_OUT_UPPER(p, addr >> 32);
   P_NV90B5_OFFSET_OUT_LOWER(p, addr & 0xffffffff);
   P_NV90B5_PITCH_IN(p, 4096);
   P_NV90B5_PITCH_OUT(p, 4096);
   P_NV90B5_LINE_LENGTH_IN(p, (4096 / 16));
   P_NV90B5_LINE_COUNT(p, bo->size / 4096);

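   /* Route constant A (zero) to all four destination components so the copy
    * engine writes zeros without reading any source memory.
    */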
   P_IMMD(p, NV90B5, SET_REMAP_CONST_A, 0);
   P_IMMD(p, NV90B5, SET_REMAP_COMPONENTS, {
      .dst_x = DST_X_CONST_A,
      .dst_y = DST_Y_CONST_A,
      .dst_z = DST_Z_CONST_A,
      .dst_w = DST_W_CONST_A,
      .component_size = COMPONENT_SIZE_FOUR,
      .num_src_components = NUM_SRC_COMPONENTS_FOUR,
      .num_dst_components = NUM_DST_COMPONENTS_FOUR,
   });

   P_IMMD(p, NV90B5, LAUNCH_DMA, {
      .data_transfer_type = DATA_TRANSFER_TYPE_NON_PIPELINED,
      .multi_line_enable = MULTI_LINE_ENABLE_TRUE,
      .flush_enable = FLUSH_ENABLE_TRUE,
      .src_memory_layout = SRC_MEMORY_LAYOUT_PITCH,
      .dst_memory_layout = DST_MEMORY_LAYOUT_PITCH,
      .remap_enable = REMAP_ENABLE_TRUE,
   });

   return nvk_queue_submit_simple(&dev->queue, nv_push_dw_count(&push),
                                  push_data, 1, &bo);
}

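/* Translates a Vulkan memory type into nouveau winsys BO flags: device-local
 * types map to VRAM, everything else to GART, host-visible types get a CPU
 * mapping, and allocations with no export handle types are marked NO_SHARE
 * (never exportable).
 */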
static enum nouveau_ws_bo_flags
nvk_memory_type_flags(const VkMemoryType *type,
                      VkExternalMemoryHandleTypeFlagBits handle_types)
{
   enum nouveau_ws_bo_flags flags = 0;
   if (type->propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
      flags = NOUVEAU_WS_BO_LOCAL;
   else
      flags = NOUVEAU_WS_BO_GART;

   if (type->propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
      flags |= NOUVEAU_WS_BO_MAP;

   if (handle_types == 0)
      flags |= NOUVEAU_WS_BO_NO_SHARE;

   return flags;
}

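/* Reports which memory types an imported fd/dma-buf can be bound to by
 * checking, for each type, that the imported BO already carries every flag
 * the type would require.
 */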
VKAPI_ATTR VkResult VKAPI_CALL
nvk_GetMemoryFdPropertiesKHR(VkDevice device,
                             VkExternalMemoryHandleTypeFlagBits handleType,
                             int fd,
                             VkMemoryFdPropertiesKHR *pMemoryFdProperties)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   struct nvk_physical_device *pdev = nvk_device_physical(dev);
   struct nouveau_ws_bo *bo;

   switch (handleType) {
   case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT:
   case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
      bo = nouveau_ws_bo_from_dma_buf(dev->ws_dev, fd);
      if (bo == NULL)
         return vk_error(dev, VK_ERROR_INVALID_EXTERNAL_HANDLE);
      break;
   default:
      return vk_error(dev, VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   uint32_t type_bits = 0;
   for (unsigned t = 0; t < ARRAY_SIZE(pdev->mem_types); t++) {
      const enum nouveau_ws_bo_flags flags =
         nvk_memory_type_flags(&pdev->mem_types[t], handleType);
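      /* The type is compatible if the BO already has every flag it needs. */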
      if (!(flags & ~bo->flags))
         type_bits |= (1 << t);
   }

   pMemoryFdProperties->memoryTypeBits = type_bits;

   nouveau_ws_bo_destroy(bo);

   return VK_SUCCESS;
}

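/* Allocates device memory: translate the memory type into BO flags, align the
 * size, then either import the BO from the provided fd or allocate a new one.
 * With NVK_DEBUG_ZERO_MEMORY set, the memory is cleared before use, and
 * per-heap usage is tracked on success.
 */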
VKAPI_ATTR VkResult VKAPI_CALL
nvk_AllocateMemory(VkDevice device,
                   const VkMemoryAllocateInfo *pAllocateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkDeviceMemory *pMem)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   struct nvk_physical_device *pdev = nvk_device_physical(dev);
   struct nvk_device_memory *mem;
   VkResult result = VK_SUCCESS;

   const VkImportMemoryFdInfoKHR *fd_info =
      vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
   const VkExportMemoryAllocateInfo *export_info =
      vk_find_struct_const(pAllocateInfo->pNext, EXPORT_MEMORY_ALLOCATE_INFO);
   const VkMemoryType *type =
      &pdev->mem_types[pAllocateInfo->memoryTypeIndex];

   VkExternalMemoryHandleTypeFlagBits handle_types = 0;
   if (export_info != NULL)
      handle_types |= export_info->handleTypes;
   if (fd_info != NULL)
      handle_types |= fd_info->handleType;

   const enum nouveau_ws_bo_flags flags =
      nvk_memory_type_flags(type, handle_types);

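   /* VRAM-only allocations get 64 KiB alignment (likely to allow large-page
    * mappings in local memory); GART-capable allocations only need 4 KiB.
    */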
   uint32_t alignment = (1ULL << 12);
   if (!(flags & NOUVEAU_WS_BO_GART))
      alignment = (1ULL << 16);

   const uint64_t aligned_size =
      align64(pAllocateInfo->allocationSize, alignment);

   mem = vk_device_memory_create(&dev->vk, pAllocateInfo,
                                 pAllocator, sizeof(*mem));
   if (!mem)
      return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);

   mem->map = NULL;
   if (fd_info && fd_info->handleType) {
      assert(fd_info->handleType ==
               VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
             fd_info->handleType ==
               VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);

      mem->bo = nouveau_ws_bo_from_dma_buf(dev->ws_dev, fd_info->fd);
      if (mem->bo == NULL) {
         result = vk_error(dev, VK_ERROR_INVALID_EXTERNAL_HANDLE);
         goto fail_alloc;
      }
      assert(!(flags & ~mem->bo->flags));
   } else {
      mem->bo = nouveau_ws_bo_new(dev->ws_dev, aligned_size, alignment, flags);
      if (!mem->bo) {
         result = vk_error(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY);
         goto fail_alloc;
      }
   }

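   /* NVK_DEBUG_ZERO_MEMORY: host-visible memory is zeroed with a CPU memset
    * through a temporary mapping; anything else is cleared on the GPU with
    * the copy-engine fill above.
    */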
   if (dev->ws_dev->debug_flags & NVK_DEBUG_ZERO_MEMORY) {
      if (type->propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
         void *map = nouveau_ws_bo_map(mem->bo, NOUVEAU_WS_BO_RDWR, NULL);
         if (map == NULL) {
            result = vk_errorf(dev, VK_ERROR_OUT_OF_HOST_MEMORY,
                               "Memory map failed");
            goto fail_bo;
         }
         memset(map, 0, mem->bo->size);
         nouveau_ws_bo_unmap(mem->bo, map);
      } else {
         result = zero_vram(dev, mem->bo);
         if (result != VK_SUCCESS)
            goto fail_bo;
      }
   }

   if (fd_info && fd_info->handleType) {
      /* From the Vulkan spec:
       *
       *    "Importing memory from a file descriptor transfers ownership of
       *    the file descriptor from the application to the Vulkan
       *    implementation. The application must not perform any operations on
       *    the file descriptor after a successful import."
       *
       * If the import fails, we leave the file descriptor open.
       */
      close(fd_info->fd);
   }

   struct nvk_memory_heap *heap = &pdev->mem_heaps[type->heapIndex];
   p_atomic_add(&heap->used, mem->bo->size);

   *pMem = nvk_device_memory_to_handle(mem);

   return VK_SUCCESS;

fail_bo:
   nouveau_ws_bo_destroy(mem->bo);
fail_alloc:
   vk_device_memory_destroy(&dev->vk, pAllocator, &mem->vk);
   return result;
}

VKAPI_ATTR void VKAPI_CALL
nvk_FreeMemory(VkDevice device,
               VkDeviceMemory _mem,
               const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   VK_FROM_HANDLE(nvk_device_memory, mem, _mem);
   struct nvk_physical_device *pdev = nvk_device_physical(dev);

   if (!mem)
      return;

   if (mem->map)
      nouveau_ws_bo_unmap(mem->bo, mem->map);

   const VkMemoryType *type = &pdev->mem_types[mem->vk.memory_type_index];
   struct nvk_memory_heap *heap = &pdev->mem_heaps[type->heapIndex];
   p_atomic_add(&heap->used, -((int64_t)mem->bo->size));

   nouveau_ws_bo_destroy(mem->bo);

   vk_device_memory_destroy(&dev->vk, pAllocator, &mem->vk);
}

VKAPI_ATTR VkResult VKAPI_CALL
nvk_MapMemory2KHR(VkDevice device,
                  const VkMemoryMapInfoKHR *pMemoryMapInfo,
                  void **ppData)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   VK_FROM_HANDLE(nvk_device_memory, mem, pMemoryMapInfo->memory);

   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   const VkDeviceSize offset = pMemoryMapInfo->offset;
   const VkDeviceSize size =
      vk_device_memory_range(&mem->vk, pMemoryMapInfo->offset,
                                       pMemoryMapInfo->size);

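   /* VK_EXT_map_memory_placed: when the placed bit is set, map the memory at
    * the address the application provided.
    */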
   void *fixed_addr = NULL;
   if (pMemoryMapInfo->flags & VK_MEMORY_MAP_PLACED_BIT_EXT) {
      const VkMemoryMapPlacedInfoEXT *placed_info =
         vk_find_struct_const(pMemoryMapInfo->pNext, MEMORY_MAP_PLACED_INFO_EXT);
      fixed_addr = placed_info->pPlacedAddress;
   }

   /* From the Vulkan spec version 1.0.32 docs for MapMemory:
    *
    *  * If size is not equal to VK_WHOLE_SIZE, size must be greater than 0
    *    assert(size != 0);
    *  * If size is not equal to VK_WHOLE_SIZE, size must be less than or
    *    equal to the size of the memory minus offset
    */
   assert(size > 0);
   assert(offset + size <= mem->bo->size);

   if (size != (size_t)size) {
      return vk_errorf(dev, VK_ERROR_MEMORY_MAP_FAILED,
                       "requested size 0x%"PRIx64" does not fit in %u bits",
                       size, (unsigned)(sizeof(size_t) * 8));
   }

   /* From the Vulkan 1.2.194 spec:
    *
    *    "memory must not be currently host mapped"
    */
   if (mem->map != NULL) {
      return vk_errorf(dev, VK_ERROR_MEMORY_MAP_FAILED,
                       "Memory object already mapped.");
   }

   mem->map = nouveau_ws_bo_map(mem->bo, NOUVEAU_WS_BO_RDWR, fixed_addr);
   if (mem->map == NULL) {
      return vk_errorf(dev, VK_ERROR_MEMORY_MAP_FAILED,
                       "Memory object couldn't be mapped.");
   }

   *ppData = mem->map + offset;

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
nvk_UnmapMemory2KHR(VkDevice device,
                    const VkMemoryUnmapInfoKHR *pMemoryUnmapInfo)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   VK_FROM_HANDLE(nvk_device_memory, mem, pMemoryUnmapInfo->memory);

   if (mem == NULL)
      return VK_SUCCESS;

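   /* With VK_MEMORY_UNMAP_RESERVE_BIT_EXT the address range must stay
    * reserved, so map over the existing mapping instead of unmapping it.
    */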
   if (pMemoryUnmapInfo->flags & VK_MEMORY_UNMAP_RESERVE_BIT_EXT) {
      int err = nouveau_ws_bo_overmap(mem->bo, mem->map);
      if (err) {
         return vk_errorf(dev, VK_ERROR_MEMORY_MAP_FAILED,
                          "Failed to map over original mapping");
      }
   } else {
      nouveau_ws_bo_unmap(mem->bo, mem->map);
   }

   mem->map = NULL;

   return VK_SUCCESS;
}

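/* Host-visible memory exposed by NVK is also host-coherent, so explicit
 * flushes and invalidates have nothing to do.
 */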
VKAPI_ATTR VkResult VKAPI_CALL
nvk_FlushMappedMemoryRanges(VkDevice device,
                            uint32_t memoryRangeCount,
                            const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
nvk_InvalidateMappedMemoryRanges(VkDevice device,
                                 uint32_t memoryRangeCount,
                                 const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

VKAPI_ATTR void VKAPI_CALL
nvk_GetDeviceMemoryCommitment(VkDevice device,
                              VkDeviceMemory _mem,
                              VkDeviceSize* pCommittedMemoryInBytes)
{
   VK_FROM_HANDLE(nvk_device_memory, mem, _mem);

   *pCommittedMemoryInBytes = mem->bo->size;
}

VKAPI_ATTR VkResult VKAPI_CALL
nvk_GetMemoryFdKHR(VkDevice device,
                   const VkMemoryGetFdInfoKHR *pGetFdInfo,
                   int *pFD)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   VK_FROM_HANDLE(nvk_device_memory, memory, pGetFdInfo->memory);

   switch (pGetFdInfo->handleType) {
   case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT:
   case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
      if (nouveau_ws_bo_dma_buf(memory->bo, pFD))
         return vk_error(dev, VK_ERROR_TOO_MANY_OBJECTS);
      return VK_SUCCESS;
   default:
      assert(!"unsupported handle type");
      return vk_error(dev, VK_ERROR_FEATURE_NOT_PRESENT);
   }
}

VKAPI_ATTR uint64_t VKAPI_CALL
nvk_GetDeviceMemoryOpaqueCaptureAddress(
   UNUSED VkDevice device,
   const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo)
{
   VK_FROM_HANDLE(nvk_device_memory, mem, pInfo->memory);

   return mem->bo->offset;
}