/*
 * Copyright © 2021 Collabora Ltd.
 * SPDX-License-Identifier: MIT
 */
5
6 #include "panvk_buffer.h"
7 #include "panvk_device.h"
8 #include "panvk_device_memory.h"
9 #include "panvk_entrypoints.h"
10
11 #include "vk_log.h"
12
13 #define PANVK_MAX_BUFFER_SIZE (1 << 30)
14
15 VKAPI_ATTR VkDeviceAddress VKAPI_CALL
panvk_GetBufferDeviceAddress(VkDevice _device,const VkBufferDeviceAddressInfo * pInfo)16 panvk_GetBufferDeviceAddress(VkDevice _device,
17 const VkBufferDeviceAddressInfo *pInfo)
18 {
19 VK_FROM_HANDLE(panvk_buffer, buffer, pInfo->buffer);
20
21 return buffer->dev_addr;
22 }
23
24 VKAPI_ATTR uint64_t VKAPI_CALL
panvk_GetBufferOpaqueCaptureAddress(VkDevice _device,const VkBufferDeviceAddressInfo * pInfo)25 panvk_GetBufferOpaqueCaptureAddress(VkDevice _device,
26 const VkBufferDeviceAddressInfo *pInfo)
27 {
28 return panvk_GetBufferDeviceAddress(_device, pInfo);
29 }
30
31 VKAPI_ATTR void VKAPI_CALL
panvk_GetDeviceBufferMemoryRequirements(VkDevice device,const VkDeviceBufferMemoryRequirements * pInfo,VkMemoryRequirements2 * pMemoryRequirements)32 panvk_GetDeviceBufferMemoryRequirements(VkDevice device,
33 const VkDeviceBufferMemoryRequirements *pInfo,
34 VkMemoryRequirements2 *pMemoryRequirements)
35 {
36 const uint64_t align = 64;
37 const uint64_t size = align64(pInfo->pCreateInfo->size, align);
38
39 pMemoryRequirements->memoryRequirements.memoryTypeBits = 1;
40 pMemoryRequirements->memoryRequirements.alignment = align;
41 pMemoryRequirements->memoryRequirements.size = size;
42
43 vk_foreach_struct_const(ext, pMemoryRequirements->pNext) {
44 switch (ext->sType) {
45 case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS: {
46 VkMemoryDedicatedRequirements *dedicated = (void *)ext;
47 dedicated->requiresDedicatedAllocation = false;
48 dedicated->prefersDedicatedAllocation = dedicated->requiresDedicatedAllocation;
49 break;
50 }
51 default:
52 vk_debug_ignored_stype(ext->sType);
53 break;
54 }
55 }
56 }
57
58 VKAPI_ATTR VkResult VKAPI_CALL
panvk_BindBufferMemory2(VkDevice _device,uint32_t bindInfoCount,const VkBindBufferMemoryInfo * pBindInfos)59 panvk_BindBufferMemory2(VkDevice _device, uint32_t bindInfoCount,
60 const VkBindBufferMemoryInfo *pBindInfos)
61 {
62 VK_FROM_HANDLE(panvk_device, device, _device);
63 const struct panvk_physical_device *phys_dev =
64 to_panvk_physical_device(device->vk.physical);
65 const unsigned arch = pan_arch(phys_dev->kmod.props.gpu_prod_id);
66
67 for (uint32_t i = 0; i < bindInfoCount; ++i) {
68 VK_FROM_HANDLE(panvk_device_memory, mem, pBindInfos[i].memory);
69 VK_FROM_HANDLE(panvk_buffer, buffer, pBindInfos[i].buffer);
70 struct pan_kmod_bo *old_bo = buffer->bo;
71
72 assert(mem != NULL);
73
74 buffer->bo = pan_kmod_bo_get(mem->bo);
75 buffer->dev_addr = mem->addr.dev + pBindInfos[i].memoryOffset;
76
77 /* FIXME: Only host map for index buffers so we can do the min/max
78 * index retrieval on the CPU. This is all broken anyway and the
79 * min/max search should be done with a compute shader that also
80 * patches the job descriptor accordingly (basically an indirect draw).
81 *
82 * Make sure this goes away as soon as we fixed indirect draws.
83 */
84 if (arch < 9 && (buffer->vk.usage & VK_BUFFER_USAGE_INDEX_BUFFER_BIT)) {
85 VkDeviceSize offset = pBindInfos[i].memoryOffset;
86 VkDeviceSize pgsize = getpagesize();
87 off_t map_start = offset & ~(pgsize - 1);
88 off_t map_end = offset + buffer->vk.size;
89 void *map_addr =
90 pan_kmod_bo_mmap(mem->bo, map_start, map_end - map_start,
91 PROT_WRITE, MAP_SHARED, NULL);
92
93 assert(map_addr != MAP_FAILED);
94 buffer->host_ptr = map_addr + (offset & pgsize);
95 }
96
97 pan_kmod_bo_put(old_bo);
98 }
99 return VK_SUCCESS;
100 }
101
102 VKAPI_ATTR VkResult VKAPI_CALL
panvk_CreateBuffer(VkDevice _device,const VkBufferCreateInfo * pCreateInfo,const VkAllocationCallbacks * pAllocator,VkBuffer * pBuffer)103 panvk_CreateBuffer(VkDevice _device, const VkBufferCreateInfo *pCreateInfo,
104 const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer)
105 {
106 VK_FROM_HANDLE(panvk_device, device, _device);
107 struct panvk_buffer *buffer;
108
109 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
110
111 if (pCreateInfo->size > PANVK_MAX_BUFFER_SIZE)
112 return panvk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
113
114 buffer =
115 vk_buffer_create(&device->vk, pCreateInfo, pAllocator, sizeof(*buffer));
116 if (buffer == NULL)
117 return panvk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
118
119 *pBuffer = panvk_buffer_to_handle(buffer);
120
121 return VK_SUCCESS;
122 }
123
124 VKAPI_ATTR void VKAPI_CALL
panvk_DestroyBuffer(VkDevice _device,VkBuffer _buffer,const VkAllocationCallbacks * pAllocator)125 panvk_DestroyBuffer(VkDevice _device, VkBuffer _buffer,
126 const VkAllocationCallbacks *pAllocator)
127 {
128 VK_FROM_HANDLE(panvk_device, device, _device);
129 VK_FROM_HANDLE(panvk_buffer, buffer, _buffer);
130
131 if (!buffer)
132 return;
133
134 if (buffer->host_ptr) {
135 VkDeviceSize pgsize = getpagesize();
136 uintptr_t map_start = (uintptr_t)buffer->host_ptr & ~(pgsize - 1);
137 uintptr_t map_end =
138 ALIGN_POT((uintptr_t)buffer->host_ptr + buffer->vk.size, pgsize);
139 ASSERTED int ret = os_munmap((void *)map_start, map_end - map_start);
140
141 assert(!ret);
142 buffer->host_ptr = NULL;
143 }
144
145 pan_kmod_bo_put(buffer->bo);
146 vk_buffer_destroy(&device->vk, pAllocator, &buffer->vk);
147 }
148