/*
 * Copyright © 2022 Collabora Ltd. and Red Hat Inc.
 * SPDX-License-Identifier: MIT
 */
#include "nvk_buffer.h"

#include "nvk_entrypoints.h"
#include "nvk_device.h"
#include "nvk_device_memory.h"
#include "nvk_physical_device.h"
#include "nvk_queue.h"
#include "nvkmd/nvkmd.h"

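/* Computes the memory alignment required for a buffer by taking the largest
 * of the per-usage minimum alignments.  Buffers created for sparse binding
 * or device-address capture/replay also need the nvkmd bind alignment
 * (bind_align_B) so their VA range can be (re)bound independently of any
 * memory object.
 */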
static uint32_t
nvk_get_buffer_alignment(const struct nvk_physical_device *pdev,
                         VkBufferUsageFlags2KHR usage_flags,
                         VkBufferCreateFlags create_flags)
{
   uint32_t alignment = 16;

   if (usage_flags & VK_BUFFER_USAGE_2_UNIFORM_BUFFER_BIT_KHR)
      alignment = MAX2(alignment, nvk_min_cbuf_alignment(&pdev->info));

   if (usage_flags & VK_BUFFER_USAGE_2_STORAGE_BUFFER_BIT_KHR)
      alignment = MAX2(alignment, NVK_MIN_SSBO_ALIGNMENT);

   if (usage_flags & (VK_BUFFER_USAGE_2_UNIFORM_TEXEL_BUFFER_BIT_KHR |
                      VK_BUFFER_USAGE_2_STORAGE_TEXEL_BUFFER_BIT_KHR))
      alignment = MAX2(alignment, NVK_MIN_TEXEL_BUFFER_ALIGNMENT);

   if (create_flags & (VK_BUFFER_CREATE_SPARSE_BINDING_BIT |
                       VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT))
      alignment = MAX2(alignment, pdev->nvkmd->bind_align_B);

   return alignment;
}

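/* Scans the pNext chain for a capture/replay address supplied through either
 * VkBufferOpaqueCaptureAddressCreateInfo or VkBufferDeviceAddressCreateInfoEXT
 * and returns it, or 0 if none was provided.  Debug builds keep walking the
 * chain and assert that any addresses supplied by multiple structs agree.
 */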
static uint64_t
nvk_get_bda_replay_addr(const VkBufferCreateInfo *pCreateInfo)
{
   uint64_t addr = 0;
   vk_foreach_struct_const(ext, pCreateInfo->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO: {
         const VkBufferOpaqueCaptureAddressCreateInfo *bda = (void *)ext;
         if (bda->opaqueCaptureAddress != 0) {
#ifdef NDEBUG
            return bda->opaqueCaptureAddress;
#else
            assert(addr == 0 || bda->opaqueCaptureAddress == addr);
            addr = bda->opaqueCaptureAddress;
#endif
         }
         break;
      }

      case VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT: {
         const VkBufferDeviceAddressCreateInfoEXT *bda = (void *)ext;
         if (bda->deviceAddress != 0) {
#ifdef NDEBUG
            return bda->deviceAddress;
#else
            assert(addr == 0 || bda->deviceAddress == addr);
            addr = bda->deviceAddress;
#endif
         }
         break;
      }

      default:
         break;
      }
   }

   return addr;
}

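/* Buffers created with sparse binding or device-address capture/replay get a
 * dedicated VA range up front (at the replayed address when one is given);
 * all other buffers receive their GPU address when memory is bound.  Sizes
 * above NVK_MAX_BUFFER_SIZE are rejected with VK_ERROR_OUT_OF_DEVICE_MEMORY.
 */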
VKAPI_ATTR VkResult VKAPI_CALL
nvk_CreateBuffer(VkDevice device,
                 const VkBufferCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *pAllocator,
                 VkBuffer *pBuffer)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   struct nvk_buffer *buffer;
   VkResult result;

   if (pCreateInfo->size > NVK_MAX_BUFFER_SIZE)
      return vk_error(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   buffer = vk_buffer_create(&dev->vk, pCreateInfo, pAllocator,
                             sizeof(*buffer));
   if (!buffer)
      return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (buffer->vk.size > 0 &&
       (buffer->vk.create_flags & (VK_BUFFER_CREATE_SPARSE_BINDING_BIT |
                                   VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT))) {
      const uint32_t alignment =
         nvk_get_buffer_alignment(nvk_device_physical(dev),
                                  buffer->vk.usage,
                                  buffer->vk.create_flags);
      assert(alignment >= 4096);
      const uint64_t va_size_B = align64(buffer->vk.size, alignment);

      enum nvkmd_va_flags va_flags = 0;
      if (buffer->vk.create_flags & VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT)
         va_flags |= NVKMD_VA_SPARSE;

      uint64_t fixed_addr = 0;
      if (buffer->vk.create_flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT) {
         va_flags |= NVKMD_VA_REPLAY;

         fixed_addr = nvk_get_bda_replay_addr(pCreateInfo);
         if (fixed_addr != 0)
            va_flags |= NVKMD_VA_ALLOC_FIXED;
      }

      result = nvkmd_dev_alloc_va(dev->nvkmd, &dev->vk.base,
                                  va_flags, 0 /* pte_kind */,
                                  va_size_B, alignment, fixed_addr,
                                  &buffer->va);
      if (result != VK_SUCCESS) {
         vk_buffer_destroy(&dev->vk, pAllocator, &buffer->vk);
         return result;
      }

      buffer->addr = buffer->va->addr;
   }

   *pBuffer = nvk_buffer_to_handle(buffer);

   return VK_SUCCESS;
}

VKAPI_ATTR void VKAPI_CALL
nvk_DestroyBuffer(VkDevice device,
                  VkBuffer _buffer,
                  const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   VK_FROM_HANDLE(nvk_buffer, buffer, _buffer);

   if (!buffer)
      return;

   if (buffer->va != NULL)
      nvkmd_va_free(buffer->va);

   vk_buffer_destroy(&dev->vk, pAllocator, &buffer->vk);
}

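/* Buffer memory requirements: the size is rounded up to the usage-derived
 * alignment, every memory type is allowed, and dedicated allocations are
 * never preferred or required for buffers.
 */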
VKAPI_ATTR void VKAPI_CALL
nvk_GetDeviceBufferMemoryRequirements(
   VkDevice device,
   const VkDeviceBufferMemoryRequirements *pInfo,
   VkMemoryRequirements2 *pMemoryRequirements)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   struct nvk_physical_device *pdev = nvk_device_physical(dev);

   const uint32_t alignment =
      nvk_get_buffer_alignment(nvk_device_physical(dev),
                               pInfo->pCreateInfo->usage,
                               pInfo->pCreateInfo->flags);

   pMemoryRequirements->memoryRequirements = (VkMemoryRequirements) {
      .size = align64(pInfo->pCreateInfo->size, alignment),
      .alignment = alignment,
      .memoryTypeBits = BITFIELD_MASK(pdev->mem_type_count),
   };

   vk_foreach_struct_const(ext, pMemoryRequirements->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS: {
         VkMemoryDedicatedRequirements *dedicated = (void *)ext;
         dedicated->prefersDedicatedAllocation = false;
         dedicated->requiresDedicatedAllocation = false;
         break;
      }
      default:
         vk_debug_ignored_stype(ext->sType);
         break;
      }
   }
}

VKAPI_ATTR void VKAPI_CALL
nvk_GetPhysicalDeviceExternalBufferProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalBufferInfo *pExternalBufferInfo,
   VkExternalBufferProperties *pExternalBufferProperties)
{
   /* The Vulkan 1.3.256 spec says:
    *
    *    VUID-VkPhysicalDeviceExternalBufferInfo-handleType-parameter
    *
    *    "handleType must be a valid VkExternalMemoryHandleTypeFlagBits value"
    *
    * This differs from VkPhysicalDeviceExternalImageFormatInfo, which
    * surprisingly permits handleType == 0.
    */
   assert(pExternalBufferInfo->handleType != 0);

   /* All of the current flags are for sparse which we don't support yet.
    * Even when we do support it, doing sparse on external memory sounds
    * sketchy.  Also, just disallowing flags is the safe option.
    */
   if (pExternalBufferInfo->flags)
      goto unsupported;

   switch (pExternalBufferInfo->handleType) {
   case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT:
   case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
      pExternalBufferProperties->externalMemoryProperties =
         nvk_dma_buf_mem_props;
      return;
   default:
      goto unsupported;
   }

 unsupported:
   /* From the Vulkan 1.3.256 spec:
    *
    *    compatibleHandleTypes must include at least handleType.
    */
   pExternalBufferProperties->externalMemoryProperties =
      (VkExternalMemoryProperties) {
         .compatibleHandleTypes = pExternalBufferInfo->handleType,
      };
}

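/* Binds device memory to a buffer.  Buffers that own a VA range (sparse or
 * capture/replay) have the memory mapped into that range; all other buffers
 * simply take the memory's GPU address plus the bind offset.  When
 * NVK_DEBUG_PUSH_DUMP is enabled, memory backing indirect or preprocess
 * buffers is registered with nvkmd for push-dump debugging.
 */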
static VkResult
nvk_bind_buffer_memory(struct nvk_device *dev,
                       const VkBindBufferMemoryInfo *info)
{
   VK_FROM_HANDLE(nvk_device_memory, mem, info->memory);
   VK_FROM_HANDLE(nvk_buffer, buffer, info->buffer);
   struct nvk_physical_device *pdev = nvk_device_physical(dev);
   VkResult result = VK_SUCCESS;

   if ((pdev->debug_flags & NVK_DEBUG_PUSH_DUMP) &&
       (buffer->vk.usage & (VK_BUFFER_USAGE_2_INDIRECT_BUFFER_BIT_KHR |
                            VK_BUFFER_USAGE_2_PREPROCESS_BUFFER_BIT_EXT)))
      nvkmd_dev_track_mem(dev->nvkmd, mem->mem);

   if (buffer->va != NULL) {
      result = nvkmd_va_bind_mem(buffer->va, &buffer->vk.base,
                                 0 /* va_offset */,
                                 mem->mem, info->memoryOffset,
                                 buffer->va->size_B);
   } else {
      buffer->addr = mem->mem->va->addr + info->memoryOffset;
   }

   return result;
}

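/* Binds each buffer in turn, reporting per-bind results through
 * VkBindMemoryStatusKHR when it is chained, and returns the first failure
 * (or VK_SUCCESS if every bind succeeded).
 */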
VKAPI_ATTR VkResult VKAPI_CALL
nvk_BindBufferMemory2(VkDevice device,
                      uint32_t bindInfoCount,
                      const VkBindBufferMemoryInfo *pBindInfos)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   VkResult first_error_or_success = VK_SUCCESS;

   for (uint32_t i = 0; i < bindInfoCount; ++i) {
      VkResult result = nvk_bind_buffer_memory(dev, &pBindInfos[i]);

      const VkBindMemoryStatusKHR *status =
         vk_find_struct_const(pBindInfos[i].pNext, BIND_MEMORY_STATUS_KHR);
      if (status != NULL && status->pResult != NULL)
         *status->pResult = result;

      if (first_error_or_success == VK_SUCCESS)
         first_error_or_success = result;
   }

   return first_error_or_success;
}

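/* Both the device address and the opaque capture address of a buffer are its
 * base GPU VA, so the two queries below return the same value.
 */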
VKAPI_ATTR VkDeviceAddress VKAPI_CALL
nvk_GetBufferDeviceAddress(UNUSED VkDevice device,
                           const VkBufferDeviceAddressInfo *pInfo)
{
   VK_FROM_HANDLE(nvk_buffer, buffer, pInfo->buffer);

   return nvk_buffer_address(buffer, 0);
}

VKAPI_ATTR uint64_t VKAPI_CALL
nvk_GetBufferOpaqueCaptureAddress(UNUSED VkDevice device,
                                  const VkBufferDeviceAddressInfo *pInfo)
{
   VK_FROM_HANDLE(nvk_buffer, buffer, pInfo->buffer);

   return nvk_buffer_address(buffer, 0);
}

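/* Applies a sparse buffer bind on the queue's bind context by translating
 * each VkSparseMemoryBind into an nvkmd_ctx_bind op on the buffer's VA range:
 * a NULL memory handle unbinds the range, anything else binds the given
 * memory range at the resource offset.
 */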
VkResult
nvk_queue_buffer_bind(struct nvk_queue *queue,
                      const VkSparseBufferMemoryBindInfo *bind_info)
{
   VK_FROM_HANDLE(nvk_buffer, buffer, bind_info->buffer);
   VkResult result;

   const uint32_t bind_count = bind_info->bindCount;
   if (bind_count == 0)
      return VK_SUCCESS;

   STACK_ARRAY(struct nvkmd_ctx_bind, binds, bind_count);

   for (unsigned i = 0; i < bind_count; i++) {
      const VkSparseMemoryBind *bind = &bind_info->pBinds[i];
      VK_FROM_HANDLE(nvk_device_memory, mem, bind->memory);

      binds[i] = (struct nvkmd_ctx_bind) {
         .op = mem ? NVKMD_BIND_OP_BIND : NVKMD_BIND_OP_UNBIND,
         .va = buffer->va,
         .va_offset_B = bind->resourceOffset,
         .mem = mem ? mem->mem : NULL,
         .mem_offset_B = mem ? bind->memoryOffset : 0,
         .range_B = bind->size,
      };
   }

   result = nvkmd_ctx_bind(queue->bind_ctx, &queue->vk.base,
                           bind_count, binds);

   STACK_ARRAY_FINISH(binds);

   return result;
}

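/* NVK needs no buffer-specific opaque capture data for descriptor buffers,
 * so there is nothing to write to pData.
 */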
VKAPI_ATTR VkResult VKAPI_CALL
nvk_GetBufferOpaqueCaptureDescriptorDataEXT(
   VkDevice device,
   const VkBufferCaptureDescriptorDataInfoEXT *pInfo,
   void *pData)
{
   return VK_SUCCESS;
}