/*
 * Copyright © 2022 Collabora Ltd. and Red Hat Inc.
 * SPDX-License-Identifier: MIT
 */
#include "nvk_buffer.h"

#include "nvk_entrypoints.h"
#include "nvk_device.h"
#include "nvk_device_memory.h"
#include "nvk_physical_device.h"
#include "nvk_queue.h"
#include "nvkmd/nvkmd.h"

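/* Computes the required base alignment, in bytes, for a buffer with the
 * given usage and create flags.  The result is the maximum of the
 * per-usage minimums (UBO, SSBO, texel buffer, DGC preprocess) so a
 * single buffer can legally serve every usage it was created with.
 * Sparse and capture/replay buffers get their own VA range and so must
 * also satisfy the kernel's bind alignment.
 */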
static uint32_t
nvk_get_buffer_alignment(const struct nvk_physical_device *pdev,
                         VkBufferUsageFlags2KHR usage_flags,
                         VkBufferCreateFlags create_flags)
{
   uint32_t alignment = 16;

   if (usage_flags & VK_BUFFER_USAGE_2_UNIFORM_BUFFER_BIT_KHR)
      alignment = MAX2(alignment, nvk_min_cbuf_alignment(&pdev->info));

   if (usage_flags & VK_BUFFER_USAGE_2_STORAGE_BUFFER_BIT_KHR)
      alignment = MAX2(alignment, NVK_MIN_SSBO_ALIGNMENT);

   if (usage_flags & (VK_BUFFER_USAGE_2_UNIFORM_TEXEL_BUFFER_BIT_KHR |
                      VK_BUFFER_USAGE_2_STORAGE_TEXEL_BUFFER_BIT_KHR))
      alignment = MAX2(alignment, NVK_MIN_TEXEL_BUFFER_ALIGNMENT);

   if (usage_flags & VK_BUFFER_USAGE_2_PREPROCESS_BUFFER_BIT_EXT)
      alignment = MAX2(alignment, NVK_DGC_ALIGN);

   if (create_flags & (VK_BUFFER_CREATE_SPARSE_BINDING_BIT |
                       VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT))
      alignment = MAX2(alignment, pdev->nvkmd->bind_align_B);

   return alignment;
}

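/* Walks the pNext chain for a capture/replay address supplied via either
 * VkBufferOpaqueCaptureAddressCreateInfo (core) or
 * VkBufferDeviceAddressCreateInfoEXT, returning 0 if none is present.
 * Debug builds check that, if both structs appear, they agree.
 */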
static uint64_t
nvk_get_bda_replay_addr(const VkBufferCreateInfo *pCreateInfo)
{
   uint64_t addr = 0;
   vk_foreach_struct_const(ext, pCreateInfo->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO: {
         const VkBufferOpaqueCaptureAddressCreateInfo *bda = (void *)ext;
         if (bda->opaqueCaptureAddress != 0) {
#ifdef NDEBUG
            return bda->opaqueCaptureAddress;
#else
            assert(addr == 0 || bda->opaqueCaptureAddress == addr);
            addr = bda->opaqueCaptureAddress;
#endif
         }
         break;
      }

      case VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT: {
         const VkBufferDeviceAddressCreateInfoEXT *bda = (void *)ext;
         if (bda->deviceAddress != 0) {
#ifdef NDEBUG
            return bda->deviceAddress;
#else
            assert(addr == 0 || bda->deviceAddress == addr);
            addr = bda->deviceAddress;
#endif
         }
         break;
      }

      default:
         break;
      }
   }

   return addr;
}

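/* Most buffers get their GPU address at bind time from the underlying
 * memory object.  Sparse and capture/replay buffers instead reserve a
 * dedicated VA range here so that pages can be bound piecemeal later or
 * so the address can be pinned to the one recorded at capture time.
 */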
VKAPI_ATTR VkResult VKAPI_CALL
nvk_CreateBuffer(VkDevice device,
                 const VkBufferCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *pAllocator,
                 VkBuffer *pBuffer)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   struct nvk_buffer *buffer;
   VkResult result;

   if (pCreateInfo->size > NVK_MAX_BUFFER_SIZE)
      return vk_error(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   buffer = vk_buffer_create(&dev->vk, pCreateInfo, pAllocator,
                             sizeof(*buffer));
   if (!buffer)
      return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (buffer->vk.size > 0 &&
       (buffer->vk.create_flags & (VK_BUFFER_CREATE_SPARSE_BINDING_BIT |
                                   VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT))) {
      const uint32_t alignment =
         nvk_get_buffer_alignment(nvk_device_physical(dev),
                                  buffer->vk.usage,
                                  buffer->vk.create_flags);
      assert(alignment >= 4096);
      const uint64_t va_size_B = align64(buffer->vk.size, alignment);

      enum nvkmd_va_flags va_flags = 0;
      if (buffer->vk.create_flags & VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT)
         va_flags |= NVKMD_VA_SPARSE;

      uint64_t fixed_addr = 0;
      if (buffer->vk.create_flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT) {
         va_flags |= NVKMD_VA_REPLAY;

         fixed_addr = nvk_get_bda_replay_addr(pCreateInfo);
         if (fixed_addr != 0)
            va_flags |= NVKMD_VA_ALLOC_FIXED;
      }

      result = nvkmd_dev_alloc_va(dev->nvkmd, &dev->vk.base,
                                  va_flags, 0 /* pte_kind */,
                                  va_size_B, alignment, fixed_addr,
                                  &buffer->va);
      if (result != VK_SUCCESS) {
         vk_buffer_destroy(&dev->vk, pAllocator, &buffer->vk);
         return result;
      }

      buffer->addr = buffer->va->addr;
   }

   *pBuffer = nvk_buffer_to_handle(buffer);

   return VK_SUCCESS;
}

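/* Releases the buffer's dedicated VA range, if it has one, before
 * destroying the buffer object.  The buffer does not own its memory, so
 * nothing else needs to be freed here.
 */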
VKAPI_ATTR void VKAPI_CALL
nvk_DestroyBuffer(VkDevice device,
                  VkBuffer _buffer,
                  const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   VK_FROM_HANDLE(nvk_buffer, buffer, _buffer);

   if (!buffer)
      return;

   if (buffer->va != NULL)
      nvkmd_va_free(buffer->va);

   vk_buffer_destroy(&dev->vk, pAllocator, &buffer->vk);
}

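/* Requirements fall straight out of nvk_get_buffer_alignment(): the size
 * is rounded up to the alignment, and every memory type is acceptable.
 * Buffers never prefer or require dedicated allocations.
 */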
VKAPI_ATTR void VKAPI_CALL
nvk_GetDeviceBufferMemoryRequirements(
   VkDevice device,
   const VkDeviceBufferMemoryRequirements *pInfo,
   VkMemoryRequirements2 *pMemoryRequirements)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   struct nvk_physical_device *pdev = nvk_device_physical(dev);

   const uint32_t alignment =
      nvk_get_buffer_alignment(pdev,
                               pInfo->pCreateInfo->usage,
                               pInfo->pCreateInfo->flags);

   pMemoryRequirements->memoryRequirements = (VkMemoryRequirements) {
      .size = align64(pInfo->pCreateInfo->size, alignment),
      .alignment = alignment,
      .memoryTypeBits = BITFIELD_MASK(pdev->mem_type_count),
   };

   vk_foreach_struct(ext, pMemoryRequirements->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS: {
         VkMemoryDedicatedRequirements *dedicated = (void *)ext;
         dedicated->prefersDedicatedAllocation = false;
         dedicated->requiresDedicatedAllocation = false;
         break;
      }
      default:
         vk_debug_ignored_stype(ext->sType);
         break;
      }
   }
}

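/* Buffers can only be exported as opaque FDs or dma-bufs; both use the
 * same dma-buf-backed memory properties.  Everything else is reported as
 * unsupported, with the import/export feature flags left at zero.
 */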
VKAPI_ATTR void VKAPI_CALL
nvk_GetPhysicalDeviceExternalBufferProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalBufferInfo *pExternalBufferInfo,
   VkExternalBufferProperties *pExternalBufferProperties)
{
   /* The Vulkan 1.3.256 spec says:
    *
    *    VUID-VkPhysicalDeviceExternalBufferInfo-handleType-parameter
    *
    *    "handleType must be a valid VkExternalMemoryHandleTypeFlagBits
    *    value"
    *
    * This differs from VkPhysicalDeviceExternalImageFormatInfo, which
    * surprisingly permits handleType == 0.
    */
   assert(pExternalBufferInfo->handleType != 0);

   /* All of the current flags are for sparse, which we don't support yet.
    * Even when we do support it, doing sparse on external memory sounds
    * sketchy.  Also, just disallowing flags is the safe option.
    */
   if (pExternalBufferInfo->flags)
      goto unsupported;

   switch (pExternalBufferInfo->handleType) {
   case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT:
   case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
      pExternalBufferProperties->externalMemoryProperties =
         nvk_dma_buf_mem_props;
      return;
   default:
      goto unsupported;
   }

unsupported:
   /* From the Vulkan 1.3.256 spec:
    *
    *    "compatibleHandleTypes must include at least handleType."
    */
   pExternalBufferProperties->externalMemoryProperties =
      (VkExternalMemoryProperties) {
         .compatibleHandleTypes = pExternalBufferInfo->handleType,
      };
}

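/* Binds a single buffer to memory.  Buffers that own a VA range (sparse
 * or capture/replay) bind the memory into that range; everything else
 * simply takes the memory object's address plus the bind offset.  With
 * NVK_DEBUG_PUSH_DUMP, memory backing indirect or DGC preprocess buffers
 * is tracked for the push-buffer dumper.
 */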
static VkResult
nvk_bind_buffer_memory(struct nvk_device *dev,
                       const VkBindBufferMemoryInfo *info)
{
   VK_FROM_HANDLE(nvk_device_memory, mem, info->memory);
   VK_FROM_HANDLE(nvk_buffer, buffer, info->buffer);
   struct nvk_physical_device *pdev = nvk_device_physical(dev);
   VkResult result = VK_SUCCESS;

   if ((pdev->debug_flags & NVK_DEBUG_PUSH_DUMP) &&
       (buffer->vk.usage & (VK_BUFFER_USAGE_2_INDIRECT_BUFFER_BIT_KHR |
                            VK_BUFFER_USAGE_2_PREPROCESS_BUFFER_BIT_EXT)))
      nvkmd_dev_track_mem(dev->nvkmd, mem->mem);

   if (buffer->va != NULL) {
      result = nvkmd_va_bind_mem(buffer->va, &buffer->vk.base,
                                 0 /* va_offset */,
                                 mem->mem, info->memoryOffset,
                                 buffer->va->size_B);
   } else {
      buffer->addr = mem->mem->va->addr + info->memoryOffset;
   }

   return result;
}

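/* Per VK_KHR_maintenance6, every bind is attempted even after a failure,
 * each bind's result is written to its chained VkBindMemoryStatusKHR if
 * one is present, and the first error (if any) is what gets returned.
 */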
VKAPI_ATTR VkResult VKAPI_CALL
nvk_BindBufferMemory2(VkDevice device,
                      uint32_t bindInfoCount,
                      const VkBindBufferMemoryInfo *pBindInfos)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   VkResult first_error_or_success = VK_SUCCESS;

   for (uint32_t i = 0; i < bindInfoCount; ++i) {
      VkResult result = nvk_bind_buffer_memory(dev, &pBindInfos[i]);

      const VkBindMemoryStatusKHR *status =
         vk_find_struct_const(pBindInfos[i].pNext, BIND_MEMORY_STATUS_KHR);
      if (status != NULL && status->pResult != NULL)
         *status->pResult = result;

      if (first_error_or_success == VK_SUCCESS)
         first_error_or_success = result;
   }

   return first_error_or_success;
}

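/* NVK buffer addresses are real GPU virtual addresses, so the device
 * address is just the buffer's base address with no translation.
 */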
VKAPI_ATTR VkDeviceAddress VKAPI_CALL
nvk_GetBufferDeviceAddress(UNUSED VkDevice device,
                           const VkBufferDeviceAddressInfo *pInfo)
{
   VK_FROM_HANDLE(nvk_buffer, buffer, pInfo->buffer);

   return nvk_buffer_address(buffer, 0);
}

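/* The opaque capture address is the same GPU VA as the device address.
 * Replay works because nvk_CreateBuffer() re-allocates the VA range at
 * this fixed address when capture/replay is requested.
 */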
VKAPI_ATTR uint64_t VKAPI_CALL
nvk_GetBufferOpaqueCaptureAddress(UNUSED VkDevice device,
                                  const VkBufferDeviceAddressInfo *pInfo)
{
   VK_FROM_HANDLE(nvk_buffer, buffer, pInfo->buffer);

   return nvk_buffer_address(buffer, 0);
}

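/* Handles the buffer portion of vkQueueBindSparse(): each
 * VkSparseMemoryBind becomes an nvkmd bind (or unbind, when memory is
 * VK_NULL_HANDLE) on the buffer's dedicated VA range, and the whole set
 * is submitted as one batch on the queue's bind context.
 */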
VkResult
nvk_queue_buffer_bind(struct nvk_queue *queue,
                      const VkSparseBufferMemoryBindInfo *bind_info)
{
   VK_FROM_HANDLE(nvk_buffer, buffer, bind_info->buffer);
   VkResult result;

   const uint32_t bind_count = bind_info->bindCount;
   if (bind_count == 0)
      return VK_SUCCESS;

   STACK_ARRAY(struct nvkmd_ctx_bind, binds, bind_count);

   for (unsigned i = 0; i < bind_count; i++) {
      const VkSparseMemoryBind *bind = &bind_info->pBinds[i];
      VK_FROM_HANDLE(nvk_device_memory, mem, bind->memory);

      binds[i] = (struct nvkmd_ctx_bind) {
         .op = mem ? NVKMD_BIND_OP_BIND : NVKMD_BIND_OP_UNBIND,
         .va = buffer->va,
         .va_offset_B = bind->resourceOffset,
         .mem = mem ? mem->mem : NULL,
         .mem_offset_B = mem ? bind->memoryOffset : 0,
         .range_B = bind->size,
      };
   }

   result = nvkmd_ctx_bind(queue->bind_ctx, &queue->vk.base,
                           bind_count, binds);

   STACK_ARRAY_FINISH(binds);

   return result;
}

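/* Buffer descriptors need no opaque capture data: they can be rebuilt
 * from the buffer's address, which is itself replayable via BDA
 * capture/replay.  Hence this writes nothing to pData.
 */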
VKAPI_ATTR VkResult VKAPI_CALL
nvk_GetBufferOpaqueCaptureDescriptorDataEXT(
   VkDevice device,
   const VkBufferCaptureDescriptorDataInfoEXT *pInfo,
   void *pData)
{
   return VK_SUCCESS;
}