/*
 * Copyright © 2022 Collabora Ltd. and Red Hat Inc.
 * SPDX-License-Identifier: MIT
 */
#include "nvk_buffer.h"

#include "nvk_entrypoints.h"
#include "nvk_device.h"
#include "nvk_device_memory.h"
#include "nvk_physical_device.h"

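/* Computes the alignment a buffer needs for the given usage and create
 * flags.  The base alignment of 16 is raised as required for UBOs, SSBOs,
 * and texel buffers, and bumped to 4 KiB for sparse or BDA capture/replay
 * buffers, which are backed by a dedicated, page-aligned VMA (see
 * nvk_CreateBuffer below).
 */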
static uint32_t
nvk_get_buffer_alignment(const struct nvk_physical_device *pdev,
                         VkBufferUsageFlags2KHR usage_flags,
                         VkBufferCreateFlags create_flags)
{
   uint32_t alignment = 16;

   if (usage_flags & VK_BUFFER_USAGE_2_UNIFORM_BUFFER_BIT_KHR)
      alignment = MAX2(alignment, nvk_min_cbuf_alignment(&pdev->info));

   if (usage_flags & VK_BUFFER_USAGE_2_STORAGE_BUFFER_BIT_KHR)
      alignment = MAX2(alignment, NVK_MIN_SSBO_ALIGNMENT);

   if (usage_flags & (VK_BUFFER_USAGE_2_UNIFORM_TEXEL_BUFFER_BIT_KHR |
                      VK_BUFFER_USAGE_2_STORAGE_TEXEL_BUFFER_BIT_KHR))
      alignment = MAX2(alignment, NVK_MIN_TEXEL_BUFFER_ALIGNMENT);

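   /* Sparse and BDA capture/replay buffers get their own VMA, so they need
    * at least page (4 KiB) alignment.
    */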
   if (create_flags & (VK_BUFFER_CREATE_SPARSE_BINDING_BIT |
                       VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT))
      alignment = MAX2(alignment, 4096);

   return alignment;
}

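/* Returns the capture/replay address recorded in the pNext chain, or 0 if
 * none was provided.  Both VkBufferOpaqueCaptureAddressCreateInfo and
 * VkBufferDeviceAddressCreateInfoEXT are honored; debug builds assert that
 * the two agree if both are present and non-zero.
 */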
static uint64_t
nvk_get_bda_replay_addr(const VkBufferCreateInfo *pCreateInfo)
{
   uint64_t addr = 0;
   vk_foreach_struct_const(ext, pCreateInfo->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO: {
         const VkBufferOpaqueCaptureAddressCreateInfo *bda = (void *)ext;
         if (bda->opaqueCaptureAddress != 0) {
#ifdef NDEBUG
            return bda->opaqueCaptureAddress;
#else
            assert(addr == 0 || bda->opaqueCaptureAddress == addr);
            addr = bda->opaqueCaptureAddress;
#endif
         }
         break;
      }

      case VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT: {
         const VkBufferDeviceAddressCreateInfoEXT *bda = (void *)ext;
         if (bda->deviceAddress != 0) {
#ifdef NDEBUG
            return bda->deviceAddress;
#else
            assert(addr == 0 || bda->deviceAddress == addr);
            addr = bda->deviceAddress;
#endif
         }
         break;
      }

      default:
         break;
      }
   }

   return addr;
}

VKAPI_ATTR VkResult VKAPI_CALL
nvk_CreateBuffer(VkDevice device,
                 const VkBufferCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *pAllocator,
                 VkBuffer *pBuffer)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   struct nvk_buffer *buffer;

   if (pCreateInfo->size > NVK_MAX_BUFFER_SIZE)
      return vk_error(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   buffer = vk_buffer_create(&dev->vk, pCreateInfo, pAllocator,
                             sizeof(*buffer));
   if (!buffer)
      return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);

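   /* Sparse and BDA capture/replay buffers don't take their address from
    * whatever BO they happen to be bound to.  Instead, reserve a dedicated
    * VMA up front so the GPU address stays stable for the buffer's lifetime
    * and memory can be bound (and re-bound) into it later.
    */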
   if (buffer->vk.size > 0 &&
       (buffer->vk.create_flags & (VK_BUFFER_CREATE_SPARSE_BINDING_BIT |
                                   VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT))) {
      const uint32_t alignment =
         nvk_get_buffer_alignment(nvk_device_physical(dev),
                                  buffer->vk.usage,
                                  buffer->vk.create_flags);
      assert(alignment >= 4096);
      buffer->vma_size_B = align64(buffer->vk.size, alignment);

      const bool sparse_residency =
         buffer->vk.create_flags & VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT;
      const bool bda_capture_replay =
         buffer->vk.create_flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT;

      uint64_t bda_replay_addr = 0;
      if (bda_capture_replay)
         bda_replay_addr = nvk_get_bda_replay_addr(pCreateInfo);

      buffer->addr = nouveau_ws_alloc_vma(dev->ws_dev, bda_replay_addr,
                                          buffer->vma_size_B,
                                          alignment, bda_capture_replay,
                                          sparse_residency);
      if (buffer->addr == 0) {
         vk_buffer_destroy(&dev->vk, pAllocator, &buffer->vk);
         return vk_errorf(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                          "Sparse VMA allocation failed");
      }
   }

   *pBuffer = nvk_buffer_to_handle(buffer);

   return VK_SUCCESS;
}

VKAPI_ATTR void VKAPI_CALL
nvk_DestroyBuffer(VkDevice device,
                  VkBuffer _buffer,
                  const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(nvk_device, dev, device);
   VK_FROM_HANDLE(nvk_buffer, buffer, _buffer);

   if (!buffer)
      return;

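   /* If a dedicated VMA was reserved at create time, unbind anything still
    * mapped in it before releasing the address range.
    */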
   if (buffer->vma_size_B > 0) {
      const bool sparse_residency =
         buffer->vk.create_flags & VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT;
      const bool bda_capture_replay =
         buffer->vk.create_flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT;

      nouveau_ws_bo_unbind_vma(dev->ws_dev, buffer->addr, buffer->vma_size_B);
      nouveau_ws_free_vma(dev->ws_dev, buffer->addr, buffer->vma_size_B,
                          bda_capture_replay, sparse_residency);
   }

   vk_buffer_destroy(&dev->vk, pAllocator, &buffer->vk);
}

VKAPI_ATTR void VKAPI_CALL
nvk_GetDeviceBufferMemoryRequirements(
   VkDevice device,
   const VkDeviceBufferMemoryRequirements *pInfo,
   VkMemoryRequirements2 *pMemoryRequirements)
{
   VK_FROM_HANDLE(nvk_device, dev, device);

   const uint32_t alignment =
      nvk_get_buffer_alignment(nvk_device_physical(dev),
                               pInfo->pCreateInfo->usage,
                               pInfo->pCreateInfo->flags);

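   /* Report the size rounded up to the alignment so the buffer always fits
    * when bound at an aligned offset.  Buffers have no placement
    * restrictions, so every memory type is allowed.
    */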
   pMemoryRequirements->memoryRequirements = (VkMemoryRequirements) {
      .size = align64(pInfo->pCreateInfo->size, alignment),
      .alignment = alignment,
      .memoryTypeBits = BITFIELD_MASK(dev->pdev->mem_type_count),
   };

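   /* Buffers never require or benefit from a dedicated allocation, so
    * report false for both dedicated-requirements fields.
    */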
   vk_foreach_struct_const(ext, pMemoryRequirements->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS: {
         VkMemoryDedicatedRequirements *dedicated = (void *)ext;
         dedicated->prefersDedicatedAllocation = false;
         dedicated->requiresDedicatedAllocation = false;
         break;
      }
      default:
         nvk_debug_ignored_stype(ext->sType);
         break;
      }
   }
}

VKAPI_ATTR void VKAPI_CALL
nvk_GetPhysicalDeviceExternalBufferProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalBufferInfo *pExternalBufferInfo,
   VkExternalBufferProperties *pExternalBufferProperties)
{
   /* The Vulkan 1.3.256 spec says:
    *
    *    VUID-VkPhysicalDeviceExternalBufferInfo-handleType-parameter
    *
    *    "handleType must be a valid VkExternalMemoryHandleTypeFlagBits
    *    value"
    *
    * This differs from VkPhysicalDeviceExternalImageFormatInfo, which
    * surprisingly permits handleType == 0.
    */
   assert(pExternalBufferInfo->handleType != 0);

   /* All of the current flags are for sparse which we don't support yet.
    * Even when we do support it, doing sparse on external memory sounds
    * sketchy.  Also, just disallowing flags is the safe option.
    */
   if (pExternalBufferInfo->flags)
      goto unsupported;

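   /* Opaque FD and dma-buf are the only external handle types we support,
    * and both use the same dma-buf memory properties.
    */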
   switch (pExternalBufferInfo->handleType) {
   case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT:
   case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
      pExternalBufferProperties->externalMemoryProperties =
         nvk_dma_buf_mem_props;
      return;
   default:
      goto unsupported;
   }

unsupported:
   /* From the Vulkan 1.3.256 spec:
    *
    *    compatibleHandleTypes must include at least handleType.
    */
   pExternalBufferProperties->externalMemoryProperties =
      (VkExternalMemoryProperties) {
         .compatibleHandleTypes = pExternalBufferInfo->handleType,
      };
}

VKAPI_ATTR VkResult VKAPI_CALL
nvk_BindBufferMemory2(VkDevice device,
                      uint32_t bindInfoCount,
                      const VkBindBufferMemoryInfo *pBindInfos)
{
   for (uint32_t i = 0; i < bindInfoCount; ++i) {
      VK_FROM_HANDLE(nvk_device_memory, mem, pBindInfos[i].memory);
      VK_FROM_HANDLE(nvk_buffer, buffer, pBindInfos[i].buffer);

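      /* Consider the buffer VRAM-local unless its backing BO is allowed to
       * live in GART.
       */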
      buffer->is_local = !(mem->bo->flags & NOUVEAU_WS_BO_GART);
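      /* Sparse/capture-replay buffers bind the BO into the VMA reserved at
       * create time; everything else just inherits the BO's address plus
       * the bind offset.
       */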
      if (buffer->vma_size_B) {
         VK_FROM_HANDLE(nvk_device, dev, device);
         nouveau_ws_bo_bind_vma(dev->ws_dev,
                                mem->bo,
                                buffer->addr,
                                buffer->vma_size_B,
                                pBindInfos[i].memoryOffset,
                                0 /* pte_kind */);
      } else {
         buffer->addr = mem->bo->offset + pBindInfos[i].memoryOffset;
      }
   }
   return VK_SUCCESS;
}

VKAPI_ATTR VkDeviceAddress VKAPI_CALL
nvk_GetBufferDeviceAddress(UNUSED VkDevice device,
                           const VkBufferDeviceAddressInfo *pInfo)
{
   VK_FROM_HANDLE(nvk_buffer, buffer, pInfo->buffer);

   return nvk_buffer_address(buffer, 0);
}

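/* In NVK the opaque capture address is just the buffer's device address,
 * so both queries return the same value.
 */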
VKAPI_ATTR uint64_t VKAPI_CALL
nvk_GetBufferOpaqueCaptureAddress(UNUSED VkDevice device,
                                  const VkBufferDeviceAddressInfo *pInfo)
{
   VK_FROM_HANDLE(nvk_buffer, buffer, pInfo->buffer);

   return nvk_buffer_address(buffer, 0);
}