/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 */

#include "radv_buffer.h"
#include "radv_device.h"
#include "radv_device_memory.h"
#include "radv_dgc.h"
#include "radv_entrypoints.h"
#include "radv_instance.h"
#include "radv_physical_device.h"
#include "radv_rmv.h"

#include "vk_common_entrypoints.h"
#include "vk_debug_utils.h"
#include "vk_log.h"

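/* Initializes a radv_buffer that wraps an already existing winsys BO at the
 * given offset, without allocating memory; typically used for driver-internal
 * buffers that are not created through vkCreateBuffer.
 */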
void
radv_buffer_init(struct radv_buffer *buffer, struct radv_device *device, struct radeon_winsys_bo *bo, uint64_t size,
                 uint64_t offset)
{
   VkBufferCreateInfo createInfo = {
      .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
      .size = size,
   };

   vk_buffer_init(&device->vk, &buffer->vk, &createInfo);

   buffer->bo = bo;
   buffer->offset = offset;
}

void
radv_buffer_finish(struct radv_buffer *buffer)
{
   vk_buffer_finish(&buffer->vk);
}

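/* Common destruction path. Only sparse buffers own their BO (allocated at
 * creation time), so it is destroyed here; non-sparse buffers merely
 * reference memory bound via vkBindBufferMemory2.
 */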
static void
radv_destroy_buffer(struct radv_device *device, const VkAllocationCallbacks *pAllocator, struct radv_buffer *buffer)
{
   struct radv_physical_device *pdev = radv_device_physical(device);
   struct radv_instance *instance = radv_physical_device_instance(pdev);

   if ((buffer->vk.create_flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) && buffer->bo)
      radv_bo_destroy(device, &buffer->vk.base, buffer->bo);

   if (buffer->bo_va)
      vk_address_binding_report(&instance->vk, &buffer->vk.base, buffer->bo_va + buffer->offset, buffer->range,
                                VK_DEVICE_ADDRESS_BINDING_TYPE_UNBIND_EXT);

   radv_rmv_log_resource_destroy(device, (uint64_t)radv_buffer_to_handle(buffer));
   radv_buffer_finish(buffer);
   vk_free2(&device->vk.alloc, pAllocator, buffer);
}

VkResult
radv_create_buffer(struct radv_device *device, const VkBufferCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer, bool is_internal)
{
   struct radv_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

#if DETECT_OS_ANDROID
   /* Reject buffers larger than maxBufferSize on Android, which might not
    * have VK_KHR_maintenance4.
    */
   if (pCreateInfo->size > RADV_MAX_MEMORY_ALLOCATION_SIZE)
      return vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
#endif

   buffer = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*buffer), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (buffer == NULL)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_buffer_init(&device->vk, &buffer->vk, pCreateInfo);
   buffer->bo = NULL;
   buffer->offset = 0;
   buffer->bo_va = 0;
   buffer->range = 0;

   uint64_t replay_address = 0;
   const VkBufferOpaqueCaptureAddressCreateInfo *replay_info =
      vk_find_struct_const(pCreateInfo->pNext, BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO);
   if (replay_info && replay_info->opaqueCaptureAddress)
      replay_address = replay_info->opaqueCaptureAddress;

   if (pCreateInfo->flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT)
      buffer->bo_va = replay_address;

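   /* Sparse buffers reserve a virtual address range now and get physical
    * pages bound later through vkQueueBindSparse (see radv_bo_virtual_bind),
    * so only a virtual BO is allocated here.
    */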
   if (pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) {
      enum radeon_bo_flag flags = RADEON_FLAG_VIRTUAL;
      if (pCreateInfo->flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT)
         flags |= RADEON_FLAG_REPLAYABLE;
      if (buffer->vk.usage &
          (VK_BUFFER_USAGE_2_RESOURCE_DESCRIPTOR_BUFFER_BIT_EXT | VK_BUFFER_USAGE_2_SAMPLER_DESCRIPTOR_BUFFER_BIT_EXT))
         flags |= RADEON_FLAG_32BIT;

      VkResult result = radv_bo_create(device, &buffer->vk.base, align64(buffer->vk.size, 4096), 4096, 0, flags,
                                       RADV_BO_PRIORITY_VIRTUAL, replay_address, is_internal, &buffer->bo);
      if (result != VK_SUCCESS) {
         radv_destroy_buffer(device, pAllocator, buffer);
         return vk_error(device, result);
      }

      buffer->bo_va = radv_buffer_get_va(buffer->bo);
   }

   *pBuffer = radv_buffer_to_handle(buffer);
   vk_rmv_log_buffer_create(&device->vk, false, *pBuffer);
   if (buffer->bo)
      radv_rmv_log_buffer_bind(device, *pBuffer);
   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
radv_CreateBuffer(VkDevice _device, const VkBufferCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                  VkBuffer *pBuffer)
{
   VK_FROM_HANDLE(radv_device, device, _device);
   return radv_create_buffer(device, pCreateInfo, pAllocator, pBuffer, false);
}

VKAPI_ATTR void VKAPI_CALL
radv_DestroyBuffer(VkDevice _device, VkBuffer _buffer, const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(radv_device, device, _device);
   VK_FROM_HANDLE(radv_buffer, buffer, _buffer);

   if (!buffer)
      return;

   radv_destroy_buffer(device, pAllocator, buffer);
}

VKAPI_ATTR VkResult VKAPI_CALL
radv_BindBufferMemory2(VkDevice _device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo *pBindInfos)
{
   VK_FROM_HANDLE(radv_device, device, _device);
   struct radv_physical_device *pdev = radv_device_physical(device);
   struct radv_instance *instance = radv_physical_device_instance(pdev);

   for (uint32_t i = 0; i < bindInfoCount; ++i) {
      VK_FROM_HANDLE(radv_device_memory, mem, pBindInfos[i].memory);
      VK_FROM_HANDLE(radv_buffer, buffer, pBindInfos[i].buffer);
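      /* VK_KHR_maintenance6: report per-bind status when the app chains a
       * VkBindMemoryStatus; default to success and overwrite on failure below.
       */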
      VkBindMemoryStatus *status = (void *)vk_find_struct_const(&pBindInfos[i], BIND_MEMORY_STATUS);

      if (status)
         *status->pResult = VK_SUCCESS;

      VkBufferMemoryRequirementsInfo2 info = {
         .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2,
         .buffer = pBindInfos[i].buffer,
      };
      VkMemoryRequirements2 reqs = {
         .sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
      };

      vk_common_GetBufferMemoryRequirements2(_device, &info, &reqs);

      if (mem->alloc_size) {
         if (pBindInfos[i].memoryOffset + reqs.memoryRequirements.size > mem->alloc_size) {
            if (status)
               *status->pResult = VK_ERROR_UNKNOWN;
            return vk_errorf(device, VK_ERROR_UNKNOWN, "Device memory object too small for the buffer.\n");
         }
      }

      buffer->bo = mem->bo;
      buffer->offset = pBindInfos[i].memoryOffset;
      buffer->bo_va = radv_buffer_get_va(mem->bo);
      buffer->range = reqs.memoryRequirements.size;

      radv_rmv_log_buffer_bind(device, pBindInfos[i].buffer);

      vk_address_binding_report(&instance->vk, &buffer->vk.base, radv_buffer_get_va(buffer->bo) + buffer->offset,
                                buffer->range, VK_DEVICE_ADDRESS_BINDING_TYPE_BIND_EXT);
   }
   return VK_SUCCESS;
}

static void
radv_get_buffer_memory_requirements(struct radv_device *device, VkDeviceSize size, VkBufferCreateFlags flags,
                                    VkBufferUsageFlags2 usage, VkMemoryRequirements2 *pMemoryRequirements)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);

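   /* By default, expose every memory type except the 32-bit ones, which are
    * reserved for resources that must live in the low 32 bits of the address
    * space.
    */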
   pMemoryRequirements->memoryRequirements.memoryTypeBits =
      ((1u << pdev->memory_properties.memoryTypeCount) - 1u) & ~pdev->memory_types_32bit;

   /* Force the 32-bit address space for descriptor buffer usage because
    * descriptor buffers are passed to shaders through 32-bit pointers.
    */
   if (usage & (VK_BUFFER_USAGE_2_RESOURCE_DESCRIPTOR_BUFFER_BIT_EXT |
                VK_BUFFER_USAGE_2_SAMPLER_DESCRIPTOR_BUFFER_BIT_EXT | VK_BUFFER_USAGE_2_PREPROCESS_BUFFER_BIT_EXT))
      pMemoryRequirements->memoryRequirements.memoryTypeBits = pdev->memory_types_32bit;

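   /* Sparse buffers are bound at page granularity, so they need full 4 KiB
    * page alignment; DGC preprocess buffers need the device-specific DGC
    * alignment; everything else gets a conservative 16 bytes.
    */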
   if (flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) {
      pMemoryRequirements->memoryRequirements.alignment = 4096;
   } else {
      if (usage & VK_BUFFER_USAGE_2_PREPROCESS_BUFFER_BIT_EXT)
         pMemoryRequirements->memoryRequirements.alignment = radv_dgc_get_buffer_alignment(device);
      else
         pMemoryRequirements->memoryRequirements.alignment = 16;
   }

   /* Top-level acceleration structures need the bottom 6 bits to store the
    * root IDs of instances. The hardware also needs BVH nodes to be 64-byte
    * aligned.
    */
   if (usage & VK_BUFFER_USAGE_2_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR)
      pMemoryRequirements->memoryRequirements.alignment = MAX2(pMemoryRequirements->memoryRequirements.alignment, 64);

   pMemoryRequirements->memoryRequirements.size = align64(size, pMemoryRequirements->memoryRequirements.alignment);

   vk_foreach_struct (ext, pMemoryRequirements->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS: {
         VkMemoryDedicatedRequirements *req = (VkMemoryDedicatedRequirements *)ext;
         req->requiresDedicatedAllocation = false;
         req->prefersDedicatedAllocation = req->requiresDedicatedAllocation;
         break;
      }
      default:
         break;
      }
   }
}

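/* VK_KHR_maintenance5 allows the usage flags to be supplied through a
 * VkBufferUsageFlags2CreateInfo chained into pNext, which takes precedence
 * over the legacy usage field.
 */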
static VkBufferUsageFlags2
radv_get_buffer_usage_flags(const VkBufferCreateInfo *pCreateInfo)
{
   const VkBufferUsageFlags2CreateInfo *flags2 =
      vk_find_struct_const(pCreateInfo->pNext, BUFFER_USAGE_FLAGS_2_CREATE_INFO);
   return flags2 ? flags2->usage : pCreateInfo->usage;
}

VKAPI_ATTR void VKAPI_CALL
radv_GetDeviceBufferMemoryRequirements(VkDevice _device, const VkDeviceBufferMemoryRequirements *pInfo,
                                       VkMemoryRequirements2 *pMemoryRequirements)
{
   VK_FROM_HANDLE(radv_device, device, _device);
   const VkBufferUsageFlags2 usage_flags = radv_get_buffer_usage_flags(pInfo->pCreateInfo);

   radv_get_buffer_memory_requirements(device, pInfo->pCreateInfo->size, pInfo->pCreateInfo->flags, usage_flags,
                                       pMemoryRequirements);
}

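/* The device address of a non-sparse buffer is the VA of the bound memory
 * plus the bind offset; for sparse buffers it is the VA of the virtual BO
 * allocated at creation.
 */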
VKAPI_ATTR VkDeviceAddress VKAPI_CALL
radv_GetBufferDeviceAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo)
{
   VK_FROM_HANDLE(radv_buffer, buffer, pInfo->buffer);
   return buffer->bo_va + buffer->offset;
}

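/* The opaque capture address matches the device address, since RADV VAs can
 * be replayed directly (see RADEON_FLAG_REPLAYABLE above).
 */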
VKAPI_ATTR uint64_t VKAPI_CALL
radv_GetBufferOpaqueCaptureAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo)
{
   VK_FROM_HANDLE(radv_buffer, buffer, pInfo->buffer);
   return buffer->bo_va + buffer->offset;
}

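/* Thin wrapper around winsys BO creation that also emits RMV tracing and
 * VK_EXT_device_address_binding_report events; "object" is the Vulkan object
 * the allocation is reported against (the device itself when NULL).
 */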
VkResult
radv_bo_create(struct radv_device *device, struct vk_object_base *object, uint64_t size, unsigned alignment,
               enum radeon_bo_domain domain, enum radeon_bo_flag flags, unsigned priority, uint64_t address,
               bool is_internal, struct radeon_winsys_bo **out_bo)
{
   struct radv_physical_device *pdev = radv_device_physical(device);
   struct radv_instance *instance = radv_physical_device_instance(pdev);
   struct radeon_winsys *ws = device->ws;
   VkResult result;

   result = ws->buffer_create(ws, size, alignment, domain, flags, priority, address, out_bo);
   if (result != VK_SUCCESS)
      return result;

   radv_rmv_log_bo_allocate(device, *out_bo, is_internal);

   vk_address_binding_report(&instance->vk, object ? object : &device->vk.base, radv_buffer_get_va(*out_bo),
                             (*out_bo)->size, VK_DEVICE_ADDRESS_BINDING_TYPE_BIND_EXT);
   return VK_SUCCESS;
}

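/* Counterpart to radv_bo_create: the unbind is reported before the BO is
 * actually destroyed so the VA in the report is still valid.
 */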
void
radv_bo_destroy(struct radv_device *device, struct vk_object_base *object, struct radeon_winsys_bo *bo)
{
   struct radv_physical_device *pdev = radv_device_physical(device);
   struct radv_instance *instance = radv_physical_device_instance(pdev);
   struct radeon_winsys *ws = device->ws;

   radv_rmv_log_bo_destroy(device, bo);

   vk_address_binding_report(&instance->vk, object ? object : &device->vk.base, radv_buffer_get_va(bo), bo->size,
                             VK_DEVICE_ADDRESS_BINDING_TYPE_UNBIND_EXT);

   ws->buffer_destroy(ws, bo);
}

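/* Binds (or unbinds, when bo is NULL) a range of a physical BO into the
 * virtual address range of a sparse "parent" BO; used to implement
 * vkQueueBindSparse for buffers.
 */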
VkResult
radv_bo_virtual_bind(struct radv_device *device, struct vk_object_base *object, struct radeon_winsys_bo *parent,
                     uint64_t offset, uint64_t size, struct radeon_winsys_bo *bo, uint64_t bo_offset)
{
   struct radv_physical_device *pdev = radv_device_physical(device);
   struct radv_instance *instance = radv_physical_device_instance(pdev);
   struct radeon_winsys *ws = device->ws;
   VkResult result;

   result = ws->buffer_virtual_bind(ws, parent, offset, size, bo, bo_offset);
   if (result != VK_SUCCESS)
      return result;

   if (bo)
      radv_rmv_log_sparse_add_residency(device, parent, offset);
   else
      radv_rmv_log_sparse_remove_residency(device, parent, offset);

   vk_address_binding_report(&instance->vk, object, radv_buffer_get_va(parent) + offset, size,
                             bo ? VK_DEVICE_ADDRESS_BINDING_TYPE_BIND_EXT : VK_DEVICE_ADDRESS_BINDING_TYPE_UNBIND_EXT);

   return VK_SUCCESS;
}

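/* Imports a BO from a file descriptor (e.g. a dma-buf, for external memory)
 * and returns its allocation size so callers can validate bind offsets.
 */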
VkResult
radv_bo_from_fd(struct radv_device *device, int fd, unsigned priority, struct radv_device_memory *mem,
                uint64_t *alloc_size)
{
   struct radv_physical_device *pdev = radv_device_physical(device);
   struct radv_instance *instance = radv_physical_device_instance(pdev);
   struct radeon_winsys *ws = device->ws;
   VkResult result;

   result = ws->buffer_from_fd(ws, fd, priority, &mem->bo, alloc_size);
   if (result != VK_SUCCESS)
      return result;

   vk_address_binding_report(&instance->vk, &mem->base, radv_buffer_get_va(mem->bo), mem->bo->size,
                             VK_DEVICE_ADDRESS_BINDING_TYPE_BIND_EXT);

   return result;
}

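/* Wraps host memory in a BO for VK_EXT_external_memory_host; host_ptr must
 * satisfy the winsys requirements for userptr imports.
 */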
VkResult
radv_bo_from_ptr(struct radv_device *device, void *host_ptr, uint64_t alloc_size, unsigned priority,
                 struct radv_device_memory *mem)
{
   struct radv_physical_device *pdev = radv_device_physical(device);
   struct radv_instance *instance = radv_physical_device_instance(pdev);
   struct radeon_winsys *ws = device->ws;
   VkResult result;

   result = ws->buffer_from_ptr(ws, host_ptr, alloc_size, priority, &mem->bo);
   if (result != VK_SUCCESS)
      return result;

   vk_address_binding_report(&instance->vk, &mem->base, radv_buffer_get_va(mem->bo), mem->bo->size,
                             VK_DEVICE_ADDRESS_BINDING_TYPE_BIND_EXT);

   return result;
}