1 /*
2 * Copyright © 2019 Red Hat.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "lvp_private.h"
25 #include "pipe/p_context.h"
26 #include "vk_util.h"
27
lvp_create_cmd_buffer(struct lvp_device * device,struct lvp_cmd_pool * pool,VkCommandBufferLevel level,VkCommandBuffer * pCommandBuffer)28 static VkResult lvp_create_cmd_buffer(
29 struct lvp_device * device,
30 struct lvp_cmd_pool * pool,
31 VkCommandBufferLevel level,
32 VkCommandBuffer* pCommandBuffer)
33 {
34 struct lvp_cmd_buffer *cmd_buffer;
35
36 cmd_buffer = vk_alloc(&pool->vk.alloc, sizeof(*cmd_buffer), 8,
37 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
38 if (cmd_buffer == NULL)
39 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
40
41 VkResult result = vk_command_buffer_init(&cmd_buffer->vk, &pool->vk, level);
42 if (result != VK_SUCCESS) {
43 vk_free(&pool->vk.alloc, cmd_buffer);
44 return result;
45 }
46
47 cmd_buffer->device = device;
48 cmd_buffer->pool = pool;
49
50 cmd_buffer->status = LVP_CMD_BUFFER_STATUS_INITIAL;
51 if (pool) {
52 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
53 } else {
54 /* Init the pool_link so we can safefly call list_del when we destroy
55 * the command buffer
56 */
57 list_inithead(&cmd_buffer->pool_link);
58 }
59 *pCommandBuffer = lvp_cmd_buffer_to_handle(cmd_buffer);
60
61 return VK_SUCCESS;
62 }
63
lvp_reset_cmd_buffer(struct lvp_cmd_buffer * cmd_buffer)64 static VkResult lvp_reset_cmd_buffer(struct lvp_cmd_buffer *cmd_buffer)
65 {
66 vk_command_buffer_reset(&cmd_buffer->vk);
67
68 cmd_buffer->status = LVP_CMD_BUFFER_STATUS_INITIAL;
69 return VK_SUCCESS;
70 }
71
VKAPI_ATTR VkResult VKAPI_CALL lvp_AllocateCommandBuffers(
   VkDevice                                    _device,
   const VkCommandBufferAllocateInfo*          pAllocateInfo,
   VkCommandBuffer*                            pCommandBuffers)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {

      /* Prefer recycling a buffer from the pool's free list over a fresh
       * allocation (buffers land there via lvp_FreeCommandBuffers).
       */
      if (!list_is_empty(&pool->free_cmd_buffers)) {
         struct lvp_cmd_buffer *cmd_buffer = list_first_entry(&pool->free_cmd_buffers, struct lvp_cmd_buffer, pool_link);

         /* Move the recycled buffer back onto the live list. */
         list_del(&cmd_buffer->pool_link);
         list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);

         result = lvp_reset_cmd_buffer(cmd_buffer);
         /* Re-init the vk-level command buffer so the level requested in
          * pAllocateInfo takes effect (the recycled one may differ).
          */
         vk_command_buffer_finish(&cmd_buffer->vk);
         VkResult init_result =
            vk_command_buffer_init(&cmd_buffer->vk, &pool->vk,
                                   pAllocateInfo->level);
         if (init_result != VK_SUCCESS)
            result = init_result;
         /* NOTE(review): on init failure the loop keeps going (no break,
          * unlike the fresh-allocation branch below), and the handle of the
          * failed buffer is still stored — confirm the cleanup path below
          * copes with a buffer whose vk state failed to re-init.
          */

         pCommandBuffers[i] = lvp_cmd_buffer_to_handle(cmd_buffer);
      } else {
         result = lvp_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                        &pCommandBuffers[i]);
         if (result != VK_SUCCESS)
            break;
      }
   }

   if (result != VK_SUCCESS) {
      /* On failure, release the i buffers produced so far and, per the
       * Vulkan spec, null out the entire output array.
       */
      lvp_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
                             i, pCommandBuffers);
      memset(pCommandBuffers, 0,
             sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
   }

   return result;
}
117
118 static void
lvp_cmd_buffer_destroy(struct lvp_cmd_buffer * cmd_buffer)119 lvp_cmd_buffer_destroy(struct lvp_cmd_buffer *cmd_buffer)
120 {
121 vk_command_buffer_finish(&cmd_buffer->vk);
122 vk_free(&cmd_buffer->pool->vk.alloc, cmd_buffer);
123 }
124
lvp_FreeCommandBuffers(VkDevice device,VkCommandPool commandPool,uint32_t commandBufferCount,const VkCommandBuffer * pCommandBuffers)125 VKAPI_ATTR void VKAPI_CALL lvp_FreeCommandBuffers(
126 VkDevice device,
127 VkCommandPool commandPool,
128 uint32_t commandBufferCount,
129 const VkCommandBuffer* pCommandBuffers)
130 {
131 for (uint32_t i = 0; i < commandBufferCount; i++) {
132 LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
133
134 if (cmd_buffer) {
135 if (cmd_buffer->pool) {
136 list_del(&cmd_buffer->pool_link);
137 list_addtail(&cmd_buffer->pool_link, &cmd_buffer->pool->free_cmd_buffers);
138 } else
139 lvp_cmd_buffer_destroy(cmd_buffer);
140 }
141 }
142 }
143
lvp_ResetCommandBuffer(VkCommandBuffer commandBuffer,VkCommandBufferResetFlags flags)144 VKAPI_ATTR VkResult VKAPI_CALL lvp_ResetCommandBuffer(
145 VkCommandBuffer commandBuffer,
146 VkCommandBufferResetFlags flags)
147 {
148 LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
149
150 return lvp_reset_cmd_buffer(cmd_buffer);
151 }
152
lvp_BeginCommandBuffer(VkCommandBuffer commandBuffer,const VkCommandBufferBeginInfo * pBeginInfo)153 VKAPI_ATTR VkResult VKAPI_CALL lvp_BeginCommandBuffer(
154 VkCommandBuffer commandBuffer,
155 const VkCommandBufferBeginInfo* pBeginInfo)
156 {
157 LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
158 VkResult result;
159 if (cmd_buffer->status != LVP_CMD_BUFFER_STATUS_INITIAL) {
160 result = lvp_reset_cmd_buffer(cmd_buffer);
161 if (result != VK_SUCCESS)
162 return result;
163 }
164 cmd_buffer->status = LVP_CMD_BUFFER_STATUS_RECORDING;
165 return VK_SUCCESS;
166 }
167
lvp_EndCommandBuffer(VkCommandBuffer commandBuffer)168 VKAPI_ATTR VkResult VKAPI_CALL lvp_EndCommandBuffer(
169 VkCommandBuffer commandBuffer)
170 {
171 LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
172
173 cmd_buffer->status =
174 cmd_buffer->vk.cmd_queue.error == VK_SUCCESS ?
175 LVP_CMD_BUFFER_STATUS_EXECUTABLE :
176 LVP_CMD_BUFFER_STATUS_INVALID;
177
178 return cmd_buffer->vk.cmd_queue.error;
179 }
180
lvp_CreateCommandPool(VkDevice _device,const VkCommandPoolCreateInfo * pCreateInfo,const VkAllocationCallbacks * pAllocator,VkCommandPool * pCmdPool)181 VKAPI_ATTR VkResult VKAPI_CALL lvp_CreateCommandPool(
182 VkDevice _device,
183 const VkCommandPoolCreateInfo* pCreateInfo,
184 const VkAllocationCallbacks* pAllocator,
185 VkCommandPool* pCmdPool)
186 {
187 LVP_FROM_HANDLE(lvp_device, device, _device);
188 struct lvp_cmd_pool *pool;
189
190 pool = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*pool), 8,
191 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
192 if (pool == NULL)
193 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
194
195 VkResult result = vk_command_pool_init(&pool->vk, &device->vk,
196 pCreateInfo, pAllocator);
197 if (result != VK_SUCCESS) {
198 vk_free2(&device->vk.alloc, pAllocator, pool);
199 return result;
200 }
201
202 list_inithead(&pool->cmd_buffers);
203 list_inithead(&pool->free_cmd_buffers);
204
205 *pCmdPool = lvp_cmd_pool_to_handle(pool);
206
207 return VK_SUCCESS;
208 }
209
lvp_DestroyCommandPool(VkDevice _device,VkCommandPool commandPool,const VkAllocationCallbacks * pAllocator)210 VKAPI_ATTR void VKAPI_CALL lvp_DestroyCommandPool(
211 VkDevice _device,
212 VkCommandPool commandPool,
213 const VkAllocationCallbacks* pAllocator)
214 {
215 LVP_FROM_HANDLE(lvp_device, device, _device);
216 LVP_FROM_HANDLE(lvp_cmd_pool, pool, commandPool);
217
218 if (!pool)
219 return;
220
221 list_for_each_entry_safe(struct lvp_cmd_buffer, cmd_buffer,
222 &pool->cmd_buffers, pool_link) {
223 lvp_cmd_buffer_destroy(cmd_buffer);
224 }
225
226 list_for_each_entry_safe(struct lvp_cmd_buffer, cmd_buffer,
227 &pool->free_cmd_buffers, pool_link) {
228 lvp_cmd_buffer_destroy(cmd_buffer);
229 }
230
231 vk_command_pool_finish(&pool->vk);
232 vk_free2(&device->vk.alloc, pAllocator, pool);
233 }
234
lvp_ResetCommandPool(VkDevice device,VkCommandPool commandPool,VkCommandPoolResetFlags flags)235 VKAPI_ATTR VkResult VKAPI_CALL lvp_ResetCommandPool(
236 VkDevice device,
237 VkCommandPool commandPool,
238 VkCommandPoolResetFlags flags)
239 {
240 LVP_FROM_HANDLE(lvp_cmd_pool, pool, commandPool);
241 VkResult result;
242
243 list_for_each_entry(struct lvp_cmd_buffer, cmd_buffer,
244 &pool->cmd_buffers, pool_link) {
245 result = lvp_reset_cmd_buffer(cmd_buffer);
246 if (result != VK_SUCCESS)
247 return result;
248 }
249 return VK_SUCCESS;
250 }
251
lvp_TrimCommandPool(VkDevice device,VkCommandPool commandPool,VkCommandPoolTrimFlags flags)252 VKAPI_ATTR void VKAPI_CALL lvp_TrimCommandPool(
253 VkDevice device,
254 VkCommandPool commandPool,
255 VkCommandPoolTrimFlags flags)
256 {
257 LVP_FROM_HANDLE(lvp_cmd_pool, pool, commandPool);
258
259 if (!pool)
260 return;
261
262 list_for_each_entry_safe(struct lvp_cmd_buffer, cmd_buffer,
263 &pool->free_cmd_buffers, pool_link) {
264 lvp_cmd_buffer_destroy(cmd_buffer);
265 }
266 list_inithead(&pool->free_cmd_buffers);
267 }
268
/* driver_free_cb for VK_CMD_PUSH_DESCRIPTOR_SET_WITH_TEMPLATE_KHR: drops
 * the template reference taken at record time in
 * lvp_CmdPushDescriptorSetWithTemplateKHR.  driver_data holds the
 * lvp_device, also stashed at record time.
 *
 * NOTE(review): the copied descriptor payload
 * (cmd->u.push_descriptor_set_with_template_khr.data) is not freed here —
 * confirm the generic vk_cmd_queue free path still releases it when a
 * driver_free_cb is installed, otherwise this leaks per command.
 */
static void
lvp_free_CmdPushDescriptorSetWithTemplateKHR(struct vk_cmd_queue *queue, struct vk_cmd_queue_entry *cmd)
{
   struct lvp_device *device = cmd->driver_data;
   LVP_FROM_HANDLE(lvp_descriptor_update_template, templ, cmd->u.push_descriptor_set_with_template_khr.descriptor_update_template);
   lvp_descriptor_template_templ_unref(device, templ);
}
276
lvp_CmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,VkDescriptorUpdateTemplate descriptorUpdateTemplate,VkPipelineLayout layout,uint32_t set,const void * pData)277 VKAPI_ATTR void VKAPI_CALL lvp_CmdPushDescriptorSetWithTemplateKHR(
278 VkCommandBuffer commandBuffer,
279 VkDescriptorUpdateTemplate descriptorUpdateTemplate,
280 VkPipelineLayout layout,
281 uint32_t set,
282 const void* pData)
283 {
284 LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
285 LVP_FROM_HANDLE(lvp_descriptor_update_template, templ, descriptorUpdateTemplate);
286 size_t info_size = 0;
287 struct vk_cmd_queue_entry *cmd = vk_zalloc(cmd_buffer->vk.cmd_queue.alloc,
288 sizeof(*cmd), 8,
289 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
290 if (!cmd)
291 return;
292
293 cmd->type = VK_CMD_PUSH_DESCRIPTOR_SET_WITH_TEMPLATE_KHR;
294
295 list_addtail(&cmd->cmd_link, &cmd_buffer->vk.cmd_queue.cmds);
296 cmd->driver_free_cb = lvp_free_CmdPushDescriptorSetWithTemplateKHR;
297 cmd->driver_data = cmd_buffer->device;
298
299 cmd->u.push_descriptor_set_with_template_khr.descriptor_update_template = descriptorUpdateTemplate;
300 lvp_descriptor_template_templ_ref(templ);
301 cmd->u.push_descriptor_set_with_template_khr.layout = layout;
302 cmd->u.push_descriptor_set_with_template_khr.set = set;
303
304 for (unsigned i = 0; i < templ->entry_count; i++) {
305 VkDescriptorUpdateTemplateEntry *entry = &templ->entry[i];
306
307 switch (entry->descriptorType) {
308 case VK_DESCRIPTOR_TYPE_SAMPLER:
309 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
310 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
311 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
312 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
313 info_size += sizeof(VkDescriptorImageInfo) * entry->descriptorCount;
314 break;
315 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
316 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
317 info_size += sizeof(VkBufferView) * entry->descriptorCount;
318 break;
319 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
320 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
321 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
322 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
323 default:
324 info_size += sizeof(VkDescriptorBufferInfo) * entry->descriptorCount;
325 break;
326 }
327 }
328
329 cmd->u.push_descriptor_set_with_template_khr.data = vk_zalloc(cmd_buffer->vk.cmd_queue.alloc, info_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
330
331 uint64_t offset = 0;
332 for (unsigned i = 0; i < templ->entry_count; i++) {
333 VkDescriptorUpdateTemplateEntry *entry = &templ->entry[i];
334
335 unsigned size = 0;
336 switch (entry->descriptorType) {
337 case VK_DESCRIPTOR_TYPE_SAMPLER:
338 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
339 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
340 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
341 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
342 size = sizeof(VkDescriptorImageInfo);
343 break;
344 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
345 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
346 size = sizeof(VkBufferView);
347 break;
348 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
349 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
350 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
351 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
352 default:
353 size = sizeof(VkDescriptorBufferInfo);
354 break;
355 }
356 for (unsigned i = 0; i < entry->descriptorCount; i++) {
357 memcpy((uint8_t*)cmd->u.push_descriptor_set_with_template_khr.data + offset, (const uint8_t*)pData + entry->offset + i * entry->stride, size);
358 offset += size;
359 }
360 }
361 }
362