1 /*
2 * Copyright © 2019 Red Hat.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "lvp_private.h"
25 #include "pipe/p_context.h"
26
lvp_create_cmd_buffer(struct lvp_device * device,struct lvp_cmd_pool * pool,VkCommandBufferLevel level,VkCommandBuffer * pCommandBuffer)27 static VkResult lvp_create_cmd_buffer(
28 struct lvp_device * device,
29 struct lvp_cmd_pool * pool,
30 VkCommandBufferLevel level,
31 VkCommandBuffer* pCommandBuffer)
32 {
33 struct lvp_cmd_buffer *cmd_buffer;
34
35 cmd_buffer = vk_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
36 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
37 if (cmd_buffer == NULL)
38 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
39
40 vk_object_base_init(&device->vk, &cmd_buffer->base,
41 VK_OBJECT_TYPE_COMMAND_BUFFER);
42 cmd_buffer->device = device;
43 cmd_buffer->pool = pool;
44 list_inithead(&cmd_buffer->cmds);
45 cmd_buffer->status = LVP_CMD_BUFFER_STATUS_INITIAL;
46 if (pool) {
47 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
48 } else {
49 /* Init the pool_link so we can safefly call list_del when we destroy
50 * the command buffer
51 */
52 list_inithead(&cmd_buffer->pool_link);
53 }
54 *pCommandBuffer = lvp_cmd_buffer_to_handle(cmd_buffer);
55
56 return VK_SUCCESS;
57 }
58
59 static void
lvp_cmd_buffer_free_all_cmds(struct lvp_cmd_buffer * cmd_buffer)60 lvp_cmd_buffer_free_all_cmds(struct lvp_cmd_buffer *cmd_buffer)
61 {
62 struct lvp_cmd_buffer_entry *tmp, *cmd;
63 LIST_FOR_EACH_ENTRY_SAFE(cmd, tmp, &cmd_buffer->cmds, cmd_link) {
64 list_del(&cmd->cmd_link);
65 vk_free(&cmd_buffer->pool->alloc, cmd);
66 }
67 }
68
lvp_reset_cmd_buffer(struct lvp_cmd_buffer * cmd_buffer)69 static VkResult lvp_reset_cmd_buffer(struct lvp_cmd_buffer *cmd_buffer)
70 {
71 lvp_cmd_buffer_free_all_cmds(cmd_buffer);
72 list_inithead(&cmd_buffer->cmds);
73 cmd_buffer->status = LVP_CMD_BUFFER_STATUS_INITIAL;
74 return VK_SUCCESS;
75 }
76
/* vkAllocateCommandBuffers: allocate commandBufferCount buffers from the
 * pool, recycling entries from the pool's free list when available.  On
 * failure, everything allocated so far is returned to the pool and the
 * whole output array is zeroed. */
VkResult lvp_AllocateCommandBuffers(
   VkDevice                                    _device,
   const VkCommandBufferAllocateInfo*          pAllocateInfo,
   VkCommandBuffer*                            pCommandBuffers)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {

      if (!list_is_empty(&pool->free_cmd_buffers)) {
         /* Recycle path: move a cached buffer back onto the in-use list
          * and reset it, which frees its stale recorded commands. */
         struct lvp_cmd_buffer *cmd_buffer = list_first_entry(&pool->free_cmd_buffers, struct lvp_cmd_buffer, pool_link);

         list_del(&cmd_buffer->pool_link);
         list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);

         /* NOTE(review): result is assigned but not acted on until after
          * the loop; lvp_reset_cmd_buffer currently always returns
          * VK_SUCCESS, so this is harmless today — confirm if reset ever
          * grows a failure path. */
         result = lvp_reset_cmd_buffer(cmd_buffer);
         cmd_buffer->level = pAllocateInfo->level;

         pCommandBuffers[i] = lvp_cmd_buffer_to_handle(cmd_buffer);
      } else {
         result = lvp_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                        &pCommandBuffers[i]);
         if (result != VK_SUCCESS)
            break;
      }
   }

   if (result != VK_SUCCESS) {
      /* Partial failure: release the i buffers handed out so far, then
       * zero the full array so the caller sees no stale handles. */
      lvp_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
                             i, pCommandBuffers);
      memset(pCommandBuffers, 0,
             sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
   }

   return result;
}
117
118 static void
lvp_cmd_buffer_destroy(struct lvp_cmd_buffer * cmd_buffer)119 lvp_cmd_buffer_destroy(struct lvp_cmd_buffer *cmd_buffer)
120 {
121 lvp_cmd_buffer_free_all_cmds(cmd_buffer);
122 list_del(&cmd_buffer->pool_link);
123 vk_object_base_finish(&cmd_buffer->base);
124 vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
125 }
126
/* vkFreeCommandBuffers: pooled buffers are recycled onto the pool's free
 * list (their recorded commands are released lazily, on reuse or trim);
 * pool-less buffers are destroyed immediately. */
void lvp_FreeCommandBuffers(
   VkDevice                                    device,
   VkCommandPool                               commandPool,
   uint32_t                                    commandBufferCount,
   const VkCommandBuffer*                      pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      /* VK_NULL_HANDLE entries are legal and ignored. */
      if (!cmd_buffer)
         continue;

      if (!cmd_buffer->pool) {
         lvp_cmd_buffer_destroy(cmd_buffer);
         continue;
      }

      list_del(&cmd_buffer->pool_link);
      list_addtail(&cmd_buffer->pool_link,
                   &cmd_buffer->pool->free_cmd_buffers);
   }
}
145
/* vkResetCommandBuffer: flags (e.g. RELEASE_RESOURCES_BIT) are ignored;
 * the implementation always frees the recorded commands, which satisfies
 * both behaviors. */
VkResult lvp_ResetCommandBuffer(
   VkCommandBuffer                             commandBuffer,
   VkCommandBufferResetFlags                   flags)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);

   return lvp_reset_cmd_buffer(cmd_buffer);
}
154
lvp_BeginCommandBuffer(VkCommandBuffer commandBuffer,const VkCommandBufferBeginInfo * pBeginInfo)155 VkResult lvp_BeginCommandBuffer(
156 VkCommandBuffer commandBuffer,
157 const VkCommandBufferBeginInfo* pBeginInfo)
158 {
159 LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
160 VkResult result;
161 if (cmd_buffer->status != LVP_CMD_BUFFER_STATUS_INITIAL) {
162 result = lvp_reset_cmd_buffer(cmd_buffer);
163 if (result != VK_SUCCESS)
164 return result;
165 }
166 cmd_buffer->status = LVP_CMD_BUFFER_STATUS_RECORDING;
167 return VK_SUCCESS;
168 }
169
/* vkEndCommandBuffer: recording is already fully captured in the cmds
 * list, so ending only transitions the state to executable. */
VkResult lvp_EndCommandBuffer(
   VkCommandBuffer                             commandBuffer)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   cmd_buffer->status = LVP_CMD_BUFFER_STATUS_EXECUTABLE;
   return VK_SUCCESS;
}
177
lvp_CreateCommandPool(VkDevice _device,const VkCommandPoolCreateInfo * pCreateInfo,const VkAllocationCallbacks * pAllocator,VkCommandPool * pCmdPool)178 VkResult lvp_CreateCommandPool(
179 VkDevice _device,
180 const VkCommandPoolCreateInfo* pCreateInfo,
181 const VkAllocationCallbacks* pAllocator,
182 VkCommandPool* pCmdPool)
183 {
184 LVP_FROM_HANDLE(lvp_device, device, _device);
185 struct lvp_cmd_pool *pool;
186
187 pool = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*pool), 8,
188 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
189 if (pool == NULL)
190 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
191
192 vk_object_base_init(&device->vk, &pool->base,
193 VK_OBJECT_TYPE_COMMAND_POOL);
194 if (pAllocator)
195 pool->alloc = *pAllocator;
196 else
197 pool->alloc = device->vk.alloc;
198
199 list_inithead(&pool->cmd_buffers);
200 list_inithead(&pool->free_cmd_buffers);
201
202 *pCmdPool = lvp_cmd_pool_to_handle(pool);
203
204 return VK_SUCCESS;
205 }
206
/* vkDestroyCommandPool: destroy every command buffer still owned by the
 * pool — both in-use and recycled — then free the pool itself. */
void lvp_DestroyCommandPool(
   VkDevice                                    _device,
   VkCommandPool                               commandPool,
   const VkAllocationCallbacks*                pAllocator)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_cmd_pool, pool, commandPool);

   /* Destroying VK_NULL_HANDLE is a no-op per the Vulkan spec. */
   if (!pool)
      return;

   /* _safe iteration: lvp_cmd_buffer_destroy unlinks each entry. */
   list_for_each_entry_safe(struct lvp_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link) {
      lvp_cmd_buffer_destroy(cmd_buffer);
   }

   list_for_each_entry_safe(struct lvp_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link) {
      lvp_cmd_buffer_destroy(cmd_buffer);
   }

   vk_object_base_finish(&pool->base);
   vk_free2(&device->vk.alloc, pAllocator, pool);
}
231
lvp_ResetCommandPool(VkDevice device,VkCommandPool commandPool,VkCommandPoolResetFlags flags)232 VkResult lvp_ResetCommandPool(
233 VkDevice device,
234 VkCommandPool commandPool,
235 VkCommandPoolResetFlags flags)
236 {
237 LVP_FROM_HANDLE(lvp_cmd_pool, pool, commandPool);
238 VkResult result;
239
240 list_for_each_entry(struct lvp_cmd_buffer, cmd_buffer,
241 &pool->cmd_buffers, pool_link) {
242 result = lvp_reset_cmd_buffer(cmd_buffer);
243 if (result != VK_SUCCESS)
244 return result;
245 }
246 return VK_SUCCESS;
247 }
248
/* vkTrimCommandPool: release the memory of recycled (freed-but-cached)
 * command buffers.  In-use buffers are left untouched; flags are ignored
 * (reserved in the Vulkan spec). */
void lvp_TrimCommandPool(
   VkDevice                                    device,
   VkCommandPool                               commandPool,
   VkCommandPoolTrimFlags                      flags)
{
   LVP_FROM_HANDLE(lvp_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct lvp_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link) {
      lvp_cmd_buffer_destroy(cmd_buffer);
   }
}
264
/* Allocate one command entry plus @extra_size trailing bytes of
 * per-command payload; callers place variable-length arrays right after
 * the entry, at (cmd + 1).  Returns NULL on allocation failure. */
static struct lvp_cmd_buffer_entry *cmd_buf_entry_alloc_size(struct lvp_cmd_buffer *cmd_buffer,
                                                             uint32_t extra_size,
                                                             enum lvp_cmds type)
{
   const uint32_t total_size = sizeof(struct lvp_cmd_buffer_entry) + extra_size;
   struct lvp_cmd_buffer_entry *entry;

   entry = vk_alloc(&cmd_buffer->pool->alloc, total_size, 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!entry)
      return NULL;

   entry->cmd_type = type;
   return entry;
}
280
/* Convenience wrapper for commands that carry no trailing payload. */
static struct lvp_cmd_buffer_entry *cmd_buf_entry_alloc(struct lvp_cmd_buffer *cmd_buffer,
                                                        enum lvp_cmds type)
{
   return cmd_buf_entry_alloc_size(cmd_buffer, 0, type);
}
286
/* Append a fully-initialized command entry to the buffer's recording;
 * ownership of @cmd transfers to the command buffer. */
static void cmd_buf_queue(struct lvp_cmd_buffer *cmd_buffer,
                          struct lvp_cmd_buffer_entry *cmd)
{
   list_addtail(&cmd->cmd_link, &cmd_buffer->cmds);
}
292
/* Compute the pending clear aspects (and stash clear values) for every
 * attachment of a render pass at vkCmdBeginRenderPass time.
 * @attachments must hold pass->attachment_count entries;
 * @clear_values may be NULL when nothing is cleared. */
static void
state_setup_attachments(struct lvp_attachment_state *attachments,
                        struct lvp_render_pass *pass,
                        const VkClearValue *clear_values)
{
   for (uint32_t i = 0; i < pass->attachment_count; ++i) {
      struct lvp_render_pass_attachment *att = &pass->attachments[i];
      VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
      VkImageAspectFlags clear_aspects = 0;
      if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
         /* color attachment */
         if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
            clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
         }
      } else {
         /* depthstencil attachment */
         if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
             att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
            clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
            /* If depth is cleared and the stencil load op is DONT_CARE,
             * clearing stencil alongside depth is also allowed (its
             * contents are undefined anyway). */
            if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
                att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE)
               clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
         }
         if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
             att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
            clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
         }
      }
      attachments[i].pending_clear_aspects = clear_aspects;
      /* NOTE(review): clear_values is indexed per attachment, so when
       * non-NULL it must have at least attachment_count entries; the spec
       * only requires clearValueCount to cover cleared attachments —
       * verify callers always satisfy this. */
      if (clear_values)
         attachments[i].clear_value = clear_values[i];
   }
}
326
/* Record vkCmdBeginRenderPass.  Per-attachment clear state lives in the
 * entry's trailing payload.  contents is not recorded. */
void lvp_CmdBeginRenderPass(
   VkCommandBuffer                             commandBuffer,
   const VkRenderPassBeginInfo*                pRenderPassBegin,
   VkSubpassContents                           contents)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_render_pass, pass, pRenderPassBegin->renderPass);
   LVP_FROM_HANDLE(lvp_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
   const uint32_t payload_size =
      pass->attachment_count * sizeof(struct lvp_attachment_state);

   struct lvp_cmd_buffer_entry *cmd =
      cmd_buf_entry_alloc_size(cmd_buffer, payload_size,
                               LVP_CMD_BEGIN_RENDER_PASS);
   if (!cmd)
      return;

   cmd->u.begin_render_pass.render_pass = pass;
   cmd->u.begin_render_pass.framebuffer = framebuffer;
   cmd->u.begin_render_pass.render_area = pRenderPassBegin->renderArea;
   cmd->u.begin_render_pass.attachments = (struct lvp_attachment_state *)(cmd + 1);

   state_setup_attachments(cmd->u.begin_render_pass.attachments, pass,
                           pRenderPassBegin->pClearValues);

   cmd_buf_queue(cmd_buffer, cmd);
}
351
/* Record vkCmdNextSubpass. */
void lvp_CmdNextSubpass(
   VkCommandBuffer                             commandBuffer,
   VkSubpassContents                           contents)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);

   struct lvp_cmd_buffer_entry *cmd =
      cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_NEXT_SUBPASS);
   if (!cmd)
      return;

   cmd->u.next_subpass.contents = contents;

   cmd_buf_queue(cmd_buffer, cmd);
}
367
/* Record vkCmdBindVertexBuffers.  The trailing payload holds bindingCount
 * lvp_buffer pointers followed by bindingCount VkDeviceSize offsets.
 * NOTE(review): the offsets array is 8-byte aligned only when pointers
 * are 8 bytes or bindingCount is even — fine on 64-bit hosts; confirm
 * before enabling 32-bit builds. */
void lvp_CmdBindVertexBuffers(
   VkCommandBuffer                             commandBuffer,
   uint32_t                                    firstBinding,
   uint32_t                                    bindingCount,
   const VkBuffer*                             pBuffers,
   const VkDeviceSize*                         pOffsets)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;
   struct lvp_buffer **buffers;
   VkDeviceSize *offsets;
   uint32_t i;   /* fix: was int, silently compared against uint32_t bindingCount */
   uint32_t cmd_size = bindingCount * sizeof(struct lvp_buffer *) + bindingCount * sizeof(VkDeviceSize);

   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_BIND_VERTEX_BUFFERS);
   if (!cmd)
      return;

   cmd->u.vertex_buffers.first = firstBinding;
   cmd->u.vertex_buffers.binding_count = bindingCount;

   /* Payload layout: [buffers[bindingCount]][offsets[bindingCount]]. */
   buffers = (struct lvp_buffer **)(cmd + 1);
   offsets = (VkDeviceSize *)(buffers + bindingCount);
   for (i = 0; i < bindingCount; i++) {
      buffers[i] = lvp_buffer_from_handle(pBuffers[i]);
      offsets[i] = pOffsets[i];
   }
   cmd->u.vertex_buffers.buffers = buffers;
   cmd->u.vertex_buffers.offsets = offsets;

   cmd_buf_queue(cmd_buffer, cmd);
}
400
/* Record vkCmdBindPipeline. */
void lvp_CmdBindPipeline(
   VkCommandBuffer                             commandBuffer,
   VkPipelineBindPoint                         pipelineBindPoint,
   VkPipeline                                  _pipeline)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_pipeline, pipeline, _pipeline);

   struct lvp_cmd_buffer_entry *cmd =
      cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_BIND_PIPELINE);
   if (!cmd)
      return;

   cmd->u.pipeline.bind_point = pipelineBindPoint;
   cmd->u.pipeline.pipeline = pipeline;

   cmd_buf_queue(cmd_buffer, cmd);
}
419
/* Record vkCmdBindDescriptorSets.  The trailing payload holds
 * descriptorSetCount lvp_descriptor_set pointers followed by
 * dynamicOffsetCount uint32_t dynamic offsets. */
void lvp_CmdBindDescriptorSets(
   VkCommandBuffer                             commandBuffer,
   VkPipelineBindPoint                         pipelineBindPoint,
   VkPipelineLayout                            _layout,
   uint32_t                                    firstSet,
   uint32_t                                    descriptorSetCount,
   const VkDescriptorSet*                      pDescriptorSets,
   uint32_t                                    dynamicOffsetCount,
   const uint32_t*                             pDynamicOffsets)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_pipeline_layout, layout, _layout);
   struct lvp_cmd_buffer_entry *cmd;
   struct lvp_descriptor_set **sets;
   uint32_t *offsets;
   uint32_t i;   /* fix: was int, silently compared against uint32_t counts */
   uint32_t cmd_size = descriptorSetCount * sizeof(struct lvp_descriptor_set *) + dynamicOffsetCount * sizeof(uint32_t);

   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_BIND_DESCRIPTOR_SETS);
   if (!cmd)
      return;

   cmd->u.descriptor_sets.bind_point = pipelineBindPoint;
   cmd->u.descriptor_sets.layout = layout;
   cmd->u.descriptor_sets.first = firstSet;
   cmd->u.descriptor_sets.count = descriptorSetCount;

   /* Payload layout: [sets[descriptorSetCount]][offsets[dynamicOffsetCount]]. */
   sets = (struct lvp_descriptor_set **)(cmd + 1);
   for (i = 0; i < descriptorSetCount; i++) {
      sets[i] = lvp_descriptor_set_from_handle(pDescriptorSets[i]);
   }
   cmd->u.descriptor_sets.sets = sets;

   cmd->u.descriptor_sets.dynamic_offset_count = dynamicOffsetCount;
   offsets = (uint32_t *)(sets + descriptorSetCount);
   for (i = 0; i < dynamicOffsetCount; i++)
      offsets[i] = pDynamicOffsets[i];
   cmd->u.descriptor_sets.dynamic_offsets = offsets;

   cmd_buf_queue(cmd_buffer, cmd);
}
461
/* Record vkCmdDraw. */
void lvp_CmdDraw(
   VkCommandBuffer                             commandBuffer,
   uint32_t                                    vertexCount,
   uint32_t                                    instanceCount,
   uint32_t                                    firstVertex,
   uint32_t                                    firstInstance)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);

   struct lvp_cmd_buffer_entry *cmd =
      cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_DRAW);
   if (!cmd)
      return;

   cmd->u.draw.vertex_count = vertexCount;
   cmd->u.draw.instance_count = instanceCount;
   cmd->u.draw.first_vertex = firstVertex;
   cmd->u.draw.first_instance = firstInstance;

   cmd_buf_queue(cmd_buffer, cmd);
}
483
/* Record vkCmdEndRenderPass; the entry carries no payload. */
void lvp_CmdEndRenderPass(
   VkCommandBuffer                             commandBuffer)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);

   struct lvp_cmd_buffer_entry *cmd =
      cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_END_RENDER_PASS);
   if (!cmd)
      return;

   cmd_buf_queue(cmd_buffer, cmd);
}
496
/* Record vkCmdSetViewport.  Viewports are copied into the entry's fixed
 * inline array. */
void lvp_CmdSetViewport(
   VkCommandBuffer                             commandBuffer,
   uint32_t                                    firstViewport,
   uint32_t                                    viewportCount,
   const VkViewport*                           pViewports)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);

   struct lvp_cmd_buffer_entry *cmd =
      cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_VIEWPORT);
   if (!cmd)
      return;

   cmd->u.set_viewport.first_viewport = firstViewport;
   cmd->u.set_viewport.viewport_count = viewportCount;
   memcpy(cmd->u.set_viewport.viewports, pViewports,
          viewportCount * sizeof(*pViewports));

   cmd_buf_queue(cmd_buffer, cmd);
}
518
/* Record vkCmdSetScissor.  Scissor rects are copied into the entry's
 * fixed inline array. */
void lvp_CmdSetScissor(
   VkCommandBuffer                             commandBuffer,
   uint32_t                                    firstScissor,
   uint32_t                                    scissorCount,
   const VkRect2D*                             pScissors)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);

   struct lvp_cmd_buffer_entry *cmd =
      cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_SCISSOR);
   if (!cmd)
      return;

   cmd->u.set_scissor.first_scissor = firstScissor;
   cmd->u.set_scissor.scissor_count = scissorCount;
   memcpy(cmd->u.set_scissor.scissors, pScissors,
          scissorCount * sizeof(*pScissors));

   cmd_buf_queue(cmd_buffer, cmd);
}
540
/* Record vkCmdSetLineWidth. */
void lvp_CmdSetLineWidth(
   VkCommandBuffer                             commandBuffer,
   float                                       lineWidth)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);

   struct lvp_cmd_buffer_entry *cmd =
      cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_LINE_WIDTH);
   if (!cmd)
      return;

   cmd->u.set_line_width.line_width = lineWidth;

   cmd_buf_queue(cmd_buffer, cmd);
}
556
/* Record vkCmdSetDepthBias. */
void lvp_CmdSetDepthBias(
   VkCommandBuffer                             commandBuffer,
   float                                       depthBiasConstantFactor,
   float                                       depthBiasClamp,
   float                                       depthBiasSlopeFactor)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);

   struct lvp_cmd_buffer_entry *cmd =
      cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_DEPTH_BIAS);
   if (!cmd)
      return;

   cmd->u.set_depth_bias.constant_factor = depthBiasConstantFactor;
   cmd->u.set_depth_bias.clamp = depthBiasClamp;
   cmd->u.set_depth_bias.slope_factor = depthBiasSlopeFactor;

   cmd_buf_queue(cmd_buffer, cmd);
}
576
/* Record vkCmdSetBlendConstants (always four floats, per the spec). */
void lvp_CmdSetBlendConstants(
   VkCommandBuffer                             commandBuffer,
   const float                                 blendConstants[4])
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);

   struct lvp_cmd_buffer_entry *cmd =
      cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_BLEND_CONSTANTS);
   if (!cmd)
      return;

   memcpy(cmd->u.set_blend_constants.blend_constants, blendConstants,
          4 * sizeof(float));

   cmd_buf_queue(cmd_buffer, cmd);
}
592
/* Record vkCmdSetDepthBounds. */
void lvp_CmdSetDepthBounds(
   VkCommandBuffer                             commandBuffer,
   float                                       minDepthBounds,
   float                                       maxDepthBounds)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);

   struct lvp_cmd_buffer_entry *cmd =
      cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_DEPTH_BOUNDS);
   if (!cmd)
      return;

   cmd->u.set_depth_bounds.min_depth = minDepthBounds;
   cmd->u.set_depth_bounds.max_depth = maxDepthBounds;

   cmd_buf_queue(cmd_buffer, cmd);
}
610
/* Record vkCmdSetStencilCompareMask.  The stencil_vals union member is
 * shared by all three stencil-value commands; the entry type
 * disambiguates. */
void lvp_CmdSetStencilCompareMask(
   VkCommandBuffer                             commandBuffer,
   VkStencilFaceFlags                          faceMask,
   uint32_t                                    compareMask)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);

   struct lvp_cmd_buffer_entry *cmd =
      cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_STENCIL_COMPARE_MASK);
   if (!cmd)
      return;

   cmd->u.stencil_vals.face_mask = faceMask;
   cmd->u.stencil_vals.value = compareMask;

   cmd_buf_queue(cmd_buffer, cmd);
}
628
/* Record vkCmdSetStencilWriteMask (shares stencil_vals with the other
 * stencil-value commands; the entry type disambiguates). */
void lvp_CmdSetStencilWriteMask(
   VkCommandBuffer                             commandBuffer,
   VkStencilFaceFlags                          faceMask,
   uint32_t                                    writeMask)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);

   struct lvp_cmd_buffer_entry *cmd =
      cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_STENCIL_WRITE_MASK);
   if (!cmd)
      return;

   cmd->u.stencil_vals.face_mask = faceMask;
   cmd->u.stencil_vals.value = writeMask;

   cmd_buf_queue(cmd_buffer, cmd);
}
646
647
/* Record vkCmdSetStencilReference (shares stencil_vals with the other
 * stencil-value commands; the entry type disambiguates). */
void lvp_CmdSetStencilReference(
   VkCommandBuffer                             commandBuffer,
   VkStencilFaceFlags                          faceMask,
   uint32_t                                    reference)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);

   struct lvp_cmd_buffer_entry *cmd =
      cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_STENCIL_REFERENCE);
   if (!cmd)
      return;

   cmd->u.stencil_vals.face_mask = faceMask;
   cmd->u.stencil_vals.value = reference;

   cmd_buf_queue(cmd_buffer, cmd);
}
665
/* Record vkCmdPushConstants.  layout is not recorded — push-constant data
 * is keyed purely by stage/offset/size at execution time. */
void lvp_CmdPushConstants(
   VkCommandBuffer                             commandBuffer,
   VkPipelineLayout                            layout,
   VkShaderStageFlags                          stageFlags,
   uint32_t                                    offset,
   uint32_t                                    size,
   const void*                                 pValues)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;

   /* The entry's push_constants member ends in a 4-byte value slot, so
    * only (size - 4) extra bytes are needed to hold a size-byte payload.
    * NOTE(review): assumes size >= 4 and a multiple of 4 — the Vulkan
    * spec requires both for vkCmdPushConstants; confirm the trailing
    * member really is 4 bytes in lvp_private.h. */
   cmd = cmd_buf_entry_alloc_size(cmd_buffer, (size - 4), LVP_CMD_PUSH_CONSTANTS);
   if (!cmd)
      return;

   cmd->u.push_constants.stage = stageFlags;
   cmd->u.push_constants.offset = offset;
   cmd->u.push_constants.size = size;
   memcpy(cmd->u.push_constants.val, pValues, size);

   cmd_buf_queue(cmd_buffer, cmd);
}
688
/* Record vkCmdBindIndexBuffer. */
void lvp_CmdBindIndexBuffer(
   VkCommandBuffer                             commandBuffer,
   VkBuffer                                    _buffer,
   VkDeviceSize                                offset,
   VkIndexType                                 indexType)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_buffer, buffer, _buffer);

   struct lvp_cmd_buffer_entry *cmd =
      cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_BIND_INDEX_BUFFER);
   if (!cmd)
      return;

   cmd->u.index_buffer.buffer = buffer;
   cmd->u.index_buffer.offset = offset;
   cmd->u.index_buffer.index_type = indexType;

   cmd_buf_queue(cmd_buffer, cmd);
}
709
/* Record vkCmdDrawIndexed. */
void lvp_CmdDrawIndexed(
   VkCommandBuffer                             commandBuffer,
   uint32_t                                    indexCount,
   uint32_t                                    instanceCount,
   uint32_t                                    firstIndex,
   int32_t                                     vertexOffset,
   uint32_t                                    firstInstance)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);

   struct lvp_cmd_buffer_entry *cmd =
      cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_DRAW_INDEXED);
   if (!cmd)
      return;

   cmd->u.draw_indexed.index_count = indexCount;
   cmd->u.draw_indexed.instance_count = instanceCount;
   cmd->u.draw_indexed.first_index = firstIndex;
   cmd->u.draw_indexed.vertex_offset = vertexOffset;
   cmd->u.draw_indexed.first_instance = firstInstance;

   cmd_buf_queue(cmd_buffer, cmd);
}
733
/* Record vkCmdDrawIndirect. */
void lvp_CmdDrawIndirect(
   VkCommandBuffer                             commandBuffer,
   VkBuffer                                    _buffer,
   VkDeviceSize                                offset,
   uint32_t                                    drawCount,
   uint32_t                                    stride)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_buffer, buf, _buffer);

   struct lvp_cmd_buffer_entry *cmd =
      cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_DRAW_INDIRECT);
   if (!cmd)
      return;

   cmd->u.draw_indirect.buffer = buf;
   cmd->u.draw_indirect.offset = offset;
   cmd->u.draw_indirect.draw_count = drawCount;
   cmd->u.draw_indirect.stride = stride;

   cmd_buf_queue(cmd_buffer, cmd);
}
756
/* Record vkCmdDrawIndexedIndirect.  Shares the draw_indirect union member
 * with lvp_CmdDrawIndirect; the entry type distinguishes them. */
void lvp_CmdDrawIndexedIndirect(
   VkCommandBuffer                             commandBuffer,
   VkBuffer                                    _buffer,
   VkDeviceSize                                offset,
   uint32_t                                    drawCount,
   uint32_t                                    stride)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_buffer, buf, _buffer);

   struct lvp_cmd_buffer_entry *cmd =
      cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_DRAW_INDEXED_INDIRECT);
   if (!cmd)
      return;

   cmd->u.draw_indirect.buffer = buf;
   cmd->u.draw_indirect.offset = offset;
   cmd->u.draw_indirect.draw_count = drawCount;
   cmd->u.draw_indirect.stride = stride;

   cmd_buf_queue(cmd_buffer, cmd);
}
779
/* Record vkCmdDispatch. */
void lvp_CmdDispatch(
   VkCommandBuffer                             commandBuffer,
   uint32_t                                    x,
   uint32_t                                    y,
   uint32_t                                    z)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);

   struct lvp_cmd_buffer_entry *cmd =
      cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_DISPATCH);
   if (!cmd)
      return;

   cmd->u.dispatch.x = x;
   cmd->u.dispatch.y = y;
   cmd->u.dispatch.z = z;

   cmd_buf_queue(cmd_buffer, cmd);
}
799
/* Record vkCmdDispatchIndirect. */
void lvp_CmdDispatchIndirect(
   VkCommandBuffer                             commandBuffer,
   VkBuffer                                    _buffer,
   VkDeviceSize                                offset)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);

   struct lvp_cmd_buffer_entry *cmd =
      cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_DISPATCH_INDIRECT);
   if (!cmd)
      return;

   cmd->u.dispatch_indirect.buffer = lvp_buffer_from_handle(_buffer);
   cmd->u.dispatch_indirect.offset = offset;

   cmd_buf_queue(cmd_buffer, cmd);
}
817
/* Record vkCmdExecuteCommands.  The secondary command buffer pointers are
 * stored in the entry's trailing payload. */
void lvp_CmdExecuteCommands(
   VkCommandBuffer                             commandBuffer,
   uint32_t                                    commandBufferCount,
   const VkCommandBuffer*                      pCmdBuffers)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   const uint32_t payload_size =
      commandBufferCount * sizeof(struct lvp_cmd_buffer *);

   struct lvp_cmd_buffer_entry *cmd =
      cmd_buf_entry_alloc_size(cmd_buffer, payload_size,
                               LVP_CMD_EXECUTE_COMMANDS);
   if (!cmd)
      return;

   cmd->u.execute_commands.command_buffer_count = commandBufferCount;
   for (uint32_t i = 0; i < commandBufferCount; i++)
      cmd->u.execute_commands.cmd_buffers[i] =
         lvp_cmd_buffer_from_handle(pCmdBuffers[i]);

   cmd_buf_queue(cmd_buffer, cmd);
}
837
/* Record vkCmdSetEvent.  Set and reset share the LVP_CMD_SET_EVENT entry;
 * 'value' distinguishes them. */
void lvp_CmdSetEvent(VkCommandBuffer commandBuffer,
                     VkEvent _event,
                     VkPipelineStageFlags stageMask)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_event, event, _event);

   struct lvp_cmd_buffer_entry *cmd =
      cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_EVENT);
   if (!cmd)
      return;

   cmd->u.event_set.event = event;
   cmd->u.event_set.value = true;
   /* Flush only for a top-of-pipe set (no prior work to wait on). */
   cmd->u.event_set.flush = (stageMask == VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);

   cmd_buf_queue(cmd_buffer, cmd);
}
856
/* Record vkCmdResetEvent.  Reuses the LVP_CMD_SET_EVENT entry with
 * value = false. */
void lvp_CmdResetEvent(VkCommandBuffer commandBuffer,
                       VkEvent _event,
                       VkPipelineStageFlags stageMask)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_event, event, _event);

   struct lvp_cmd_buffer_entry *cmd =
      cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_SET_EVENT);
   if (!cmd)
      return;

   cmd->u.event_set.event = event;
   cmd->u.event_set.value = false;
   /* Flush only for a top-of-pipe reset (no prior work to wait on). */
   cmd->u.event_set.flush = (stageMask == VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);

   cmd_buf_queue(cmd_buffer, cmd);
}
876
/* Record vkCmdWaitEvents.
 * NOTE(review): this implementation is explicitly unfinished (see TODO
 * below): the allocation reserves space for the three barrier arrays, but
 * only the event pointers are copied into the payload — the barrier
 * structs themselves are never stored, so the recorded counts point at
 * uninitialized memory if the executor ever reads past the events. */
void lvp_CmdWaitEvents(VkCommandBuffer commandBuffer,
                       uint32_t eventCount,
                       const VkEvent* pEvents,
                       VkPipelineStageFlags srcStageMask,
                       VkPipelineStageFlags dstStageMask,
                       uint32_t memoryBarrierCount,
                       const VkMemoryBarrier* pMemoryBarriers,
                       uint32_t bufferMemoryBarrierCount,
                       const VkBufferMemoryBarrier* pBufferMemoryBarriers,
                       uint32_t imageMemoryBarrierCount,
                       const VkImageMemoryBarrier* pImageMemoryBarriers)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;
   uint32_t cmd_size = 0;

   /* Size the payload for events plus all three barrier arrays. */
   cmd_size += eventCount * sizeof(struct lvp_event *);
   cmd_size += memoryBarrierCount * sizeof(VkMemoryBarrier);
   cmd_size += bufferMemoryBarrierCount * sizeof(VkBufferMemoryBarrier);
   cmd_size += imageMemoryBarrierCount * sizeof(VkImageMemoryBarrier);

   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_WAIT_EVENTS);
   if (!cmd)
      return;

   cmd->u.wait_events.src_stage_mask = srcStageMask;
   cmd->u.wait_events.dst_stage_mask = dstStageMask;
   cmd->u.wait_events.event_count = eventCount;
   cmd->u.wait_events.events = (struct lvp_event **)(cmd + 1);
   for (unsigned i = 0; i < eventCount; i++)
      cmd->u.wait_events.events[i] = lvp_event_from_handle(pEvents[i]);
   /* Only the counts are recorded; the barrier contents are not copied. */
   cmd->u.wait_events.memory_barrier_count = memoryBarrierCount;
   cmd->u.wait_events.buffer_memory_barrier_count = bufferMemoryBarrierCount;
   cmd->u.wait_events.image_memory_barrier_count = imageMemoryBarrierCount;

   /* TODO finish off this */
   cmd_buf_queue(cmd_buffer, cmd);
}
915
916
/* vkCmdCopyBufferToImage: record a buffer→image copy; the region array is
 * duplicated into the entry's variable-length tail. */
void lvp_CmdCopyBufferToImage(
   VkCommandBuffer commandBuffer,
   VkBuffer srcBuffer,
   VkImage destImage,
   VkImageLayout destImageLayout,
   uint32_t regionCount,
   const VkBufferImageCopy* pRegions)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_buffer, src_buffer, srcBuffer);
   LVP_FROM_HANDLE(lvp_image, dst_image, destImage);

   const uint32_t regions_size = regionCount * sizeof(VkBufferImageCopy);
   struct lvp_cmd_buffer_entry *entry =
      cmd_buf_entry_alloc_size(cmd_buffer, regions_size,
                               LVP_CMD_COPY_BUFFER_TO_IMAGE);
   if (entry == NULL)
      return;

   entry->u.buffer_to_img.src = src_buffer;
   entry->u.buffer_to_img.dst = dst_image;
   entry->u.buffer_to_img.dst_layout = destImageLayout;
   entry->u.buffer_to_img.region_count = regionCount;
   /* Regions live immediately after the entry header. */
   entry->u.buffer_to_img.regions = (VkBufferImageCopy *)(entry + 1);
   memcpy(entry->u.buffer_to_img.regions, pRegions, regions_size);

   cmd_buf_queue(cmd_buffer, entry);
}
950
/* vkCmdCopyImageToBuffer: record an image→buffer copy; the region array is
 * duplicated into the entry's variable-length tail. */
void lvp_CmdCopyImageToBuffer(
   VkCommandBuffer commandBuffer,
   VkImage srcImage,
   VkImageLayout srcImageLayout,
   VkBuffer destBuffer,
   uint32_t regionCount,
   const VkBufferImageCopy* pRegions)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_image, src_image, srcImage);
   LVP_FROM_HANDLE(lvp_buffer, dst_buffer, destBuffer);

   const uint32_t regions_size = regionCount * sizeof(VkBufferImageCopy);
   struct lvp_cmd_buffer_entry *entry =
      cmd_buf_entry_alloc_size(cmd_buffer, regions_size,
                               LVP_CMD_COPY_IMAGE_TO_BUFFER);
   if (entry == NULL)
      return;

   entry->u.img_to_buffer.src = src_image;
   entry->u.img_to_buffer.dst = dst_buffer;
   entry->u.img_to_buffer.src_layout = srcImageLayout;
   entry->u.img_to_buffer.region_count = regionCount;
   /* Regions live immediately after the entry header. */
   entry->u.img_to_buffer.regions = (VkBufferImageCopy *)(entry + 1);
   memcpy(entry->u.img_to_buffer.regions, pRegions, regions_size);

   cmd_buf_queue(cmd_buffer, entry);
}
984
/* vkCmdCopyImage: record an image→image copy; the region array is
 * duplicated into the entry's variable-length tail. */
void lvp_CmdCopyImage(
   VkCommandBuffer commandBuffer,
   VkImage srcImage,
   VkImageLayout srcImageLayout,
   VkImage destImage,
   VkImageLayout destImageLayout,
   uint32_t regionCount,
   const VkImageCopy* pRegions)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_image, src_image, srcImage);
   LVP_FROM_HANDLE(lvp_image, dest_image, destImage);

   const uint32_t regions_size = regionCount * sizeof(VkImageCopy);
   struct lvp_cmd_buffer_entry *entry =
      cmd_buf_entry_alloc_size(cmd_buffer, regions_size, LVP_CMD_COPY_IMAGE);
   if (entry == NULL)
      return;

   entry->u.copy_image.src = src_image;
   entry->u.copy_image.dst = dest_image;
   entry->u.copy_image.src_layout = srcImageLayout;
   entry->u.copy_image.dst_layout = destImageLayout;
   entry->u.copy_image.region_count = regionCount;
   /* Regions live immediately after the entry header. */
   entry->u.copy_image.regions = (VkImageCopy *)(entry + 1);
   memcpy(entry->u.copy_image.regions, pRegions, regions_size);

   cmd_buf_queue(cmd_buffer, entry);
}
1020
1021
/* vkCmdCopyBuffer: record a buffer→buffer copy; the region array is
 * duplicated into the entry's variable-length tail. */
void lvp_CmdCopyBuffer(
   VkCommandBuffer commandBuffer,
   VkBuffer srcBuffer,
   VkBuffer destBuffer,
   uint32_t regionCount,
   const VkBufferCopy* pRegions)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_buffer, src_buffer, srcBuffer);
   LVP_FROM_HANDLE(lvp_buffer, dest_buffer, destBuffer);

   const uint32_t regions_size = regionCount * sizeof(VkBufferCopy);
   struct lvp_cmd_buffer_entry *entry =
      cmd_buf_entry_alloc_size(cmd_buffer, regions_size, LVP_CMD_COPY_BUFFER);
   if (entry == NULL)
      return;

   entry->u.copy_buffer.src = src_buffer;
   entry->u.copy_buffer.dst = dest_buffer;
   entry->u.copy_buffer.region_count = regionCount;
   /* Regions live immediately after the entry header. */
   entry->u.copy_buffer.regions = (VkBufferCopy *)(entry + 1);
   memcpy(entry->u.copy_buffer.regions, pRegions, regions_size);

   cmd_buf_queue(cmd_buffer, entry);
}
1053
/* vkCmdBlitImage: record a scaled/filtered image blit; the region array is
 * duplicated into the entry's variable-length tail. */
void lvp_CmdBlitImage(
   VkCommandBuffer commandBuffer,
   VkImage srcImage,
   VkImageLayout srcImageLayout,
   VkImage destImage,
   VkImageLayout destImageLayout,
   uint32_t regionCount,
   const VkImageBlit* pRegions,
   VkFilter filter)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_image, src_image, srcImage);
   LVP_FROM_HANDLE(lvp_image, dest_image, destImage);

   const uint32_t regions_size = regionCount * sizeof(VkImageBlit);
   struct lvp_cmd_buffer_entry *entry =
      cmd_buf_entry_alloc_size(cmd_buffer, regions_size, LVP_CMD_BLIT_IMAGE);
   if (entry == NULL)
      return;

   entry->u.blit_image.src = src_image;
   entry->u.blit_image.dst = dest_image;
   entry->u.blit_image.src_layout = srcImageLayout;
   entry->u.blit_image.dst_layout = destImageLayout;
   entry->u.blit_image.filter = filter;
   entry->u.blit_image.region_count = regionCount;
   /* Regions live immediately after the entry header. */
   entry->u.blit_image.regions = (VkImageBlit *)(entry + 1);
   memcpy(entry->u.blit_image.regions, pRegions, regions_size);

   cmd_buf_queue(cmd_buffer, entry);
}
1091
/* vkCmdClearAttachments: record attachment clears.  The entry's tail holds
 * the attachment array followed by the rect array. */
void lvp_CmdClearAttachments(
   VkCommandBuffer commandBuffer,
   uint32_t attachmentCount,
   const VkClearAttachment* pAttachments,
   uint32_t rectCount,
   const VkClearRect* pRects)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);

   const uint32_t extra_size = attachmentCount * sizeof(VkClearAttachment) +
                               rectCount * sizeof(VkClearRect);
   struct lvp_cmd_buffer_entry *entry =
      cmd_buf_entry_alloc_size(cmd_buffer, extra_size,
                               LVP_CMD_CLEAR_ATTACHMENTS);
   if (entry == NULL)
      return;

   entry->u.clear_attachments.attachment_count = attachmentCount;
   entry->u.clear_attachments.attachments = (VkClearAttachment *)(entry + 1);
   memcpy(entry->u.clear_attachments.attachments, pAttachments,
          attachmentCount * sizeof(VkClearAttachment));

   entry->u.clear_attachments.rect_count = rectCount;
   /* Rects follow directly after the attachment array. */
   entry->u.clear_attachments.rects =
      (VkClearRect *)(entry->u.clear_attachments.attachments + attachmentCount);
   memcpy(entry->u.clear_attachments.rects, pRects,
          rectCount * sizeof(VkClearRect));

   cmd_buf_queue(cmd_buffer, entry);
}
1118
/* vkCmdFillBuffer: record a buffer fill with a repeated 32-bit value. */
void lvp_CmdFillBuffer(
   VkCommandBuffer commandBuffer,
   VkBuffer dstBuffer,
   VkDeviceSize dstOffset,
   VkDeviceSize fillSize,
   uint32_t data)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_buffer, dst_buffer, dstBuffer);

   struct lvp_cmd_buffer_entry *entry =
      cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_FILL_BUFFER);
   if (entry == NULL)
      return;

   entry->u.fill_buffer.buffer = dst_buffer;
   entry->u.fill_buffer.offset = dstOffset;
   /* fillSize is stored as-is; presumably VK_WHOLE_SIZE is resolved at
    * execution time — confirm against the execution side. */
   entry->u.fill_buffer.fill_size = fillSize;
   entry->u.fill_buffer.data = data;

   cmd_buf_queue(cmd_buffer, entry);
}
1141
/* vkCmdUpdateBuffer: record an inline buffer update; the payload bytes are
 * copied into the entry's variable-length tail (u.update_buffer.data). */
void lvp_CmdUpdateBuffer(
   VkCommandBuffer commandBuffer,
   VkBuffer dstBuffer,
   VkDeviceSize dstOffset,
   VkDeviceSize dataSize,
   const void* pData)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_buffer, dst_buffer, dstBuffer);

   /* Extra allocation covers the inline payload. */
   struct lvp_cmd_buffer_entry *entry =
      cmd_buf_entry_alloc_size(cmd_buffer, dataSize, LVP_CMD_UPDATE_BUFFER);
   if (entry == NULL)
      return;

   entry->u.update_buffer.buffer = dst_buffer;
   entry->u.update_buffer.offset = dstOffset;
   entry->u.update_buffer.data_size = dataSize;
   memcpy(entry->u.update_buffer.data, pData, dataSize);

   cmd_buf_queue(cmd_buffer, entry);
}
1164
/* vkCmdClearColorImage: record a color-image clear; the subresource ranges
 * are duplicated into the entry's variable-length tail. */
void lvp_CmdClearColorImage(
   VkCommandBuffer commandBuffer,
   VkImage image_h,
   VkImageLayout imageLayout,
   const VkClearColorValue* pColor,
   uint32_t rangeCount,
   const VkImageSubresourceRange* pRanges)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_image, image, image_h);

   const uint32_t ranges_size = rangeCount * sizeof(VkImageSubresourceRange);
   struct lvp_cmd_buffer_entry *entry =
      cmd_buf_entry_alloc_size(cmd_buffer, ranges_size,
                               LVP_CMD_CLEAR_COLOR_IMAGE);
   if (entry == NULL)
      return;

   entry->u.clear_color_image.image = image;
   entry->u.clear_color_image.layout = imageLayout;
   entry->u.clear_color_image.clear_val = *pColor;
   entry->u.clear_color_image.range_count = rangeCount;
   /* Ranges live immediately after the entry header. */
   entry->u.clear_color_image.ranges = (VkImageSubresourceRange *)(entry + 1);
   memcpy(entry->u.clear_color_image.ranges, pRanges, ranges_size);

   cmd_buf_queue(cmd_buffer, entry);
}
1192
/* vkCmdClearDepthStencilImage: record a depth/stencil clear; the
 * subresource ranges are duplicated into the entry's tail. */
void lvp_CmdClearDepthStencilImage(
   VkCommandBuffer commandBuffer,
   VkImage image_h,
   VkImageLayout imageLayout,
   const VkClearDepthStencilValue* pDepthStencil,
   uint32_t rangeCount,
   const VkImageSubresourceRange* pRanges)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_image, image, image_h);

   const uint32_t ranges_size = rangeCount * sizeof(VkImageSubresourceRange);
   struct lvp_cmd_buffer_entry *entry =
      cmd_buf_entry_alloc_size(cmd_buffer, ranges_size,
                               LVP_CMD_CLEAR_DEPTH_STENCIL_IMAGE);
   if (entry == NULL)
      return;

   entry->u.clear_ds_image.image = image;
   entry->u.clear_ds_image.layout = imageLayout;
   entry->u.clear_ds_image.clear_val = *pDepthStencil;
   entry->u.clear_ds_image.range_count = rangeCount;
   /* Ranges live immediately after the entry header. */
   entry->u.clear_ds_image.ranges = (VkImageSubresourceRange *)(entry + 1);
   memcpy(entry->u.clear_ds_image.ranges, pRanges, ranges_size);

   cmd_buf_queue(cmd_buffer, entry);
}
1220
1221
/* vkCmdResolveImage: record a multisample resolve; the region array is
 * duplicated into the entry's variable-length tail. */
void lvp_CmdResolveImage(
   VkCommandBuffer commandBuffer,
   VkImage srcImage,
   VkImageLayout srcImageLayout,
   VkImage destImage,
   VkImageLayout destImageLayout,
   uint32_t regionCount,
   const VkImageResolve* regions)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_image, src_image, srcImage);
   LVP_FROM_HANDLE(lvp_image, dst_image, destImage);

   const uint32_t regions_size = regionCount * sizeof(VkImageResolve);
   struct lvp_cmd_buffer_entry *entry =
      cmd_buf_entry_alloc_size(cmd_buffer, regions_size,
                               LVP_CMD_RESOLVE_IMAGE);
   if (entry == NULL)
      return;

   entry->u.resolve_image.src = src_image;
   entry->u.resolve_image.dst = dst_image;
   entry->u.resolve_image.src_layout = srcImageLayout;
   entry->u.resolve_image.dst_layout = destImageLayout;
   entry->u.resolve_image.region_count = regionCount;
   /* Regions live immediately after the entry header. */
   entry->u.resolve_image.regions = (VkImageResolve *)(entry + 1);
   memcpy(entry->u.resolve_image.regions, regions, regions_size);

   cmd_buf_queue(cmd_buffer, entry);
}
1252
/* vkCmdResetQueryPool: record a query-pool reset. */
void lvp_CmdResetQueryPool(
   VkCommandBuffer commandBuffer,
   VkQueryPool queryPool,
   uint32_t firstQuery,
   uint32_t queryCount)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_query_pool, query_pool, queryPool);

   struct lvp_cmd_buffer_entry *entry =
      cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_RESET_QUERY_POOL);
   if (entry == NULL)
      return;

   entry->u.query.pool = query_pool;
   entry->u.query.query = firstQuery;
   /* The shared query payload has no dedicated count field, so the reset
    * command reuses 'index' to carry queryCount. */
   entry->u.query.index = queryCount;

   cmd_buf_queue(cmd_buffer, entry);
}
1273
/* vkCmdBeginQueryIndexedEXT: record a query begin. */
void lvp_CmdBeginQueryIndexedEXT(
   VkCommandBuffer commandBuffer,
   VkQueryPool queryPool,
   uint32_t query,
   VkQueryControlFlags flags,
   uint32_t index)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_query_pool, query_pool, queryPool);

   struct lvp_cmd_buffer_entry *entry =
      cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_BEGIN_QUERY);
   if (entry == NULL)
      return;

   entry->u.query.pool = query_pool;
   entry->u.query.query = query;
   entry->u.query.index = index;
   /* NOTE(review): 'flags' is ignored and 'precise' is always recorded as
    * true — presumably VK_QUERY_CONTROL_PRECISE_BIT should drive this;
    * confirm against the query execution path. */
   entry->u.query.precise = true;

   cmd_buf_queue(cmd_buffer, entry);
}
1296
/* vkCmdBeginQuery: the non-indexed begin is the indexed variant with
 * index 0. */
void lvp_CmdBeginQuery(
   VkCommandBuffer commandBuffer,
   VkQueryPool queryPool,
   uint32_t query,
   VkQueryControlFlags flags)
{
   lvp_CmdBeginQueryIndexedEXT(commandBuffer, queryPool, query, flags,
                               /*index=*/0);
}
1305
/* vkCmdEndQueryIndexedEXT: record a query end. */
void lvp_CmdEndQueryIndexedEXT(
   VkCommandBuffer commandBuffer,
   VkQueryPool queryPool,
   uint32_t query,
   uint32_t index)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_query_pool, query_pool, queryPool);

   struct lvp_cmd_buffer_entry *entry =
      cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_END_QUERY);
   if (entry == NULL)
      return;

   entry->u.query.pool = query_pool;
   entry->u.query.query = query;
   entry->u.query.index = index;

   cmd_buf_queue(cmd_buffer, entry);
}
1326
/* vkCmdEndQuery: the non-indexed end is the indexed variant with index 0. */
void lvp_CmdEndQuery(
   VkCommandBuffer commandBuffer,
   VkQueryPool queryPool,
   uint32_t query)
{
   lvp_CmdEndQueryIndexedEXT(commandBuffer, queryPool, query, /*index=*/0);
}
1334
/* vkCmdWriteTimestamp: record a timestamp write into a query slot. */
void lvp_CmdWriteTimestamp(
   VkCommandBuffer commandBuffer,
   VkPipelineStageFlagBits pipelineStage,
   VkQueryPool queryPool,
   uint32_t query)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_query_pool, query_pool, queryPool);

   struct lvp_cmd_buffer_entry *entry =
      cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_WRITE_TIMESTAMP);
   if (entry == NULL)
      return;

   entry->u.query.pool = query_pool;
   entry->u.query.query = query;
   /* Any stage other than TOP_OF_PIPE requests a flush before sampling. */
   entry->u.query.flush = pipelineStage != VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;

   cmd_buf_queue(cmd_buffer, entry);
}
1355
/* vkCmdCopyQueryPoolResults: record a copy of query results into a buffer.
 * All parameters are stored verbatim for the execution side. */
void lvp_CmdCopyQueryPoolResults(
   VkCommandBuffer commandBuffer,
   VkQueryPool queryPool,
   uint32_t firstQuery,
   uint32_t queryCount,
   VkBuffer dstBuffer,
   VkDeviceSize dstOffset,
   VkDeviceSize stride,
   VkQueryResultFlags flags)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   LVP_FROM_HANDLE(lvp_query_pool, query_pool, queryPool);
   LVP_FROM_HANDLE(lvp_buffer, buffer, dstBuffer);

   struct lvp_cmd_buffer_entry *entry =
      cmd_buf_entry_alloc(cmd_buffer, LVP_CMD_COPY_QUERY_POOL_RESULTS);
   if (entry == NULL)
      return;

   entry->u.copy_query_pool_results.pool = query_pool;
   entry->u.copy_query_pool_results.first_query = firstQuery;
   entry->u.copy_query_pool_results.query_count = queryCount;
   entry->u.copy_query_pool_results.dst = buffer;
   entry->u.copy_query_pool_results.dst_offset = dstOffset;
   entry->u.copy_query_pool_results.stride = stride;
   entry->u.copy_query_pool_results.flags = flags;

   cmd_buf_queue(cmd_buffer, entry);
}
1385
/* vkCmdPipelineBarrier: record a pipeline barrier.
 *
 * Space is reserved in the entry's tail for the three barrier arrays, but
 * the barriers themselves are never copied in (see the TODO below) — only
 * the stage masks, by-region flag, and counts are recorded.
 */
void lvp_CmdPipelineBarrier(
   VkCommandBuffer commandBuffer,
   VkPipelineStageFlags srcStageMask,
   VkPipelineStageFlags destStageMask,
   VkBool32 byRegion,
   uint32_t memoryBarrierCount,
   const VkMemoryBarrier* pMemoryBarriers,
   uint32_t bufferMemoryBarrierCount,
   const VkBufferMemoryBarrier* pBufferMemoryBarriers,
   uint32_t imageMemoryBarrierCount,
   const VkImageMemoryBarrier* pImageMemoryBarriers)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct lvp_cmd_buffer_entry *cmd;
   uint32_t cmd_size = 0;

   /* Tail storage reserved for barrier arrays (currently unused). */
   cmd_size += memoryBarrierCount * sizeof(VkMemoryBarrier);
   cmd_size += bufferMemoryBarrierCount * sizeof(VkBufferMemoryBarrier);
   cmd_size += imageMemoryBarrierCount * sizeof(VkImageMemoryBarrier);

   cmd = cmd_buf_entry_alloc_size(cmd_buffer, cmd_size, LVP_CMD_PIPELINE_BARRIER);
   if (!cmd)
      return;

   cmd->u.pipeline_barrier.src_stage_mask = srcStageMask;
   cmd->u.pipeline_barrier.dst_stage_mask = destStageMask;
   cmd->u.pipeline_barrier.by_region = byRegion;
   cmd->u.pipeline_barrier.memory_barrier_count = memoryBarrierCount;
   cmd->u.pipeline_barrier.buffer_memory_barrier_count = bufferMemoryBarrierCount;
   cmd->u.pipeline_barrier.image_memory_barrier_count = imageMemoryBarrierCount;

   /* TODO finish off this */
   cmd_buf_queue(cmd_buffer, cmd);
}
1420