/*
 * Copyright © 2015 Intel Corporation
 * Copyright © 2022 Collabora, Ltd
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vk_command_pool.h"

#include "vk_alloc.h"
#include "vk_command_buffer.h"
#include "vk_common_entrypoints.h"
#include "vk_device.h"
#include "vk_log.h"

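/* Decides whether freed command buffers can be kept on the pool's per-level
 * free lists and handed back out by vkAllocateCommandBuffers() instead of
 * being destroyed and re-created.
 */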
static bool
should_recycle_command_buffers(struct vk_device *device)
{
   /* The driver has to be using the common allocation implementation,
    * otherwise the recycled command buffers will never actually get re-used.
    */
   const struct vk_device_dispatch_table *disp = &device->dispatch_table;
   if (disp->AllocateCommandBuffers != vk_common_AllocateCommandBuffers)
      return false;

   /* We need to be able to reset command buffers */
   if (device->command_buffer_ops->reset == NULL)
      return false;

   return true;
}

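/* Destroys every command buffer sitting on the pool's free lists (one list
 * per VkCommandBufferLevel).
 */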
static void
destroy_free_command_buffers(struct vk_command_pool *pool)
{
   for (uint32_t i = 0; i < ARRAY_SIZE(pool->free_command_buffers); i++) {
      list_for_each_entry_safe(struct vk_command_buffer, cmd_buffer,
                               &pool->free_command_buffers[i], pool_link) {
         cmd_buffer->ops->destroy(cmd_buffer);
      }
      assert(list_is_empty(&pool->free_command_buffers[i]));
   }
}

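/* Initializes a caller-allocated vk_command_pool from the create info.  The
 * allocator falls back to the device allocator when pAllocator is NULL, and
 * command buffer recycling is enabled only when the device supports it (see
 * should_recycle_command_buffers() above).
 */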
VkResult MUST_CHECK
vk_command_pool_init(struct vk_device *device,
                     struct vk_command_pool *pool,
                     const VkCommandPoolCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator)
{
   memset(pool, 0, sizeof(*pool));
   vk_object_base_init(device, &pool->base,
                       VK_OBJECT_TYPE_COMMAND_POOL);

   pool->flags = pCreateInfo->flags;
   pool->queue_family_index = pCreateInfo->queueFamilyIndex;
   pool->alloc = pAllocator ? *pAllocator : device->alloc;
   pool->command_buffer_ops = device->command_buffer_ops;
   pool->recycle_command_buffers = should_recycle_command_buffers(device);
   list_inithead(&pool->command_buffers);

   for (uint32_t i = 0; i < ARRAY_SIZE(pool->free_command_buffers); i++)
      list_inithead(&pool->free_command_buffers[i]);

   return VK_SUCCESS;
}

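/* Tears down a pool initialized with vk_command_pool_init(): destroys all
 * live command buffers still attached to the pool, then all recycled ones
 * on the free lists.
 */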
void
vk_command_pool_finish(struct vk_command_pool *pool)
{
   list_for_each_entry_safe(struct vk_command_buffer, cmd_buffer,
                            &pool->command_buffers, pool_link) {
      cmd_buffer->ops->destroy(cmd_buffer);
   }
   assert(list_is_empty(&pool->command_buffers));

   destroy_free_command_buffers(pool);

   vk_object_base_finish(&pool->base);
}

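/* Common implementation of vkCreateCommandPool().  Allocates the pool with
 * vk_alloc2() so pAllocator is honored, then defers to
 * vk_command_pool_init().
 */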
VKAPI_ATTR VkResult VKAPI_CALL
vk_common_CreateCommandPool(VkDevice _device,
                            const VkCommandPoolCreateInfo *pCreateInfo,
                            const VkAllocationCallbacks *pAllocator,
                            VkCommandPool *pCommandPool)
{
   VK_FROM_HANDLE(vk_device, device, _device);
   struct vk_command_pool *pool;
   VkResult result;

   pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   result = vk_command_pool_init(device, pool, pCreateInfo, pAllocator);
   if (unlikely(result != VK_SUCCESS)) {
      vk_free2(&device->alloc, pAllocator, pool);
      return result;
   }

   *pCommandPool = vk_command_pool_to_handle(pool);

   return VK_SUCCESS;
}

VKAPI_ATTR void VKAPI_CALL
vk_common_DestroyCommandPool(VkDevice _device,
                             VkCommandPool commandPool,
                             const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(vk_device, device, _device);
   VK_FROM_HANDLE(vk_command_pool, pool, commandPool);

   if (pool == NULL)
      return;

   vk_command_pool_finish(pool);
   vk_free2(&device->alloc, pAllocator, pool);
}

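/* Common implementation of vkResetCommandPool().  There is no pool-wide
 * reset here: the pool's RELEASE_RESOURCES flag is translated to the
 * per-buffer equivalent and every command buffer in the pool is reset
 * individually through the device's ResetCommandBuffer entrypoint.
 */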
VKAPI_ATTR VkResult VKAPI_CALL
vk_common_ResetCommandPool(VkDevice device,
                           VkCommandPool commandPool,
                           VkCommandPoolResetFlags flags)
{
   VK_FROM_HANDLE(vk_command_pool, pool, commandPool);
   const struct vk_device_dispatch_table *disp =
      &pool->base.device->dispatch_table;

#define COPY_FLAG(flag) \
   if (flags & VK_COMMAND_POOL_RESET_##flag) \
      cb_flags |= VK_COMMAND_BUFFER_RESET_##flag

   VkCommandBufferResetFlags cb_flags = 0;
   COPY_FLAG(RELEASE_RESOURCES_BIT);

#undef COPY_FLAG

   list_for_each_entry_safe(struct vk_command_buffer, cmd_buffer,
                            &pool->command_buffers, pool_link) {
      VkResult result =
         disp->ResetCommandBuffer(vk_command_buffer_to_handle(cmd_buffer),
                                  cb_flags);
      if (result != VK_SUCCESS)
         return result;
   }

   return VK_SUCCESS;
}

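/* Called when a command buffer is freed back to the pool.  If recycling is
 * enabled, the buffer is recycled (see vk_command_buffer_recycle()) and
 * moved to the free list for its level; otherwise it is destroyed
 * immediately.
 */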
static void
vk_command_buffer_recycle_or_destroy(struct vk_command_pool *pool,
                                     struct vk_command_buffer *cmd_buffer)
{
   assert(pool == cmd_buffer->pool);

   if (pool->recycle_command_buffers) {
      vk_command_buffer_recycle(cmd_buffer);

      list_del(&cmd_buffer->pool_link);
      list_add(&cmd_buffer->pool_link, &pool->free_command_buffers[cmd_buffer->level]);
   } else {
      cmd_buffer->ops->destroy(cmd_buffer);
   }
}

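/* Pops a previously recycled command buffer of the requested level off the
 * free list and moves it back onto the pool's live list, or returns NULL if
 * none is available.
 */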
static struct vk_command_buffer *
vk_command_pool_find_free(struct vk_command_pool *pool,
                          VkCommandBufferLevel level)
{
   if (list_is_empty(&pool->free_command_buffers[level]))
      return NULL;

   struct vk_command_buffer *cmd_buffer =
      list_first_entry(&pool->free_command_buffers[level],
                       struct vk_command_buffer, pool_link);

   list_del(&cmd_buffer->pool_link);
   list_addtail(&cmd_buffer->pool_link, &pool->command_buffers);

   return cmd_buffer;
}

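/* Common implementation of vkAllocateCommandBuffers().  Each requested
 * command buffer is taken from the free list when possible and created via
 * the driver's command_buffer_ops->create() hook otherwise.  On failure,
 * every buffer allocated so far is recycled or destroyed and the whole
 * pCommandBuffers array is filled with VK_NULL_HANDLE.
 */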
VKAPI_ATTR VkResult VKAPI_CALL
vk_common_AllocateCommandBuffers(VkDevice device,
                                 const VkCommandBufferAllocateInfo *pAllocateInfo,
                                 VkCommandBuffer *pCommandBuffers)
{
   VK_FROM_HANDLE(vk_command_pool, pool, pAllocateInfo->commandPool);
   VkResult result;
   uint32_t i;

   assert(device == vk_device_to_handle(pool->base.device));

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
      struct vk_command_buffer *cmd_buffer =
         vk_command_pool_find_free(pool, pAllocateInfo->level);
      if (cmd_buffer == NULL) {
         result = pool->command_buffer_ops->create(pool, pAllocateInfo->level, &cmd_buffer);
         if (unlikely(result != VK_SUCCESS))
            goto fail;
      }

      cmd_buffer->level = pAllocateInfo->level;

      pCommandBuffers[i] = vk_command_buffer_to_handle(cmd_buffer);
   }

   return VK_SUCCESS;

fail:
   while (i--) {
      VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, pCommandBuffers[i]);
      vk_command_buffer_recycle_or_destroy(pool, cmd_buffer);
   }
   for (i = 0; i < pAllocateInfo->commandBufferCount; i++)
      pCommandBuffers[i] = VK_NULL_HANDLE;

   return result;
}

VKAPI_ATTR void VKAPI_CALL
vk_common_FreeCommandBuffers(VkDevice device,
                             VkCommandPool commandPool,
                             uint32_t commandBufferCount,
                             const VkCommandBuffer *pCommandBuffers)
{
   VK_FROM_HANDLE(vk_command_pool, pool, commandPool);

   for (uint32_t i = 0; i < commandBufferCount; i++) {
      VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, pCommandBuffers[i]);

      if (cmd_buffer == NULL)
         continue;

      vk_command_buffer_recycle_or_destroy(pool, cmd_buffer);
   }
}

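/* Releases memory held by the pool that is not strictly needed: command
 * buffers parked on the free lists are destroyed.  The trim flags are
 * currently reserved and unused.
 */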
void
vk_command_pool_trim(struct vk_command_pool *pool,
                     VkCommandPoolTrimFlags flags)
{
   destroy_free_command_buffers(pool);
}

VKAPI_ATTR void VKAPI_CALL
vk_common_TrimCommandPool(VkDevice device,
                          VkCommandPool commandPool,
                          VkCommandPoolTrimFlags flags)
{
   VK_FROM_HANDLE(vk_command_pool, pool, commandPool);

   vk_command_pool_trim(pool, flags);
}