/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#include "vn_pipeline.h"

#include "venus-protocol/vn_protocol_driver_pipeline.h"
#include "venus-protocol/vn_protocol_driver_pipeline_cache.h"
#include "venus-protocol/vn_protocol_driver_pipeline_layout.h"
#include "venus-protocol/vn_protocol_driver_shader_module.h"

#include "vn_device.h"
#include "vn_physical_device.h"

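/*
 * Venus talks to a host-side renderer over the venus protocol.  The
 * pattern throughout this file: allocate a local object that shadows the
 * host object, then forward the command to the host asynchronously
 * (vn_async_*), or synchronously when a reply is needed (vn_call_*).
 * NULL is passed as pAllocator on the wire because host-side allocation
 * never goes through the guest allocator.
 */
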
/* shader module commands */

VkResult
vn_CreateShaderModule(VkDevice device,
                      const VkShaderModuleCreateInfo *pCreateInfo,
                      const VkAllocationCallbacks *pAllocator,
                      VkShaderModule *pShaderModule)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_shader_module *mod =
      vk_zalloc(alloc, sizeof(*mod), VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!mod)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&mod->base, VK_OBJECT_TYPE_SHADER_MODULE, &dev->base);

   VkShaderModule mod_handle = vn_shader_module_to_handle(mod);
   vn_async_vkCreateShaderModule(dev->instance, device, pCreateInfo, NULL,
                                 &mod_handle);

   *pShaderModule = mod_handle;

   return VK_SUCCESS;
}

void
vn_DestroyShaderModule(VkDevice device,
                       VkShaderModule shaderModule,
                       const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_shader_module *mod = vn_shader_module_from_handle(shaderModule);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!mod)
      return;

   vn_async_vkDestroyShaderModule(dev->instance, device, shaderModule, NULL);

   vn_object_base_fini(&mod->base);
   vk_free(alloc, mod);
}

/* pipeline layout commands */

VkResult
vn_CreatePipelineLayout(VkDevice device,
                        const VkPipelineLayoutCreateInfo *pCreateInfo,
                        const VkAllocationCallbacks *pAllocator,
                        VkPipelineLayout *pPipelineLayout)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_pipeline_layout *layout =
      vk_zalloc(alloc, sizeof(*layout), VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!layout)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&layout->base, VK_OBJECT_TYPE_PIPELINE_LAYOUT,
                       &dev->base);

   VkPipelineLayout layout_handle = vn_pipeline_layout_to_handle(layout);
   vn_async_vkCreatePipelineLayout(dev->instance, device, pCreateInfo, NULL,
                                   &layout_handle);

   *pPipelineLayout = layout_handle;

   return VK_SUCCESS;
}

void
vn_DestroyPipelineLayout(VkDevice device,
                         VkPipelineLayout pipelineLayout,
                         const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_pipeline_layout *layout =
      vn_pipeline_layout_from_handle(pipelineLayout);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!layout)
      return;

   vn_async_vkDestroyPipelineLayout(dev->instance, device, pipelineLayout,
                                    NULL);

   vn_object_base_fini(&layout->base);
   vk_free(alloc, layout);
}

/* pipeline cache commands */

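/*
 * vn_GetPipelineCacheData prepends a vk_pipeline_cache_header that
 * matches the properties venus exposes to the application.  When the
 * application feeds such a blob back in, vn_CreatePipelineCache strips
 * that header again before forwarding the initial data to the host.
 */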
VkResult
vn_CreatePipelineCache(VkDevice device,
                       const VkPipelineCacheCreateInfo *pCreateInfo,
                       const VkAllocationCallbacks *pAllocator,
                       VkPipelineCache *pPipelineCache)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_pipeline_cache *cache =
      vk_zalloc(alloc, sizeof(*cache), VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!cache)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&cache->base, VK_OBJECT_TYPE_PIPELINE_CACHE,
                       &dev->base);

   VkPipelineCacheCreateInfo local_create_info;
   if (pCreateInfo->initialDataSize) {
      /* skip the header we prepended in vn_GetPipelineCacheData; the size
       * must shrink by the same amount or the host would read past the
       * end of the initial data
       */
      local_create_info = *pCreateInfo;
      local_create_info.initialDataSize -=
         sizeof(struct vk_pipeline_cache_header);
      local_create_info.pInitialData +=
         sizeof(struct vk_pipeline_cache_header);
      pCreateInfo = &local_create_info;
   }

   VkPipelineCache cache_handle = vn_pipeline_cache_to_handle(cache);
   vn_async_vkCreatePipelineCache(dev->instance, device, pCreateInfo, NULL,
                                  &cache_handle);

   *pPipelineCache = cache_handle;

   return VK_SUCCESS;
}

void
vn_DestroyPipelineCache(VkDevice device,
                        VkPipelineCache pipelineCache,
                        const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_pipeline_cache *cache =
      vn_pipeline_cache_from_handle(pipelineCache);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!cache)
      return;

   vn_async_vkDestroyPipelineCache(dev->instance, device, pipelineCache,
                                   NULL);

   vn_object_base_fini(&cache->base);
   vk_free(alloc, cache);
}

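/*
 * The host renderer's cache blob already carries the host driver's own
 * header.  Prepend another header filled from the properties of the venus
 * physical device, so the blob validates against what the application
 * sees via vkGetPhysicalDeviceProperties.
 */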
VkResult
vn_GetPipelineCacheData(VkDevice device,
                        VkPipelineCache pipelineCache,
                        size_t *pDataSize,
                        void *pData)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_physical_device *physical_dev = dev->physical_device;

   struct vk_pipeline_cache_header *header = pData;
   VkResult result;
   if (!pData) {
      result = vn_call_vkGetPipelineCacheData(dev->instance, device,
                                              pipelineCache, pDataSize, NULL);
      if (result != VK_SUCCESS)
         return vn_error(dev->instance, result);

      *pDataSize += sizeof(*header);
      return VK_SUCCESS;
   }

   if (*pDataSize <= sizeof(*header)) {
      *pDataSize = 0;
      return VK_INCOMPLETE;
   }

   const VkPhysicalDeviceProperties *props =
      &physical_dev->properties.properties;
   header->header_size = sizeof(*header);
   header->header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
   header->vendor_id = props->vendorID;
   header->device_id = props->deviceID;
   memcpy(header->uuid, props->pipelineCacheUUID, VK_UUID_SIZE);

   *pDataSize -= header->header_size;
   result =
      vn_call_vkGetPipelineCacheData(dev->instance, device, pipelineCache,
                                     pDataSize, pData + header->header_size);
   if (result < VK_SUCCESS)
      return vn_error(dev->instance, result);

   *pDataSize += header->header_size;

   return result;
}

VkResult
vn_MergePipelineCaches(VkDevice device,
                       VkPipelineCache dstCache,
                       uint32_t srcCacheCount,
                       const VkPipelineCache *pSrcCaches)
{
   struct vn_device *dev = vn_device_from_handle(device);

   vn_async_vkMergePipelineCaches(dev->instance, device, dstCache,
                                  srcCacheCount, pSrcCaches);

   return VK_SUCCESS;
}

/* pipeline commands */

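/*
 * When rasterizerDiscardEnable is VK_TRUE, the spec allows applications
 * to leave pViewportState, pMultisampleState, pDepthStencilState, and
 * pColorBlendState as ignored (possibly dangling) pointers.  The protocol
 * encoder cannot safely serialize such pointers, so make a fixed-up copy
 * of the create infos with those fields cleared.
 */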
static const VkGraphicsPipelineCreateInfo *
vn_fix_graphics_pipeline_create_info(
   struct vn_device *dev,
   uint32_t create_info_count,
   const VkGraphicsPipelineCreateInfo *create_infos,
   const VkAllocationCallbacks *alloc,
   VkGraphicsPipelineCreateInfo **out)
{
   VkGraphicsPipelineCreateInfo *infos = NULL;
   bool has_ignored_state = false;

   for (uint32_t i = 0; i < create_info_count; i++) {
      if (create_infos[i].pRasterizationState->rasterizerDiscardEnable ==
          VK_FALSE)
         continue;

      if (create_infos[i].pViewportState ||
          create_infos[i].pMultisampleState ||
          create_infos[i].pDepthStencilState ||
          create_infos[i].pColorBlendState) {
         has_ignored_state = true;
         break;
      }
   }

   if (!has_ignored_state)
      return create_infos;

   infos = vk_alloc(alloc, sizeof(*infos) * create_info_count,
                    VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!infos)
      return NULL;

   memcpy(infos, create_infos, sizeof(*infos) * create_info_count);

   for (uint32_t i = 0; i < create_info_count; i++) {
      if (infos[i].pRasterizationState->rasterizerDiscardEnable == VK_FALSE)
         continue;

      infos[i].pViewportState = NULL;
      infos[i].pMultisampleState = NULL;
      infos[i].pDepthStencilState = NULL;
      infos[i].pColorBlendState = NULL;
   }

   *out = infos;
   return infos;
}

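/*
 * On allocation failure below, free everything created so far and zero
 * all output handles: the spec requires pPipelines entries for failed
 * creations to be VK_NULL_HANDLE.
 */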
VkResult
vn_CreateGraphicsPipelines(VkDevice device,
                           VkPipelineCache pipelineCache,
                           uint32_t createInfoCount,
                           const VkGraphicsPipelineCreateInfo *pCreateInfos,
                           const VkAllocationCallbacks *pAllocator,
                           VkPipeline *pPipelines)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;
   VkGraphicsPipelineCreateInfo *local_infos = NULL;

   pCreateInfos = vn_fix_graphics_pipeline_create_info(
      dev, createInfoCount, pCreateInfos, alloc, &local_infos);
   if (!pCreateInfos)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   for (uint32_t i = 0; i < createInfoCount; i++) {
      struct vn_pipeline *pipeline =
         vk_zalloc(alloc, sizeof(*pipeline), VN_DEFAULT_ALIGN,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!pipeline) {
         for (uint32_t j = 0; j < i; j++)
            vk_free(alloc, vn_pipeline_from_handle(pPipelines[j]));

         if (local_infos)
            vk_free(alloc, local_infos);

         memset(pPipelines, 0, sizeof(*pPipelines) * createInfoCount);
         return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      vn_object_base_init(&pipeline->base, VK_OBJECT_TYPE_PIPELINE,
                          &dev->base);

      VkPipeline pipeline_handle = vn_pipeline_to_handle(pipeline);
      pPipelines[i] = pipeline_handle;
   }

   vn_async_vkCreateGraphicsPipelines(dev->instance, device, pipelineCache,
                                      createInfoCount, pCreateInfos, NULL,
                                      pPipelines);

   if (local_infos)
      vk_free(alloc, local_infos);

   return VK_SUCCESS;
}

VkResult
vn_CreateComputePipelines(VkDevice device,
                          VkPipelineCache pipelineCache,
                          uint32_t createInfoCount,
                          const VkComputePipelineCreateInfo *pCreateInfos,
                          const VkAllocationCallbacks *pAllocator,
                          VkPipeline *pPipelines)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   for (uint32_t i = 0; i < createInfoCount; i++) {
      struct vn_pipeline *pipeline =
         vk_zalloc(alloc, sizeof(*pipeline), VN_DEFAULT_ALIGN,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!pipeline) {
         for (uint32_t j = 0; j < i; j++)
            vk_free(alloc, vn_pipeline_from_handle(pPipelines[j]));
         memset(pPipelines, 0, sizeof(*pPipelines) * createInfoCount);
         return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      vn_object_base_init(&pipeline->base, VK_OBJECT_TYPE_PIPELINE,
                          &dev->base);

      VkPipeline pipeline_handle = vn_pipeline_to_handle(pipeline);
      pPipelines[i] = pipeline_handle;
   }

   vn_async_vkCreateComputePipelines(dev->instance, device, pipelineCache,
                                     createInfoCount, pCreateInfos, NULL,
                                     pPipelines);

   return VK_SUCCESS;
}

void
vn_DestroyPipeline(VkDevice device,
                   VkPipeline _pipeline,
                   const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_pipeline *pipeline = vn_pipeline_from_handle(_pipeline);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!pipeline)
      return;

   vn_async_vkDestroyPipeline(dev->instance, device, _pipeline, NULL);

   vn_object_base_fini(&pipeline->base);
   vk_free(alloc, pipeline);
}