/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#include "vn_pipeline.h"

#include "venus-protocol/vn_protocol_driver_pipeline.h"
#include "venus-protocol/vn_protocol_driver_pipeline_cache.h"
#include "venus-protocol/vn_protocol_driver_pipeline_layout.h"
#include "venus-protocol/vn_protocol_driver_shader_module.h"

#include "vn_device.h"
#include "vn_physical_device.h"

/* shader module commands */

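/* Note: object creation in venus is asynchronous. The vn_async_* helpers
 * encode the command to the host renderer without waiting for a reply, so
 * the locally allocated wrapper object is returned before the renderer has
 * created the real object behind the handle.
 */
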
VkResult
vn_CreateShaderModule(VkDevice device,
                      const VkShaderModuleCreateInfo *pCreateInfo,
                      const VkAllocationCallbacks *pAllocator,
                      VkShaderModule *pShaderModule)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_shader_module *mod =
      vk_zalloc(alloc, sizeof(*mod), VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!mod)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&mod->base, VK_OBJECT_TYPE_SHADER_MODULE, &dev->base);

   VkShaderModule mod_handle = vn_shader_module_to_handle(mod);
   vn_async_vkCreateShaderModule(dev->instance, device, pCreateInfo, NULL,
                                 &mod_handle);

   *pShaderModule = mod_handle;

   return VK_SUCCESS;
}

void
vn_DestroyShaderModule(VkDevice device,
                       VkShaderModule shaderModule,
                       const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_shader_module *mod = vn_shader_module_from_handle(shaderModule);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!mod)
      return;

   vn_async_vkDestroyShaderModule(dev->instance, device, shaderModule, NULL);

   vn_object_base_fini(&mod->base);
   vk_free(alloc, mod);
}

/* pipeline layout commands */

VkResult
vn_CreatePipelineLayout(VkDevice device,
                        const VkPipelineLayoutCreateInfo *pCreateInfo,
                        const VkAllocationCallbacks *pAllocator,
                        VkPipelineLayout *pPipelineLayout)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_pipeline_layout *layout =
      vk_zalloc(alloc, sizeof(*layout), VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!layout)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&layout->base, VK_OBJECT_TYPE_PIPELINE_LAYOUT,
                       &dev->base);

   VkPipelineLayout layout_handle = vn_pipeline_layout_to_handle(layout);
   vn_async_vkCreatePipelineLayout(dev->instance, device, pCreateInfo, NULL,
                                   &layout_handle);

   *pPipelineLayout = layout_handle;

   return VK_SUCCESS;
}

void
vn_DestroyPipelineLayout(VkDevice device,
                         VkPipelineLayout pipelineLayout,
                         const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_pipeline_layout *layout =
      vn_pipeline_layout_from_handle(pipelineLayout);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!layout)
      return;

   vn_async_vkDestroyPipelineLayout(dev->instance, device, pipelineLayout,
                                    NULL);

   vn_object_base_fini(&layout->base);
   vk_free(alloc, layout);
}

/* pipeline cache commands */

VkResult
vn_CreatePipelineCache(VkDevice device,
                       const VkPipelineCacheCreateInfo *pCreateInfo,
                       const VkAllocationCallbacks *pAllocator,
                       VkPipelineCache *pPipelineCache)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_pipeline_cache *cache =
      vk_zalloc(alloc, sizeof(*cache), VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!cache)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&cache->base, VK_OBJECT_TYPE_PIPELINE_CACHE,
                       &dev->base);

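   /* The pipeline cache blob starts with a vk_pipeline_cache_header (filled
    * from the virtual physical device in vn_GetPipelineCacheData). Strip it
    * here and pass only the renderer's own data back to the host.
    */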
   VkPipelineCacheCreateInfo local_create_info;
   if (pCreateInfo->initialDataSize) {
      const struct vk_pipeline_cache_header *header =
         pCreateInfo->pInitialData;

      local_create_info = *pCreateInfo;
      local_create_info.initialDataSize -= header->header_size;
      local_create_info.pInitialData += header->header_size;
      pCreateInfo = &local_create_info;
   }

   VkPipelineCache cache_handle = vn_pipeline_cache_to_handle(cache);
   vn_async_vkCreatePipelineCache(dev->instance, device, pCreateInfo, NULL,
                                  &cache_handle);

   *pPipelineCache = cache_handle;

   return VK_SUCCESS;
}

void
vn_DestroyPipelineCache(VkDevice device,
                        VkPipelineCache pipelineCache,
                        const VkAllocationCallbacks *pAllocator)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_pipeline_cache *cache =
      vn_pipeline_cache_from_handle(pipelineCache);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!cache)
      return;

   vn_async_vkDestroyPipelineCache(dev->instance, device, pipelineCache,
                                   NULL);

   vn_object_base_fini(&cache->base);
   vk_free(alloc, cache);
}

VkResult
vn_GetPipelineCacheData(VkDevice device,
                        VkPipelineCache pipelineCache,
                        size_t *pDataSize,
                        void *pData)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_physical_device *physical_dev = dev->physical_device;

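   /* The blob returned to the application is the renderer's cache data
    * prefixed with a header built from the virtual physical device's vendor
    * and device IDs and pipeline cache UUID, so both the size query and the
    * copy below account for sizeof(*header).
    */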
   struct vk_pipeline_cache_header *header = pData;
   VkResult result;
   if (!pData) {
      result = vn_call_vkGetPipelineCacheData(dev->instance, device,
                                              pipelineCache, pDataSize, NULL);
      if (result != VK_SUCCESS)
         return vn_error(dev->instance, result);

      *pDataSize += sizeof(*header);
      return VK_SUCCESS;
   }

   if (*pDataSize <= sizeof(*header)) {
      *pDataSize = 0;
      return VK_INCOMPLETE;
   }

   const VkPhysicalDeviceProperties *props =
      &physical_dev->properties.vulkan_1_0;
   header->header_size = sizeof(*header);
   header->header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
   header->vendor_id = props->vendorID;
   header->device_id = props->deviceID;
   memcpy(header->uuid, props->pipelineCacheUUID, VK_UUID_SIZE);

   *pDataSize -= header->header_size;
   result =
      vn_call_vkGetPipelineCacheData(dev->instance, device, pipelineCache,
                                     pDataSize, pData + header->header_size);
   if (result < VK_SUCCESS)
      return vn_error(dev->instance, result);

   *pDataSize += header->header_size;

   return result;
}

VkResult
vn_MergePipelineCaches(VkDevice device,
                       VkPipelineCache dstCache,
                       uint32_t srcCacheCount,
                       const VkPipelineCache *pSrcCaches)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);

   vn_async_vkMergePipelineCaches(dev->instance, device, dstCache,
                                  srcCacheCount, pSrcCaches);

   return VK_SUCCESS;
}

/* pipeline commands */

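/* Graphics pipeline create infos are "fixed" before being encoded: state
 * pointers that the spec requires to be ignored may be garbage in
 * application-provided structs, and the encoder would otherwise serialize
 * whatever they point at, so such pointers are cleared first.
 */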
struct vn_graphics_pipeline_create_info_fix {
   bool ignore_tessellation_state;

   /* Ignore the following:
    * pViewportState
    * pMultisampleState
    * pDepthStencilState
    * pColorBlendState
    */
   bool ignore_raster_dedicated_states;
};

static const VkGraphicsPipelineCreateInfo *
vn_fix_graphics_pipeline_create_info(
   struct vn_device *dev,
   uint32_t create_info_count,
   const VkGraphicsPipelineCreateInfo *create_infos,
   const VkAllocationCallbacks *alloc,
   VkGraphicsPipelineCreateInfo **out)
{
   VN_TRACE_FUNC();
   VkGraphicsPipelineCreateInfo *infos = NULL;

   /* Defer allocation until we find a needed fix. */
   struct vn_graphics_pipeline_create_info_fix *fixes = NULL;

   for (uint32_t i = 0; i < create_info_count; i++) {
      const VkGraphicsPipelineCreateInfo *info = &create_infos[i];
      struct vn_graphics_pipeline_create_info_fix fix = { 0 };
      bool any_fix = false;

      VkShaderStageFlags stages = 0;
      for (uint32_t j = 0; j < info->stageCount; j++) {
         stages |= info->pStages[j].stage;
      }

      /* Fix pTessellationState?
       * VUID-VkGraphicsPipelineCreateInfo-pStages-00731
       */
      if (info->pTessellationState &&
          (!(stages & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) ||
           !(stages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT))) {
         fix.ignore_tessellation_state = true;
         any_fix = true;
      }

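      /* pViewportState, pMultisampleState, pDepthStencilState and
       * pColorBlendState are ignored when rasterization is statically
       * discarded, unless rasterizer discard is a dynamic state, in which
       * case they must be kept.
       */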
      bool ignore_raster_dedicated_states =
         !info->pRasterizationState ||
         info->pRasterizationState->rasterizerDiscardEnable == VK_TRUE;
      if (ignore_raster_dedicated_states && info->pDynamicState) {
         for (uint32_t j = 0; j < info->pDynamicState->dynamicStateCount;
              j++) {
            if (info->pDynamicState->pDynamicStates[j] ==
                VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE) {
               ignore_raster_dedicated_states = false;
               break;
            }
         }
      }

      /* FIXME: Conditions for ignoring pDepthStencilState and
       * pColorBlendState miss some cases that depend on the render pass.
       * Make them agree with the VUIDs.
       */
      if (ignore_raster_dedicated_states &&
          (info->pViewportState || info->pMultisampleState ||
           info->pDepthStencilState || info->pColorBlendState)) {
         fix.ignore_raster_dedicated_states = true;
         any_fix = true;
      }

      if (any_fix) {
         if (!fixes) {
            fixes = vk_zalloc(alloc, create_info_count * sizeof(fixes[0]),
                              VN_DEFAULT_ALIGN,
                              VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
            if (!fixes)
               return NULL;
         }

         fixes[i] = fix;
      }
   }

   if (!fixes)
      return create_infos;

   infos = vk_alloc(alloc, sizeof(*infos) * create_info_count,
                    VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!infos) {
      vk_free(alloc, fixes);
      return NULL;
   }

   memcpy(infos, create_infos, sizeof(*infos) * create_info_count);

   for (uint32_t i = 0; i < create_info_count; i++) {
      VkGraphicsPipelineCreateInfo *info = &infos[i];
      struct vn_graphics_pipeline_create_info_fix fix = fixes[i];

      if (fix.ignore_tessellation_state)
         info->pTessellationState = NULL;

      if (fix.ignore_raster_dedicated_states) {
         info->pViewportState = NULL;
         info->pMultisampleState = NULL;
         info->pDepthStencilState = NULL;
         info->pColorBlendState = NULL;
      }
   }

   vk_free(alloc, fixes);

   *out = infos;
   return infos;
}

VkResult
vn_CreateGraphicsPipelines(VkDevice device,
                           VkPipelineCache pipelineCache,
                           uint32_t createInfoCount,
                           const VkGraphicsPipelineCreateInfo *pCreateInfos,
                           const VkAllocationCallbacks *pAllocator,
                           VkPipeline *pPipelines)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;
   VkGraphicsPipelineCreateInfo *local_infos = NULL;

   pCreateInfos = vn_fix_graphics_pipeline_create_info(
      dev, createInfoCount, pCreateInfos, alloc, &local_infos);
   if (!pCreateInfos)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

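   /* Allocate and initialize all local pipeline objects first so that the
    * handles can be passed to the renderer in a single asynchronous command.
    * Because the command is not waited upon, host-side compile failures are
    * not observed here and VK_SUCCESS is returned regardless.
    */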
   for (uint32_t i = 0; i < createInfoCount; i++) {
      struct vn_pipeline *pipeline =
         vk_zalloc(alloc, sizeof(*pipeline), VN_DEFAULT_ALIGN,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!pipeline) {
         for (uint32_t j = 0; j < i; j++)
            vk_free(alloc, vn_pipeline_from_handle(pPipelines[j]));

         if (local_infos)
            vk_free(alloc, local_infos);

         memset(pPipelines, 0, sizeof(*pPipelines) * createInfoCount);
         return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      vn_object_base_init(&pipeline->base, VK_OBJECT_TYPE_PIPELINE,
                          &dev->base);

      VkPipeline pipeline_handle = vn_pipeline_to_handle(pipeline);
      pPipelines[i] = pipeline_handle;
   }

   vn_async_vkCreateGraphicsPipelines(dev->instance, device, pipelineCache,
                                      createInfoCount, pCreateInfos, NULL,
                                      pPipelines);

   if (local_infos)
      vk_free(alloc, local_infos);

   return VK_SUCCESS;
}

VkResult
vn_CreateComputePipelines(VkDevice device,
                          VkPipelineCache pipelineCache,
                          uint32_t createInfoCount,
                          const VkComputePipelineCreateInfo *pCreateInfos,
                          const VkAllocationCallbacks *pAllocator,
                          VkPipeline *pPipelines)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

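   /* Same pattern as vn_CreateGraphicsPipelines, minus the create-info
    * fix-up: allocate the local objects, then encode a single asynchronous
    * create command for the whole batch.
    */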
   for (uint32_t i = 0; i < createInfoCount; i++) {
      struct vn_pipeline *pipeline =
         vk_zalloc(alloc, sizeof(*pipeline), VN_DEFAULT_ALIGN,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!pipeline) {
         for (uint32_t j = 0; j < i; j++)
            vk_free(alloc, vn_pipeline_from_handle(pPipelines[j]));
         memset(pPipelines, 0, sizeof(*pPipelines) * createInfoCount);
         return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      vn_object_base_init(&pipeline->base, VK_OBJECT_TYPE_PIPELINE,
                          &dev->base);

      VkPipeline pipeline_handle = vn_pipeline_to_handle(pipeline);
      pPipelines[i] = pipeline_handle;
   }

   vn_async_vkCreateComputePipelines(dev->instance, device, pipelineCache,
                                     createInfoCount, pCreateInfos, NULL,
                                     pPipelines);

   return VK_SUCCESS;
}

void
vn_DestroyPipeline(VkDevice device,
                   VkPipeline _pipeline,
                   const VkAllocationCallbacks *pAllocator)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_pipeline *pipeline = vn_pipeline_from_handle(_pipeline);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!pipeline)
      return;

   vn_async_vkDestroyPipeline(dev->instance, device, _pipeline, NULL);

   vn_object_base_fini(&pipeline->base);
   vk_free(alloc, pipeline);
}