// Copyright 2018 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "VkPipeline.hpp"

#include "VkDevice.hpp"
#include "VkPipelineCache.hpp"
#include "VkPipelineLayout.hpp"
#include "VkRenderPass.hpp"
#include "VkShaderModule.hpp"
#include "VkStringify.hpp"
#include "Pipeline/ComputeProgram.hpp"
#include "Pipeline/SpirvShader.hpp"

#include "marl/trace.h"

#include "spirv-tools/optimizer.hpp"

#include <iostream>
#include <unordered_map>

namespace {

// preprocessSpirv applies and freezes specializations into constants, and inlines all functions.
std::vector<uint32_t> preprocessSpirv(
    std::vector<uint32_t> const &code,
    VkSpecializationInfo const *specializationInfo,
    bool optimize)
{
	spvtools::Optimizer opt{ SPV_ENV_VULKAN_1_1 };

	opt.SetMessageConsumer([](spv_message_level_t level, const char *, const spv_position_t &p, const char *m) {
		switch(level)
		{
			case SPV_MSG_FATAL: sw::warn("SPIR-V FATAL: %d:%d %s\n", int(p.line), int(p.column), m); break;
			case SPV_MSG_INTERNAL_ERROR: sw::warn("SPIR-V INTERNAL_ERROR: %d:%d %s\n", int(p.line), int(p.column), m); break;
			case SPV_MSG_ERROR: sw::warn("SPIR-V ERROR: %d:%d %s\n", int(p.line), int(p.column), m); break;
			case SPV_MSG_WARNING: sw::warn("SPIR-V WARNING: %d:%d %s\n", int(p.line), int(p.column), m); break;
			case SPV_MSG_INFO: sw::trace("SPIR-V INFO: %d:%d %s\n", int(p.line), int(p.column), m); break;
			case SPV_MSG_DEBUG: sw::trace("SPIR-V DEBUG: %d:%d %s\n", int(p.line), int(p.column), m); break;
			default: sw::trace("SPIR-V MESSAGE: %d:%d %s\n", int(p.line), int(p.column), m);
		}
	});

	// If the pipeline uses specialization, apply the specializations before freezing
	if(specializationInfo)
	{
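		// A worked example of the mapping built below: a VkSpecializationMapEntry
		// { constantID = 7, offset = 4, size = 4 } over a pData of two 32-bit words
		// { 0x0A, 0x0B } maps constant 7 to the single default value 0x0B. Note the
		// integer divisions below assume offsets and sizes are multiples of
		// sizeof(uint32_t).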
		std::unordered_map<uint32_t, std::vector<uint32_t>> specializations;
		for(auto i = 0u; i < specializationInfo->mapEntryCount; ++i)
		{
			auto const &e = specializationInfo->pMapEntries[i];
			auto value_ptr =
			    static_cast<uint32_t const *>(specializationInfo->pData) + e.offset / sizeof(uint32_t);
			specializations.emplace(e.constantID,
			                        std::vector<uint32_t>{ value_ptr, value_ptr + e.size / sizeof(uint32_t) });
		}
		opt.RegisterPass(spvtools::CreateSetSpecConstantDefaultValuePass(specializations));
	}

	if(optimize)
	{
		// Full optimization list taken from spirv-opt.
		opt.RegisterPerformancePasses();
	}

	std::vector<uint32_t> optimized;
	opt.Run(code.data(), code.size(), &optimized);

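	// Debugging aid: flip the condition below to true to print the SPIR-V
	// disassembly before and after optimization.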
	if(false)
	{
		spvtools::SpirvTools core(SPV_ENV_VULKAN_1_1);
		std::string preOpt;
		core.Disassemble(code, &preOpt, SPV_BINARY_TO_TEXT_OPTION_NONE);
		std::string postOpt;
		core.Disassemble(optimized, &postOpt, SPV_BINARY_TO_TEXT_OPTION_NONE);
		std::cout << "PRE-OPT: " << preOpt << std::endl
		          << "POST-OPT: " << postOpt << std::endl;
	}

	return optimized;
}

std::shared_ptr<sw::SpirvShader> createShader(
    const vk::PipelineCache::SpirvShaderKey &key,
    const vk::ShaderModule *module,
    bool robustBufferAccess,
    const std::shared_ptr<vk::dbg::Context> &dbgctx)
{
	// Do not optimize the shader if we have a debugger context.
	// Optimization passes are likely to damage debug information, and reorder
	// instructions.
	const bool optimize = !dbgctx;

	// TODO(b/147726513): Do not preprocess the shader if we have a debugger
	// context.
	// This is a work-around for the SPIR-V tools incorrectly reporting errors
	// when debug information is provided. This can be removed once the
	// following SPIR-V tools bugs are fixed:
	// https://github.com/KhronosGroup/SPIRV-Tools/issues/3102
	// https://github.com/KhronosGroup/SPIRV-Tools/issues/3103
	// https://github.com/KhronosGroup/SPIRV-Tools/issues/3118
	auto code = dbgctx ? key.getInsns() : preprocessSpirv(key.getInsns(), key.getSpecializationInfo(), optimize);
	ASSERT(code.size() > 0);

	// If the pipeline has specialization constants, assume they're unique and
	// use a new serial ID so the shader gets recompiled.
	uint32_t codeSerialID = (key.getSpecializationInfo() ? vk::ShaderModule::nextSerialID() : module->getSerialID());

	// TODO(b/119409619): use allocator.
	return std::make_shared<sw::SpirvShader>(codeSerialID, key.getPipelineStage(), key.getEntryPointName().c_str(),
	                                         code, key.getRenderPass(), key.getSubpassIndex(), robustBufferAccess, dbgctx);
}

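// createProgram builds and finalizes the ComputeProgram for a shader/layout
// pair. Code generation currently happens eagerly here, at pipeline creation
// time (see the FIXME below).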
std::shared_ptr<sw::ComputeProgram> createProgram(const vk::PipelineCache::ComputeProgramKey &key)
{
	MARL_SCOPED_EVENT("createProgram");

	vk::DescriptorSet::Bindings descriptorSets;  // FIXME(b/129523279): Delay code generation until invoke time.
	// TODO(b/119409619): use allocator.
	auto program = std::make_shared<sw::ComputeProgram>(key.getShader(), key.getLayout(), descriptorSets);
	program->generate();
	program->finalize();
	return program;
}

}  // anonymous namespace

namespace vk {

Pipeline::Pipeline(PipelineLayout const *layout, const Device *device)
    : layout(layout)
    , device(device)
    , robustBufferAccess(device->getEnabledFeatures().robustBufferAccess)
{
}

GraphicsPipeline::GraphicsPipeline(const VkGraphicsPipelineCreateInfo *pCreateInfo, void *mem, const Device *device)
    : Pipeline(vk::Cast(pCreateInfo->layout), device)
{
	context.robustBufferAccess = robustBufferAccess;

	if((pCreateInfo->flags &
	    ~(VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT |
	      VK_PIPELINE_CREATE_DERIVATIVE_BIT |
	      VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) != 0)
	{
		UNSUPPORTED("pCreateInfo->flags %d", int(pCreateInfo->flags));
	}

	if(pCreateInfo->pTessellationState != nullptr)
	{
		UNSUPPORTED("pCreateInfo->pTessellationState");
	}

	if(pCreateInfo->pDynamicState)
	{
		if(pCreateInfo->pDynamicState->flags != 0)
		{
			// Vulkan 1.2: "flags is reserved for future use." "flags must be 0"
			UNSUPPORTED("pCreateInfo->pDynamicState->flags %d", int(pCreateInfo->pDynamicState->flags));
		}

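		// Record each supported dynamic state as a bit in dynamicStateFlags,
		// indexed by the VkDynamicState enum value; the ASSERT below guards
		// against enum values too large for the mask.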
		for(uint32_t i = 0; i < pCreateInfo->pDynamicState->dynamicStateCount; i++)
		{
			VkDynamicState dynamicState = pCreateInfo->pDynamicState->pDynamicStates[i];
			switch(dynamicState)
			{
				case VK_DYNAMIC_STATE_VIEWPORT:
				case VK_DYNAMIC_STATE_SCISSOR:
				case VK_DYNAMIC_STATE_LINE_WIDTH:
				case VK_DYNAMIC_STATE_DEPTH_BIAS:
				case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
				case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
				case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
				case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
				case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
					ASSERT(dynamicState < (sizeof(dynamicStateFlags) * 8));
					dynamicStateFlags |= (1 << dynamicState);
					break;
				default:
					UNSUPPORTED("VkDynamicState %d", int(dynamicState));
			}
		}
	}

	const VkPipelineVertexInputStateCreateInfo *vertexInputState = pCreateInfo->pVertexInputState;

	if(vertexInputState->flags != 0)
	{
		// Vulkan 1.2: "flags is reserved for future use." "flags must be 0"
		UNSUPPORTED("vertexInputState->flags");
	}

	// Context must always have a PipelineLayout set.
	context.pipelineLayout = layout;

	// Temporary in-binding-order representation of buffer strides, to be consumed below
	// when considering attributes. TODO: unfuse buffers from attributes in the backend;
	// the fused representation is a legacy of the old GL model.
	uint32_t vertexStrides[MAX_VERTEX_INPUT_BINDINGS];
	uint32_t instanceStrides[MAX_VERTEX_INPUT_BINDINGS];
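	// Note: only entries for bindings referenced by an attribute description are
	// read below; untouched array slots may remain uninitialized.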
	for(uint32_t i = 0; i < vertexInputState->vertexBindingDescriptionCount; i++)
	{
		auto const &desc = vertexInputState->pVertexBindingDescriptions[i];
		vertexStrides[desc.binding] = desc.inputRate == VK_VERTEX_INPUT_RATE_VERTEX ? desc.stride : 0;
		instanceStrides[desc.binding] = desc.inputRate == VK_VERTEX_INPUT_RATE_INSTANCE ? desc.stride : 0;
	}

	for(uint32_t i = 0; i < vertexInputState->vertexAttributeDescriptionCount; i++)
	{
		auto const &desc = vertexInputState->pVertexAttributeDescriptions[i];
		sw::Stream &input = context.input[desc.location];
		input.format = desc.format;
		input.offset = desc.offset;
		input.binding = desc.binding;
		input.vertexStride = vertexStrides[desc.binding];
		input.instanceStride = instanceStrides[desc.binding];
	}

	const VkPipelineInputAssemblyStateCreateInfo *inputAssemblyState = pCreateInfo->pInputAssemblyState;

	if(inputAssemblyState->flags != 0)
	{
		// Vulkan 1.2: "flags is reserved for future use." "flags must be 0"
		UNSUPPORTED("pCreateInfo->pInputAssemblyState->flags %d", int(pCreateInfo->pInputAssemblyState->flags));
	}

	primitiveRestartEnable = (inputAssemblyState->primitiveRestartEnable != VK_FALSE);
	context.topology = inputAssemblyState->topology;

	const VkPipelineViewportStateCreateInfo *viewportState = pCreateInfo->pViewportState;
	if(viewportState)
	{
		if(viewportState->flags != 0)
		{
			// Vulkan 1.2: "flags is reserved for future use." "flags must be 0"
			UNSUPPORTED("pCreateInfo->pViewportState->flags %d", int(pCreateInfo->pViewportState->flags));
		}

		if((viewportState->viewportCount != 1) ||
		   (viewportState->scissorCount != 1))
		{
			UNSUPPORTED("VkPhysicalDeviceFeatures::multiViewport");
		}

		if(!hasDynamicState(VK_DYNAMIC_STATE_SCISSOR))
		{
			scissor = viewportState->pScissors[0];
		}

		if(!hasDynamicState(VK_DYNAMIC_STATE_VIEWPORT))
		{
			viewport = viewportState->pViewports[0];
		}
	}

	const VkPipelineRasterizationStateCreateInfo *rasterizationState = pCreateInfo->pRasterizationState;

	if(rasterizationState->flags != 0)
	{
		// Vulkan 1.2: "flags is reserved for future use." "flags must be 0"
		UNSUPPORTED("pCreateInfo->pRasterizationState->flags %d", int(pCreateInfo->pRasterizationState->flags));
	}

	if(rasterizationState->depthClampEnable != VK_FALSE)
	{
		UNSUPPORTED("VkPhysicalDeviceFeatures::depthClamp");
	}

	context.rasterizerDiscard = (rasterizationState->rasterizerDiscardEnable != VK_FALSE);
	context.cullMode = rasterizationState->cullMode;
	context.frontFace = rasterizationState->frontFace;
	context.polygonMode = rasterizationState->polygonMode;
	context.depthBias = (rasterizationState->depthBiasEnable != VK_FALSE) ? rasterizationState->depthBiasConstantFactor : 0.0f;
	context.slopeDepthBias = (rasterizationState->depthBiasEnable != VK_FALSE) ? rasterizationState->depthBiasSlopeFactor : 0.0f;

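	// Walk the pNext chain of extension structures attached to the
	// rasterization state, applying the extensions SwiftShader implements.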
	const VkBaseInStructure *extensionCreateInfo = reinterpret_cast<const VkBaseInStructure *>(rasterizationState->pNext);
	while(extensionCreateInfo)
	{
		// Casting to a long since some structures, such as
		// VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT,
		// are not enumerated in the official Vulkan header.
		switch((long)(extensionCreateInfo->sType))
		{
			case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT:
			{
				const VkPipelineRasterizationLineStateCreateInfoEXT *lineStateCreateInfo = reinterpret_cast<const VkPipelineRasterizationLineStateCreateInfoEXT *>(extensionCreateInfo);
				context.lineRasterizationMode = lineStateCreateInfo->lineRasterizationMode;
			}
			break;
			case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_PROVOKING_VERTEX_STATE_CREATE_INFO_EXT:
			{
				const VkPipelineRasterizationProvokingVertexStateCreateInfoEXT *provokingVertexModeCreateInfo =
				    reinterpret_cast<const VkPipelineRasterizationProvokingVertexStateCreateInfoEXT *>(extensionCreateInfo);
				context.provokingVertexMode = provokingVertexModeCreateInfo->provokingVertexMode;
			}
			break;
			default:
				WARN("pCreateInfo->pRasterizationState->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
				break;
		}

		extensionCreateInfo = extensionCreateInfo->pNext;
	}

	const VkPipelineMultisampleStateCreateInfo *multisampleState = pCreateInfo->pMultisampleState;
	if(multisampleState)
	{
		if(multisampleState->flags != 0)
		{
			// Vulkan 1.2: "flags is reserved for future use." "flags must be 0"
			UNSUPPORTED("pCreateInfo->pMultisampleState->flags %d", int(pCreateInfo->pMultisampleState->flags));
		}

		if(multisampleState->sampleShadingEnable != VK_FALSE)
		{
			UNSUPPORTED("VkPhysicalDeviceFeatures::sampleRateShading");
		}

		if(multisampleState->alphaToOneEnable != VK_FALSE)
		{
			UNSUPPORTED("VkPhysicalDeviceFeatures::alphaToOne");
		}

		switch(multisampleState->rasterizationSamples)
		{
			case VK_SAMPLE_COUNT_1_BIT:
				context.sampleCount = 1;
				break;
			case VK_SAMPLE_COUNT_4_BIT:
				context.sampleCount = 4;
				break;
			default:
				UNSUPPORTED("Unsupported sample count");
		}

		if(multisampleState->pSampleMask)
		{
			context.sampleMask = multisampleState->pSampleMask[0];
		}

		context.alphaToCoverage = (multisampleState->alphaToCoverageEnable != VK_FALSE);
	}
	else
	{
		context.sampleCount = 1;
	}

	const VkPipelineDepthStencilStateCreateInfo *depthStencilState = pCreateInfo->pDepthStencilState;
	if(depthStencilState)
	{
		if(depthStencilState->flags != 0)
		{
			// Vulkan 1.2: "flags is reserved for future use." "flags must be 0"
			UNSUPPORTED("pCreateInfo->pDepthStencilState->flags %d", int(pCreateInfo->pDepthStencilState->flags));
		}

		if(depthStencilState->depthBoundsTestEnable != VK_FALSE)
		{
			UNSUPPORTED("VkPhysicalDeviceFeatures::depthBounds");
		}

		context.depthBoundsTestEnable = (depthStencilState->depthBoundsTestEnable != VK_FALSE);
		context.depthBufferEnable = (depthStencilState->depthTestEnable != VK_FALSE);
		context.depthWriteEnable = (depthStencilState->depthWriteEnable != VK_FALSE);
		context.depthCompareMode = depthStencilState->depthCompareOp;

		context.stencilEnable = (depthStencilState->stencilTestEnable != VK_FALSE);
		if(context.stencilEnable)
		{
			context.frontStencil = depthStencilState->front;
			context.backStencil = depthStencilState->back;
		}
	}

	const VkPipelineColorBlendStateCreateInfo *colorBlendState = pCreateInfo->pColorBlendState;
	if(colorBlendState)
	{
		if(pCreateInfo->pColorBlendState->flags != 0)
		{
			// Vulkan 1.2: "flags is reserved for future use." "flags must be 0"
			UNSUPPORTED("pCreateInfo->pColorBlendState->flags %d", int(pCreateInfo->pColorBlendState->flags));
		}

		if(colorBlendState->logicOpEnable != VK_FALSE)
		{
			UNSUPPORTED("VkPhysicalDeviceFeatures::logicOp");
		}

		if(!hasDynamicState(VK_DYNAMIC_STATE_BLEND_CONSTANTS))
		{
			blendConstants.r = colorBlendState->blendConstants[0];
			blendConstants.g = colorBlendState->blendConstants[1];
			blendConstants.b = colorBlendState->blendConstants[2];
			blendConstants.a = colorBlendState->blendConstants[3];
		}

		for(auto i = 0u; i < colorBlendState->attachmentCount; i++)
		{
			const VkPipelineColorBlendAttachmentState &attachment = colorBlendState->pAttachments[i];
			context.colorWriteMask[i] = attachment.colorWriteMask;

			context.setBlendState(i, { (attachment.blendEnable != VK_FALSE),
			                           attachment.srcColorBlendFactor, attachment.dstColorBlendFactor, attachment.colorBlendOp,
			                           attachment.srcAlphaBlendFactor, attachment.dstAlphaBlendFactor, attachment.alphaBlendOp });
		}
	}

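	// Keep only the low context.sampleCount bits of the sample mask; e.g. for a
	// sample count of 4, 0xFFFFFFFF >> (32 - 4) yields 0xF.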
	context.multiSampleMask = context.sampleMask & ((unsigned)0xFFFFFFFF >> (32 - context.sampleCount));
}

void GraphicsPipeline::destroyPipeline(const VkAllocationCallbacks *pAllocator)
{
	vertexShader.reset();
	fragmentShader.reset();
}

size_t GraphicsPipeline::ComputeRequiredAllocationSize(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	return 0;
}

void GraphicsPipeline::setShader(const VkShaderStageFlagBits &stage, const std::shared_ptr<sw::SpirvShader> spirvShader)
{
	switch(stage)
	{
		case VK_SHADER_STAGE_VERTEX_BIT:
			ASSERT(vertexShader.get() == nullptr);
			vertexShader = spirvShader;
			context.vertexShader = vertexShader.get();
			break;

		case VK_SHADER_STAGE_FRAGMENT_BIT:
			ASSERT(fragmentShader.get() == nullptr);
			fragmentShader = spirvShader;
			context.pixelShader = fragmentShader.get();
			break;

		default:
			UNSUPPORTED("Unsupported stage");
			break;
	}
}

const std::shared_ptr<sw::SpirvShader> GraphicsPipeline::getShader(const VkShaderStageFlagBits &stage) const
{
	switch(stage)
	{
		case VK_SHADER_STAGE_VERTEX_BIT:
			return vertexShader;
		case VK_SHADER_STAGE_FRAGMENT_BIT:
			return fragmentShader;
		default:
			UNSUPPORTED("Unsupported stage");
			return fragmentShader;
	}
}

void GraphicsPipeline::compileShaders(const VkAllocationCallbacks *pAllocator, const VkGraphicsPipelineCreateInfo *pCreateInfo, PipelineCache *pPipelineCache)
{
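	// For each shader stage, look up (or build and insert) the compiled
	// SpirvShader in the pipeline cache, keyed by module, entry point,
	// render pass/subpass, and specialization info.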
	for(auto pStage = pCreateInfo->pStages; pStage != pCreateInfo->pStages + pCreateInfo->stageCount; pStage++)
	{
		if(pStage->flags != 0)
		{
			// Vulkan 1.2: "flags must be 0"
			UNSUPPORTED("pStage->flags %d", int(pStage->flags));
		}

		const ShaderModule *module = vk::Cast(pStage->module);
		const PipelineCache::SpirvShaderKey key(pStage->stage, pStage->pName, module->getCode(),
		                                        vk::Cast(pCreateInfo->renderPass), pCreateInfo->subpass,
		                                        pStage->pSpecializationInfo);
		auto pipelineStage = key.getPipelineStage();

		if(pPipelineCache)
		{
			PipelineCache &pipelineCache = *pPipelineCache;
			{
				std::unique_lock<std::mutex> lock(pipelineCache.getShaderMutex());
				const std::shared_ptr<sw::SpirvShader> *spirvShader = pipelineCache[key];
				if(!spirvShader)
				{
					auto shader = createShader(key, module, robustBufferAccess, device->getDebuggerContext());
					setShader(pipelineStage, shader);
					pipelineCache.insert(key, getShader(pipelineStage));
				}
				else
				{
					setShader(pipelineStage, *spirvShader);
				}
			}
		}
		else
		{
			auto shader = createShader(key, module, robustBufferAccess, device->getDebuggerContext());
			setShader(pipelineStage, shader);
		}
	}
}

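// computePrimitiveCount returns the number of primitives a draw of vertexCount
// vertices produces for the pipeline's topology. For example, a 5-vertex
// triangle strip yields max(5, 2) - 2 = 3 primitives.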
uint32_t GraphicsPipeline::computePrimitiveCount(uint32_t vertexCount) const
{
	switch(context.topology)
	{
		case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
			return vertexCount;
		case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
			return vertexCount / 2;
		case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
			return std::max<uint32_t>(vertexCount, 1) - 1;
		case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
			return vertexCount / 3;
		case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
			return std::max<uint32_t>(vertexCount, 2) - 2;
		case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
			return std::max<uint32_t>(vertexCount, 2) - 2;
		default:
			UNSUPPORTED("VkPrimitiveTopology %d", int(context.topology));
	}

	return 0;
}

const sw::Context &GraphicsPipeline::getContext() const
{
	return context;
}

const VkRect2D &GraphicsPipeline::getScissor() const
{
	return scissor;
}

const VkViewport &GraphicsPipeline::getViewport() const
{
	return viewport;
}

const sw::Color<float> &GraphicsPipeline::getBlendConstants() const
{
	return blendConstants;
}

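// hasDynamicState returns true if the given state was listed in
// VkPipelineDynamicStateCreateInfo::pDynamicStates at creation time, i.e. its
// bit was set in dynamicStateFlags by the constructor above.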
bool GraphicsPipeline::hasDynamicState(VkDynamicState dynamicState) const
{
	return (dynamicStateFlags & (1 << dynamicState)) != 0;
}

ComputePipeline::ComputePipeline(const VkComputePipelineCreateInfo *pCreateInfo, void *mem, const Device *device)
    : Pipeline(vk::Cast(pCreateInfo->layout), device)
{
}

void ComputePipeline::destroyPipeline(const VkAllocationCallbacks *pAllocator)
{
	shader.reset();
	program.reset();
}

size_t ComputePipeline::ComputeRequiredAllocationSize(const VkComputePipelineCreateInfo *pCreateInfo)
{
	return 0;
}

void ComputePipeline::compileShaders(const VkAllocationCallbacks *pAllocator, const VkComputePipelineCreateInfo *pCreateInfo, PipelineCache *pPipelineCache)
{
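	// The pipeline cache is consulted twice: first for the compiled SpirvShader
	// (keyed by module, entry point, and specialization), then for the
	// ComputeProgram generated from that shader and the pipeline layout.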
	auto &stage = pCreateInfo->stage;
	const ShaderModule *module = vk::Cast(stage.module);

	ASSERT(shader.get() == nullptr);
	ASSERT(program.get() == nullptr);

	const PipelineCache::SpirvShaderKey shaderKey(
	    stage.stage, stage.pName, module->getCode(), nullptr, 0, stage.pSpecializationInfo);
	if(pPipelineCache)
	{
		PipelineCache &pipelineCache = *pPipelineCache;
		{
			std::unique_lock<std::mutex> lock(pipelineCache.getShaderMutex());
			const std::shared_ptr<sw::SpirvShader> *spirvShader = pipelineCache[shaderKey];
			if(!spirvShader)
			{
				shader = createShader(shaderKey, module, robustBufferAccess, device->getDebuggerContext());
				pipelineCache.insert(shaderKey, shader);
			}
			else
			{
				shader = *spirvShader;
			}
		}

		{
			const PipelineCache::ComputeProgramKey programKey(shader.get(), layout);
			std::unique_lock<std::mutex> lock(pipelineCache.getProgramMutex());
			const std::shared_ptr<sw::ComputeProgram> *computeProgram = pipelineCache[programKey];
			if(!computeProgram)
			{
				program = createProgram(programKey);
				pipelineCache.insert(programKey, program);
			}
			else
			{
				program = *computeProgram;
			}
		}
	}
	else
	{
		shader = createShader(shaderKey, module, robustBufferAccess, device->getDebuggerContext());
		const PipelineCache::ComputeProgramKey programKey(shader.get(), layout);
		program = createProgram(programKey);
	}
}

void ComputePipeline::run(uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ,
                          uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ,
                          vk::DescriptorSet::Bindings const &descriptorSets,
                          vk::DescriptorSet::DynamicOffsets const &descriptorDynamicOffsets,
                          sw::PushConstantStorage const &pushConstants)
{
	ASSERT_OR_RETURN(program != nullptr);
	program->run(
	    descriptorSets, descriptorDynamicOffsets, pushConstants,
	    baseGroupX, baseGroupY, baseGroupZ,
	    groupCountX, groupCountY, groupCountZ);
}

}  // namespace vk