1 //
2 // Copyright 2018 The ANGLE Project Authors. All rights reserved.
3 // Use of this source code is governed by a BSD-style license that can be
4 // found in the LICENSE file.
5 //
6 // vk_cache_utils.cpp:
7 // Contains the classes for the Pipeline State Object cache as well as the RenderPass cache.
8 // Also contains the structures for the packed descriptions for the RenderPass and Pipeline.
9 //
10
11 #include "libANGLE/renderer/vulkan/vk_cache_utils.h"
12
13 #include "common/aligned_memory.h"
14 #include "libANGLE/BlobCache.h"
15 #include "libANGLE/VertexAttribute.h"
16 #include "libANGLE/renderer/vulkan/FramebufferVk.h"
17 #include "libANGLE/renderer/vulkan/ProgramVk.h"
18 #include "libANGLE/renderer/vulkan/RendererVk.h"
19 #include "libANGLE/renderer/vulkan/vk_format_utils.h"
20 #include "libANGLE/renderer/vulkan/vk_helpers.h"
21
22 #include <type_traits>
23
24 namespace rx
25 {
26 namespace vk
27 {
28
29 namespace
30 {
31
uint8_t PackGLBlendOp(GLenum blendOp)
33 {
34 switch (blendOp)
35 {
36 case GL_FUNC_ADD:
37 return static_cast<uint8_t>(VK_BLEND_OP_ADD);
38 case GL_FUNC_SUBTRACT:
39 return static_cast<uint8_t>(VK_BLEND_OP_SUBTRACT);
40 case GL_FUNC_REVERSE_SUBTRACT:
41 return static_cast<uint8_t>(VK_BLEND_OP_REVERSE_SUBTRACT);
42 case GL_MIN:
43 return static_cast<uint8_t>(VK_BLEND_OP_MIN);
44 case GL_MAX:
45 return static_cast<uint8_t>(VK_BLEND_OP_MAX);
46 default:
47 UNREACHABLE();
48 return 0;
49 }
50 }
51
uint8_t PackGLBlendFactor(GLenum blendFactor)
53 {
54 switch (blendFactor)
55 {
56 case GL_ZERO:
57 return static_cast<uint8_t>(VK_BLEND_FACTOR_ZERO);
58 case GL_ONE:
59 return static_cast<uint8_t>(VK_BLEND_FACTOR_ONE);
60 case GL_SRC_COLOR:
61 return static_cast<uint8_t>(VK_BLEND_FACTOR_SRC_COLOR);
62 case GL_DST_COLOR:
63 return static_cast<uint8_t>(VK_BLEND_FACTOR_DST_COLOR);
64 case GL_ONE_MINUS_SRC_COLOR:
65 return static_cast<uint8_t>(VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR);
66 case GL_SRC_ALPHA:
67 return static_cast<uint8_t>(VK_BLEND_FACTOR_SRC_ALPHA);
68 case GL_ONE_MINUS_SRC_ALPHA:
69 return static_cast<uint8_t>(VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA);
70 case GL_DST_ALPHA:
71 return static_cast<uint8_t>(VK_BLEND_FACTOR_DST_ALPHA);
72 case GL_ONE_MINUS_DST_ALPHA:
73 return static_cast<uint8_t>(VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA);
74 case GL_ONE_MINUS_DST_COLOR:
75 return static_cast<uint8_t>(VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR);
76 case GL_SRC_ALPHA_SATURATE:
77 return static_cast<uint8_t>(VK_BLEND_FACTOR_SRC_ALPHA_SATURATE);
78 case GL_CONSTANT_COLOR:
79 return static_cast<uint8_t>(VK_BLEND_FACTOR_CONSTANT_COLOR);
80 case GL_CONSTANT_ALPHA:
81 return static_cast<uint8_t>(VK_BLEND_FACTOR_CONSTANT_ALPHA);
82 case GL_ONE_MINUS_CONSTANT_COLOR:
83 return static_cast<uint8_t>(VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR);
84 case GL_ONE_MINUS_CONSTANT_ALPHA:
85 return static_cast<uint8_t>(VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA);
86 default:
87 UNREACHABLE();
88 return 0;
89 }
90 }
91
VkStencilOp PackGLStencilOp(GLenum compareOp)
93 {
94 switch (compareOp)
95 {
96 case GL_KEEP:
97 return VK_STENCIL_OP_KEEP;
98 case GL_ZERO:
99 return VK_STENCIL_OP_ZERO;
100 case GL_REPLACE:
101 return VK_STENCIL_OP_REPLACE;
102 case GL_INCR:
103 return VK_STENCIL_OP_INCREMENT_AND_CLAMP;
104 case GL_DECR:
105 return VK_STENCIL_OP_DECREMENT_AND_CLAMP;
106 case GL_INCR_WRAP:
107 return VK_STENCIL_OP_INCREMENT_AND_WRAP;
108 case GL_DECR_WRAP:
109 return VK_STENCIL_OP_DECREMENT_AND_WRAP;
110 case GL_INVERT:
111 return VK_STENCIL_OP_INVERT;
112 default:
113 UNREACHABLE();
114 return VK_STENCIL_OP_KEEP;
115 }
116 }
117
VkCompareOp PackGLCompareFunc(GLenum compareFunc)
119 {
120 switch (compareFunc)
121 {
122 case GL_NEVER:
123 return VK_COMPARE_OP_NEVER;
124 case GL_ALWAYS:
125 return VK_COMPARE_OP_ALWAYS;
126 case GL_LESS:
127 return VK_COMPARE_OP_LESS;
128 case GL_LEQUAL:
129 return VK_COMPARE_OP_LESS_OR_EQUAL;
130 case GL_EQUAL:
131 return VK_COMPARE_OP_EQUAL;
132 case GL_GREATER:
133 return VK_COMPARE_OP_GREATER;
134 case GL_GEQUAL:
135 return VK_COMPARE_OP_GREATER_OR_EQUAL;
136 case GL_NOTEQUAL:
137 return VK_COMPARE_OP_NOT_EQUAL;
138 default:
139 UNREACHABLE();
140 return VK_COMPARE_OP_NEVER;
141 }
142 }
143
void UnpackAttachmentDesc(VkAttachmentDescription *desc,
145 const vk::Format &format,
146 uint8_t samples,
147 const vk::PackedAttachmentOpsDesc &ops)
148 {
149 // We would only need this flag for duplicated attachments. Apply it conservatively.
150 desc->flags = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT;
151 desc->format = format.vkImageFormat;
152 desc->samples = gl_vk::GetSamples(samples);
153 desc->loadOp = static_cast<VkAttachmentLoadOp>(ops.loadOp);
154 desc->storeOp = static_cast<VkAttachmentStoreOp>(ops.storeOp);
155 desc->stencilLoadOp = static_cast<VkAttachmentLoadOp>(ops.stencilLoadOp);
156 desc->stencilStoreOp = static_cast<VkAttachmentStoreOp>(ops.stencilStoreOp);
157 desc->initialLayout = static_cast<VkImageLayout>(ops.initialLayout);
158 desc->finalLayout = static_cast<VkImageLayout>(ops.finalLayout);
159 }
160
void UnpackStencilState(const vk::PackedStencilOpState &packedState,
162 uint8_t stencilReference,
163 VkStencilOpState *stateOut)
164 {
165 stateOut->failOp = static_cast<VkStencilOp>(packedState.ops.fail);
166 stateOut->passOp = static_cast<VkStencilOp>(packedState.ops.pass);
167 stateOut->depthFailOp = static_cast<VkStencilOp>(packedState.ops.depthFail);
168 stateOut->compareOp = static_cast<VkCompareOp>(packedState.ops.compare);
169 stateOut->compareMask = packedState.compareMask;
170 stateOut->writeMask = packedState.writeMask;
171 stateOut->reference = stencilReference;
172 }
173
void UnpackBlendAttachmentState(const vk::PackedColorBlendAttachmentState &packedState,
175 VkPipelineColorBlendAttachmentState *stateOut)
176 {
177 stateOut->srcColorBlendFactor = static_cast<VkBlendFactor>(packedState.srcColorBlendFactor);
178 stateOut->dstColorBlendFactor = static_cast<VkBlendFactor>(packedState.dstColorBlendFactor);
179 stateOut->colorBlendOp = static_cast<VkBlendOp>(packedState.colorBlendOp);
180 stateOut->srcAlphaBlendFactor = static_cast<VkBlendFactor>(packedState.srcAlphaBlendFactor);
181 stateOut->dstAlphaBlendFactor = static_cast<VkBlendFactor>(packedState.dstAlphaBlendFactor);
182 stateOut->alphaBlendOp = static_cast<VkBlendOp>(packedState.alphaBlendOp);
183 }
184
angle::Result InitializeRenderPassFromDesc(vk::Context *context,
186 const RenderPassDesc &desc,
187 const AttachmentOpsArray &ops,
188 RenderPass *renderPass)
189 {
190 // Unpack the packed and split representation into the format required by Vulkan.
191 gl::DrawBuffersVector<VkAttachmentReference> colorAttachmentRefs;
192 VkAttachmentReference depthStencilAttachmentRef = {VK_ATTACHMENT_UNUSED};
193 gl::AttachmentArray<VkAttachmentDescription> attachmentDescs;
194
195 uint32_t colorAttachmentCount = 0;
196 uint32_t attachmentCount = 0;
197 for (uint32_t colorIndexGL = 0; colorIndexGL < desc.colorAttachmentRange(); ++colorIndexGL)
198 {
199 // Vulkan says:
200 //
201 // > Each element of the pColorAttachments array corresponds to an output location in the
202 // > shader, i.e. if the shader declares an output variable decorated with a Location value
203 // > of X, then it uses the attachment provided in pColorAttachments[X].
204 //
205 // This means that colorAttachmentRefs is indexed by colorIndexGL. Where the color
206 // attachment is disabled, a reference with VK_ATTACHMENT_UNUSED is given.
207
208 if (!desc.isColorAttachmentEnabled(colorIndexGL))
209 {
210 VkAttachmentReference colorRef;
211 colorRef.attachment = VK_ATTACHMENT_UNUSED;
212 colorRef.layout = VK_IMAGE_LAYOUT_UNDEFINED;
213 colorAttachmentRefs.push_back(colorRef);
214
215 continue;
216 }
217
218 uint32_t colorIndexVk = colorAttachmentCount++;
219 angle::FormatID formatID = desc[colorIndexGL];
220 ASSERT(formatID != angle::FormatID::NONE);
221 const vk::Format &format = context->getRenderer()->getFormat(formatID);
222
223 VkAttachmentReference colorRef;
224 colorRef.attachment = colorIndexVk;
225 colorRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
226
227 colorAttachmentRefs.push_back(colorRef);
228
229 UnpackAttachmentDesc(&attachmentDescs[colorIndexVk], format, desc.samples(),
230 ops[colorIndexVk]);
231
232 ++attachmentCount;
233 }
234
235 if (desc.hasDepthStencilAttachment())
236 {
237 uint32_t depthStencilIndex = static_cast<uint32_t>(desc.depthStencilAttachmentIndex());
238 uint32_t depthStencilIndexVk = colorAttachmentCount;
239
240 angle::FormatID formatID = desc[depthStencilIndex];
241 ASSERT(formatID != angle::FormatID::NONE);
242 const vk::Format &format = context->getRenderer()->getFormat(formatID);
243
244 depthStencilAttachmentRef.attachment = depthStencilIndexVk;
245 depthStencilAttachmentRef.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
246
247 UnpackAttachmentDesc(&attachmentDescs[depthStencilIndexVk], format, desc.samples(),
248 ops[depthStencilIndexVk]);
249
250 ++attachmentCount;
251 }
252
253 VkSubpassDescription subpassDesc = {};
254
255 subpassDesc.flags = 0;
256 subpassDesc.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
257 subpassDesc.inputAttachmentCount = 0;
258 subpassDesc.pInputAttachments = nullptr;
259 subpassDesc.colorAttachmentCount = static_cast<uint32_t>(colorAttachmentRefs.size());
260 subpassDesc.pColorAttachments = colorAttachmentRefs.data();
261 subpassDesc.pResolveAttachments = nullptr;
262 subpassDesc.pDepthStencilAttachment =
263 (depthStencilAttachmentRef.attachment != VK_ATTACHMENT_UNUSED ? &depthStencilAttachmentRef
264 : nullptr);
265 subpassDesc.preserveAttachmentCount = 0;
266 subpassDesc.pPreserveAttachments = nullptr;
267
268 VkRenderPassCreateInfo createInfo = {};
269 createInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
270 createInfo.flags = 0;
271 createInfo.attachmentCount = attachmentCount;
272 createInfo.pAttachments = attachmentDescs.data();
273 createInfo.subpassCount = 1;
274 createInfo.pSubpasses = &subpassDesc;
275 createInfo.dependencyCount = 0;
276 createInfo.pDependencies = nullptr;
277
278 ANGLE_VK_TRY(context, renderPass->init(context->getDevice(), createInfo));
279 return angle::Result::Continue;
280 }
281
282 // Utility for setting a value on a packed 4-bit integer array.
283 template <typename SrcT>
void Int4Array_Set(uint8_t *arrayBytes, uint32_t arrayIndex, SrcT value)
285 {
286 uint32_t byteIndex = arrayIndex >> 1;
287 ASSERT(value < 16);
288
289 if ((arrayIndex & 1) == 0)
290 {
291 arrayBytes[byteIndex] &= 0xF0;
292 arrayBytes[byteIndex] |= static_cast<uint8_t>(value);
293 }
294 else
295 {
296 arrayBytes[byteIndex] &= 0x0F;
297 arrayBytes[byteIndex] |= static_cast<uint8_t>(value) << 4;
298 }
299 }
300
301 // Utility for getting a value from a packed 4-bit integer array.
302 template <typename DestT>
DestT Int4Array_Get(const uint8_t *arrayBytes, uint32_t arrayIndex)
304 {
305 uint32_t byteIndex = arrayIndex >> 1;
306
307 if ((arrayIndex & 1) == 0)
308 {
309 return static_cast<DestT>(arrayBytes[byteIndex] & 0xF);
310 }
311 else
312 {
313 return static_cast<DestT>(arrayBytes[byteIndex] >> 4);
314 }
315 }
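// For illustration (hypothetical values): Int4Array_Set(bytes, 3, 0xA) computes byteIndex 1
// (3 >> 1) and, because the index is odd, stores 0xA in the high nibble of bytes[1].
// Int4Array_Get<uint8_t>(bytes, 3) then returns 0xA, while index 2 reads the low nibble.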
316
317 // Helper macro that casts to a bitfield type then verifies no bits were dropped.
318 #define SetBitField(lhs, rhs) \
319 lhs = static_cast<typename std::decay<decltype(lhs)>::type>(rhs); \
320 ASSERT(static_cast<decltype(rhs)>(lhs) == (rhs))
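// For example, SetBitField(packedAttrib.divisor, divisor) below truncates 'divisor' to the
// width of the packed bitfield and asserts that casting the stored value back reproduces the
// original, i.e. that no significant bits were lost.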
321
322 // When converting a byte number to a transition bit index we can shift instead of divide.
323 constexpr size_t kTransitionByteShift = Log2(kGraphicsPipelineDirtyBitBytes);
324
325 // When converting a number of bits offset to a transition bit index we can also shift.
326 constexpr size_t kBitsPerByte = 8;
327 constexpr size_t kTransitionBitShift = kTransitionByteShift + Log2(kBitsPerByte);
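// As a worked example (assuming kGraphicsPipelineDirtyBitBytes were 4): kTransitionByteShift
// would be Log2(4) = 2, so byteOffset >> 2 == byteOffset / 4, and kTransitionBitShift would be
// 2 + Log2(8) = 5, so bitOffset >> 5 == bitOffset / 32.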
328
329 // Helper macro to map from a PipelineDesc struct and field to a dirty bit index.
330 // Uses the 'offsetof' macro to compute the offset 'Member' within the PipelineDesc
331 // and the offset of 'Field' within 'Member'. We can optimize the dirty bit setting by computing
332 // the shifted dirty bit at compile time instead of calling "set".
333 #define ANGLE_GET_TRANSITION_BIT(Member, Field) \
334 ((offsetof(GraphicsPipelineDesc, Member) + offsetof(decltype(Member), Field)) >> \
335 kTransitionByteShift)
336
337 // Indexed dirty bits cannot be entirely computed at compile time since the index is passed to
338 // the update function.
339 #define ANGLE_GET_INDEXED_TRANSITION_BIT(Member, Field, Index, BitWidth) \
340 (((BitWidth * Index) >> kTransitionBitShift) + ANGLE_GET_TRANSITION_BIT(Member, Field))
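// For example, updateSampleMask() below passes Index = maskNumber and BitWidth = 32, so the
// transition bit is offset according to which 32-bit sample-mask word was modified.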
341
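// When a vertex attribute's component type does not match what the program expects, the
// attribute format is overridden with one of these 32-bit-per-channel formats (see
// initializePipeline()).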
342 constexpr angle::PackedEnumMap<gl::ComponentType, VkFormat> kMismatchedComponentTypeMap = {{
343 {gl::ComponentType::Float, VK_FORMAT_R32G32B32A32_SFLOAT},
344 {gl::ComponentType::Int, VK_FORMAT_R32G32B32A32_SINT},
345 {gl::ComponentType::UnsignedInt, VK_FORMAT_R32G32B32A32_UINT},
346 }};
347 } // anonymous namespace
348
349 // RenderPassDesc implementation.
RenderPassDesc::RenderPassDesc()
351 {
352 memset(this, 0, sizeof(RenderPassDesc));
353 }
354
355 RenderPassDesc::~RenderPassDesc() = default;
356
RenderPassDesc::RenderPassDesc(const RenderPassDesc &other)
358 {
359 memcpy(this, &other, sizeof(RenderPassDesc));
360 }
361
void RenderPassDesc::setSamples(GLint samples)
363 {
364 ASSERT(samples < std::numeric_limits<uint8_t>::max());
365 mSamples = static_cast<uint8_t>(samples);
366 }
367
void RenderPassDesc::packColorAttachment(size_t colorIndexGL, angle::FormatID formatID)
369 {
370 ASSERT(colorIndexGL < mAttachmentFormats.size());
371 static_assert(angle::kNumANGLEFormats < std::numeric_limits<uint8_t>::max(),
372 "Too many ANGLE formats to fit in uint8_t");
373 // Force the user to pack the depth/stencil attachment last.
374 ASSERT(mHasDepthStencilAttachment == false);
    // This function should only be called for enabled GL color attachments.
376 ASSERT(formatID != angle::FormatID::NONE);
377
378 uint8_t &packedFormat = mAttachmentFormats[colorIndexGL];
379 SetBitField(packedFormat, formatID);
380
381 // Set color attachment range such that it covers the range from index 0 through last
382 // active index. This is the reason why we need depth/stencil to be packed last.
383 mColorAttachmentRange =
384 std::max<uint8_t>(mColorAttachmentRange, static_cast<uint8_t>(colorIndexGL) + 1);
385 }
386
void RenderPassDesc::packColorAttachmentGap(size_t colorIndexGL)
388 {
389 ASSERT(colorIndexGL < mAttachmentFormats.size());
390 static_assert(angle::kNumANGLEFormats < std::numeric_limits<uint8_t>::max(),
391 "Too many ANGLE formats to fit in uint8_t");
392 // Force the user to pack the depth/stencil attachment last.
393 ASSERT(mHasDepthStencilAttachment == false);
394
395 // Use NONE as a flag for gaps in GL color attachments.
396 uint8_t &packedFormat = mAttachmentFormats[colorIndexGL];
397 SetBitField(packedFormat, angle::FormatID::NONE);
398 }
399
void RenderPassDesc::packDepthStencilAttachment(angle::FormatID formatID)
401 {
402 // Though written as Count, there is only ever a single depth/stencil attachment.
403 ASSERT(mHasDepthStencilAttachment == false);
404
405 size_t index = depthStencilAttachmentIndex();
406 ASSERT(index < mAttachmentFormats.size());
407
408 uint8_t &packedFormat = mAttachmentFormats[index];
409 SetBitField(packedFormat, formatID);
410
411 mHasDepthStencilAttachment = true;
412 }
413
RenderPassDesc &RenderPassDesc::operator=(const RenderPassDesc &other)
415 {
416 memcpy(this, &other, sizeof(RenderPassDesc));
417 return *this;
418 }
419
size_t RenderPassDesc::hash() const
421 {
422 return angle::ComputeGenericHash(*this);
423 }
424
bool RenderPassDesc::isColorAttachmentEnabled(size_t colorIndexGL) const
426 {
427 angle::FormatID formatID = operator[](colorIndexGL);
428 return formatID != angle::FormatID::NONE;
429 }
430
size_t RenderPassDesc::attachmentCount() const
432 {
433 size_t colorAttachmentCount = 0;
434 for (size_t i = 0; i < mColorAttachmentRange; ++i)
435 {
436 colorAttachmentCount += isColorAttachmentEnabled(i);
437 }
438
    // Note that there are no gaps in depth/stencil attachments; in fact there is at most one.
441 return colorAttachmentCount + mHasDepthStencilAttachment;
442 }
443
bool operator==(const RenderPassDesc &lhs, const RenderPassDesc &rhs)
445 {
446 return (memcmp(&lhs, &rhs, sizeof(RenderPassDesc)) == 0);
447 }
448
449 // GraphicsPipelineDesc implementation.
450 // Use aligned allocation and free so we can use the alignas keyword.
void *GraphicsPipelineDesc::operator new(std::size_t size)
452 {
453 return angle::AlignedAlloc(size, 32);
454 }
455
void GraphicsPipelineDesc::operator delete(void *ptr)
457 {
458 return angle::AlignedFree(ptr);
459 }
460
GraphicsPipelineDesc::GraphicsPipelineDesc()
462 {
463 memset(this, 0, sizeof(GraphicsPipelineDesc));
464 }
465
466 GraphicsPipelineDesc::~GraphicsPipelineDesc() = default;
467
GraphicsPipelineDesc::GraphicsPipelineDesc(const GraphicsPipelineDesc &other)
469 {
470 memcpy(this, &other, sizeof(GraphicsPipelineDesc));
471 }
472
GraphicsPipelineDesc &GraphicsPipelineDesc::operator=(const GraphicsPipelineDesc &other)
474 {
475 memcpy(this, &other, sizeof(GraphicsPipelineDesc));
476 return *this;
477 }
478
size_t GraphicsPipelineDesc::hash() const
480 {
481 return angle::ComputeGenericHash(*this);
482 }
483
bool GraphicsPipelineDesc::operator==(const GraphicsPipelineDesc &other) const
485 {
486 return (memcmp(this, &other, sizeof(GraphicsPipelineDesc)) == 0);
487 }
488
489 // TODO(jmadill): We should prefer using Packed GLenums. http://anglebug.com/2169
490
// Initialize the PSO states to be consistent with the initial values of gl::State.
void GraphicsPipelineDesc::initDefaults()
493 {
    // Set all vertex input attributes to their defaults; the default format is Float.
495 angle::FormatID defaultFormat = GetCurrentValueFormatID(gl::VertexAttribType::Float);
496 for (PackedAttribDesc &packedAttrib : mVertexInputAttribs.attribs)
497 {
498 SetBitField(packedAttrib.stride, 0);
499 SetBitField(packedAttrib.divisor, 0);
500 SetBitField(packedAttrib.format, defaultFormat);
501 SetBitField(packedAttrib.offset, 0);
502 }
503
504 mRasterizationAndMultisampleStateInfo.bits.depthClampEnable = 0;
505 mRasterizationAndMultisampleStateInfo.bits.rasterizationDiscardEnable = 0;
506 SetBitField(mRasterizationAndMultisampleStateInfo.bits.polygonMode, VK_POLYGON_MODE_FILL);
507 SetBitField(mRasterizationAndMultisampleStateInfo.bits.cullMode, VK_CULL_MODE_BACK_BIT);
508 SetBitField(mRasterizationAndMultisampleStateInfo.bits.frontFace,
509 VK_FRONT_FACE_COUNTER_CLOCKWISE);
510 mRasterizationAndMultisampleStateInfo.bits.depthBiasEnable = 0;
511 mRasterizationAndMultisampleStateInfo.depthBiasConstantFactor = 0.0f;
512 mRasterizationAndMultisampleStateInfo.depthBiasClamp = 0.0f;
513 mRasterizationAndMultisampleStateInfo.depthBiasSlopeFactor = 0.0f;
514 mRasterizationAndMultisampleStateInfo.lineWidth = 1.0f;
515
516 mRasterizationAndMultisampleStateInfo.bits.rasterizationSamples = 1;
517 mRasterizationAndMultisampleStateInfo.bits.sampleShadingEnable = 0;
518 mRasterizationAndMultisampleStateInfo.minSampleShading = 0.0f;
519 for (uint32_t &sampleMask : mRasterizationAndMultisampleStateInfo.sampleMask)
520 {
521 sampleMask = 0xFFFFFFFF;
522 }
523 mRasterizationAndMultisampleStateInfo.bits.alphaToCoverageEnable = 0;
524 mRasterizationAndMultisampleStateInfo.bits.alphaToOneEnable = 0;
525
526 mDepthStencilStateInfo.enable.depthTest = 0;
527 mDepthStencilStateInfo.enable.depthWrite = 1;
528 SetBitField(mDepthStencilStateInfo.depthCompareOp, VK_COMPARE_OP_LESS);
529 mDepthStencilStateInfo.enable.depthBoundsTest = 0;
530 mDepthStencilStateInfo.enable.stencilTest = 0;
531 mDepthStencilStateInfo.minDepthBounds = 0.0f;
532 mDepthStencilStateInfo.maxDepthBounds = 0.0f;
533 SetBitField(mDepthStencilStateInfo.front.ops.fail, VK_STENCIL_OP_KEEP);
534 SetBitField(mDepthStencilStateInfo.front.ops.pass, VK_STENCIL_OP_KEEP);
535 SetBitField(mDepthStencilStateInfo.front.ops.depthFail, VK_STENCIL_OP_KEEP);
536 SetBitField(mDepthStencilStateInfo.front.ops.compare, VK_COMPARE_OP_ALWAYS);
537 SetBitField(mDepthStencilStateInfo.front.compareMask, 0xFF);
538 SetBitField(mDepthStencilStateInfo.front.writeMask, 0xFF);
539 mDepthStencilStateInfo.frontStencilReference = 0;
540 SetBitField(mDepthStencilStateInfo.back.ops.fail, VK_STENCIL_OP_KEEP);
541 SetBitField(mDepthStencilStateInfo.back.ops.pass, VK_STENCIL_OP_KEEP);
542 SetBitField(mDepthStencilStateInfo.back.ops.depthFail, VK_STENCIL_OP_KEEP);
543 SetBitField(mDepthStencilStateInfo.back.ops.compare, VK_COMPARE_OP_ALWAYS);
544 SetBitField(mDepthStencilStateInfo.back.compareMask, 0xFF);
545 SetBitField(mDepthStencilStateInfo.back.writeMask, 0xFF);
546 mDepthStencilStateInfo.backStencilReference = 0;
547
548 PackedInputAssemblyAndColorBlendStateInfo &inputAndBlend = mInputAssemblyAndColorBlendStateInfo;
549 inputAndBlend.logic.opEnable = 0;
550 inputAndBlend.logic.op = static_cast<uint32_t>(VK_LOGIC_OP_CLEAR);
551 inputAndBlend.blendEnableMask = 0;
552 inputAndBlend.blendConstants[0] = 0.0f;
553 inputAndBlend.blendConstants[1] = 0.0f;
554 inputAndBlend.blendConstants[2] = 0.0f;
555 inputAndBlend.blendConstants[3] = 0.0f;
556
557 VkFlags allColorBits = (VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
558 VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT);
559
560 for (uint32_t colorIndexGL = 0; colorIndexGL < gl::IMPLEMENTATION_MAX_DRAW_BUFFERS;
561 ++colorIndexGL)
562 {
563 Int4Array_Set(inputAndBlend.colorWriteMaskBits, colorIndexGL, allColorBits);
564 }
565
566 PackedColorBlendAttachmentState blendAttachmentState;
567 SetBitField(blendAttachmentState.srcColorBlendFactor, VK_BLEND_FACTOR_ONE);
568 SetBitField(blendAttachmentState.dstColorBlendFactor, VK_BLEND_FACTOR_ZERO);
569 SetBitField(blendAttachmentState.colorBlendOp, VK_BLEND_OP_ADD);
570 SetBitField(blendAttachmentState.srcAlphaBlendFactor, VK_BLEND_FACTOR_ONE);
571 SetBitField(blendAttachmentState.dstAlphaBlendFactor, VK_BLEND_FACTOR_ZERO);
572 SetBitField(blendAttachmentState.alphaBlendOp, VK_BLEND_OP_ADD);
573
574 std::fill(&inputAndBlend.attachments[0],
575 &inputAndBlend.attachments[gl::IMPLEMENTATION_MAX_DRAW_BUFFERS],
576 blendAttachmentState);
577
578 inputAndBlend.primitive.topology = static_cast<uint16_t>(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST);
579 inputAndBlend.primitive.restartEnable = 0;
580
    // Viewport and scissor will be set to valid values when the framebuffer is bound.
582 mViewport.x = 0.0f;
583 mViewport.y = 0.0f;
584 mViewport.width = 0.0f;
585 mViewport.height = 0.0f;
586 mViewport.minDepth = 0.0f;
587 mViewport.maxDepth = 1.0f;
588
589 mScissor.offset.x = 0;
590 mScissor.offset.y = 0;
591 mScissor.extent.width = 0;
592 mScissor.extent.height = 0;
593 }
594
angle::Result GraphicsPipelineDesc::initializePipeline(
596 vk::Context *context,
597 const vk::PipelineCache &pipelineCacheVk,
598 const RenderPass &compatibleRenderPass,
599 const PipelineLayout &pipelineLayout,
600 const gl::AttributesMask &activeAttribLocationsMask,
601 const gl::ComponentTypeMask &programAttribsTypeMask,
602 const ShaderModule *vertexModule,
603 const ShaderModule *fragmentModule,
604 Pipeline *pipelineOut) const
605 {
606 angle::FixedVector<VkPipelineShaderStageCreateInfo, 2> shaderStages;
607 VkPipelineVertexInputStateCreateInfo vertexInputState = {};
608 VkPipelineInputAssemblyStateCreateInfo inputAssemblyState = {};
609 VkPipelineViewportStateCreateInfo viewportState = {};
610 VkPipelineRasterizationStateCreateInfo rasterState = {};
611 VkPipelineMultisampleStateCreateInfo multisampleState = {};
612 VkPipelineDepthStencilStateCreateInfo depthStencilState = {};
613 std::array<VkPipelineColorBlendAttachmentState, gl::IMPLEMENTATION_MAX_DRAW_BUFFERS>
614 blendAttachmentState;
615 VkPipelineColorBlendStateCreateInfo blendState = {};
616 VkGraphicsPipelineCreateInfo createInfo = {};
617
618 // Vertex shader is always expected to be present.
619 ASSERT(vertexModule != nullptr);
620 VkPipelineShaderStageCreateInfo vertexStage = {};
621 vertexStage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
622 vertexStage.flags = 0;
623 vertexStage.stage = VK_SHADER_STAGE_VERTEX_BIT;
624 vertexStage.module = vertexModule->getHandle();
625 vertexStage.pName = "main";
626 vertexStage.pSpecializationInfo = nullptr;
627 shaderStages.push_back(vertexStage);
628
629 // Fragment shader is optional.
630 // anglebug.com/3509 - Don't compile the fragment shader if rasterizationDiscardEnable = true
631 if (fragmentModule && !mRasterizationAndMultisampleStateInfo.bits.rasterizationDiscardEnable)
632 {
633 VkPipelineShaderStageCreateInfo fragmentStage = {};
634 fragmentStage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
635 fragmentStage.flags = 0;
636 fragmentStage.stage = VK_SHADER_STAGE_FRAGMENT_BIT;
637 fragmentStage.module = fragmentModule->getHandle();
638 fragmentStage.pName = "main";
639 fragmentStage.pSpecializationInfo = nullptr;
640 shaderStages.push_back(fragmentStage);
641 }
642
643 // TODO(jmadill): Possibly use different path for ES 3.1 split bindings/attribs.
644 gl::AttribArray<VkVertexInputBindingDescription> bindingDescs;
645 gl::AttribArray<VkVertexInputAttributeDescription> attributeDescs;
646
647 uint32_t vertexAttribCount = 0;
648
649 size_t unpackedSize = sizeof(shaderStages) + sizeof(vertexInputState) +
650 sizeof(inputAssemblyState) + sizeof(viewportState) + sizeof(rasterState) +
651 sizeof(multisampleState) + sizeof(depthStencilState) +
652 sizeof(blendAttachmentState) + sizeof(blendState) + sizeof(bindingDescs) +
653 sizeof(attributeDescs);
654 ANGLE_UNUSED_VARIABLE(unpackedSize);
655
656 gl::AttribArray<VkVertexInputBindingDivisorDescriptionEXT> divisorDesc;
657 VkPipelineVertexInputDivisorStateCreateInfoEXT divisorState = {};
658 divisorState.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT;
659 divisorState.pVertexBindingDivisors = divisorDesc.data();
660
661 for (size_t attribIndexSizeT : activeAttribLocationsMask)
662 {
663 const uint32_t attribIndex = static_cast<uint32_t>(attribIndexSizeT);
664
665 VkVertexInputBindingDescription &bindingDesc = bindingDescs[vertexAttribCount];
666 VkVertexInputAttributeDescription &attribDesc = attributeDescs[vertexAttribCount];
667 const PackedAttribDesc &packedAttrib = mVertexInputAttribs.attribs[attribIndex];
668
669 bindingDesc.binding = attribIndex;
670 bindingDesc.stride = static_cast<uint32_t>(packedAttrib.stride);
671 if (packedAttrib.divisor != 0)
672 {
673 bindingDesc.inputRate = static_cast<VkVertexInputRate>(VK_VERTEX_INPUT_RATE_INSTANCE);
674 divisorDesc[divisorState.vertexBindingDivisorCount].binding = bindingDesc.binding;
675 divisorDesc[divisorState.vertexBindingDivisorCount].divisor = packedAttrib.divisor;
676 ++divisorState.vertexBindingDivisorCount;
677 }
678 else
679 {
680 bindingDesc.inputRate = static_cast<VkVertexInputRate>(VK_VERTEX_INPUT_RATE_VERTEX);
681 }
682
683 // Get the corresponding VkFormat for the attrib's format.
684 angle::FormatID formatID = static_cast<angle::FormatID>(packedAttrib.format);
685 const vk::Format &format = context->getRenderer()->getFormat(formatID);
686 const angle::Format &angleFormat = format.angleFormat();
687 VkFormat vkFormat = format.vkBufferFormat;
688
689 gl::ComponentType attribType =
690 GetVertexAttributeComponentType(angleFormat.isPureInt(), angleFormat.vertexAttribType);
691 gl::ComponentType programAttribType =
692 gl::GetComponentTypeMask(programAttribsTypeMask, attribIndex);
693
694 if (attribType != programAttribType)
695 {
696 // Override the format with a compatible one.
697 vkFormat = kMismatchedComponentTypeMap[programAttribType];
698
699 bindingDesc.stride = 0; // Prevent out-of-bounds accesses.
700 }
701
702 // The binding index could become more dynamic in ES 3.1.
703 attribDesc.binding = attribIndex;
704 attribDesc.format = vkFormat;
705 attribDesc.location = static_cast<uint32_t>(attribIndex);
706 attribDesc.offset = packedAttrib.offset;
707
708 vertexAttribCount++;
709 }
710
711 // The binding descriptions are filled in at draw time.
712 vertexInputState.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
713 vertexInputState.flags = 0;
714 vertexInputState.vertexBindingDescriptionCount = vertexAttribCount;
715 vertexInputState.pVertexBindingDescriptions = bindingDescs.data();
716 vertexInputState.vertexAttributeDescriptionCount = vertexAttribCount;
717 vertexInputState.pVertexAttributeDescriptions = attributeDescs.data();
718 if (divisorState.vertexBindingDivisorCount)
719 vertexInputState.pNext = &divisorState;
720
721 // Primitive topology is filled in at draw time.
722 inputAssemblyState.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
723 inputAssemblyState.flags = 0;
724 inputAssemblyState.topology =
725 static_cast<VkPrimitiveTopology>(mInputAssemblyAndColorBlendStateInfo.primitive.topology);
726 inputAssemblyState.primitiveRestartEnable =
727 static_cast<VkBool32>(mInputAssemblyAndColorBlendStateInfo.primitive.restartEnable);
728
729 // Set initial viewport and scissor state.
730
731 // 0-sized viewports are invalid in Vulkan. We always use a scissor that at least matches the
732 // requested viewport, so it's safe to adjust the viewport size here.
733 VkViewport viewport = mViewport;
734 if (viewport.width == 0)
735 {
736 viewport.width = 1;
737 }
738 if (viewport.height == 0)
739 {
740 viewport.height = 1;
741 }
742
743 viewportState.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
744 viewportState.flags = 0;
745 viewportState.viewportCount = 1;
746 viewportState.pViewports = &viewport;
747 viewportState.scissorCount = 1;
748 viewportState.pScissors = &mScissor;
749
750 const PackedRasterizationAndMultisampleStateInfo &rasterAndMS =
751 mRasterizationAndMultisampleStateInfo;
752
753 // Rasterizer state.
754 rasterState.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
755 rasterState.flags = 0;
756 rasterState.depthClampEnable = static_cast<VkBool32>(rasterAndMS.bits.depthClampEnable);
757 rasterState.rasterizerDiscardEnable =
758 static_cast<VkBool32>(rasterAndMS.bits.rasterizationDiscardEnable);
759 rasterState.polygonMode = static_cast<VkPolygonMode>(rasterAndMS.bits.polygonMode);
760 rasterState.cullMode = static_cast<VkCullModeFlags>(rasterAndMS.bits.cullMode);
761 rasterState.frontFace = static_cast<VkFrontFace>(rasterAndMS.bits.frontFace);
762 rasterState.depthBiasEnable = static_cast<VkBool32>(rasterAndMS.bits.depthBiasEnable);
763 rasterState.depthBiasConstantFactor = rasterAndMS.depthBiasConstantFactor;
764 rasterState.depthBiasClamp = rasterAndMS.depthBiasClamp;
765 rasterState.depthBiasSlopeFactor = rasterAndMS.depthBiasSlopeFactor;
766 rasterState.lineWidth = rasterAndMS.lineWidth;
767
768 // Multisample state.
769 multisampleState.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
770 multisampleState.flags = 0;
771 multisampleState.rasterizationSamples =
772 gl_vk::GetSamples(rasterAndMS.bits.rasterizationSamples);
773 multisampleState.sampleShadingEnable =
774 static_cast<VkBool32>(rasterAndMS.bits.sampleShadingEnable);
775 multisampleState.minSampleShading = rasterAndMS.minSampleShading;
776 multisampleState.pSampleMask = rasterAndMS.sampleMask;
777 multisampleState.alphaToCoverageEnable =
778 static_cast<VkBool32>(rasterAndMS.bits.alphaToCoverageEnable);
779 multisampleState.alphaToOneEnable = static_cast<VkBool32>(rasterAndMS.bits.alphaToOneEnable);
780
781 // Depth/stencil state.
782 depthStencilState.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
783 depthStencilState.flags = 0;
784 depthStencilState.depthTestEnable =
785 static_cast<VkBool32>(mDepthStencilStateInfo.enable.depthTest);
786 depthStencilState.depthWriteEnable =
787 static_cast<VkBool32>(mDepthStencilStateInfo.enable.depthWrite);
788 depthStencilState.depthCompareOp =
789 static_cast<VkCompareOp>(mDepthStencilStateInfo.depthCompareOp);
790 depthStencilState.depthBoundsTestEnable =
791 static_cast<VkBool32>(mDepthStencilStateInfo.enable.depthBoundsTest);
792 depthStencilState.stencilTestEnable =
793 static_cast<VkBool32>(mDepthStencilStateInfo.enable.stencilTest);
794 UnpackStencilState(mDepthStencilStateInfo.front, mDepthStencilStateInfo.frontStencilReference,
795 &depthStencilState.front);
796 UnpackStencilState(mDepthStencilStateInfo.back, mDepthStencilStateInfo.backStencilReference,
797 &depthStencilState.back);
798 depthStencilState.minDepthBounds = mDepthStencilStateInfo.minDepthBounds;
799 depthStencilState.maxDepthBounds = mDepthStencilStateInfo.maxDepthBounds;
800
801 const PackedInputAssemblyAndColorBlendStateInfo &inputAndBlend =
802 mInputAssemblyAndColorBlendStateInfo;
803
804 blendState.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
805 blendState.flags = 0;
806 blendState.logicOpEnable = static_cast<VkBool32>(inputAndBlend.logic.opEnable);
807 blendState.logicOp = static_cast<VkLogicOp>(inputAndBlend.logic.op);
808 blendState.attachmentCount = static_cast<uint32_t>(mRenderPassDesc.colorAttachmentRange());
809 blendState.pAttachments = blendAttachmentState.data();
810
811 for (int i = 0; i < 4; i++)
812 {
813 blendState.blendConstants[i] = inputAndBlend.blendConstants[i];
814 }
815
816 const gl::DrawBufferMask blendEnableMask(inputAndBlend.blendEnableMask);
817
818 for (uint32_t colorIndexGL = 0; colorIndexGL < blendState.attachmentCount; ++colorIndexGL)
819 {
820 VkPipelineColorBlendAttachmentState &state = blendAttachmentState[colorIndexGL];
821
822 state.blendEnable = blendEnableMask[colorIndexGL] ? VK_TRUE : VK_FALSE;
823 state.colorWriteMask =
824 Int4Array_Get<VkColorComponentFlags>(inputAndBlend.colorWriteMaskBits, colorIndexGL);
825 UnpackBlendAttachmentState(inputAndBlend.attachments[colorIndexGL], &state);
826 }
827
828 // We would define dynamic state here if it were to be used.
829
830 createInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
831 createInfo.flags = 0;
832 createInfo.stageCount = static_cast<uint32_t>(shaderStages.size());
833 createInfo.pStages = shaderStages.data();
834 createInfo.pVertexInputState = &vertexInputState;
835 createInfo.pInputAssemblyState = &inputAssemblyState;
836 createInfo.pTessellationState = nullptr;
837 createInfo.pViewportState = &viewportState;
838 createInfo.pRasterizationState = &rasterState;
839 createInfo.pMultisampleState = &multisampleState;
840 createInfo.pDepthStencilState = &depthStencilState;
841 createInfo.pColorBlendState = &blendState;
842 createInfo.pDynamicState = nullptr;
843 createInfo.layout = pipelineLayout.getHandle();
844 createInfo.renderPass = compatibleRenderPass.getHandle();
845 createInfo.subpass = 0;
846 createInfo.basePipelineHandle = VK_NULL_HANDLE;
847 createInfo.basePipelineIndex = 0;
848
849 ANGLE_VK_TRY(context,
850 pipelineOut->initGraphics(context->getDevice(), createInfo, pipelineCacheVk));
851 return angle::Result::Continue;
852 }
853
void GraphicsPipelineDesc::updateVertexInput(GraphicsPipelineTransitionBits *transition,
855 uint32_t attribIndex,
856 GLuint stride,
857 GLuint divisor,
858 angle::FormatID format,
859 GLuint relativeOffset)
860 {
861 vk::PackedAttribDesc &packedAttrib = mVertexInputAttribs.attribs[attribIndex];
862
863 // TODO: Handle the case where the divisor overflows the field that holds it.
864 // http://anglebug.com/2672
865 ASSERT(divisor <= std::numeric_limits<decltype(packedAttrib.divisor)>::max());
866
867 SetBitField(packedAttrib.stride, stride);
868 SetBitField(packedAttrib.divisor, divisor);
869
870 if (format == angle::FormatID::NONE)
871 {
872 UNIMPLEMENTED();
873 }
874
875 SetBitField(packedAttrib.format, format);
876 SetBitField(packedAttrib.offset, relativeOffset);
877
878 constexpr size_t kAttribBits = kPackedAttribDescSize * kBitsPerByte;
879 const size_t kBit =
880 ANGLE_GET_INDEXED_TRANSITION_BIT(mVertexInputAttribs, attribs, attribIndex, kAttribBits);
881
    // Cover the next dirty bit conservatively, because each attribute is 6 bytes and can span
    // two transition-bit granules.
883 transition->set(kBit);
884 transition->set(kBit + 1);
885 }
886
void GraphicsPipelineDesc::updateTopology(GraphicsPipelineTransitionBits *transition,
888 gl::PrimitiveMode drawMode)
889 {
890 VkPrimitiveTopology vkTopology = gl_vk::GetPrimitiveTopology(drawMode);
891 SetBitField(mInputAssemblyAndColorBlendStateInfo.primitive.topology, vkTopology);
892
893 transition->set(ANGLE_GET_TRANSITION_BIT(mInputAssemblyAndColorBlendStateInfo, primitive));
894 }
895
void GraphicsPipelineDesc::updatePrimitiveRestartEnabled(GraphicsPipelineTransitionBits *transition,
897 bool primitiveRestartEnabled)
898 {
899 mInputAssemblyAndColorBlendStateInfo.primitive.restartEnable =
900 static_cast<uint16_t>(primitiveRestartEnabled);
901 transition->set(ANGLE_GET_TRANSITION_BIT(mInputAssemblyAndColorBlendStateInfo, primitive));
902 }
903
void GraphicsPipelineDesc::setCullMode(VkCullModeFlagBits cullMode)
905 {
906 SetBitField(mRasterizationAndMultisampleStateInfo.bits.cullMode, cullMode);
907 }
908
void GraphicsPipelineDesc::updateCullMode(GraphicsPipelineTransitionBits *transition,
910 const gl::RasterizerState &rasterState)
911 {
912 setCullMode(gl_vk::GetCullMode(rasterState));
913 transition->set(ANGLE_GET_TRANSITION_BIT(mRasterizationAndMultisampleStateInfo, bits));
914 }
915
void GraphicsPipelineDesc::updateFrontFace(GraphicsPipelineTransitionBits *transition,
917 const gl::RasterizerState &rasterState,
918 bool invertFrontFace)
919 {
920 mRasterizationAndMultisampleStateInfo.bits.frontFace =
921 static_cast<uint16_t>(gl_vk::GetFrontFace(rasterState.frontFace, invertFrontFace));
922 transition->set(ANGLE_GET_TRANSITION_BIT(mRasterizationAndMultisampleStateInfo, bits));
923 }
924
void GraphicsPipelineDesc::updateLineWidth(GraphicsPipelineTransitionBits *transition,
926 float lineWidth)
927 {
928 mRasterizationAndMultisampleStateInfo.lineWidth = lineWidth;
929 transition->set(ANGLE_GET_TRANSITION_BIT(mRasterizationAndMultisampleStateInfo, lineWidth));
930 }
931
void GraphicsPipelineDesc::updateRasterizerDiscardEnabled(
933 GraphicsPipelineTransitionBits *transition,
934 bool rasterizerDiscardEnabled)
935 {
936 mRasterizationAndMultisampleStateInfo.bits.rasterizationDiscardEnable =
937 static_cast<uint32_t>(rasterizerDiscardEnabled);
938 transition->set(ANGLE_GET_TRANSITION_BIT(mRasterizationAndMultisampleStateInfo, bits));
939 }
940
void GraphicsPipelineDesc::setRasterizationSamples(uint32_t rasterizationSamples)
942 {
943 mRasterizationAndMultisampleStateInfo.bits.rasterizationSamples = rasterizationSamples;
944 }
945
void GraphicsPipelineDesc::updateRasterizationSamples(GraphicsPipelineTransitionBits *transition,
947 uint32_t rasterizationSamples)
948 {
949 setRasterizationSamples(rasterizationSamples);
950 transition->set(ANGLE_GET_TRANSITION_BIT(mRasterizationAndMultisampleStateInfo, bits));
951 }
952
void GraphicsPipelineDesc::updateAlphaToCoverageEnable(GraphicsPipelineTransitionBits *transition,
954 bool enable)
955 {
956 mRasterizationAndMultisampleStateInfo.bits.alphaToCoverageEnable = enable;
957 transition->set(ANGLE_GET_TRANSITION_BIT(mRasterizationAndMultisampleStateInfo, bits));
958 }
959
void GraphicsPipelineDesc::updateAlphaToOneEnable(GraphicsPipelineTransitionBits *transition,
961 bool enable)
962 {
963 mRasterizationAndMultisampleStateInfo.bits.alphaToOneEnable = enable;
964 transition->set(ANGLE_GET_TRANSITION_BIT(mRasterizationAndMultisampleStateInfo, bits));
965 }
966
void GraphicsPipelineDesc::updateSampleMask(GraphicsPipelineTransitionBits *transition,
968 uint32_t maskNumber,
969 uint32_t mask)
970 {
971 ASSERT(maskNumber < gl::MAX_SAMPLE_MASK_WORDS);
972 mRasterizationAndMultisampleStateInfo.sampleMask[maskNumber] = mask;
973
974 constexpr size_t kMaskBits =
975 sizeof(mRasterizationAndMultisampleStateInfo.sampleMask[0]) * kBitsPerByte;
976 transition->set(ANGLE_GET_INDEXED_TRANSITION_BIT(mRasterizationAndMultisampleStateInfo,
977 sampleMask, maskNumber, kMaskBits));
978 }
979
void GraphicsPipelineDesc::updateBlendColor(GraphicsPipelineTransitionBits *transition,
981 const gl::ColorF &color)
982 {
983 mInputAssemblyAndColorBlendStateInfo.blendConstants[0] = color.red;
984 mInputAssemblyAndColorBlendStateInfo.blendConstants[1] = color.green;
985 mInputAssemblyAndColorBlendStateInfo.blendConstants[2] = color.blue;
986 mInputAssemblyAndColorBlendStateInfo.blendConstants[3] = color.alpha;
987 constexpr size_t kSize = sizeof(mInputAssemblyAndColorBlendStateInfo.blendConstants[0]) * 8;
988
989 for (int index = 0; index < 4; ++index)
990 {
991 const size_t kBit = ANGLE_GET_INDEXED_TRANSITION_BIT(mInputAssemblyAndColorBlendStateInfo,
992 blendConstants, index, kSize);
993 transition->set(kBit);
994 }
995 }
996
void GraphicsPipelineDesc::updateBlendEnabled(GraphicsPipelineTransitionBits *transition,
998 bool isBlendEnabled)
999 {
1000 gl::DrawBufferMask blendEnabled;
1001 if (isBlendEnabled)
1002 blendEnabled.set();
1003 mInputAssemblyAndColorBlendStateInfo.blendEnableMask =
1004 static_cast<uint8_t>(blendEnabled.bits());
1005 transition->set(
1006 ANGLE_GET_TRANSITION_BIT(mInputAssemblyAndColorBlendStateInfo, blendEnableMask));
1007 }
1008
void GraphicsPipelineDesc::updateBlendEquations(GraphicsPipelineTransitionBits *transition,
1010 const gl::BlendState &blendState)
1011 {
1012 constexpr size_t kSize = sizeof(PackedColorBlendAttachmentState) * 8;
1013
1014 for (size_t attachmentIndex = 0; attachmentIndex < gl::IMPLEMENTATION_MAX_DRAW_BUFFERS;
1015 ++attachmentIndex)
1016 {
1017 PackedColorBlendAttachmentState &blendAttachmentState =
1018 mInputAssemblyAndColorBlendStateInfo.attachments[attachmentIndex];
1019 blendAttachmentState.colorBlendOp = PackGLBlendOp(blendState.blendEquationRGB);
1020 blendAttachmentState.alphaBlendOp = PackGLBlendOp(blendState.blendEquationAlpha);
1021 transition->set(ANGLE_GET_INDEXED_TRANSITION_BIT(mInputAssemblyAndColorBlendStateInfo,
1022 attachments, attachmentIndex, kSize));
1023 }
1024 }
1025
void GraphicsPipelineDesc::updateBlendFuncs(GraphicsPipelineTransitionBits *transition,
1027 const gl::BlendState &blendState)
1028 {
1029 constexpr size_t kSize = sizeof(PackedColorBlendAttachmentState) * 8;
1030 for (size_t attachmentIndex = 0; attachmentIndex < gl::IMPLEMENTATION_MAX_DRAW_BUFFERS;
1031 ++attachmentIndex)
1032 {
1033 PackedColorBlendAttachmentState &blendAttachmentState =
1034 mInputAssemblyAndColorBlendStateInfo.attachments[attachmentIndex];
1035 blendAttachmentState.srcColorBlendFactor = PackGLBlendFactor(blendState.sourceBlendRGB);
1036 blendAttachmentState.dstColorBlendFactor = PackGLBlendFactor(blendState.destBlendRGB);
1037 blendAttachmentState.srcAlphaBlendFactor = PackGLBlendFactor(blendState.sourceBlendAlpha);
1038 blendAttachmentState.dstAlphaBlendFactor = PackGLBlendFactor(blendState.destBlendAlpha);
1039 transition->set(ANGLE_GET_INDEXED_TRANSITION_BIT(mInputAssemblyAndColorBlendStateInfo,
1040 attachments, attachmentIndex, kSize));
1041 }
1042 }
1043
void GraphicsPipelineDesc::setColorWriteMask(VkColorComponentFlags colorComponentFlags,
1045 const gl::DrawBufferMask &alphaMask)
1046 {
1047 PackedInputAssemblyAndColorBlendStateInfo &inputAndBlend = mInputAssemblyAndColorBlendStateInfo;
1048 uint8_t colorMask = static_cast<uint8_t>(colorComponentFlags);
1049
1050 for (uint32_t colorIndexGL = 0; colorIndexGL < gl::IMPLEMENTATION_MAX_DRAW_BUFFERS;
1051 colorIndexGL++)
1052 {
1053 uint8_t mask =
1054 alphaMask[colorIndexGL] ? (colorMask & ~VK_COLOR_COMPONENT_A_BIT) : colorMask;
1055 Int4Array_Set(inputAndBlend.colorWriteMaskBits, colorIndexGL, mask);
1056 }
1057 }
1058
void GraphicsPipelineDesc::setSingleColorWriteMask(uint32_t colorIndexGL,
1060 VkColorComponentFlags colorComponentFlags)
1061 {
1062 PackedInputAssemblyAndColorBlendStateInfo &inputAndBlend = mInputAssemblyAndColorBlendStateInfo;
1063 uint8_t colorMask = static_cast<uint8_t>(colorComponentFlags);
1064 Int4Array_Set(inputAndBlend.colorWriteMaskBits, colorIndexGL, colorMask);
1065 }
1066
void GraphicsPipelineDesc::updateColorWriteMask(GraphicsPipelineTransitionBits *transition,
1068 VkColorComponentFlags colorComponentFlags,
1069 const gl::DrawBufferMask &alphaMask)
1070 {
1071 setColorWriteMask(colorComponentFlags, alphaMask);
1072
1073 for (size_t colorIndexGL = 0; colorIndexGL < gl::IMPLEMENTATION_MAX_DRAW_BUFFERS;
1074 colorIndexGL++)
1075 {
1076 transition->set(ANGLE_GET_INDEXED_TRANSITION_BIT(mInputAssemblyAndColorBlendStateInfo,
1077 colorWriteMaskBits, colorIndexGL, 4));
1078 }
1079 }
1080
void GraphicsPipelineDesc::setDepthTestEnabled(bool enabled)
1082 {
1083 mDepthStencilStateInfo.enable.depthTest = enabled;
1084 }
1085
void GraphicsPipelineDesc::setDepthWriteEnabled(bool enabled)
1087 {
1088 mDepthStencilStateInfo.enable.depthWrite = enabled;
1089 }
1090
void GraphicsPipelineDesc::setDepthFunc(VkCompareOp op)
1092 {
1093 SetBitField(mDepthStencilStateInfo.depthCompareOp, op);
1094 }
1095
void GraphicsPipelineDesc::setStencilTestEnabled(bool enabled)
1097 {
1098 mDepthStencilStateInfo.enable.stencilTest = enabled;
1099 }
1100
void GraphicsPipelineDesc::setStencilFrontFuncs(uint8_t reference,
1102 VkCompareOp compareOp,
1103 uint8_t compareMask)
1104 {
1105 mDepthStencilStateInfo.frontStencilReference = reference;
1106 mDepthStencilStateInfo.front.compareMask = compareMask;
1107 SetBitField(mDepthStencilStateInfo.front.ops.compare, compareOp);
1108 }
1109
void GraphicsPipelineDesc::setStencilBackFuncs(uint8_t reference,
1111 VkCompareOp compareOp,
1112 uint8_t compareMask)
1113 {
1114 mDepthStencilStateInfo.backStencilReference = reference;
1115 mDepthStencilStateInfo.back.compareMask = compareMask;
1116 SetBitField(mDepthStencilStateInfo.back.ops.compare, compareOp);
1117 }
1118
void GraphicsPipelineDesc::setStencilFrontOps(VkStencilOp failOp,
1120 VkStencilOp passOp,
1121 VkStencilOp depthFailOp)
1122 {
1123 SetBitField(mDepthStencilStateInfo.front.ops.fail, failOp);
1124 SetBitField(mDepthStencilStateInfo.front.ops.pass, passOp);
1125 SetBitField(mDepthStencilStateInfo.front.ops.depthFail, depthFailOp);
1126 }
1127
void GraphicsPipelineDesc::setStencilBackOps(VkStencilOp failOp,
1129 VkStencilOp passOp,
1130 VkStencilOp depthFailOp)
1131 {
1132 SetBitField(mDepthStencilStateInfo.back.ops.fail, failOp);
1133 SetBitField(mDepthStencilStateInfo.back.ops.pass, passOp);
1134 SetBitField(mDepthStencilStateInfo.back.ops.depthFail, depthFailOp);
1135 }
1136
void GraphicsPipelineDesc::setStencilFrontWriteMask(uint8_t mask)
1138 {
1139 mDepthStencilStateInfo.front.writeMask = mask;
1140 }
1141
void GraphicsPipelineDesc::setStencilBackWriteMask(uint8_t mask)
1143 {
1144 mDepthStencilStateInfo.back.writeMask = mask;
1145 }
1146
void GraphicsPipelineDesc::updateDepthTestEnabled(GraphicsPipelineTransitionBits *transition,
1148 const gl::DepthStencilState &depthStencilState,
1149 const gl::Framebuffer *drawFramebuffer)
1150 {
1151 // Only enable the depth test if the draw framebuffer has a depth buffer. It's possible that
1152 // we're emulating a stencil-only buffer with a depth-stencil buffer
1153 setDepthTestEnabled(depthStencilState.depthTest && drawFramebuffer->hasDepth());
1154 transition->set(ANGLE_GET_TRANSITION_BIT(mDepthStencilStateInfo, enable));
1155 }
1156
void GraphicsPipelineDesc::updateDepthFunc(GraphicsPipelineTransitionBits *transition,
1158 const gl::DepthStencilState &depthStencilState)
1159 {
1160 setDepthFunc(PackGLCompareFunc(depthStencilState.depthFunc));
1161 transition->set(ANGLE_GET_TRANSITION_BIT(mDepthStencilStateInfo, depthCompareOp));
1162 }
1163
void GraphicsPipelineDesc::updateDepthWriteEnabled(GraphicsPipelineTransitionBits *transition,
1165 const gl::DepthStencilState &depthStencilState,
1166 const gl::Framebuffer *drawFramebuffer)
1167 {
1168 // Don't write to depth buffers that should not exist
1169 setDepthWriteEnabled(drawFramebuffer->hasDepth() ? depthStencilState.depthMask : false);
1170 transition->set(ANGLE_GET_TRANSITION_BIT(mDepthStencilStateInfo, enable));
1171 }
1172
void GraphicsPipelineDesc::updateStencilTestEnabled(GraphicsPipelineTransitionBits *transition,
1174 const gl::DepthStencilState &depthStencilState,
1175 const gl::Framebuffer *drawFramebuffer)
1176 {
1177 // Only enable the stencil test if the draw framebuffer has a stencil buffer. It's possible
1178 // that we're emulating a depth-only buffer with a depth-stencil buffer
1179 setStencilTestEnabled(depthStencilState.stencilTest && drawFramebuffer->hasStencil());
1180 transition->set(ANGLE_GET_TRANSITION_BIT(mDepthStencilStateInfo, enable));
1181 }
1182
void GraphicsPipelineDesc::updateStencilFrontFuncs(GraphicsPipelineTransitionBits *transition,
1184 GLint ref,
1185 const gl::DepthStencilState &depthStencilState)
1186 {
1187 setStencilFrontFuncs(static_cast<uint8_t>(ref),
1188 PackGLCompareFunc(depthStencilState.stencilFunc),
1189 static_cast<uint8_t>(depthStencilState.stencilMask));
1190 transition->set(ANGLE_GET_TRANSITION_BIT(mDepthStencilStateInfo, front));
1191 transition->set(ANGLE_GET_TRANSITION_BIT(mDepthStencilStateInfo, frontStencilReference));
1192 }
1193
void GraphicsPipelineDesc::updateStencilBackFuncs(GraphicsPipelineTransitionBits *transition,
1195 GLint ref,
1196 const gl::DepthStencilState &depthStencilState)
1197 {
1198 setStencilBackFuncs(static_cast<uint8_t>(ref),
1199 PackGLCompareFunc(depthStencilState.stencilBackFunc),
1200 static_cast<uint8_t>(depthStencilState.stencilBackMask));
1201 transition->set(ANGLE_GET_TRANSITION_BIT(mDepthStencilStateInfo, back));
1202 transition->set(ANGLE_GET_TRANSITION_BIT(mDepthStencilStateInfo, backStencilReference));
1203 }
1204
void GraphicsPipelineDesc::updateStencilFrontOps(GraphicsPipelineTransitionBits *transition,
1206 const gl::DepthStencilState &depthStencilState)
1207 {
1208 setStencilFrontOps(PackGLStencilOp(depthStencilState.stencilFail),
1209 PackGLStencilOp(depthStencilState.stencilPassDepthPass),
1210 PackGLStencilOp(depthStencilState.stencilPassDepthFail));
1211 transition->set(ANGLE_GET_TRANSITION_BIT(mDepthStencilStateInfo, front));
1212 }
1213
void GraphicsPipelineDesc::updateStencilBackOps(GraphicsPipelineTransitionBits *transition,
1215 const gl::DepthStencilState &depthStencilState)
1216 {
1217 setStencilBackOps(PackGLStencilOp(depthStencilState.stencilBackFail),
1218 PackGLStencilOp(depthStencilState.stencilBackPassDepthPass),
1219 PackGLStencilOp(depthStencilState.stencilBackPassDepthFail));
1220 transition->set(ANGLE_GET_TRANSITION_BIT(mDepthStencilStateInfo, back));
1221 }
1222
void GraphicsPipelineDesc::updateStencilFrontWriteMask(
1224 GraphicsPipelineTransitionBits *transition,
1225 const gl::DepthStencilState &depthStencilState,
1226 const gl::Framebuffer *drawFramebuffer)
1227 {
1228 // Don't write to stencil buffers that should not exist
1229 setStencilFrontWriteMask(static_cast<uint8_t>(
1230 drawFramebuffer->hasStencil() ? depthStencilState.stencilWritemask : 0));
1231 transition->set(ANGLE_GET_TRANSITION_BIT(mDepthStencilStateInfo, front));
1232 }
1233
void GraphicsPipelineDesc::updateStencilBackWriteMask(
1235 GraphicsPipelineTransitionBits *transition,
1236 const gl::DepthStencilState &depthStencilState,
1237 const gl::Framebuffer *drawFramebuffer)
1238 {
1239 // Don't write to stencil buffers that should not exist
1240 setStencilBackWriteMask(static_cast<uint8_t>(
1241 drawFramebuffer->hasStencil() ? depthStencilState.stencilBackWritemask : 0));
1242 transition->set(ANGLE_GET_TRANSITION_BIT(mDepthStencilStateInfo, back));
1243 }
1244
1245 void GraphicsPipelineDesc::updatePolygonOffsetFillEnabled(
1246 GraphicsPipelineTransitionBits *transition,
1247 bool enabled)
1248 {
1249 mRasterizationAndMultisampleStateInfo.bits.depthBiasEnable = enabled;
1250 transition->set(ANGLE_GET_TRANSITION_BIT(mRasterizationAndMultisampleStateInfo, bits));
1251 }
1252
1253 void GraphicsPipelineDesc::updatePolygonOffset(GraphicsPipelineTransitionBits *transition,
1254 const gl::RasterizerState &rasterState)
1255 {
1256 mRasterizationAndMultisampleStateInfo.depthBiasSlopeFactor = rasterState.polygonOffsetFactor;
1257 mRasterizationAndMultisampleStateInfo.depthBiasConstantFactor = rasterState.polygonOffsetUnits;
1258 transition->set(
1259 ANGLE_GET_TRANSITION_BIT(mRasterizationAndMultisampleStateInfo, depthBiasSlopeFactor));
1260 transition->set(
1261 ANGLE_GET_TRANSITION_BIT(mRasterizationAndMultisampleStateInfo, depthBiasConstantFactor));
1262 }
1263
1264 void GraphicsPipelineDesc::setRenderPassDesc(const RenderPassDesc &renderPassDesc)
1265 {
1266 mRenderPassDesc = renderPassDesc;
1267 }
1268
1269 void GraphicsPipelineDesc::setViewport(const VkViewport &viewport)
1270 {
1271 mViewport = viewport;
1272 }
1273
1274 void GraphicsPipelineDesc::updateViewport(GraphicsPipelineTransitionBits *transition,
1275 const VkViewport &viewport)
1276 {
1277 mViewport = viewport;
1278 transition->set(ANGLE_GET_TRANSITION_BIT(mViewport, x));
1279 transition->set(ANGLE_GET_TRANSITION_BIT(mViewport, y));
1280 transition->set(ANGLE_GET_TRANSITION_BIT(mViewport, width));
1281 transition->set(ANGLE_GET_TRANSITION_BIT(mViewport, height));
1282 transition->set(ANGLE_GET_TRANSITION_BIT(mViewport, minDepth));
1283 transition->set(ANGLE_GET_TRANSITION_BIT(mViewport, maxDepth));
1284 }
1285
1286 void GraphicsPipelineDesc::updateDepthRange(GraphicsPipelineTransitionBits *transition,
1287 float nearPlane,
1288 float farPlane)
1289 {
1290 // GLES2.0 Section 2.12.1: Each of n and f are clamped to lie within [0, 1], as are all
1291 // arguments of type clampf.
1292 ASSERT(nearPlane >= 0.0f && nearPlane <= 1.0f);
1293 ASSERT(farPlane >= 0.0f && farPlane <= 1.0f);
1294 mViewport.minDepth = nearPlane;
1295 mViewport.maxDepth = farPlane;
1296 transition->set(ANGLE_GET_TRANSITION_BIT(mViewport, minDepth));
1297 transition->set(ANGLE_GET_TRANSITION_BIT(mViewport, maxDepth));
1298 }
1299
1300 void GraphicsPipelineDesc::setScissor(const VkRect2D &scissor)
1301 {
1302 mScissor = scissor;
1303 }
1304
1305 void GraphicsPipelineDesc::updateScissor(GraphicsPipelineTransitionBits *transition,
1306 const VkRect2D &scissor)
1307 {
1308 mScissor = scissor;
1309 transition->set(ANGLE_GET_TRANSITION_BIT(mScissor, offset.x));
1310 transition->set(ANGLE_GET_TRANSITION_BIT(mScissor, offset.y));
1311 transition->set(ANGLE_GET_TRANSITION_BIT(mScissor, extent.width));
1312 transition->set(ANGLE_GET_TRANSITION_BIT(mScissor, extent.height));
1313 }
1314
1315 void GraphicsPipelineDesc::updateRenderPassDesc(GraphicsPipelineTransitionBits *transition,
1316 const RenderPassDesc &renderPassDesc)
1317 {
1318 setRenderPassDesc(renderPassDesc);
1319
1320 // The RenderPass is a special case where it spans multiple bits but has no member.
1321 constexpr size_t kFirstBit =
1322 offsetof(GraphicsPipelineDesc, mRenderPassDesc) >> kTransitionByteShift;
1323 constexpr size_t kBitCount = kRenderPassDescSize >> kTransitionByteShift;
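    // Each transition bit covers (1 << kTransitionByteShift) bytes of the packed description, so
    // mark every bit whose byte range overlaps mRenderPassDesc.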
1324 for (size_t bit = 0; bit < kBitCount; ++bit)
1325 {
1326 transition->set(kFirstBit + bit);
1327 }
1328 }
1329
1330 // AttachmentOpsArray implementation.
1331 AttachmentOpsArray::AttachmentOpsArray()
1332 {
1333 memset(&mOps, 0, sizeof(PackedAttachmentOpsDesc) * mOps.size());
1334 }
1335
1336 AttachmentOpsArray::~AttachmentOpsArray() = default;
1337
1338 AttachmentOpsArray::AttachmentOpsArray(const AttachmentOpsArray &other)
1339 {
1340 memcpy(&mOps, &other.mOps, sizeof(PackedAttachmentOpsDesc) * mOps.size());
1341 }
1342
1343 AttachmentOpsArray &AttachmentOpsArray::operator=(const AttachmentOpsArray &other)
1344 {
1345 memcpy(&mOps, &other.mOps, sizeof(PackedAttachmentOpsDesc) * mOps.size());
1346 return *this;
1347 }
1348
1349 const PackedAttachmentOpsDesc &AttachmentOpsArray::operator[](size_t index) const
1350 {
1351 return mOps[index];
1352 }
1353
1354 PackedAttachmentOpsDesc &AttachmentOpsArray::operator[](size_t index)
1355 {
1356 return mOps[index];
1357 }
1358
1359 void AttachmentOpsArray::initDummyOp(size_t index,
1360 VkImageLayout initialLayout,
1361 VkImageLayout finalLayout)
1362 {
1363 PackedAttachmentOpsDesc &ops = mOps[index];
1364
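    // Preserve the existing color/depth contents of the attachment (LOAD/STORE), but don't care
    // about the stencil aspect.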
1365 SetBitField(ops.initialLayout, initialLayout);
1366 SetBitField(ops.finalLayout, finalLayout);
1367 SetBitField(ops.loadOp, VK_ATTACHMENT_LOAD_OP_LOAD);
1368 SetBitField(ops.stencilLoadOp, VK_ATTACHMENT_LOAD_OP_DONT_CARE);
1369 SetBitField(ops.storeOp, VK_ATTACHMENT_STORE_OP_STORE);
1370 SetBitField(ops.stencilStoreOp, VK_ATTACHMENT_STORE_OP_DONT_CARE);
1371 }
1372
1373 void AttachmentOpsArray::initWithLoadStore(size_t index,
1374 VkImageLayout initialLayout,
1375 VkImageLayout finalLayout)
1376 {
1377 PackedAttachmentOpsDesc &ops = mOps[index];
1378
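    // Preserve both the color/depth and stencil aspects of the attachment across the render pass.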
1379 SetBitField(ops.initialLayout, initialLayout);
1380 SetBitField(ops.finalLayout, finalLayout);
1381 SetBitField(ops.loadOp, VK_ATTACHMENT_LOAD_OP_LOAD);
1382 SetBitField(ops.stencilLoadOp, VK_ATTACHMENT_LOAD_OP_LOAD);
1383 SetBitField(ops.storeOp, VK_ATTACHMENT_STORE_OP_STORE);
1384 SetBitField(ops.stencilStoreOp, VK_ATTACHMENT_STORE_OP_STORE);
1385 }
1386
1387 size_t AttachmentOpsArray::hash() const
1388 {
1389 return angle::ComputeGenericHash(mOps);
1390 }
1391
1392 bool operator==(const AttachmentOpsArray &lhs, const AttachmentOpsArray &rhs)
1393 {
1394 return (memcmp(&lhs, &rhs, sizeof(AttachmentOpsArray)) == 0);
1395 }
1396
1397 // DescriptorSetLayoutDesc implementation.
1398 DescriptorSetLayoutDesc::DescriptorSetLayoutDesc() : mPackedDescriptorSetLayout{} {}
1399
1400 DescriptorSetLayoutDesc::~DescriptorSetLayoutDesc() = default;
1401
1402 DescriptorSetLayoutDesc::DescriptorSetLayoutDesc(const DescriptorSetLayoutDesc &other) = default;
1403
1404 DescriptorSetLayoutDesc &DescriptorSetLayoutDesc::operator=(const DescriptorSetLayoutDesc &other) =
1405 default;
1406
1407 size_t DescriptorSetLayoutDesc::hash() const
1408 {
1409 return angle::ComputeGenericHash(mPackedDescriptorSetLayout);
1410 }
1411
1412 bool DescriptorSetLayoutDesc::operator==(const DescriptorSetLayoutDesc &other) const
1413 {
1414 return (memcmp(&mPackedDescriptorSetLayout, &other.mPackedDescriptorSetLayout,
1415 sizeof(mPackedDescriptorSetLayout)) == 0);
1416 }
1417
1418 void DescriptorSetLayoutDesc::update(uint32_t bindingIndex,
1419 VkDescriptorType type,
1420 uint32_t count,
1421 VkShaderStageFlags stages)
1422 {
1423 ASSERT(static_cast<size_t>(type) < std::numeric_limits<uint16_t>::max());
1424 ASSERT(count < std::numeric_limits<uint16_t>::max());
1425
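    // Pack the binding into its fixed-size entry; a count of zero later marks the binding as
    // unused when the layout is unpacked.  Illustrative call (not from this file):
    //   desc.update(0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_VERTEX_BIT);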
1426 PackedDescriptorSetBinding &packedBinding = mPackedDescriptorSetLayout[bindingIndex];
1427
1428 SetBitField(packedBinding.type, type);
1429 SetBitField(packedBinding.count, count);
1430 SetBitField(packedBinding.stages, stages);
1431 }
1432
1433 void DescriptorSetLayoutDesc::unpackBindings(DescriptorSetLayoutBindingVector *bindings) const
1434 {
1435 for (uint32_t bindingIndex = 0; bindingIndex < kMaxDescriptorSetLayoutBindings; ++bindingIndex)
1436 {
1437 const PackedDescriptorSetBinding &packedBinding = mPackedDescriptorSetLayout[bindingIndex];
1438 if (packedBinding.count == 0)
1439 continue;
1440
1441 VkDescriptorSetLayoutBinding binding = {};
1442 binding.binding = bindingIndex;
1443 binding.descriptorCount = packedBinding.count;
1444 binding.descriptorType = static_cast<VkDescriptorType>(packedBinding.type);
1445 binding.stageFlags = static_cast<VkShaderStageFlags>(packedBinding.stages);
1446 binding.pImmutableSamplers = nullptr;
1447
1448 bindings->push_back(binding);
1449 }
1450 }
1451
1452 // PipelineLayoutDesc implementation.
1453 PipelineLayoutDesc::PipelineLayoutDesc() : mDescriptorSetLayouts{}, mPushConstantRanges{} {}
1454
1455 PipelineLayoutDesc::~PipelineLayoutDesc() = default;
1456
1457 PipelineLayoutDesc::PipelineLayoutDesc(const PipelineLayoutDesc &other) = default;
1458
1459 PipelineLayoutDesc &PipelineLayoutDesc::operator=(const PipelineLayoutDesc &rhs)
1460 {
1461 mDescriptorSetLayouts = rhs.mDescriptorSetLayouts;
1462 mPushConstantRanges = rhs.mPushConstantRanges;
1463 return *this;
1464 }
1465
1466 size_t PipelineLayoutDesc::hash() const
1467 {
1468 return angle::ComputeGenericHash(*this);
1469 }
1470
1471 bool PipelineLayoutDesc::operator==(const PipelineLayoutDesc &other) const
1472 {
1473 return memcmp(this, &other, sizeof(PipelineLayoutDesc)) == 0;
1474 }
1475
1476 void PipelineLayoutDesc::updateDescriptorSetLayout(uint32_t setIndex,
1477 const DescriptorSetLayoutDesc &desc)
1478 {
1479 ASSERT(setIndex < mDescriptorSetLayouts.size());
1480 mDescriptorSetLayouts[setIndex] = desc;
1481 }
1482
1483 void PipelineLayoutDesc::updatePushConstantRange(gl::ShaderType shaderType,
1484 uint32_t offset,
1485 uint32_t size)
1486 {
1487 ASSERT(shaderType == gl::ShaderType::Vertex || shaderType == gl::ShaderType::Fragment ||
1488 shaderType == gl::ShaderType::Compute);
1489 PackedPushConstantRange &packed = mPushConstantRanges[shaderType];
1490 packed.offset = offset;
1491 packed.size = size;
1492 }
1493
1494 const PushConstantRangeArray<PackedPushConstantRange> &PipelineLayoutDesc::getPushConstantRanges()
1495 const
1496 {
1497 return mPushConstantRanges;
1498 }
1499
1500 // PipelineHelper implementation.
1501 PipelineHelper::PipelineHelper() = default;
1502
1503 PipelineHelper::~PipelineHelper() = default;
1504
1505 void PipelineHelper::destroy(VkDevice device)
1506 {
1507 mPipeline.destroy(device);
1508 }
1509
1510 void PipelineHelper::addTransition(GraphicsPipelineTransitionBits bits,
1511 const GraphicsPipelineDesc *desc,
1512 PipelineHelper *pipeline)
1513 {
1514 mTransitions.emplace_back(bits, desc, pipeline);
1515 }
1516
1517 TextureDescriptorDesc::TextureDescriptorDesc() : mMaxIndex(0)
1518 {
1519 mSerials.fill({0, 0});
1520 }
1521
1522 TextureDescriptorDesc::~TextureDescriptorDesc() = default;
1523 TextureDescriptorDesc::TextureDescriptorDesc(const TextureDescriptorDesc &other) = default;
1524 TextureDescriptorDesc &TextureDescriptorDesc::operator=(const TextureDescriptorDesc &other) =
1525 default;
1526
1527 void TextureDescriptorDesc::update(size_t index, Serial textureSerial, Serial samplerSerial)
1528 {
1529 if (index >= mMaxIndex)
1530 {
1531 mMaxIndex = static_cast<uint32_t>(index + 1);
1532 }
1533
1534 // If the serial number overflows we should defragment and regenerate all serials.
1535 // There should never be more than UINT_MAX textures alive at a time.
1536 ASSERT(textureSerial.getValue() < std::numeric_limits<uint32_t>::max());
1537 ASSERT(samplerSerial.getValue() < std::numeric_limits<uint32_t>::max());
1538 mSerials[index].texture = static_cast<uint32_t>(textureSerial.getValue());
1539 mSerials[index].sampler = static_cast<uint32_t>(samplerSerial.getValue());
1540 }
1541
1542 size_t TextureDescriptorDesc::hash() const
1543 {
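    // Only hash the serials that have actually been written (up to mMaxIndex).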
1544 return angle::ComputeGenericHash(&mSerials, sizeof(TexUnitSerials) * mMaxIndex);
1545 }
1546
1547 void TextureDescriptorDesc::reset()
1548 {
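    // Only entries up to mMaxIndex were ever written, so only those need to be cleared.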
1549 memset(mSerials.data(), 0, sizeof(mSerials[0]) * mMaxIndex);
1550 mMaxIndex = 0;
1551 }
1552
1553 bool TextureDescriptorDesc::operator==(const TextureDescriptorDesc &other) const
1554 {
1555 if (mMaxIndex != other.mMaxIndex)
1556 return false;
1557
1558 if (mMaxIndex == 0)
1559 return true;
1560
1561 return memcmp(mSerials.data(), other.mSerials.data(), sizeof(TexUnitSerials) * mMaxIndex) == 0;
1562 }
1563
1564 } // namespace vk
1565
1566 // RenderPassCache implementation.
1567 RenderPassCache::RenderPassCache() = default;
1568
1569 RenderPassCache::~RenderPassCache()
1570 {
1571 ASSERT(mPayload.empty());
1572 }
1573
1574 void RenderPassCache::destroy(VkDevice device)
1575 {
1576 for (auto &outerIt : mPayload)
1577 {
1578 for (auto &innerIt : outerIt.second)
1579 {
1580 innerIt.second.get().destroy(device);
1581 }
1582 }
1583 mPayload.clear();
1584 }
1585
1586 angle::Result RenderPassCache::addRenderPass(vk::Context *context,
1587 Serial serial,
1588 const vk::RenderPassDesc &desc,
1589 vk::RenderPass **renderPassOut)
1590 {
1591 // Insert some dummy attachment ops. Note that render passes with different ops are still
1592 // compatible.
1593 //
1594 // It would be nice to pre-populate the cache in the Renderer so we rarely miss here.
1595 vk::AttachmentOpsArray ops;
1596
1597 uint32_t colorAttachmentCount = 0;
1598 for (uint32_t colorIndexGL = 0; colorIndexGL < desc.colorAttachmentRange(); ++colorIndexGL)
1599 {
1600 if (!desc.isColorAttachmentEnabled(colorIndexGL))
1601 {
1602 continue;
1603 }
1604
1605 uint32_t colorIndexVk = colorAttachmentCount++;
1606 ops.initDummyOp(colorIndexVk, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
1607 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
1608 }
1609
1610 if (desc.hasDepthStencilAttachment())
1611 {
1612 uint32_t depthStencilIndexVk = colorAttachmentCount;
1613 ops.initDummyOp(depthStencilIndexVk, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
1614 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
1615 }
1616
1617 return getRenderPassWithOps(context, serial, desc, ops, renderPassOut);
1618 }
1619
1620 angle::Result RenderPassCache::getRenderPassWithOps(vk::Context *context,
1621 Serial serial,
1622 const vk::RenderPassDesc &desc,
1623 const vk::AttachmentOpsArray &attachmentOps,
1624 vk::RenderPass **renderPassOut)
1625 {
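    // The cache is two-level: the outer map is keyed by the compatible RenderPassDesc and the
    // inner map by the exact attachment load/store ops.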
1626 auto outerIt = mPayload.find(desc);
1627 if (outerIt != mPayload.end())
1628 {
1629 InnerCache &innerCache = outerIt->second;
1630
1631 auto innerIt = innerCache.find(attachmentOps);
1632 if (innerIt != innerCache.end())
1633 {
1634 // Update the serial before we return.
1635 // TODO(jmadill): Could possibly use an MRU cache here.
1636 innerIt->second.updateSerial(serial);
1637 *renderPassOut = &innerIt->second.get();
1638 return angle::Result::Continue;
1639 }
1640 }
1641 else
1642 {
1643 auto emplaceResult = mPayload.emplace(desc, InnerCache());
1644 outerIt = emplaceResult.first;
1645 }
1646
1647 vk::RenderPass newRenderPass;
1648 ANGLE_TRY(vk::InitializeRenderPassFromDesc(context, desc, attachmentOps, &newRenderPass));
1649
1650 vk::RenderPassAndSerial withSerial(std::move(newRenderPass), serial);
1651
1652 InnerCache &innerCache = outerIt->second;
1653 auto insertPos = innerCache.emplace(attachmentOps, std::move(withSerial));
1654 *renderPassOut = &insertPos.first->second.get();
1655
1656 // TODO(jmadill): Trim cache, and pre-populate with the most common RPs on startup.
1657 return angle::Result::Continue;
1658 }
1659
1660 // GraphicsPipelineCache implementation.
1661 GraphicsPipelineCache::GraphicsPipelineCache() = default;
1662
1663 GraphicsPipelineCache::~GraphicsPipelineCache()
1664 {
1665 ASSERT(mPayload.empty());
1666 }
1667
1668 void GraphicsPipelineCache::destroy(VkDevice device)
1669 {
1670 for (auto &item : mPayload)
1671 {
1672 vk::PipelineHelper &pipeline = item.second;
1673 pipeline.destroy(device);
1674 }
1675
1676 mPayload.clear();
1677 }
1678
1679 void GraphicsPipelineCache::release(ContextVk *context)
1680 {
1681 for (auto &item : mPayload)
1682 {
1683 vk::PipelineHelper &pipeline = item.second;
1684 context->releaseObject(pipeline.getSerial(), &pipeline.getPipeline());
1685 }
1686
1687 mPayload.clear();
1688 }
1689
1690 angle::Result GraphicsPipelineCache::insertPipeline(
1691 vk::Context *context,
1692 const vk::PipelineCache &pipelineCacheVk,
1693 const vk::RenderPass &compatibleRenderPass,
1694 const vk::PipelineLayout &pipelineLayout,
1695 const gl::AttributesMask &activeAttribLocationsMask,
1696 const gl::ComponentTypeMask &programAttribsTypeMask,
1697 const vk::ShaderModule *vertexModule,
1698 const vk::ShaderModule *fragmentModule,
1699 const vk::GraphicsPipelineDesc &desc,
1700 const vk::GraphicsPipelineDesc **descPtrOut,
1701 vk::PipelineHelper **pipelineOut)
1702 {
1703 vk::Pipeline newPipeline;
1704
1705 // This "if" is left here for the benefit of VulkanPipelineCachePerfTest.
1706 if (context != nullptr)
1707 {
1708 context->getRenderer()->onNewGraphicsPipeline();
1709 ANGLE_TRY(desc.initializePipeline(context, pipelineCacheVk, compatibleRenderPass,
1710 pipelineLayout, activeAttribLocationsMask,
1711 programAttribsTypeMask, vertexModule, fragmentModule,
1712 &newPipeline));
1713 }
1714
1715 // The Serial will be updated outside of this query.
1716 auto insertedItem = mPayload.emplace(desc, std::move(newPipeline));
1717 *descPtrOut = &insertedItem.first->first;
1718 *pipelineOut = &insertedItem.first->second;
1719
1720 return angle::Result::Continue;
1721 }
1722
1723 void GraphicsPipelineCache::populate(const vk::GraphicsPipelineDesc &desc, vk::Pipeline &&pipeline)
1724 {
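    // If a pipeline for this description is already cached, keep the existing entry and ignore
    // the incoming one.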
1725 auto item = mPayload.find(desc);
1726 if (item != mPayload.end())
1727 {
1728 return;
1729 }
1730
1731 mPayload.emplace(desc, std::move(pipeline));
1732 }
1733
1734 // DescriptorSetLayoutCache implementation.
1735 DescriptorSetLayoutCache::DescriptorSetLayoutCache() = default;
1736
1737 DescriptorSetLayoutCache::~DescriptorSetLayoutCache()
1738 {
1739 ASSERT(mPayload.empty());
1740 }
1741
1742 void DescriptorSetLayoutCache::destroy(VkDevice device)
1743 {
1744 for (auto &item : mPayload)
1745 {
1746 vk::RefCountedDescriptorSetLayout &layout = item.second;
1747 ASSERT(!layout.isReferenced());
1748 layout.get().destroy(device);
1749 }
1750
1751 mPayload.clear();
1752 }
1753
1754 angle::Result DescriptorSetLayoutCache::getDescriptorSetLayout(
1755 vk::Context *context,
1756 const vk::DescriptorSetLayoutDesc &desc,
1757 vk::BindingPointer<vk::DescriptorSetLayout> *descriptorSetLayoutOut)
1758 {
1759 auto iter = mPayload.find(desc);
1760 if (iter != mPayload.end())
1761 {
1762 vk::RefCountedDescriptorSetLayout &layout = iter->second;
1763 descriptorSetLayoutOut->set(&layout);
1764 return angle::Result::Continue;
1765 }
1766
1767 // We must unpack the descriptor set layout description.
1768 vk::DescriptorSetLayoutBindingVector bindings;
1769 desc.unpackBindings(&bindings);
1770
1771 VkDescriptorSetLayoutCreateInfo createInfo = {};
1772 createInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
1773 createInfo.flags = 0;
1774 createInfo.bindingCount = static_cast<uint32_t>(bindings.size());
1775 createInfo.pBindings = bindings.data();
1776
1777 vk::DescriptorSetLayout newLayout;
1778 ANGLE_VK_TRY(context, newLayout.init(context->getDevice(), createInfo));
1779
1780 auto insertedItem =
1781 mPayload.emplace(desc, vk::RefCountedDescriptorSetLayout(std::move(newLayout)));
1782 vk::RefCountedDescriptorSetLayout &insertedLayout = insertedItem.first->second;
1783 descriptorSetLayoutOut->set(&insertedLayout);
1784
1785 return angle::Result::Continue;
1786 }
1787
1788 // PipelineLayoutCache implementation.
1789 PipelineLayoutCache::PipelineLayoutCache() = default;
1790
1791 PipelineLayoutCache::~PipelineLayoutCache()
1792 {
1793 ASSERT(mPayload.empty());
1794 }
1795
1796 void PipelineLayoutCache::destroy(VkDevice device)
1797 {
1798 for (auto &item : mPayload)
1799 {
1800 vk::RefCountedPipelineLayout &layout = item.second;
1801 layout.get().destroy(device);
1802 }
1803
1804 mPayload.clear();
1805 }
1806
1807 angle::Result PipelineLayoutCache::getPipelineLayout(
1808 vk::Context *context,
1809 const vk::PipelineLayoutDesc &desc,
1810 const vk::DescriptorSetLayoutPointerArray &descriptorSetLayouts,
1811 vk::BindingPointer<vk::PipelineLayout> *pipelineLayoutOut)
1812 {
1813 auto iter = mPayload.find(desc);
1814 if (iter != mPayload.end())
1815 {
1816 vk::RefCountedPipelineLayout &layout = iter->second;
1817 pipelineLayoutOut->set(&layout);
1818 return angle::Result::Continue;
1819 }
1820
1821 // Note this does not handle gaps in descriptor set layouts gracefully.
1822 angle::FixedVector<VkDescriptorSetLayout, vk::kMaxDescriptorSetLayouts> setLayoutHandles;
1823 for (const vk::BindingPointer<vk::DescriptorSetLayout> &layoutPtr : descriptorSetLayouts)
1824 {
1825 if (layoutPtr.valid())
1826 {
1827 VkDescriptorSetLayout setLayout = layoutPtr.get().getHandle();
1828 if (setLayout != VK_NULL_HANDLE)
1829 {
1830 setLayoutHandles.push_back(setLayout);
1831 }
1832 }
1833 }
1834
1835 const vk::PushConstantRangeArray<vk::PackedPushConstantRange> &descPushConstantRanges =
1836 desc.getPushConstantRanges();
1837
1838 gl::ShaderVector<VkPushConstantRange> pushConstantRanges;
1839
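    // Gather one VkPushConstantRange per shader stage that declares a non-empty range.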
1840 for (const gl::ShaderType shaderType : gl::AllShaderTypes())
1841 {
1842 const vk::PackedPushConstantRange &pushConstantDesc = descPushConstantRanges[shaderType];
1843 if (pushConstantDesc.size > 0)
1844 {
1845 VkPushConstantRange range;
1846 range.stageFlags = gl_vk::kShaderStageMap[shaderType];
1847 range.offset = pushConstantDesc.offset;
1848 range.size = pushConstantDesc.size;
1849
1850 pushConstantRanges.push_back(range);
1851 }
1852 }
1853
1854 // No pipeline layout found. We must create a new one.
1855 VkPipelineLayoutCreateInfo createInfo = {};
1856 createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
1857 createInfo.flags = 0;
1858 createInfo.setLayoutCount = static_cast<uint32_t>(setLayoutHandles.size());
1859 createInfo.pSetLayouts = setLayoutHandles.data();
1860 createInfo.pushConstantRangeCount = static_cast<uint32_t>(pushConstantRanges.size());
1861 createInfo.pPushConstantRanges = pushConstantRanges.data();
1862
1863 vk::PipelineLayout newLayout;
1864 ANGLE_VK_TRY(context, newLayout.init(context->getDevice(), createInfo));
1865
1866 auto insertedItem = mPayload.emplace(desc, vk::RefCountedPipelineLayout(std::move(newLayout)));
1867 vk::RefCountedPipelineLayout &insertedLayout = insertedItem.first->second;
1868 pipelineLayoutOut->set(&insertedLayout);
1869
1870 return angle::Result::Continue;
1871 }
1872 } // namespace rx
1873